Compare commits

10 Commits

| SHA1 |
|---|
| 7a3a28d87f |
| 240cded8a9 |
| 31e241f052 |
| de579682a0 |
| 9acbd5046c |
| 8d63c88e69 |
| cffb0af60e |
| 857b7a127a |
| 66b24ece48 |
| 0cc3b466e0 |
CHANGELOG.md (+20)
@@ -11,6 +11,26 @@ Sections:

 ---

+## [0.6.4] - 2026-01-18
+
+### Fixed
+
+- Fixed long Home Assistant startup times caused by background repository enrichment running too early.
+
+### Changed
+
+- Background repository enrichment is now started only after Home Assistant has fully started.
+- Repository cache updates now run fully asynchronously without blocking Home Assistant startup.
+
+### Internal
+
+- Improved alignment with the Home Assistant startup lifecycle.
+- No functional changes to store behavior or UI.
+
+## [0.6.3] - 2026-01-18
+
+### Changed
+
+- Improved Store performance for large indexes by avoiding full metadata enrichment during list refresh.
+- Repository details are loaded on demand, reducing initial load time and network requests.
+- Index refresh is skipped when the index content has not changed.
+
 ## [0.6.2] - 2026-01-18

 ### Added
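The 0.6.3 fast path rests on one idea: compute a cheap signature of the index plus the configured custom repositories and skip the whole merge/enrich pass when it matches the previous refresh. Below is a minimal standalone sketch of that idea; the `hashlib`-based index hash and dict-shaped custom repos are assumptions for illustration, while the actual code later in this diff keys on `self.last_index_hash` and attribute-style repo objects.

```python
import hashlib
import json


def refresh_signature(index_text: str, custom_repos: list[dict]) -> str:
    """Build a change signature for the index plus custom repos.

    If the signature equals the one from the previous refresh, the
    expensive merge/enrich work can be skipped entirely.
    """
    # Hypothetical: hash the raw index text; the integration tracks
    # self.last_index_hash however it computes it.
    index_hash = hashlib.sha256(index_text.encode("utf-8")).hexdigest()
    custom_sig = sorted(
        (str(c.get("id")), (c.get("url") or "").strip(), (c.get("name") or "").strip())
        for c in custom_repos
    )
    return json.dumps({"index_hash": index_hash, "custom": custom_sig}, sort_keys=True)


# Usage: compare against the signature stored from the last refresh.
last_signature: str | None = None
sig = refresh_signature('{"repos": []}', [])
if last_signature == sig:
    print("refresh skipped (no changes detected)")
else:
    last_signature = sig
    print("refresh required")
```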
@@ -3,9 +3,10 @@ from __future__ import annotations

 import logging
 from datetime import timedelta

-from homeassistant.core import HomeAssistant
+from homeassistant.core import HomeAssistant, callback
+from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
 from homeassistant.components.panel_custom import async_register_panel
-from homeassistant.helpers.event import async_track_time_interval
+from homeassistant.helpers.event import async_track_time_interval, async_call_later
 from homeassistant.helpers.discovery import async_load_platform

 from .core import BCSCore, BCSConfig, BCSError
@@ -36,6 +37,7 @@ async def async_setup(hass: HomeAssistant, config: dict) -> bool:
         BCSApiView,
         BCSReadmeView,
         BCSVersionsView,
+        BCSRepoDetailView,
         BCSCustomRepoView,
         BCSInstallView,
         BCSUpdateView,
@@ -49,6 +51,7 @@ async def async_setup(hass: HomeAssistant, config: dict) -> bool:
     hass.http.register_view(BCSApiView(core))
     hass.http.register_view(BCSReadmeView(core))
     hass.http.register_view(BCSVersionsView(core))
+    hass.http.register_view(BCSRepoDetailView(core))
     hass.http.register_view(BCSCustomRepoView(core))
     hass.http.register_view(BCSInstallView(core))
     hass.http.register_view(BCSUpdateView(core))
@@ -62,17 +65,30 @@ async def async_setup(hass: HomeAssistant, config: dict) -> bool:
         frontend_url_path="bahmcloud-store",
         webcomponent_name="bahmcloud-store-panel",
         # IMPORTANT: bump v to avoid caching old JS
-        module_url="/api/bahmcloud_store_static/panel.js?v=105",
+        module_url="/api/bahmcloud_store_static/panel.js?v=107",
         sidebar_title="Bahmcloud Store",
         sidebar_icon="mdi:store",
         require_admin=True,
         config={},
     )

+    # IMPORTANT:
+    # Do NOT block Home Assistant startup with network-heavy refreshes.
+    # We wait until HA has fully started, then kick off the initial refresh.
+    async def _startup_refresh() -> None:
+        try:
+            await core.full_refresh(source="startup")
+        except BCSError as e:
+            _LOGGER.error("Initial refresh failed: %s", e)
+        except Exception:
+            _LOGGER.exception("Unexpected error during initial refresh")
+
+    @callback
+    def _schedule_startup_refresh(_event=None) -> None:
+        # Give HA a short head-start (UI, recorder, etc.) before we start fetching lots of data.
+        async_call_later(hass, 30, lambda _now: hass.async_create_task(_startup_refresh()))
+
+    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, _schedule_startup_refresh)

     async def periodic(_now) -> None:
         try:
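Distilled from the hunk above, this is the deferred-startup pattern in isolation: listen once for `EVENT_HOMEASSISTANT_STARTED`, then schedule the heavy work with a delay. A minimal sketch; `heavy_refresh` is a placeholder, while the Home Assistant APIs used are exactly the ones imported in the diff.

```python
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_call_later


async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    async def heavy_refresh() -> None:
        ...  # placeholder for network-heavy work that must not run during startup

    @callback
    def schedule_refresh(_event) -> None:
        # The event listener is a synchronous callback, so the coroutine is
        # wrapped in a task; the 30 s delay gives the UI/recorder a head start.
        async_call_later(hass, 30, lambda _now: hass.async_create_task(heavy_refresh()))

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, schedule_refresh)
    return True
```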
@@ -89,9 +89,27 @@ class BCSCore:
         self.last_index_hash: str | None = None
         self.last_index_loaded_at: float | None = None

+        # Fast refresh: skip expensive processing when index/custom repos unchanged
+        self._last_refresh_signature: str | None = None
+
         self._install_lock = asyncio.Lock()
         self._installed_cache: dict[str, Any] = {}

+        # Phase P1/P2: local repo cache + background enrichment
+        # The cache persists provider/meta/latest data so the UI can show more
+        # information immediately and we can later do delta refresh.
+        self._repo_cache: dict[str, Any] = {}
+        self._repo_cache_loaded: bool = False
+        self._repo_cache_flush_task: asyncio.Task | None = None
+
+        # Background enrichment worker (non-blocking)
+        self._bg_enrich_task: asyncio.Task | None = None
+        self._bg_enrich_pending: set[str] = set()
+        self._bg_enrich_ttl_seconds: int = 6 * 3600
+        self._bg_enrich_max_parallel: int = 3
+        self._bg_signal_interval_seconds: float = 2.0
+        self._bg_last_signal_ts: float = 0.0
+
         # Phase F2: backups before install/update
         self._backup_root = Path(self.hass.config.path(".bcs_backups"))
         self._backup_keep_per_domain: int = 5
@@ -101,6 +119,15 @@ class BCSCore:
         self.version = await self._read_manifest_version_async()
         await self._refresh_installed_cache()

+        # Load persisted repo cache once at startup.
+        try:
+            self._repo_cache = await self.storage.get_repo_cache_map()
+            if not isinstance(self._repo_cache, dict):
+                self._repo_cache = {}
+        except Exception:
+            self._repo_cache = {}
+        self._repo_cache_loaded = True
+
         # After a successful HA restart, restart-required is no longer relevant.
         self._clear_restart_required_issue()
@@ -178,6 +205,19 @@ class BCSCore:

         custom_repos = await self.storage.list_custom_repos()

+        # Fast path: if index + custom repos did not change, skip expensive work.
+        try:
+            custom_sig = [(c.id, (c.url or '').strip(), (c.name or '').strip()) for c in (custom_repos or [])]
+            custom_sig.sort()
+            refresh_signature = json.dumps({"index_hash": self.last_index_hash, "custom": custom_sig}, sort_keys=True)
+        except Exception:
+            refresh_signature = f"{self.last_index_hash}:{len(custom_repos or [])}"
+
+        if self._last_refresh_signature and refresh_signature == self._last_refresh_signature and self.repos:
+            _LOGGER.debug("BCS refresh skipped (no changes detected)")
+            return
+
         merged: dict[str, RepoItem] = {}

         for item in index_repos:
@@ -194,9 +234,15 @@ class BCSCore:
         for r in merged.values():
             r.provider = detect_provider(r.url)

-        await self._enrich_and_resolve(merged)
+        # Apply persisted cache (provider/meta/latest) to all repos so the list
+        # view can show richer data immediately.
+        self._apply_repo_cache(merged)
+
+        await self._enrich_installed_only(merged)
         self.repos = merged

+        self._last_refresh_signature = refresh_signature
+
         _LOGGER.info(
             "BCS refresh complete: repos=%s (index=%s, custom=%s)",
             len(self.repos),
@@ -204,6 +250,159 @@ class BCSCore:
             len([r for r in self.repos.values() if r.source == "custom"]),
         )

+        # Start/continue background enrichment for repos (non-blocking).
+        self._schedule_background_enrich(list(self.repos.keys()))
+
+    def _apply_repo_cache(self, merged: dict[str, RepoItem]) -> None:
+        """Apply persisted cached enrichment data to repo items (no network IO)."""
+        if not self._repo_cache_loaded or not isinstance(self._repo_cache, dict) or not self._repo_cache:
+            return
+
+        for rid, r in merged.items():
+            entry = self._repo_cache.get(str(rid))
+            if not isinstance(entry, dict):
+                continue
+            if (entry.get("url") or "").strip() != (r.url or "").strip():
+                continue
+
+            # Provider basics
+            r.provider = entry.get("provider") or r.provider
+            r.owner = entry.get("owner") or r.owner
+            r.provider_repo_name = entry.get("provider_repo_name") or r.provider_repo_name
+            r.provider_description = entry.get("provider_description") or r.provider_description
+            r.default_branch = entry.get("default_branch") or r.default_branch
+
+            # Latest version
+            r.latest_version = entry.get("latest_version") or r.latest_version
+            r.latest_version_source = entry.get("latest_version_source") or r.latest_version_source
+
+            # Metadata
+            r.meta_source = entry.get("meta_source") or r.meta_source
+            r.meta_name = entry.get("meta_name") or r.meta_name
+            r.meta_description = entry.get("meta_description") or r.meta_description
+            r.meta_category = entry.get("meta_category") or r.meta_category
+            r.meta_author = entry.get("meta_author") or r.meta_author
+            r.meta_maintainer = entry.get("meta_maintainer") or r.meta_maintainer
+
+            # Keep a stable name fallback
+            if r.meta_name:
+                r.name = r.meta_name
+            elif not r.name:
+                r.name = r.provider_repo_name or r.url
+
+    def _schedule_repo_cache_flush(self) -> None:
+        if self._repo_cache_flush_task and not self._repo_cache_flush_task.done():
+            return
+
+        async def _flush_delayed() -> None:
+            await asyncio.sleep(5)
+            try:
+                await self.storage.set_repo_cache_map(self._repo_cache)
+            except Exception:
+                _LOGGER.debug("BCS repo cache flush failed", exc_info=True)
+
+        self._repo_cache_flush_task = self.hass.async_create_task(_flush_delayed())
+
+    def _cache_entry_is_stale(self, entry: dict[str, Any]) -> bool:
+        try:
+            checked_at = int(entry.get("checked_at") or 0)
+        except Exception:
+            checked_at = 0
+        if checked_at <= 0:
+            return True
+        return (time.time() - checked_at) > self._bg_enrich_ttl_seconds
+
+    def _schedule_background_enrich(self, repo_ids: list[str]) -> None:
+        """Queue repos for background enrichment and ensure worker is running."""
+        if not repo_ids:
+            return
+
+        now = time.time()
+        for rid in repo_ids:
+            rid = str(rid)
+            r = self.repos.get(rid)
+            if not r:
+                continue
+
+            # Already enriched in memory? Still consider staleness from cache.
+            entry = self._repo_cache.get(rid) if isinstance(self._repo_cache, dict) else None
+            stale = True
+            if isinstance(entry, dict) and (entry.get("url") or "").strip() == (r.url or "").strip():
+                stale = self._cache_entry_is_stale(entry)
+
+            # If we already have fields in memory and the cache isn't stale, skip.
+            if (r.latest_version or r.meta_source or r.provider_description) and not stale:
+                continue
+
+            # Always enqueue missing/stale entries.
+            self._bg_enrich_pending.add(rid)
+
+        if self._bg_enrich_task and not self._bg_enrich_task.done():
+            return
+
+        self._bg_enrich_task = self.hass.async_create_task(self._background_enrich_worker())
+
+    async def _background_enrich_worker(self) -> None:
+        """Background worker to enrich repos and update the persistent cache."""
+        sem = asyncio.Semaphore(self._bg_enrich_max_parallel)
+
+        async def _enrich_one(rid: str) -> None:
+            async with sem:
+                r = self.repos.get(rid)
+                if not r:
+                    return
+
+                entry = self._repo_cache.get(rid) if isinstance(self._repo_cache, dict) else None
+                if isinstance(entry, dict) and (entry.get("url") or "").strip() == (r.url or "").strip():
+                    if not self._cache_entry_is_stale(entry) and (r.latest_version or r.meta_source or r.provider_description):
+                        return
+
+                try:
+                    await self._enrich_one_repo(r)
+                except Exception:
+                    _LOGGER.debug("BCS background enrich failed for %s", rid, exc_info=True)
+                    # still mark checked_at to avoid tight retry loops
+                    self._repo_cache[rid] = {
+                        "url": r.url,
+                        "checked_at": int(time.time()),
+                    }
+                    self._schedule_repo_cache_flush()
+                    return
+
+                # Update persistent cache entry
+                self._repo_cache[rid] = {
+                    "url": r.url,
+                    "provider": r.provider,
+                    "owner": r.owner,
+                    "provider_repo_name": r.provider_repo_name,
+                    "provider_description": r.provider_description,
+                    "default_branch": r.default_branch,
+                    "latest_version": r.latest_version,
+                    "latest_version_source": r.latest_version_source,
+                    "meta_source": r.meta_source,
+                    "meta_name": r.meta_name,
+                    "meta_description": r.meta_description,
+                    "meta_category": r.meta_category,
+                    "meta_author": r.meta_author,
+                    "meta_maintainer": r.meta_maintainer,
+                    "checked_at": int(time.time()),
+                }
+                self._schedule_repo_cache_flush()
+
+                # Throttle UI/entity updates
+                if (time.time() - self._bg_last_signal_ts) >= self._bg_signal_interval_seconds:
+                    self._bg_last_signal_ts = time.time()
+                    self.signal_updated()
+
+        while self._bg_enrich_pending:
+            # Drain in small batches so we don't monopolize the loop
+            batch: list[str] = []
+            while self._bg_enrich_pending and len(batch) < (self._bg_enrich_max_parallel * 2):
+                batch.append(self._bg_enrich_pending.pop())
+
+            await asyncio.gather(*(_enrich_one(rid) for rid in batch), return_exceptions=True)
+            await asyncio.sleep(0)  # yield
+
     async def _enrich_and_resolve(self, merged: dict[str, RepoItem]) -> None:
         sem = asyncio.Semaphore(6)
@@ -238,6 +437,178 @@ class BCSCore:

         await asyncio.gather(*(process_one(r) for r in merged.values()), return_exceptions=True)

+    async def _enrich_installed_only(self, merged: dict[str, RepoItem]) -> None:
+        """Enrich only installed repos (fast refresh for large indexes).
+
+        This keeps the backend responsive even with thousands of repositories.
+        Details for non-installed repos are fetched on-demand.
+        """
+        installed_map: dict[str, Any] = getattr(self, "_installed_cache", {}) or {}
+        if not isinstance(installed_map, dict) or not installed_map:
+            return
+
+        to_process: list[RepoItem] = []
+        for rid in installed_map.keys():
+            r = merged.get(str(rid))
+            if r:
+                to_process.append(r)
+
+        if not to_process:
+            return
+
+        sem = asyncio.Semaphore(6)
+
+        async def process_one(r: RepoItem) -> None:
+            async with sem:
+                await self._enrich_one_repo(r)
+
+        await asyncio.gather(*(process_one(r) for r in to_process), return_exceptions=True)
+
+    async def _enrich_one_repo(self, r: RepoItem) -> None:
+        """Fetch provider info + metadata for a single repo item."""
+        info: RepoInfo = await fetch_repo_info(self.hass, r.url)
+
+        r.provider = info.provider or r.provider
+        r.owner = info.owner or r.owner
+        r.provider_repo_name = info.repo_name
+        r.provider_description = info.description
+        r.default_branch = info.default_branch or r.default_branch
+
+        r.latest_version = info.latest_version
+        r.latest_version_source = info.latest_version_source
+
+        md: RepoMetadata = await fetch_repo_metadata(self.hass, r.url, r.default_branch)
+        r.meta_source = md.source
+        if md.name:
+            r.meta_name = md.name
+            r.name = md.name
+        r.meta_description = md.description
+        if md.category:
+            r.meta_category = md.category
+        r.meta_author = md.author
+        r.meta_maintainer = md.maintainer
+
+        # Keep a stable name fallback
+        if not r.name:
+            r.name = r.provider_repo_name or r.url
+
+        # Persist into local cache (non-blocking, throttled flush).
+        self._update_repo_cache_from_item(r)
+
+    async def ensure_repo_details(self, repo_id: str) -> RepoItem | None:
+        """Ensure provider/meta/latest fields are loaded for a repo.
+
+        Used by the UI when a repo detail view is opened.
+        """
+        r = self.get_repo(repo_id)
+        if not r:
+            return None
+
+        # If we already have a latest_version (or provider_description), consider it enriched.
+        if r.latest_version or r.provider_description or r.meta_source:
+            return r
+
+        try:
+            await self._enrich_one_repo(r)
+        except Exception:
+            _LOGGER.debug("BCS ensure_repo_details failed for %s", repo_id, exc_info=True)
+        return r
+
+    # ---------------------------------------------------------------------
+    # Phase P1/P2: local cache + background enrichment
+    # ---------------------------------------------------------------------
+
+    def _apply_repo_cache(self, merged: dict[str, RepoItem]) -> None:
+        """Apply persisted cache fields to repo items.
+
+        This makes the list view richer immediately (without remote requests).
+        """
+        cache = self._repo_cache if isinstance(self._repo_cache, dict) else {}
+        now = int(time.time())
+
+        for rid, r in merged.items():
+            entry = cache.get(str(rid))
+            if not isinstance(entry, dict):
+                continue
+
+            # Safety: ensure cache belongs to the same URL.
+            if str(entry.get("url") or "").strip() != str(r.url or "").strip():
+                continue
+
+            # Provider fields
+            r.provider = entry.get("provider") or r.provider
+            r.owner = entry.get("owner") or r.owner
+            r.provider_repo_name = entry.get("provider_repo_name") or r.provider_repo_name
+            r.provider_description = entry.get("provider_description") or r.provider_description
+            r.default_branch = entry.get("default_branch") or r.default_branch
+
+            # Latest version
+            r.latest_version = entry.get("latest_version") or r.latest_version
+            r.latest_version_source = entry.get("latest_version_source") or r.latest_version_source
+
+            # Metadata
+            r.meta_source = entry.get("meta_source") or r.meta_source
+            r.meta_name = entry.get("meta_name") or r.meta_name
+            r.meta_description = entry.get("meta_description") or r.meta_description
+            r.meta_category = entry.get("meta_category") or r.meta_category
+            r.meta_author = entry.get("meta_author") or r.meta_author
+            r.meta_maintainer = entry.get("meta_maintainer") or r.meta_maintainer
+
+            # Stable display name
+            if r.meta_name:
+                r.name = r.meta_name
+            elif not r.name:
+                r.name = r.provider_repo_name or r.url
+
+            # Mark as stale if the cache is old (used by background enrich).
+            checked_at = int(entry.get("checked_at") or 0)
+            entry["_stale"] = (checked_at <= 0) or ((now - checked_at) > self._bg_enrich_ttl_seconds)
+
+    def _update_repo_cache_from_item(self, r: RepoItem) -> None:
+        """Update in-memory cache from a repo item and schedule a flush."""
+        if not self._repo_cache_loaded:
+            return
+
+        rid = str(r.id)
+        now = int(time.time())
+        entry = {
+            "url": str(r.url or ""),
+            "provider": r.provider,
+            "owner": r.owner,
+            "provider_repo_name": r.provider_repo_name,
+            "provider_description": r.provider_description,
+            "default_branch": r.default_branch,
+            "latest_version": r.latest_version,
+            "latest_version_source": r.latest_version_source,
+            "meta_source": r.meta_source,
+            "meta_name": r.meta_name,
+            "meta_description": r.meta_description,
+            "meta_category": r.meta_category,
+            "meta_author": r.meta_author,
+            "meta_maintainer": r.meta_maintainer,
+            "checked_at": now,
+        }
+
+        if not isinstance(self._repo_cache, dict):
+            self._repo_cache = {}
+        self._repo_cache[rid] = entry
+        self._schedule_repo_cache_flush()
+
+    def _schedule_repo_cache_flush(self) -> None:
+        if self._repo_cache_flush_task and not self._repo_cache_flush_task.done():
+            return
+
+        async def _flush_later() -> None:
+            try:
+                await asyncio.sleep(5)
+                if isinstance(self._repo_cache, dict):
+                    await self.storage.set_repo_cache_map(self._repo_cache)
+            except Exception:
+                _LOGGER.debug("BCS repo cache flush failed", exc_info=True)
+
+        self._repo_cache_flush_task = self.hass.async_create_task(_flush_later())
+
+    def _add_cache_buster(self, url: str) -> str:
+        parts = urlsplit(url)
+        q = dict(parse_qsl(parts.query, keep_blank_values=True))
@@ -329,6 +700,7 @@ class BCSCore:
                 name=name,
                 url=repo_url,
                 source="index",
+                meta_category=str(r.get("category")) if r.get("category") else None,
             )
         )
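The flush scheduling above is a debounce: while one flush task is pending, further cache updates simply ride along and are written out when the task finally fires. Here is a self-contained sketch of that coalescing behavior, with a plain dict standing in for `storage.set_repo_cache_map`; the class and method names are illustrative, and the 5 s delay from the diff is shortened for the demo.

```python
import asyncio


class DebouncedFlusher:
    """Coalesce many cache updates into one delayed write (simulated storage)."""

    def __init__(self, delay: float = 5.0) -> None:
        self.cache: dict[str, dict] = {}
        self.persisted: dict[str, dict] = {}
        self._delay = delay
        self._task: asyncio.Task | None = None

    def update(self, key: str, entry: dict) -> None:
        self.cache[key] = entry
        if self._task and not self._task.done():
            return  # a flush is already scheduled; this update rides along
        self._task = asyncio.get_running_loop().create_task(self._flush_later())

    async def _flush_later(self) -> None:
        await asyncio.sleep(self._delay)
        # Stand-in for `await storage.set_repo_cache_map(self.cache)`.
        self.persisted = dict(self.cache)


async def main() -> None:
    f = DebouncedFlusher(delay=0.1)
    for i in range(100):  # a burst of updates triggers a single write
        f.update(f"repo-{i}", {"checked_at": i})
    await asyncio.sleep(0.2)
    assert len(f.persisted) == 100
    print("one flush persisted", len(f.persisted), "entries")


asyncio.run(main())
```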
@@ -1,7 +1,7 @@
 {
   "domain": "bahmcloud_store",
   "name": "Bahmcloud Store",
-  "version": "0.6.2",
+  "version": "0.6.4",
   "documentation": "https://git.bahmcloud.de/bahmcloud/bahmcloud_store",
   "platforms": ["update"],
   "requirements": [],
@@ -389,10 +389,29 @@ class BahmcloudStorePanel extends HTMLElement {
     }

     this._update();
+    this._loadRepoDetails(repoId);
     this._loadReadme(repoId);
     this._loadVersions(repoId);
   }

+  async _loadRepoDetails(repoId) {
+    if (!this._hass || !repoId) return;
+    try {
+      const resp = await this._hass.callApi("get", `bcs/repo?repo_id=${encodeURIComponent(repoId)}`);
+      if (resp?.ok && resp.repo) {
+        this._detailRepo = resp.repo;
+        // Also update the cached list item if present
+        const repos = Array.isArray(this._data?.repos) ? this._data.repos : [];
+        const idx = repos.findIndex((r) => this._safeId(r?.id) === repoId);
+        if (idx >= 0) repos[idx] = resp.repo;
+        this._update();
+      }
+    } catch (e) {
+      // ignore: details are optional
+    }
+  }
+
   async _loadVersions(repoId) {
     if (!this._hass) return;
     if (!repoId) return;
@@ -442,3 +442,55 @@ class BCSRestartView(HomeAssistantView):
         except Exception as e:
             _LOGGER.exception("BCS restart failed: %s", e)
             return web.json_response({"ok": False, "message": str(e) or "Restart failed"}, status=500)
+
+
+class BCSRepoDetailView(HomeAssistantView):
+    url = "/api/bcs/repo"
+    name = "api:bcs_repo"
+    requires_auth = True
+
+    def __init__(self, core: Any) -> None:
+        self.core: BCSCore = core
+
+    async def get(self, request: web.Request) -> web.Response:
+        repo_id = (request.query.get("repo_id") or "").strip()
+        if not repo_id:
+            return web.json_response({"ok": False, "message": "Missing repo_id"}, status=400)
+
+        try:
+            repo = await self.core.ensure_repo_details(repo_id)
+            if not repo:
+                return web.json_response({"ok": False, "message": "Repo not found"}, status=404)
+
+            inst = self.core.get_installed(repo_id) or {}
+            installed = bool(inst)
+            domains = inst.get("domains") or []
+            if not isinstance(domains, list):
+                domains = []
+
+            return web.json_response({
+                "ok": True,
+                "repo": {
+                    "id": repo.id,
+                    "name": repo.name,
+                    "url": repo.url,
+                    "source": repo.source,
+                    "owner": repo.owner,
+                    "provider": repo.provider,
+                    "repo_name": repo.provider_repo_name,
+                    "description": repo.provider_description or repo.meta_description,
+                    "default_branch": repo.default_branch,
+                    "latest_version": repo.latest_version,
+                    "latest_version_source": repo.latest_version_source,
+                    "category": repo.meta_category,
+                    "meta_author": repo.meta_author,
+                    "meta_maintainer": repo.meta_maintainer,
+                    "meta_source": repo.meta_source,
+                    "installed": installed,
+                    "installed_version": inst.get("installed_version"),
+                    "installed_manifest_version": inst.get("installed_manifest_version"),
+                    "installed_domains": domains,
+                }
+            }, status=200)
+        except Exception as e:
+            _LOGGER.exception("BCS repo details failed: %s", e)
+            return web.json_response({"ok": False, "message": str(e) or "Repo details failed"}, status=500)
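For completeness, this is how the new endpoint can be exercised from outside Home Assistant. A sketch assuming a reachable instance and a long-lived access token; the host, token, and repo id are placeholders, while the path, `repo_id` query parameter, and response shape follow `BCSRepoDetailView` above (`requires_auth = True`, so the token goes in a Bearer header).

```python
import asyncio

import aiohttp

HA_URL = "http://homeassistant.local:8123"  # placeholder host
TOKEN = "<long-lived-access-token>"         # placeholder token


async def fetch_repo_details(repo_id: str) -> dict:
    headers = {"Authorization": f"Bearer {TOKEN}"}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(
            f"{HA_URL}/api/bcs/repo", params={"repo_id": repo_id}
        ) as resp:
            data = await resp.json()
            if not data.get("ok"):
                raise RuntimeError(data.get("message") or f"HTTP {resp.status}")
            return data["repo"]


repo = asyncio.run(fetch_repo_details("some-repo-id"))  # placeholder repo id
print(repo["name"], repo.get("latest_version"))
```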