From a22e08f39d23f7f9f1fa6f9de949a31db8a992c9 Mon Sep 17 00:00:00 2001
From: MacRimi
Date: Thu, 27 Nov 2025 12:34:51 +0100
Subject: [PATCH] Update AppImage

---
 AppImage/components/storage-overview.tsx |   5 +-
 AppImage/scripts/health_monitor.py       | 152 +++++++++++------
 2 files changed, 76 insertions(+), 81 deletions(-)

diff --git a/AppImage/components/storage-overview.tsx b/AppImage/components/storage-overview.tsx
index 9b00a6c..2567173 100644
--- a/AppImage/components/storage-overview.tsx
+++ b/AppImage/components/storage-overview.tsx
@@ -597,10 +597,7 @@ export function StorageOverview() {
                 {proxmoxStorage.storage
-                  .filter(
-                    (storage) =>
-                      storage && storage.name && storage.total > 0 && storage.used >= 0 && storage.available >= 0,
-                  )
+                  .filter((storage) => storage && storage.name && storage.used >= 0 && storage.available >= 0)
                   .sort((a, b) => a.name.localeCompare(b.name))
                   .map((storage) => (
 WARNING > INFO > OK
         if critical_issues:
             overall = 'CRITICAL'
-            summary = '; '.join(critical_issues[:3])
+            summary = '; '.join(critical_issues[:3])  # Limit summary to 3 issues
         elif warning_issues:
             overall = 'WARNING'
             summary = '; '.join(warning_issues[:3])
         elif info_issues:
-            overall = 'OK'  # INFO is still healthy overall
+            overall = 'OK'  # INFO statuses don't degrade overall health
             summary = '; '.join(info_issues[:3])
         else:
             overall = 'OK'
@@ -826,7 +818,7 @@ class HealthMonitor:
                     disk_name = disk_match.group(1)
                     self.io_error_history[disk_name].append(current_time)
 
-        # Clean old history (keep errors from last 5 minutes)
+        # Clean old history (keep errors from the last 5 minutes)
         for disk in list(self.io_error_history.keys()):
             self.io_error_history[disk] = [
                 t for t in self.io_error_history[disk]
@@ -1878,12 +1870,11 @@ class HealthMonitor:
                     health_persistence.clear_error(error['error_key'])
                 return {'status': 'OK'}
 
-            # If there are unavailable storages, record them as persistent errors and report.
-            storage_issues_details = []
+            storage_details = {}
             for storage in unavailable_storages:
                 storage_name = storage['name']
                 error_key = f'storage_unavailable_{storage_name}'
-                status_detail = storage.get('status_detail', 'unavailable')  # e.g., 'not_found', 'connection_error'
+                status_detail = storage.get('status_detail', 'unavailable')
 
                 # Formulate a descriptive reason for the issue
                 if status_detail == 'not_found':
@@ -1896,25 +1887,29 @@ class HealthMonitor:
                 # Record a persistent CRITICAL error for each unavailable storage
                 health_persistence.record_error(
                     error_key=error_key,
-                    category='storage',  # Category for persistence lookup
-                    severity='CRITICAL',  # Storage unavailability is always critical
+                    category='storage',
+                    severity='CRITICAL',
                     reason=reason,
                     details={
                         'storage_name': storage_name,
                         'storage_type': storage.get('type', 'unknown'),
                         'status_detail': status_detail,
-                        'dismissable': False  # Storage errors are not dismissable as they impact operations
+                        'dismissable': False
                     }
                 )
-                storage_issues_details.append(reason)  # Collect reasons for the summary
+
+                # Add to the details dict, marked non-dismissable for the frontend
+                storage_details[storage_name] = {
+                    'reason': reason,
+                    'type': storage.get('type', 'unknown'),
+                    'status': status_detail,
+                    'dismissable': False
+                }
 
             return {
                 'status': 'CRITICAL',
                 'reason': f'{len(unavailable_storages)} Proxmox storage(s) unavailable',
-                'details': {
-                    'unavailable_storages': unavailable_storages,
-                    'issues': storage_issues_details
-                }
+                'details': storage_details
             }
 
         except Exception as e:
@@ -1939,6 +1934,9 @@ class HealthMonitor:
                 'timestamp': datetime.now().isoformat()
             }
 
+    # NOTE: This duplicates the get_detailed_status method defined earlier in this file,
+    # most likely a copy-paste oversight. One of the two definitions should be removed
+    # or renamed in a future refactor.
     def get_detailed_status(self) -> Dict[str, Any]:
         """
         Get comprehensive health status with all checks.