Update AppImage

MacRimi
2025-11-11 17:59:36 +01:00
parent 1860fffe07
commit 88cf51a602
4 changed files with 92 additions and 10 deletions

View File

@@ -92,6 +92,11 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
       const data = await response.json()
       console.log("[v0] Health data received:", data)
       setHealthData(data)
+      const event = new CustomEvent("healthStatusUpdated", {
+        detail: { status: data.overall },
+      })
+      window.dispatchEvent(event)
     } catch (err) {
       console.error("[v0] Error fetching health data:", err)
       setError(err instanceof Error ? err.message : "Unknown error")
@@ -275,7 +280,7 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
             onClick={() => handleCategoryClick(key, status)}
             className={`flex items-start gap-3 p-3 rounded-lg border transition-colors ${
               status === "OK"
-                ? "bg-green-500/5 border-green-500/20 hover:bg-green-500/10"
+                ? "bg-card border-border hover:bg-muted/30"
                 : status === "WARNING"
                   ? "bg-yellow-500/5 border-yellow-500/20 hover:bg-yellow-500/10 cursor-pointer"
                   : status === "CRITICAL"
@@ -284,7 +289,7 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
             }`}
           >
             <div className="mt-0.5 flex-shrink-0 flex items-center gap-2">
-              <Icon className="h-4 w-4 text-muted-foreground" />
+              <Icon className="h-4 w-4 text-blue-500" />
               {getStatusIcon(status)}
             </div>
             <div className="flex-1 min-w-0">
@@ -294,7 +299,7 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
               variant="outline"
               className={`shrink-0 text-xs ${
                 status === "OK"
-                  ? "border-green-500 text-green-500 bg-green-500/5"
+                  ? "border-green-500 text-green-500 bg-transparent"
                   : status === "WARNING"
                     ? "border-yellow-500 text-yellow-500 bg-yellow-500/5"
                     : status === "CRITICAL"
@@ -321,7 +326,7 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu
                 <span className="ml-1 text-muted-foreground">{detailValue.reason}</span>
               )}
             </div>
-            {status !== "OK" && (
+            {(status === "WARNING" || status === "CRITICAL") && (
               <Button
                 size="sm"
                 variant="outline"

View File

@@ -164,6 +164,31 @@ export function ProxmoxDashboard() {
     }
   }, [])
+
+  useEffect(() => {
+    const handleHealthStatusUpdate = (event: CustomEvent) => {
+      const { status } = event.detail
+      let healthStatus: "healthy" | "warning" | "critical"
+      if (status === "CRITICAL") {
+        healthStatus = "critical"
+      } else if (status === "WARNING") {
+        healthStatus = "warning"
+      } else {
+        healthStatus = "healthy"
+      }
+
+      setSystemStatus((prev) => ({
+        ...prev,
+        status: healthStatus,
+      }))
+    }
+
+    window.addEventListener("healthStatusUpdated", handleHealthStatusUpdate as EventListener)
+
+    return () => {
+      window.removeEventListener("healthStatusUpdated", handleHealthStatusUpdate as EventListener)
+    }
+  }, [])
 
   useEffect(() => {
     if (
       systemStatus.serverName &&

View File

@@ -64,8 +64,8 @@ class HealthMonitor:
     LOG_CHECK_INTERVAL = 300
 
     # Updates Thresholds
-    UPDATES_WARNING = 10
-    UPDATES_CRITICAL = 30
+    UPDATES_WARNING = 365   # Only warn after 1 year without updates
+    UPDATES_CRITICAL = 730  # Critical after 2 years
 
     # Known benign errors from Proxmox that should not trigger alerts
     BENIGN_ERROR_PATTERNS = [
@@ -1376,7 +1376,8 @@ class HealthMonitor:
     def _check_updates(self) -> Optional[Dict[str, Any]]:
         """
         Check for pending system updates with intelligence.
-        Only warns for: critical security updates, kernel updates, or updates pending >30 days.
+        Warns only after 365 days without updates (critical after 730 days).
+        Security updates still raise a WARNING; kernel/PVE updates are reported as INFO.
         """
         cache_key = 'updates_check'
         current_time = time.time()
@@ -1386,6 +1387,17 @@ class HealthMonitor:
             return self.cached_results.get(cache_key)
 
         try:
+            apt_history_path = '/var/log/apt/history.log'
+            last_update_days = None
+
+            if os.path.exists(apt_history_path):
+                try:
+                    mtime = os.path.getmtime(apt_history_path)
+                    days_since_update = (current_time - mtime) / 86400
+                    last_update_days = int(days_since_update)
+                except Exception:
+                    pass
+
             result = subprocess.run(
                 ['apt-get', 'upgrade', '--dry-run'],
                 capture_output=True,
@@ -1419,8 +1431,38 @@ class HealthMonitor:
             if security_updates:
                 status = 'WARNING'
                 reason = f'{len(security_updates)} security update(s) available'
+                # Record persistent error for security updates
+                health_persistence.record_error(
+                    error_key='updates_security',
+                    category='updates',
+                    severity='WARNING',
+                    reason=reason,
+                    details={'count': len(security_updates), 'packages': security_updates[:5]}
+                )
+            elif last_update_days and last_update_days >= 730:
+                # 2+ years without updates - CRITICAL
+                status = 'CRITICAL'
+                reason = f'System not updated in {last_update_days} days (>2 years)'
+                health_persistence.record_error(
+                    error_key='updates_730days',
+                    category='updates',
+                    severity='CRITICAL',
+                    reason=reason,
+                    details={'days': last_update_days, 'update_count': update_count}
+                )
+            elif last_update_days and last_update_days >= 365:
+                # 1+ year without updates - WARNING
+                status = 'WARNING'
+                reason = f'System not updated in {last_update_days} days (>1 year)'
+                health_persistence.record_error(
+                    error_key='updates_365days',
+                    category='updates',
+                    severity='WARNING',
+                    reason=reason,
+                    details={'days': last_update_days, 'update_count': update_count}
+                )
             elif kernel_updates:
-                status = 'INFO'  # Informational, not critical
+                status = 'INFO'
                 reason = f'{len(kernel_updates)} kernel/PVE update(s) available'
             elif update_count > 50:
                 status = 'INFO'
@@ -1435,6 +1477,8 @@ class HealthMonitor:
             }
 
             if reason:
                 update_result['reason'] = reason
+            if last_update_days:
+                update_result['days_since_update'] = last_update_days
             self.cached_results[cache_key] = update_result
             self.last_check_times[cache_key] = current_time
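
As a rough illustration of the staleness logic this hunk adds, the sketch below reads the mtime of /var/log/apt/history.log and maps the resulting age to a status with the same 365/730-day thresholds. It is a standalone approximation: the function names (days_since_last_update, classify_staleness) and the script structure are illustrative, not part of HealthMonitor's actual API.

import os
import time

APT_HISTORY = '/var/log/apt/history.log'
UPDATES_WARNING = 365   # days without updates before WARNING
UPDATES_CRITICAL = 730  # days without updates before CRITICAL


def days_since_last_update(path=APT_HISTORY):
    """Approximate days since apt last modified its history log, or None."""
    try:
        return int((time.time() - os.path.getmtime(path)) / 86400)
    except OSError:
        return None  # no history file, or unreadable


def classify_staleness(days):
    """Map days-without-updates to the status levels used in the diff."""
    if days is None:
        return 'OK'
    if days >= UPDATES_CRITICAL:
        return 'CRITICAL'
    if days >= UPDATES_WARNING:
        return 'WARNING'
    return 'OK'


if __name__ == '__main__':
    print(classify_staleness(days_since_last_update()))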

View File

@@ -27,6 +27,7 @@ class HealthPersistence:
     VM_ERROR_RETENTION = 48 * 3600    # 48 hours
     LOG_ERROR_RETENTION = 24 * 3600   # 24 hours
     DISK_ERROR_RETENTION = 48 * 3600  # 48 hours
+    UPDATES_SUPPRESSION = 180 * 24 * 3600  # 180 days (6 months)
 
     def __init__(self):
         """Initialize persistence with database in config directory"""
@@ -102,8 +103,15 @@ class HealthPersistence:
                 resolved_dt = datetime.fromisoformat(ack_check[1])
                 hours_since_ack = (datetime.now() - resolved_dt).total_seconds() / 3600
 
-                if hours_since_ack < 24:
-                    # Skip re-adding recently acknowledged errors (within 24h)
+                if category == 'updates':
+                    # Updates: suppress for 180 days (6 months)
+                    suppression_hours = self.UPDATES_SUPPRESSION / 3600
+                else:
+                    # Other errors: suppress for 24 hours
+                    suppression_hours = 24
+
+                if hours_since_ack < suppression_hours:
+                    # Skip re-adding recently acknowledged errors
                     conn.close()
                     return {'type': 'skipped_acknowledged', 'needs_notification': False}
             except Exception:
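
The acknowledgement-suppression rule added in this hunk reduces to choosing a quiet period per category. A compact sketch, assuming the 180-day window applies only to the 'updates' category and 24 hours to everything else; the helper is_suppressed is illustrative and not part of HealthPersistence's real interface.

UPDATES_SUPPRESSION = 180 * 24 * 3600  # 180 days, in seconds


def is_suppressed(category, hours_since_ack):
    """True if an acknowledged error is still inside its quiet period."""
    if category == 'updates':
        suppression_hours = UPDATES_SUPPRESSION / 3600  # ~6 months
    else:
        suppression_hours = 24  # default 24-hour window
    return hours_since_ack < suppression_hours


# Example: an 'updates' error acknowledged 30 days ago stays suppressed,
# while a 'disk' error acknowledged 30 days ago would re-surface.
assert is_suppressed('updates', 30 * 24)
assert not is_suppressed('disk', 30 * 24)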