mirror of
https://github.com/MacRimi/ProxMenux.git
synced 2026-05-02 04:16:24 +00:00
update health_monitor.py
This commit is contained in:
@@ -150,7 +150,7 @@ class HealthMonitor:
|
|||||||
r'zfs.*scrub (started|finished|in progress)',
|
r'zfs.*scrub (started|finished|in progress)',
|
||||||
r'zpool.*resilver',
|
r'zpool.*resilver',
|
||||||
|
|
||||||
# ── LXC/Container normal operations ──
|
# ── LXC/Container normal operations ──
|
||||||
r'lxc.*monitor',
|
r'lxc.*monitor',
|
||||||
r'systemd\[1\]: (started|stopped) .*\.scope',
|
r'systemd\[1\]: (started|stopped) .*\.scope',
|
||||||
|
|
||||||
@@ -837,15 +837,20 @@ class HealthMonitor:
|
|||||||
return self.cached_results.get(cache_key)
|
return self.cached_results.get(cache_key)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Use shared journalctl cache to avoid duplicate calls
|
# Read temperature directly from sensors command (not journalctl)
|
||||||
journalctl_output = self._get_journalctl_10min_warnings()
|
result = subprocess.run(
|
||||||
|
['sensors', '-u'],
|
||||||
|
capture_output=True, text=True, timeout=3
|
||||||
|
)
|
||||||
|
|
||||||
if journalctl_output:
|
|
||||||
temps = []
|
temps = []
|
||||||
for line in journalctl_output.split('\n'):
|
if result.returncode == 0 and result.stdout:
|
||||||
if 'temp' in line.lower() and '_input' in line:
|
for line in result.stdout.split('\n'):
|
||||||
|
# Look for temperature input lines like "temp1_input: 42.000"
|
||||||
|
if '_input' in line and 'temp' in line.lower():
|
||||||
try:
|
try:
|
||||||
temp = float(line.split(':')[1].strip())
|
temp = float(line.split(':')[1].strip())
|
||||||
|
if 0 < temp < 150: # Sanity check for valid temp range
|
||||||
temps.append(temp)
|
temps.append(temp)
|
||||||
except:
|
except:
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -967,23 +967,21 @@ class HealthPersistence:
|
|||||||
cutoff_events = (now - timedelta(days=30)).isoformat()
|
cutoff_events = (now - timedelta(days=30)).isoformat()
|
||||||
cursor.execute('DELETE FROM events WHERE timestamp < ?', (cutoff_events,))
|
cursor.execute('DELETE FROM events WHERE timestamp < ?', (cutoff_events,))
|
||||||
|
|
||||||
# ── Auto-resolve transient log errors after system reboot ──
|
# ── Auto-resolve transient errors after system stabilizes ──
|
||||||
# OOM, service failures, timeouts are transient - a reboot resolves them.
|
# Transient errors (OOM, high CPU, service failures) resolve themselves.
|
||||||
# If the system has been up for >1 hour and these errors haven't recurred,
|
# If the system has been up for >10 minutes and these errors haven't recurred,
|
||||||
# they are from a previous boot and should be auto-resolved.
|
# they are stale and should be auto-resolved.
|
||||||
#
|
|
||||||
# Logic: If uptime > 1 hour AND error.last_seen is not within the last 30 minutes,
|
|
||||||
# the error is stale (from before the current stable state) and should be resolved.
|
|
||||||
try:
|
try:
|
||||||
|
import psutil
|
||||||
# Get system uptime
|
# Get system uptime
|
||||||
with open('/proc/uptime', 'r') as f:
|
with open('/proc/uptime', 'r') as f:
|
||||||
uptime_seconds = float(f.read().split()[0])
|
uptime_seconds = float(f.read().split()[0])
|
||||||
|
|
||||||
# Only auto-resolve if system has been stable for at least 1 hour
|
# Only auto-resolve if system has been stable for at least 10 minutes
|
||||||
if uptime_seconds > 3600: # 1 hour
|
if uptime_seconds > 600: # 10 minutes
|
||||||
# Resolve transient log errors that haven't been seen in the last 30 minutes
|
stale_cutoff = (now - timedelta(minutes=10)).isoformat()
|
||||||
# If they were real current issues, journalctl -b 0 would have detected them recently
|
|
||||||
stale_cutoff = (now - timedelta(minutes=30)).isoformat()
|
# 1. Resolve transient log errors (OOM, service failures)
|
||||||
cursor.execute('''
|
cursor.execute('''
|
||||||
UPDATE errors
|
UPDATE errors
|
||||||
SET resolved_at = ?
|
SET resolved_at = ?
|
||||||
@@ -999,6 +997,42 @@ class HealthPersistence:
|
|||||||
OR reason LIKE '%timeout%'
|
OR reason LIKE '%timeout%'
|
||||||
OR reason LIKE '%critical error%')
|
OR reason LIKE '%critical error%')
|
||||||
''', (now_iso, stale_cutoff))
|
''', (now_iso, stale_cutoff))
|
||||||
|
|
||||||
|
# 2. Auto-resolve CPU errors if current CPU is normal (<75%)
|
||||||
|
try:
|
||||||
|
current_cpu = psutil.cpu_percent(interval=0.1)
|
||||||
|
if current_cpu < 75:
|
||||||
|
cursor.execute('''
|
||||||
|
UPDATE errors
|
||||||
|
SET resolved_at = ?
|
||||||
|
WHERE category = 'temperature'
|
||||||
|
AND resolved_at IS NULL
|
||||||
|
AND acknowledged = 0
|
||||||
|
AND last_seen < ?
|
||||||
|
AND (error_key = 'cpu_usage'
|
||||||
|
OR reason LIKE '%CPU >%sustained%'
|
||||||
|
OR reason LIKE '%Sustained high CPU%')
|
||||||
|
''', (now_iso, stale_cutoff))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# 3. Auto-resolve memory errors if current memory is normal (<80%)
|
||||||
|
try:
|
||||||
|
current_mem = psutil.virtual_memory().percent
|
||||||
|
if current_mem < 80:
|
||||||
|
cursor.execute('''
|
||||||
|
UPDATE errors
|
||||||
|
SET resolved_at = ?
|
||||||
|
WHERE category = 'memory'
|
||||||
|
AND resolved_at IS NULL
|
||||||
|
AND acknowledged = 0
|
||||||
|
AND last_seen < ?
|
||||||
|
AND (reason LIKE '%Memory >%'
|
||||||
|
OR reason LIKE '%RAM usage%')
|
||||||
|
''', (now_iso, stale_cutoff))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
pass # If we can't read uptime, skip this cleanup
|
pass # If we can't read uptime, skip this cleanup
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user