From c0ec74fb129222a304c527a2a98c236a330e4e7d Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 09:14:29 +0100 Subject: [PATCH 01/41] Update AppImage --- AppImage/components/auth-setup.tsx | 221 ++++++++++++++++++++++ AppImage/components/login.tsx | 141 ++++++++++++++ AppImage/components/proxmox-dashboard.tsx | 115 +++++++++++ AppImage/components/ui/input.tsx | 2 +- AppImage/components/ui/label.tsx | 17 ++ AppImage/scripts/flask_server.py | 66 +++++++ 6 files changed, 561 insertions(+), 1 deletion(-) create mode 100644 AppImage/components/auth-setup.tsx create mode 100644 AppImage/components/login.tsx create mode 100644 AppImage/components/ui/label.tsx diff --git a/AppImage/components/auth-setup.tsx b/AppImage/components/auth-setup.tsx new file mode 100644 index 0000000..a6ff242 --- /dev/null +++ b/AppImage/components/auth-setup.tsx @@ -0,0 +1,221 @@ +"use client" + +import { useState, useEffect } from "react" +import { Button } from "./ui/button" +import { Dialog, DialogContent } from "./ui/dialog" +import { Input } from "./ui/input" +import { Label } from "./ui/label" +import { Shield, Lock, User, AlertCircle } from "lucide-react" +import { getApiUrl } from "../lib/api-config" + +interface AuthSetupProps { + onComplete: () => void +} + +export function AuthSetup({ onComplete }: AuthSetupProps) { + const [open, setOpen] = useState(false) + const [step, setStep] = useState<"choice" | "setup">("choice") + const [username, setUsername] = useState("") + const [password, setPassword] = useState("") + const [confirmPassword, setConfirmPassword] = useState("") + const [error, setError] = useState("") + const [loading, setLoading] = useState(false) + + useEffect(() => { + // Check if onboarding is complete and auth setup is needed + const hasSeenOnboarding = localStorage.getItem("proxmenux-onboarding-seen") + const authSetupComplete = localStorage.getItem("proxmenux-auth-setup-complete") + + if (hasSeenOnboarding && !authSetupComplete) { + // Small delay to show after onboarding closes + setTimeout(() => setOpen(true), 500) + } + }, []) + + const handleSkipAuth = async () => { + setLoading(true) + setError("") + + try { + const response = await fetch(getApiUrl("/api/auth/setup"), { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ enable_auth: false }), + }) + + if (!response.ok) throw new Error("Failed to save preference") + + localStorage.setItem("proxmenux-auth-setup-complete", "true") + setOpen(false) + onComplete() + } catch (err) { + setError("Failed to save preference. 
Please try again.") + } finally { + setLoading(false) + } + } + + const handleSetupAuth = async () => { + setError("") + + if (!username || !password) { + setError("Please fill in all fields") + return + } + + if (password !== confirmPassword) { + setError("Passwords do not match") + return + } + + if (password.length < 6) { + setError("Password must be at least 6 characters") + return + } + + setLoading(true) + + try { + const response = await fetch(getApiUrl("/api/auth/setup"), { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + username, + password, + enable_auth: true, + }), + }) + + const data = await response.json() + + if (!response.ok) { + throw new Error(data.error || "Failed to setup authentication") + } + + // Save token + localStorage.setItem("proxmenux-auth-token", data.token) + localStorage.setItem("proxmenux-auth-setup-complete", "true") + + setOpen(false) + onComplete() + } catch (err) { + setError(err instanceof Error ? err.message : "Failed to setup authentication") + } finally { + setLoading(false) + } + } + + return ( + + + {step === "choice" ? ( +
+
+
+ +
+

Protect Your Dashboard?

+

+ Add an extra layer of security to protect your Proxmox data when you access it from untrusted networks.
+

+
+ +
+ + +
+ +

You can always enable this later in Settings

+
+ ) : ( +
+
+
+ +
+

Set Up Authentication

+

Create a username and password to protect your dashboard

+
+ + {error && ( +
+ +

{error}

+
+ )} + +
+
+ +
+ + setUsername(e.target.value)} + className="pl-10" + disabled={loading} + /> +
+
+ +
+ +
+ + setPassword(e.target.value)} + className="pl-10" + disabled={loading} + /> +
+
+ +
+ +
+ + setConfirmPassword(e.target.value)} + className="pl-10" + disabled={loading} + /> +
+
+
+ +
+ + +
+
+ )} +
+
+ ) +} diff --git a/AppImage/components/login.tsx b/AppImage/components/login.tsx new file mode 100644 index 0000000..3f8ca87 --- /dev/null +++ b/AppImage/components/login.tsx @@ -0,0 +1,141 @@ +"use client" + +import type React from "react" + +import { useState } from "react" +import { Button } from "./ui/button" +import { Input } from "./ui/input" +import { Label } from "./ui/label" +import { Lock, User, AlertCircle, Server } from "lucide-react" +import { getApiUrl } from "../lib/api-config" +import Image from "next/image" + +interface LoginProps { + onLogin: () => void +} + +export function Login({ onLogin }: LoginProps) { + const [username, setUsername] = useState("") + const [password, setPassword] = useState("") + const [error, setError] = useState("") + const [loading, setLoading] = useState(false) + + const handleLogin = async (e: React.FormEvent) => { + e.preventDefault() + setError("") + + if (!username || !password) { + setError("Please enter username and password") + return + } + + setLoading(true) + + try { + const response = await fetch(getApiUrl("/api/auth/login"), { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ username, password }), + }) + + const data = await response.json() + + if (!response.ok) { + throw new Error(data.error || "Login failed") + } + + // Save token + localStorage.setItem("proxmenux-auth-token", data.token) + onLogin() + } catch (err) { + setError(err instanceof Error ? err.message : "Login failed") + } finally { + setLoading(false) + } + } + + return ( +
+
+
+
+
+ ProxMenux Logo { + const target = e.target as HTMLImageElement + target.style.display = "none" + const fallback = target.parentElement?.querySelector(".fallback-icon") + if (fallback) { + fallback.classList.remove("hidden") + } + }} + /> + +
+
+
+

ProxMenux Monitor

+

Sign in to access your dashboard

+
+
+ +
+
+ {error && ( +
+ +

{error}

+
+ )} + +
+ +
+ + setUsername(e.target.value)} + className="pl-10" + disabled={loading} + autoComplete="username" + /> +
+
+ +
+ +
+ + setPassword(e.target.value)} + className="pl-10" + disabled={loading} + autoComplete="current-password" + /> +
+
+ + +
+
+ +

ProxMenux Monitor v1.0.0

+
+
+ ) +} diff --git a/AppImage/components/proxmox-dashboard.tsx b/AppImage/components/proxmox-dashboard.tsx index da744ba..27d2a4b 100644 --- a/AppImage/components/proxmox-dashboard.tsx +++ b/AppImage/components/proxmox-dashboard.tsx @@ -11,6 +11,8 @@ import { VirtualMachines } from "./virtual-machines" import Hardware from "./hardware" import { SystemLogs } from "./system-logs" import { OnboardingCarousel } from "./onboarding-carousel" +import { AuthSetup } from "./auth-setup" +import { Login } from "./login" import { getApiUrl } from "../lib/api-config" import { RefreshCw, @@ -63,6 +65,10 @@ export function ProxmoxDashboard() { const [activeTab, setActiveTab] = useState("overview") const [showNavigation, setShowNavigation] = useState(true) const [lastScrollY, setLastScrollY] = useState(0) + const [authChecked, setAuthChecked] = useState(false) + const [authRequired, setAuthRequired] = useState(false) + const [isAuthenticated, setIsAuthenticated] = useState(false) + const [authSetupComplete, setAuthSetupComplete] = useState(false) const fetchSystemData = useCallback(async () => { console.log("[v0] Fetching system data from Flask server...") @@ -219,10 +225,119 @@ export function ProxmoxDashboard() { } } + const setupTokenRefresh = () => { + let refreshTimeout: ReturnType + + const refreshToken = async () => { + const token = localStorage.getItem("proxmenux-auth-token") + if (!token) return + + try { + const response = await fetch(getApiUrl("/api/auth/refresh"), { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${token}`, + }, + }) + + if (response.ok) { + const data = await response.json() + localStorage.setItem("proxmenux-auth-token", data.token) + console.log("[v0] Token refreshed successfully") + } + } catch (error) { + console.error("[v0] Failed to refresh token:", error) + } + } + + const resetRefreshTimer = () => { + clearTimeout(refreshTimeout) + // Refresh token every 25 minutes (before 30 min expiry) + refreshTimeout = setTimeout(refreshToken, 25 * 60 * 1000) + } + + // Refresh on user activity + const events = ["mousedown", "keydown", "scroll", "touchstart"] + events.forEach((event) => { + window.addEventListener(event, resetRefreshTimer, { passive: true }) + }) + + resetRefreshTimer() + + return () => { + clearTimeout(refreshTimeout) + events.forEach((event) => { + window.removeEventListener(event, resetRefreshTimer) + }) + } + } + + const handleAuthSetupComplete = () => { + setAuthSetupComplete(true) + setIsAuthenticated(true) + } + + const handleLoginSuccess = () => { + setIsAuthenticated(true) + setupTokenRefresh() + } + + useEffect(() => { + const checkAuth = async () => { + try { + const token = localStorage.getItem("proxmenux-auth-token") + const headers: HeadersInit = { "Content-Type": "application/json" } + + if (token) { + headers["Authorization"] = `Bearer ${token}` + } + + const response = await fetch(getApiUrl("/api/auth/status"), { + headers, + }) + + const data = await response.json() + + setAuthRequired(data.auth_enabled) + setIsAuthenticated(data.authenticated) + setAuthSetupComplete(localStorage.getItem("proxmenux-auth-setup-complete") === "true") + setAuthChecked(true) + + // Setup token refresh if authenticated + if (data.authenticated && token) { + setupTokenRefresh() + } + } catch (error) { + console.error("[v0] Failed to check auth status:", error) + setAuthChecked(true) + } + } + + checkAuth() + }, []) + + if (!authChecked) { + return ( +
+
+
+

Loading...

+
+
+ ) + } + + if (authRequired && !isAuthenticated) { + return + } + return (
+ {!authSetupComplete && } + {!isServerConnected && (
diff --git a/AppImage/components/ui/input.tsx b/AppImage/components/ui/input.tsx index 31bbca4..d32a72e 100644 --- a/AppImage/components/ui/input.tsx +++ b/AppImage/components/ui/input.tsx @@ -9,7 +9,7 @@ const Input = React.forwardRef(({ className, type, , + React.ComponentPropsWithoutRef & VariantProps +>(({ className, ...props }, ref) => ( + +)) +Label.displayName = LabelPrimitive.Root.displayName + +export { Label } diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index 783c611..2090fb4 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -22,6 +22,72 @@ import xml.etree.ElementTree as ET # Added for XML parsing import math # Imported math for format_bytes function import urllib.parse # Added for URL encoding import platform # Added for platform.release() +import hashlib +import secrets +import jwt +from functools import wraps +from pathlib import Path + +# Authentication configuration +AUTH_CONFIG_DIR = Path.home() / ".config" / "proxmenux-monitor" +AUTH_CONFIG_FILE = AUTH_CONFIG_DIR / "auth.json" +JWT_SECRET = secrets.token_hex(32) # Generate a random secret for JWT +SESSION_TIMEOUT = 30 * 60 # 30 minutes in seconds + +# Ensure config directory exists +AUTH_CONFIG_DIR.mkdir(parents=True, exist_ok=True) + +def hash_password(password: str) -> str: + """Hash a password using SHA-256""" + return hashlib.sha256(password.encode()).hexdigest() + +def load_auth_config(): + """Load authentication configuration from file""" + if not AUTH_CONFIG_FILE.exists(): + return {"auth_enabled": False} + + try: + with open(AUTH_CONFIG_FILE, 'r') as f: + return json.load(f) + except: + return {"auth_enabled": False} + +def save_auth_config(config): + """Save authentication configuration to file""" + with open(AUTH_CONFIG_FILE, 'w') as f: + json.dump(config, f, indent=2) + +def require_auth(f): + """Decorator to require authentication for endpoints""" + @wraps(f) + def decorated_function(*args, **kwargs): + auth_config = load_auth_config() + + # If auth is not enabled, allow access + if not auth_config.get("auth_enabled", False): + return f(*args, **kwargs) + + # Check for Authorization header + auth_header = request.headers.get('Authorization') + if not auth_header or not auth_header.startswith('Bearer '): + return jsonify({"error": "Authentication required"}), 401 + + token = auth_header.split(' ')[1] + + try: + # Verify JWT token + payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256']) + + # Check if token is expired + if time.time() > payload.get('exp', 0): + return jsonify({"error": "Token expired"}), 401 + + return f(*args, **kwargs) + except jwt.InvalidTokenError: + return jsonify({"error": "Invalid token"}), 401 + + return decorated_function + app = Flask(__name__) CORS(app) # Enable CORS for Next.js frontend From 37f6cd96a473650a09b46ba58db3cd1ae7b8a1c0 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 09:46:11 +0100 Subject: [PATCH 02/41] Update flask_server.py --- AppImage/scripts/flask_server.py | 204 +++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index 2090fb4..919c328 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -88,6 +88,210 @@ def require_auth(f): return decorated_function +# Authentication endpoints +@app.route('/api/auth/status', methods=['GET']) +def auth_status(): + """Check if authentication is enabled and if current session is valid""" + try: + auth_config = 
load_auth_config() + is_enabled = auth_config.get("auth_enabled", False) + + # Check if user has valid token + is_authenticated = False + if is_enabled: + auth_header = request.headers.get('Authorization') + if auth_header and auth_header.startswith('Bearer '): + token = auth_header.split(' ')[1] + try: + payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256']) + if time.time() <= payload.get('exp', 0): + is_authenticated = True + except: + pass + + return jsonify({ + "auth_enabled": is_enabled, + "authenticated": is_authenticated or not is_enabled + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/setup', methods=['POST']) +def auth_setup(): + """Setup authentication for the first time""" + try: + data = request.get_json() + username = data.get('username', '').strip() + password = data.get('password', '').strip() + + if not username or not password: + return jsonify({"error": "Username and password are required"}), 400 + + if len(password) < 6: + return jsonify({"error": "Password must be at least 6 characters"}), 400 + + # Hash password and save config + password_hash = hash_password(password) + + auth_config = { + "auth_enabled": True, + "username": username, + "password_hash": password_hash, + "created_at": datetime.now().isoformat() + } + + save_auth_config(auth_config) + + # Generate JWT token + token = jwt.encode({ + 'username': username, + 'exp': time.time() + SESSION_TIMEOUT + }, JWT_SECRET, algorithm='HS256') + + return jsonify({ + "success": True, + "token": token + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/skip', methods=['POST']) +def auth_skip(): + """Skip authentication setup""" + try: + auth_config = { + "auth_enabled": False, + "skipped": True, + "skipped_at": datetime.now().isoformat() + } + + save_auth_config(auth_config) + + return jsonify({"success": True}) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/login', methods=['POST']) +def auth_login(): + """Login with username and password""" + try: + data = request.get_json() + username = data.get('username', '').strip() + password = data.get('password', '').strip() + + if not username or not password: + return jsonify({"error": "Username and password are required"}), 400 + + # Load auth config + auth_config = load_auth_config() + + if not auth_config.get("auth_enabled", False): + return jsonify({"error": "Authentication is not enabled"}), 400 + + # Verify credentials + stored_username = auth_config.get("username", "") + stored_password_hash = auth_config.get("password_hash", "") + + if username != stored_username or hash_password(password) != stored_password_hash: + return jsonify({"error": "Invalid username or password"}), 401 + + # Generate JWT token + token = jwt.encode({ + 'username': username, + 'exp': time.time() + SESSION_TIMEOUT + }, JWT_SECRET, algorithm='HS256') + + return jsonify({ + "success": True, + "token": token + }) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/refresh', methods=['POST']) +def auth_refresh(): + """Refresh JWT token""" + try: + auth_header = request.headers.get('Authorization') + if not auth_header or not auth_header.startswith('Bearer '): + return jsonify({"error": "No token provided"}), 401 + + token = auth_header.split(' ')[1] + + try: + # Verify current token + payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256']) + username = payload.get('username') + + # Generate new token + new_token = 
jwt.encode({ + 'username': username, + 'exp': time.time() + SESSION_TIMEOUT + }, JWT_SECRET, algorithm='HS256') + + return jsonify({ + "success": True, + "token": new_token + }) + except jwt.InvalidTokenError: + return jsonify({"error": "Invalid token"}), 401 + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/logout', methods=['POST']) +@require_auth +def auth_logout(): + """Logout (client should delete token)""" + return jsonify({"success": True}) + +@app.route('/api/auth/disable', methods=['POST']) +@require_auth +def auth_disable(): + """Disable authentication""" + try: + auth_config = { + "auth_enabled": False, + "disabled_at": datetime.now().isoformat() + } + + save_auth_config(auth_config) + + return jsonify({"success": True}) + except Exception as e: + return jsonify({"error": str(e)}), 500 + +@app.route('/api/auth/change-password', methods=['POST']) +@require_auth +def auth_change_password(): + """Change password""" + try: + data = request.get_json() + current_password = data.get('current_password', '').strip() + new_password = data.get('new_password', '').strip() + + if not current_password or not new_password: + return jsonify({"error": "Current and new password are required"}), 400 + + if len(new_password) < 6: + return jsonify({"error": "New password must be at least 6 characters"}), 400 + + # Load auth config + auth_config = load_auth_config() + + # Verify current password + stored_password_hash = auth_config.get("password_hash", "") + if hash_password(current_password) != stored_password_hash: + return jsonify({"error": "Current password is incorrect"}), 401 + + # Update password + auth_config["password_hash"] = hash_password(new_password) + auth_config["updated_at"] = datetime.now().isoformat() + + save_auth_config(auth_config) + + return jsonify({"success": True}) + except Exception as e: + return jsonify({"error": str(e)}), 500 app = Flask(__name__) CORS(app) # Enable CORS for Next.js frontend From 5669ce207ce0384ec89a9e250342e4ee354e4a95 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 11:03:09 +0100 Subject: [PATCH 03/41] Update flask_server.py --- AppImage/scripts/flask_server.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index 919c328..c4e6185 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -28,6 +28,9 @@ import jwt from functools import wraps from pathlib import Path +app = Flask(__name__) +CORS(app) # Enable CORS for Next.js frontend + # Authentication configuration AUTH_CONFIG_DIR = Path.home() / ".config" / "proxmenux-monitor" AUTH_CONFIG_FILE = AUTH_CONFIG_DIR / "auth.json" @@ -293,8 +296,8 @@ def auth_change_password(): except Exception as e: return jsonify({"error": str(e)}), 500 -app = Flask(__name__) -CORS(app) # Enable CORS for Next.js frontend +# app = Flask(__name__) +# CORS(app) # Enable CORS for Next.js frontend def identify_gpu_type(name, vendor=None, bus=None, driver=None): """ From ab0e59215c558fc9c88790dcfa7a35228ccc95f5 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 11:34:46 +0100 Subject: [PATCH 04/41] Aupdate version ProxMenux Monitor --- AppImage/components/login.tsx | 2 +- AppImage/components/proxmox-dashboard.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/AppImage/components/login.tsx b/AppImage/components/login.tsx index 3f8ca87..38f3aac 100644 --- a/AppImage/components/login.tsx +++ b/AppImage/components/login.tsx @@ 
-134,7 +134,7 @@ export function Login({ onLogin }: LoginProps) {
-

ProxMenux Monitor v1.0.0

+

ProxMenux Monitor v1.0.1

) diff --git a/AppImage/components/proxmox-dashboard.tsx b/AppImage/components/proxmox-dashboard.tsx index 27d2a4b..f736556 100644 --- a/AppImage/components/proxmox-dashboard.tsx +++ b/AppImage/components/proxmox-dashboard.tsx @@ -636,7 +636,7 @@ export function ProxmoxDashboard() {
)} - )} - - {/* SATA Information */} - {selectedDisk.sata_version && ( + ) : (
- SATA Version - {selectedDisk.sata_version} + PCIe Link Speed + Detecting...
)} - - {/* SAS Information */} - {selectedDisk.sas_version && ( -
- SAS Version - {selectedDisk.sas_version} -
- )} - {selectedDisk.sas_speed && ( -
- SAS Speed - {selectedDisk.sas_speed} -
- )} - - {/* Generic Link Speed */} - {selectedDisk.link_speed && - !selectedDisk.pcie_gen && - !selectedDisk.sata_version && - !selectedDisk.sas_version && ( -
- Link Speed - {selectedDisk.link_speed} -
- )} )} + {/* SATA Information */} + {!selectedDisk.name.startsWith("nvme") && selectedDisk.sata_version && ( +
+ SATA Version + {selectedDisk.sata_version} +
+ )} + + {/* SAS Information */} + {!selectedDisk.name.startsWith("nvme") && selectedDisk.sas_version && ( +
+ SAS Version + {selectedDisk.sas_version} +
+ )} + {!selectedDisk.name.startsWith("nvme") && selectedDisk.sas_speed && ( +
+ SAS Speed + {selectedDisk.sas_speed} +
+ )} + + {/* Generic Link Speed - only show if no specific interface info */} + {!selectedDisk.name.startsWith("nvme") && + selectedDisk.link_speed && + !selectedDisk.pcie_gen && + !selectedDisk.sata_version && + !selectedDisk.sas_version && ( +
+ Link Speed + {selectedDisk.link_speed} +
+ )} + {selectedDisk.model && (
Model From 9a32d1c0f7ecbb3fad2519d2a6730989b3181c55 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 13:47:00 +0100 Subject: [PATCH 09/41] Update flask_server.py --- AppImage/scripts/flask_server.py | 152 ++++++++++++++++++++++++++++++- 1 file changed, 151 insertions(+), 1 deletion(-) diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index c4e6185..2a8240c 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -1190,6 +1190,149 @@ def get_disk_hardware_info(disk_name): """Placeholder for disk hardware info - to be populated by lsblk later.""" return {} +def get_pcie_link_speed(disk_name): + """Get PCIe link speed information for NVMe drives""" + pcie_info = { + 'pcie_gen': None, + 'pcie_width': None, + 'pcie_max_gen': None, + 'pcie_max_width': None + } + + try: + # For NVMe drives, get PCIe information from sysfs + if disk_name.startswith('nvme'): + # Extract controller number (nvme0, nvme1, etc.) + controller = disk_name.rstrip('n0123456789p') + + # Path to PCIe device in sysfs + sys_path = f'/sys/class/nvme/{controller}/device' + + if os.path.exists(sys_path): + # Get current link speed + try: + with open(f'{sys_path}/current_link_speed', 'r') as f: + speed_str = f.read().strip() + # Format: "8.0 GT/s PCIe" or "16 GT/s" + if 'GT/s' in speed_str: + gt_s = float(speed_str.split()[0]) + # Convert GT/s to PCIe generation + # PCIe 1.0 = 2.5 GT/s, 2.0 = 5 GT/s, 3.0 = 8 GT/s, 4.0 = 16 GT/s, 5.0 = 32 GT/s + if gt_s <= 2.5: + pcie_info['pcie_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_gen'] = '4.0' + else: + pcie_info['pcie_gen'] = '5.0' + except: + pass + + # Get current link width + try: + with open(f'{sys_path}/current_link_width', 'r') as f: + width = f.read().strip() + pcie_info['pcie_width'] = f'x{width}' + except: + pass + + # Get maximum link speed + try: + with open(f'{sys_path}/max_link_speed', 'r') as f: + speed_str = f.read().strip() + if 'GT/s' in speed_str: + gt_s = float(speed_str.split()[0]) + if gt_s <= 2.5: + pcie_info['pcie_max_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_max_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_max_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_max_gen'] = '4.0' + else: + pcie_info['pcie_max_gen'] = '5.0' + except: + pass + + # Get maximum link width + try: + with open(f'{sys_path}/max_link_width', 'r') as f: + width = f.read().strip() + pcie_info['pcie_max_width'] = f'x{width}' + except: + pass + + # Alternative method using lspci if sysfs doesn't work + if not pcie_info['pcie_gen']: + try: + # Get PCI address for this NVMe device + pci_address = os.path.basename(os.readlink(f'{sys_path}')) + + # Use lspci to get detailed PCIe information + result = subprocess.run(['lspci', '-vvv', '-s', pci_address], + capture_output=True, text=True, timeout=5) + if result.returncode == 0: + for line in result.stdout.split('\n'): + # Look for "LnkSta:" line which shows current link status + if 'LnkSta:' in line: + # Example: "LnkSta: Speed 8GT/s, Width x4" + if 'Speed' in line: + speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_gen'] = '4.0' + else: + pcie_info['pcie_gen'] = '5.0' + + if 'Width' in line: + 
width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_width'] = f'x{width_match.group(1)}' + + # Look for "LnkCap:" line which shows maximum capabilities + elif 'LnkCap:' in line: + if 'Speed' in line: + speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_max_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_max_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_max_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_max_gen'] = '4.0' + else: + pcie_info['pcie_max_gen'] = '5.0' + + if 'Width' in line: + width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_max_width'] = f'x{width_match.group(1)}' + except Exception as e: + # print(f"[v0] Error getting PCIe info via lspci for {disk_name}: {e}") + pass + + except Exception as e: + # print(f"[v0] Error getting PCIe link speed for {disk_name}: {e}") + pass + + return pcie_info + +# END OF ADDED FUNCTION + def get_smart_data(disk_name): """Get SMART data for a specific disk - Enhanced with multiple device type attempts""" smart_data = { @@ -2821,7 +2964,7 @@ def get_detailed_gpu_info(gpu): if 'clients' in json_data: client_count = len(json_data['clients']) - for client_id, client_data in json_data['clients'].items(): + for client_id, client_data in json_data['clients']: client_name = client_data.get('name', 'Unknown') client_pid = client_data.get('pid', 'Unknown') @@ -4159,6 +4302,10 @@ def get_hardware_info(): except: pass + pcie_info = {} + if disk_name.startswith('nvme'): + pcie_info = get_pcie_link_speed(disk_name) + # Build storage device with all available information storage_device = { 'name': disk_name, @@ -4174,6 +4321,9 @@ def get_hardware_info(): 'sata_version': sata_version, } + if pcie_info: + storage_device.update(pcie_info) + # Add family if available (from smartctl) try: result_smart = subprocess.run(['smartctl', '-i', f'/dev/{disk_name}'], From 73a170a5f197b4c7b344d04d9ed0e8b5a2f6905d Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 14:00:01 +0100 Subject: [PATCH 10/41] Update hardware.tsx --- AppImage/components/hardware.tsx | 84 +++++++++++++++++++------------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/AppImage/components/hardware.tsx b/AppImage/components/hardware.tsx index 6f4398c..0a75bb6 100644 --- a/AppImage/components/hardware.tsx +++ b/AppImage/components/hardware.tsx @@ -171,6 +171,22 @@ export default function Hardware() { refreshInterval: 5000, }) + useEffect(() => { + if (hardwareData?.storage_devices) { + console.log("[v0] Storage devices data from backend:", hardwareData.storage_devices) + hardwareData.storage_devices.forEach((device) => { + if (device.name.startsWith("nvme")) { + console.log(`[v0] NVMe device ${device.name}:`, { + pcie_gen: device.pcie_gen, + pcie_width: device.pcie_width, + pcie_max_gen: device.pcie_max_gen, + pcie_max_width: device.pcie_max_width, + }) + } + }) + } + }, [hardwareData]) + const [selectedGPU, setSelectedGPU] = useState(null) const [realtimeGPUData, setRealtimeGPUData] = useState(null) const [detailsLoading, setDetailsLoading] = useState(false) @@ -1762,46 +1778,44 @@ export default function Hardware() { {selectedDisk.name}
- {selectedDisk.name && ( -
- Type - {(() => { - const getDiskTypeBadge = (diskName: string, rotationRate: number | string | undefined) => { - let diskType = "HDD" +
+ Type + {(() => { + const getDiskTypeBadge = (diskName: string, rotationRate: number | string | undefined) => { + let diskType = "HDD" - if (diskName.startsWith("nvme")) { - diskType = "NVMe" - } else if (rotationRate !== undefined && rotationRate !== null) { - const rateNum = typeof rotationRate === "string" ? Number.parseInt(rotationRate) : rotationRate - if (rateNum === 0 || isNaN(rateNum)) { - diskType = "SSD" - } - } else if (typeof rotationRate === "string" && rotationRate.includes("Solid State")) { + if (diskName.startsWith("nvme")) { + diskType = "NVMe" + } else if (rotationRate !== undefined && rotationRate !== null) { + const rateNum = typeof rotationRate === "string" ? Number.parseInt(rotationRate) : rotationRate + if (rateNum === 0 || isNaN(rateNum)) { diskType = "SSD" } - - const badgeStyles: Record = { - NVMe: { - className: "bg-purple-500/10 text-purple-500 border-purple-500/20", - label: "NVMe SSD", - }, - SSD: { - className: "bg-cyan-500/10 text-cyan-500 border-cyan-500/20", - label: "SSD", - }, - HDD: { - className: "bg-blue-500/10 text-blue-500 border-blue-500/20", - label: "HDD", - }, - } - return badgeStyles[diskType] + } else if (typeof rotationRate === "string" && rotationRate.includes("Solid State")) { + diskType = "SSD" } - const diskBadge = getDiskTypeBadge(selectedDisk.name, selectedDisk.rotation_rate) - return {diskBadge.label} - })()} -
- )} + const badgeStyles: Record = { + NVMe: { + className: "bg-purple-500/10 text-purple-500 border-purple-500/20", + label: "NVMe SSD", + }, + SSD: { + className: "bg-cyan-500/10 text-cyan-500 border-cyan-500/20", + label: "SSD", + }, + HDD: { + className: "bg-blue-500/10 text-blue-500 border-blue-500/20", + label: "HDD", + }, + } + return badgeStyles[diskType] + } + + const diskBadge = getDiskTypeBadge(selectedDisk.name, selectedDisk.rotation_rate) + return {diskBadge.label} + })()} +
{selectedDisk.size && (
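A reference sketch, not part of the patch series: the next patch reworks get_pcie_link_speed around lspci parsing and drops the direct sysfs reads used by the earlier revision. For comparison, the same lookup through the standard PCI sysfs attributes (current_link_speed / current_link_width) needs no subprocess at all; the helper name below is illustrative only.

import os

def read_nvme_link(controller: str) -> dict:
    """Read the current PCIe link speed/width for a controller such as 'nvme0'."""
    base = f'/sys/class/nvme/{controller}/device'
    info = {}
    for attr, key in (('current_link_speed', 'speed'),
                      ('current_link_width', 'width')):
        try:
            with open(os.path.join(base, attr)) as f:
                # Typical values: '8.0 GT/s PCIe' for speed, '4' for width
                info[key] = f.read().strip()
        except OSError:
            info[key] = None  # attribute absent or not readable
    return info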
From 65fd847251b519a5311a325d3276d364ce35ebd5 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 14:24:34 +0100 Subject: [PATCH 11/41] Update flask_server.py --- AppImage/scripts/flask_server.py | 204 ++++++++++++------------------- 1 file changed, 80 insertions(+), 124 deletions(-) diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index 2a8240c..ff56ca8 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -1202,136 +1202,96 @@ def get_pcie_link_speed(disk_name): try: # For NVMe drives, get PCIe information from sysfs if disk_name.startswith('nvme'): - # Extract controller number (nvme0, nvme1, etc.) - controller = disk_name.rstrip('n0123456789p') + controller = disk_name.split('n')[0] # nvme0n1 -> nvme0 + + print(f"[v0] Getting PCIe info for {disk_name}, controller: {controller}") # Path to PCIe device in sysfs sys_path = f'/sys/class/nvme/{controller}/device' + print(f"[v0] Checking sys_path: {sys_path}, exists: {os.path.exists(sys_path)}") + if os.path.exists(sys_path): - # Get current link speed try: - with open(f'{sys_path}/current_link_speed', 'r') as f: - speed_str = f.read().strip() - # Format: "8.0 GT/s PCIe" or "16 GT/s" - if 'GT/s' in speed_str: - gt_s = float(speed_str.split()[0]) - # Convert GT/s to PCIe generation - # PCIe 1.0 = 2.5 GT/s, 2.0 = 5 GT/s, 3.0 = 8 GT/s, 4.0 = 16 GT/s, 5.0 = 32 GT/s - if gt_s <= 2.5: - pcie_info['pcie_gen'] = '1.0' - elif gt_s <= 5.0: - pcie_info['pcie_gen'] = '2.0' - elif gt_s <= 8.0: - pcie_info['pcie_gen'] = '3.0' - elif gt_s <= 16.0: - pcie_info['pcie_gen'] = '4.0' - else: - pcie_info['pcie_gen'] = '5.0' - except: - pass - - # Get current link width - try: - with open(f'{sys_path}/current_link_width', 'r') as f: - width = f.read().strip() - pcie_info['pcie_width'] = f'x{width}' - except: - pass - - # Get maximum link speed - try: - with open(f'{sys_path}/max_link_speed', 'r') as f: - speed_str = f.read().strip() - if 'GT/s' in speed_str: - gt_s = float(speed_str.split()[0]) - if gt_s <= 2.5: - pcie_info['pcie_max_gen'] = '1.0' - elif gt_s <= 5.0: - pcie_info['pcie_max_gen'] = '2.0' - elif gt_s <= 8.0: - pcie_info['pcie_max_gen'] = '3.0' - elif gt_s <= 16.0: - pcie_info['pcie_max_gen'] = '4.0' - else: - pcie_info['pcie_max_gen'] = '5.0' - except: - pass - - # Get maximum link width - try: - with open(f'{sys_path}/max_link_width', 'r') as f: - width = f.read().strip() - pcie_info['pcie_max_width'] = f'x{width}' - except: - pass - - # Alternative method using lspci if sysfs doesn't work - if not pcie_info['pcie_gen']: - try: - # Get PCI address for this NVMe device - pci_address = os.path.basename(os.readlink(f'{sys_path}')) - - # Use lspci to get detailed PCIe information - result = subprocess.run(['lspci', '-vvv', '-s', pci_address], - capture_output=True, text=True, timeout=5) - if result.returncode == 0: - for line in result.stdout.split('\n'): - # Look for "LnkSta:" line which shows current link status - if 'LnkSta:' in line: - # Example: "LnkSta: Speed 8GT/s, Width x4" - if 'Speed' in line: - speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) - if speed_match: - gt_s = float(speed_match.group(1)) - if gt_s <= 2.5: - pcie_info['pcie_gen'] = '1.0' - elif gt_s <= 5.0: - pcie_info['pcie_gen'] = '2.0' - elif gt_s <= 8.0: - pcie_info['pcie_gen'] = '3.0' - elif gt_s <= 16.0: - pcie_info['pcie_gen'] = '4.0' - else: - pcie_info['pcie_gen'] = '5.0' - - if 'Width' in line: - width_match = re.search(r'Width\s+x(\d+)', line) - if width_match: - pcie_info['pcie_width'] = 
f'x{width_match.group(1)}' + pci_address = os.path.basename(os.readlink(sys_path)) + print(f"[v0] PCI address for {disk_name}: {pci_address}") + + # Use lspci to get detailed PCIe information + result = subprocess.run(['lspci', '-vvv', '-s', pci_address], + capture_output=True, text=True, timeout=5) + if result.returncode == 0: + print(f"[v0] lspci output for {pci_address}:") + for line in result.stdout.split('\n'): + # Look for "LnkSta:" line which shows current link status + if 'LnkSta:' in line: + print(f"[v0] Found LnkSta: {line}") + # Example: "LnkSta: Speed 8GT/s, Width x4" + if 'Speed' in line: + speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_gen'] = '4.0' + else: + pcie_info['pcie_gen'] = '5.0' + print(f"[v0] Current PCIe gen: {pcie_info['pcie_gen']}") - # Look for "LnkCap:" line which shows maximum capabilities - elif 'LnkCap:' in line: - if 'Speed' in line: - speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) - if speed_match: - gt_s = float(speed_match.group(1)) - if gt_s <= 2.5: - pcie_info['pcie_max_gen'] = '1.0' - elif gt_s <= 5.0: - pcie_info['pcie_max_gen'] = '2.0' - elif gt_s <= 8.0: - pcie_info['pcie_max_gen'] = '3.0' - elif gt_s <= 16.0: - pcie_info['pcie_max_gen'] = '4.0' - else: - pcie_info['pcie_max_gen'] = '5.0' - - if 'Width' in line: - width_match = re.search(r'Width\s+x(\d+)', line) - if width_match: - pcie_info['pcie_max_width'] = f'x{width_match.group(1)}' - except Exception as e: - # print(f"[v0] Error getting PCIe info via lspci for {disk_name}: {e}") - pass + if 'Width' in line: + width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_width'] = f'x{width_match.group(1)}' + print(f"[v0] Current PCIe width: {pcie_info['pcie_width']}") + + # Look for "LnkCap:" line which shows maximum capabilities + elif 'LnkCap:' in line: + print(f"[v0] Found LnkCap: {line}") + if 'Speed' in line: + speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_max_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_max_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_max_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_max_gen'] = '4.0' + else: + pcie_info['pcie_max_gen'] = '5.0' + print(f"[v0] Max PCIe gen: {pcie_info['pcie_max_gen']}") + + if 'Width' in line: + width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_max_width'] = f'x{width_match.group(1)}' + print(f"[v0] Max PCIe width: {pcie_info['pcie_max_width']}") + else: + print(f"[v0] lspci failed with return code: {result.returncode}") + except Exception as e: + print(f"[v0] Error getting PCIe info via lspci: {e}") + import traceback + traceback.print_exc() + else: + print(f"[v0] sys_path does not exist: {sys_path}") + alt_sys_path = f'/sys/block/{disk_name}/device/device' + print(f"[v0] Trying alternative path: {alt_sys_path}, exists: {os.path.exists(alt_sys_path)}") except Exception as e: - # print(f"[v0] Error getting PCIe link speed for {disk_name}: {e}") - pass + print(f"[v0] Error in get_pcie_link_speed for {disk_name}: {e}") + import traceback + traceback.print_exc() + print(f"[v0] Final PCIe info for {disk_name}: {pcie_info}") return pcie_info -# END OF ADDED FUNCTION +# END OF CHANGES FOR get_pcie_link_speed 
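# Illustrative aside, not part of the patch: the GT/s -> PCIe-generation
# ladder above is repeated in every parsing branch (lspci LnkSta, LnkCap,
# and the alternative-path fallback). Assuming the same cutoffs
# (2.5 / 5.0 / 8.0 / 16.0 GT/s), a table-driven helper collapses the
# duplicated if/elif chains; the helper name is hypothetical.
_PCIE_GEN_STEPS = ((2.5, '1.0'), (5.0, '2.0'), (8.0, '3.0'), (16.0, '4.0'))

def _pcie_gen_from_gt_s(gt_s: float) -> str:
    """Return the PCIe generation string for a link speed in GT/s."""
    for limit, gen in _PCIE_GEN_STEPS:
        if gt_s <= limit:
            return gen
    return '5.0'  # above 16 GT/s, matching the fallback used in the patch

# Each branch would then reduce to:
#     pcie_info['pcie_gen'] = _pcie_gen_from_gt_s(gt_s)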
def get_smart_data(disk_name): """Get SMART data for a specific disk - Enhanced with multiple device type attempts""" @@ -3539,7 +3499,7 @@ def get_detailed_gpu_info(gpu): mem_clock = clocks['GFX_MCLK'] if 'value' in mem_clock: detailed_info['clock_memory'] = f"{mem_clock['value']} MHz" - # print(f"[v0] Memory Clock: {detailed_info['clock_memory']}", flush=True) + # print(f"[v0] Memory Clock: {detailed_info['clock_memory']} MHz", flush=True) pass data_retrieved = True @@ -4890,7 +4850,7 @@ def api_network_interface_metrics(interface_name): for point in all_data: filtered_point = {'time': point.get('time')} # Add network fields if they exist - for key in ['netin', 'netout', 'diskread', 'diskwrite']: + for key in ['netin', 'netout']: if key in point: filtered_point[key] = point[key] rrd_data.append(filtered_point) @@ -5802,10 +5762,6 @@ def api_prometheus(): mem_used_bytes = mem_used * 1024 * 1024 # Convert MiB to bytes mem_total_bytes = mem_total * 1024 * 1024 - metrics.append(f'# HELP proxmox_gpu_memory_used_bytes GPU memory used in bytes') - metrics.append(f'# TYPE proxmox_gpu_memory_used_bytes gauge') - metrics.append(f'proxmox_gpu_memory_used_bytes{{node="{node}",gpu="{gpu_name}",vendor="{gpu_vendor}",slot="{gpu_slot}"}} {mem_used_bytes} {timestamp}') - metrics.append(f'# HELP proxmox_gpu_memory_total_bytes GPU memory total in bytes') metrics.append(f'# TYPE proxmox_gpu_memory_total_bytes gauge') metrics.append(f'proxmox_gpu_memory_total_bytes{{node="{node}",gpu="{gpu_name}",vendor="{gpu_vendor}",slot="{gpu_slot}"}} {mem_total_bytes} {timestamp}') From 711d57d91f033abd7dfc9e9e9663bc97cb89dfd7 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 15:09:23 +0100 Subject: [PATCH 12/41] Update flask_server.py --- AppImage/scripts/flask_server.py | 79 ++++++++++++++++++++++++++++++-- 1 file changed, 76 insertions(+), 3 deletions(-) diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index ff56ca8..e3bbb2c 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -1202,8 +1202,14 @@ def get_pcie_link_speed(disk_name): try: # For NVMe drives, get PCIe information from sysfs if disk_name.startswith('nvme'): - controller = disk_name.split('n')[0] # nvme0n1 -> nvme0 + # Extract controller name properly using regex + import re + match = re.match(r'(nvme\d+)n\d+', disk_name) + if not match: + print(f"[v0] Could not extract controller from {disk_name}") + return pcie_info + controller = match.group(1) # nvme0n1 -> nvme0 print(f"[v0] Getting PCIe info for {disk_name}, controller: {controller}") # Path to PCIe device in sysfs @@ -1282,6 +1288,74 @@ def get_pcie_link_speed(disk_name): print(f"[v0] sys_path does not exist: {sys_path}") alt_sys_path = f'/sys/block/{disk_name}/device/device' print(f"[v0] Trying alternative path: {alt_sys_path}, exists: {os.path.exists(alt_sys_path)}") + + if os.path.exists(alt_sys_path): + try: + # Get PCI address from the alternative path + pci_address = os.path.basename(os.readlink(alt_sys_path)) + print(f"[v0] PCI address from alt path for {disk_name}: {pci_address}") + + # Use lspci to get detailed PCIe information + result = subprocess.run(['lspci', '-vvv', '-s', pci_address], + capture_output=True, text=True, timeout=5) + if result.returncode == 0: + print(f"[v0] lspci output for {pci_address} (from alt path):") + for line in result.stdout.split('\n'): + # Look for "LnkSta:" line which shows current link status + if 'LnkSta:' in line: + print(f"[v0] Found LnkSta: {line}") + if 'Speed' in line: + 
speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_gen'] = '4.0' + else: + pcie_info['pcie_gen'] = '5.0' + print(f"[v0] Current PCIe gen: {pcie_info['pcie_gen']}") + + if 'Width' in line: + width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_width'] = f'x{width_match.group(1)}' + print(f"[v0] Current PCIe width: {pcie_info['pcie_width']}") + + # Look for "LnkCap:" line which shows maximum capabilities + elif 'LnkCap:' in line: + print(f"[v0] Found LnkCap: {line}") + if 'Speed' in line: + speed_match = re.search(r'Speed\s+([\d.]+)GT/s', line) + if speed_match: + gt_s = float(speed_match.group(1)) + if gt_s <= 2.5: + pcie_info['pcie_max_gen'] = '1.0' + elif gt_s <= 5.0: + pcie_info['pcie_max_gen'] = '2.0' + elif gt_s <= 8.0: + pcie_info['pcie_max_gen'] = '3.0' + elif gt_s <= 16.0: + pcie_info['pcie_max_gen'] = '4.0' + else: + pcie_info['pcie_max_gen'] = '5.0' + print(f"[v0] Max PCIe gen: {pcie_info['pcie_max_gen']}") + + if 'Width' in line: + width_match = re.search(r'Width\s+x(\d+)', line) + if width_match: + pcie_info['pcie_max_width'] = f'x{width_match.group(1)}' + print(f"[v0] Max PCIe width: {pcie_info['pcie_max_width']}") + else: + print(f"[v0] lspci failed with return code: {result.returncode}") + except Exception as e: + print(f"[v0] Error getting PCIe info from alt path: {e}") + import traceback + traceback.print_exc() except Exception as e: print(f"[v0] Error in get_pcie_link_speed for {disk_name}: {e}") @@ -1291,7 +1365,7 @@ def get_pcie_link_speed(disk_name): print(f"[v0] Final PCIe info for {disk_name}: {pcie_info}") return pcie_info -# END OF CHANGES FOR get_pcie_link_speed +# get_pcie_link_speed function definition ends here def get_smart_data(disk_name): """Get SMART data for a specific disk - Enhanced with multiple device type attempts""" @@ -3736,7 +3810,6 @@ def get_detailed_gpu_info(gpu): else: # print(f"[v0] No fdinfo section found in device data", flush=True) pass - detailed_info['processes'] = [] if data_retrieved: detailed_info['has_monitoring_tool'] = True From 1d6b8951e8cd06476189b1a550f399eed42efa29 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 15:28:27 +0100 Subject: [PATCH 13/41] Update hardware.tsx --- AppImage/components/hardware.tsx | 47 +++++++++----------------------- 1 file changed, 13 insertions(+), 34 deletions(-) diff --git a/AppImage/components/hardware.tsx b/AppImage/components/hardware.tsx index 0a75bb6..53e0b2a 100644 --- a/AppImage/components/hardware.tsx +++ b/AppImage/components/hardware.tsx @@ -1686,7 +1686,6 @@ export default function Hardware() { ? `${device.pcie_max_gen} ${device.pcie_max_width}`.trim() : null - // Check if running at lower speed than maximum const isLowerSpeed = max && current !== max return { @@ -1750,8 +1749,8 @@ export default function Hardware() { {linkSpeed && (
{linkSpeed.text} - {linkSpeed.isWarning && linkSpeed.maxText && ( - (max: {linkSpeed.maxText}) + {linkSpeed.maxText && linkSpeed.isWarning && ( + (max: {linkSpeed.maxText}) )}
)} @@ -1837,47 +1836,27 @@ export default function Hardware() { <>
Current Link Speed - + {selectedDisk.pcie_gen || "PCIe"} {selectedDisk.pcie_width || ""}
{selectedDisk.pcie_max_gen && selectedDisk.pcie_max_width && (
Maximum Link Speed - + {selectedDisk.pcie_max_gen} {selectedDisk.pcie_max_width}
)} - {/* Warning if running at lower speed */} - {selectedDisk.pcie_max_gen && - selectedDisk.pcie_max_width && - `${selectedDisk.pcie_gen} ${selectedDisk.pcie_width}` !== - `${selectedDisk.pcie_max_gen} ${selectedDisk.pcie_max_width}` && ( -
-
- - - -
-

Performance Notice

-

- This drive is running at {selectedDisk.pcie_gen} {selectedDisk.pcie_width} but - supports up to {selectedDisk.pcie_max_gen} {selectedDisk.pcie_max_width}. Check if the - slot supports the maximum speed or if the drive is properly seated. -

-
-
-
- )} ) : (
From 8abef33840c02052557ef6123eef835a465f810f Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 17:37:32 +0100 Subject: [PATCH 14/41] Update build_appimage.sh --- AppImage/scripts/build_appimage.sh | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/AppImage/scripts/build_appimage.sh b/AppImage/scripts/build_appimage.sh index 66493f3..9e84245 100644 --- a/AppImage/scripts/build_appimage.sh +++ b/AppImage/scripts/build_appimage.sh @@ -279,6 +279,7 @@ pip3 install --target "$APP_DIR/usr/lib/python3/dist-packages" \ flask-cors \ psutil \ requests \ + PyJWT \ googletrans==4.0.0-rc1 \ httpx==0.13.3 \ httpcore==0.9.1 \ @@ -321,10 +322,6 @@ echo "🔧 Installing hardware monitoring tools..." mkdir -p "$WORK_DIR/debs" cd "$WORK_DIR/debs" - -# ============================================================== - - echo "📥 Downloading hardware monitoring tools (dynamic via APT)..." dl_pkg() { @@ -361,21 +358,12 @@ dl_pkg() { return 1 } -mkdir -p "$WORK_DIR/debs" -cd "$WORK_DIR/debs" - - dl_pkg "ipmitool.deb" "ipmitool" || true dl_pkg "libfreeipmi17.deb" "libfreeipmi17" || true dl_pkg "lm-sensors.deb" "lm-sensors" || true dl_pkg "nut-client.deb" "nut-client" || true dl_pkg "libupsclient.deb" "libupsclient6" "libupsclient5" "libupsclient4" || true - -# dl_pkg "nvidia-smi.deb" "nvidia-smi" "nvidia-utils" "nvidia-utils-535" "nvidia-utils-550" || true -# dl_pkg "intel-gpu-tools.deb" "intel-gpu-tools" || true -# dl_pkg "radeontop.deb" "radeontop" || true - echo "📦 Extracting .deb packages into AppDir..." extracted_count=0 shopt -s nullglob @@ -395,7 +383,6 @@ else echo "✅ Extracted $extracted_count package(s)" fi - if [ -d "$APP_DIR/bin" ]; then echo "📋 Normalizing /bin -> /usr/bin" mkdir -p "$APP_DIR/usr/bin" @@ -403,24 +390,20 @@ if [ -d "$APP_DIR/bin" ]; then rm -rf "$APP_DIR/bin" fi - echo "🔍 Sanity check (ldd + presence of libfreeipmi)" export LD_LIBRARY_PATH="$APP_DIR/lib:$APP_DIR/lib/x86_64-linux-gnu:$APP_DIR/usr/lib:$APP_DIR/usr/lib/x86_64-linux-gnu" - if ! find "$APP_DIR/usr/lib" "$APP_DIR/lib" -maxdepth 3 -name 'libfreeipmi.so.17*' | grep -q .; then echo "❌ libfreeipmi.so.17 not found inside AppDir (ipmitool will fail)" exit 1 fi - if [ -x "$APP_DIR/usr/bin/ipmitool" ] && ldd "$APP_DIR/usr/bin/ipmitool" | grep -q 'not found'; then echo "❌ ipmitool has unresolved libs:" ldd "$APP_DIR/usr/bin/ipmitool" | grep 'not found' || true exit 1 fi - if [ -x "$APP_DIR/usr/bin/upsc" ] && ldd "$APP_DIR/usr/bin/upsc" | grep -q 'not found'; then echo "⚠️ upsc has unresolved libs, trying to auto-fix..." missing="$(ldd "$APP_DIR/usr/bin/upsc" | awk '/not found/{print $1}' | tr -d ' ')" @@ -463,12 +446,6 @@ echo "✅ Sanity check OK (ipmitool/upsc ready; libfreeipmi present)" [ -x "$APP_DIR/usr/bin/intel_gpu_top" ] && echo " • intel-gpu-tools: OK" || echo " • intel-gpu-tools: missing" [ -x "$APP_DIR/usr/bin/radeontop" ] && echo " • radeontop: OK" || echo " • radeontop: missing" - - -# ============================================================== - - - # Build AppImage echo "🔨 Building unified AppImage v${VERSION}..." 
cd "$WORK_DIR" From 6c5eb156a15b37c4cd5685c9a5f28e5be8f16c36 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 18:07:13 +0100 Subject: [PATCH 15/41] Update AppImage --- AppImage/components/auth-setup.tsx | 32 +- AppImage/components/onboarding-carousel.tsx | 97 ++++- AppImage/components/proxmox-dashboard.tsx | 53 ++- AppImage/components/settings.tsx | 425 ++++++++++++++++++++ AppImage/components/sidebar.tsx | 7 +- 5 files changed, 573 insertions(+), 41 deletions(-) create mode 100644 AppImage/components/settings.tsx diff --git a/AppImage/components/auth-setup.tsx b/AppImage/components/auth-setup.tsx index a6ff242..7967cde 100644 --- a/AppImage/components/auth-setup.tsx +++ b/AppImage/components/auth-setup.tsx @@ -22,12 +22,9 @@ export function AuthSetup({ onComplete }: AuthSetupProps) { const [loading, setLoading] = useState(false) useEffect(() => { - // Check if onboarding is complete and auth setup is needed const hasSeenOnboarding = localStorage.getItem("proxmenux-onboarding-seen") - const authSetupComplete = localStorage.getItem("proxmenux-auth-setup-complete") - if (hasSeenOnboarding && !authSetupComplete) { - // Small delay to show after onboarding closes + if (hasSeenOnboarding) { setTimeout(() => setOpen(true), 500) } }, []) @@ -37,19 +34,25 @@ export function AuthSetup({ onComplete }: AuthSetupProps) { setError("") try { - const response = await fetch(getApiUrl("/api/auth/setup"), { + console.log("[v0] Skipping authentication setup...") + const response = await fetch(getApiUrl("/api/auth/skip"), { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ enable_auth: false }), }) - if (!response.ok) throw new Error("Failed to save preference") + const data = await response.json() + console.log("[v0] Auth skip response:", data) - localStorage.setItem("proxmenux-auth-setup-complete", "true") + if (!response.ok) { + throw new Error(data.error || "Failed to skip authentication") + } + + console.log("[v0] Authentication skipped successfully") setOpen(false) onComplete() } catch (err) { - setError("Failed to save preference. Please try again.") + console.error("[v0] Auth skip error:", err) + setError(err instanceof Error ? err.message : "Failed to save preference") } finally { setLoading(false) } @@ -76,29 +79,32 @@ export function AuthSetup({ onComplete }: AuthSetupProps) { setLoading(true) try { + console.log("[v0] Setting up authentication...") const response = await fetch(getApiUrl("/api/auth/setup"), { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ username, password, - enable_auth: true, }), }) const data = await response.json() + console.log("[v0] Auth setup response:", data) if (!response.ok) { throw new Error(data.error || "Failed to setup authentication") } - // Save token - localStorage.setItem("proxmenux-auth-token", data.token) - localStorage.setItem("proxmenux-auth-setup-complete", "true") + if (data.token) { + localStorage.setItem("proxmenux-auth-token", data.token) + console.log("[v0] Authentication setup successful") + } setOpen(false) onComplete() } catch (err) { + console.error("[v0] Auth setup error:", err) setError(err instanceof Error ? 
err.message : "Failed to setup authentication") } finally { setLoading(false) diff --git a/AppImage/components/onboarding-carousel.tsx b/AppImage/components/onboarding-carousel.tsx index 32fddb6..866fecb 100644 --- a/AppImage/components/onboarding-carousel.tsx +++ b/AppImage/components/onboarding-carousel.tsx @@ -17,8 +17,13 @@ import { Cpu, FileText, Rocket, + Zap, + Shield, + Link2, + Gauge, } from "lucide-react" import Image from "next/image" +import { Checkbox } from "./ui/checkbox" interface OnboardingSlide { id: number @@ -27,6 +32,7 @@ interface OnboardingSlide { image?: string icon: React.ReactNode gradient: string + features?: { icon: React.ReactNode; text: string }[] } const slides: OnboardingSlide[] = [ @@ -40,6 +46,35 @@ const slides: OnboardingSlide[] = [ }, { id: 1, + title: "What's New in This Version", + description: "We've added exciting new features and improvements to make ProxMenux Monitor even better!", + icon: , + gradient: "from-amber-500 via-orange-500 to-red-500", + features: [ + { + icon: , + text: "Proxy Support - Access ProxMenux through reverse proxies with full functionality", + }, + { + icon: , + text: "Authentication System - Secure your dashboard with password protection", + }, + { + icon: , + text: "PCIe Link Speed Detection - View NVMe drive connection speeds and detect performance issues", + }, + { + icon: , + text: "Enhanced Storage Display - Better formatting for disk sizes (auto-converts GB to TB when needed)", + }, + { + icon: , + text: "SATA/SAS Information - View detailed interface information for all storage devices", + }, + ], + }, + { + id: 2, title: "System Overview", description: "Monitor your server's status in real-time: CPU, memory, temperature, system load and more. Everything in an intuitive and easy-to-understand dashboard.", @@ -48,7 +83,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-blue-500 to-cyan-500", }, { - id: 2, + id: 3, title: "Storage Management", description: "Visualize the status of all your disks and volumes. Detailed information on capacity, usage, SMART health, temperature and performance of each storage device.", @@ -57,7 +92,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-cyan-500 to-teal-500", }, { - id: 3, + id: 4, title: "Network Metrics", description: "Monitor network traffic in real-time. Bandwidth statistics, active interfaces, transfer speeds and historical usage graphs.", @@ -66,7 +101,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-teal-500 to-green-500", }, { - id: 4, + id: 5, title: "Virtual Machines & Containers", description: "Manage all your VMs and LXC containers from one place. Status, allocated resources, current usage and quick controls for each virtual machine.", @@ -75,7 +110,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-green-500 to-emerald-500", }, { - id: 5, + id: 6, title: "Hardware Information", description: "Complete details of your server hardware: CPU, RAM, GPU, disks, network, UPS and more. Technical specifications, models, serial numbers and status of each component.", @@ -84,7 +119,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-emerald-500 to-blue-500", }, { - id: 6, + id: 7, title: "System Logs", description: "Access system logs in real-time. Filter by event type, search for specific errors and keep complete track of your server activity. 
Download the displayed logs for further analysis.", @@ -93,7 +128,7 @@ const slides: OnboardingSlide[] = [ gradient: "from-blue-500 to-indigo-500", }, { - id: 7, + id: 8, title: "Ready for the Future!", description: "ProxMenux Monitor is prepared to receive updates and improvements that will be added gradually, improving the user experience and being able to execute ProxMenux functions from the web panel.", @@ -106,6 +141,7 @@ export function OnboardingCarousel() { const [open, setOpen] = useState(false) const [currentSlide, setCurrentSlide] = useState(0) const [direction, setDirection] = useState<"next" | "prev">("next") + const [dontShowAgain, setDontShowAgain] = useState(false) useEffect(() => { const hasSeenOnboarding = localStorage.getItem("proxmenux-onboarding-seen") @@ -119,6 +155,9 @@ export function OnboardingCarousel() { setDirection("next") setCurrentSlide(currentSlide + 1) } else { + if (dontShowAgain) { + localStorage.setItem("proxmenux-onboarding-seen", "true") + } setOpen(false) } } @@ -131,11 +170,9 @@ export function OnboardingCarousel() { } const handleSkip = () => { - setOpen(false) - } - - const handleDontShowAgain = () => { - localStorage.setItem("proxmenux-onboarding-seen", "true") + if (dontShowAgain) { + localStorage.setItem("proxmenux-onboarding-seen", "true") + } setOpen(false) } @@ -205,6 +242,20 @@ export function OnboardingCarousel() {

+ {slide.features && ( +
+ {slide.features.map((feature, index) => ( +
+
{feature.icon}
+

{feature.text}

+
+ ))} +
+ )} + {/* Progress dots */}
{slides.map((_, index) => ( @@ -255,17 +306,19 @@ export function OnboardingCarousel() {
- {/* Don't show again */} - {currentSlide === slides.length - 1 && ( -
- -
- )} +
+ setDontShowAgain(checked as boolean)} + /> + +
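The hunk above replaces the one-shot "Don't show again" button with a checkbox whose value is only persisted when the carousel actually closes, on both the skip path and the final-slide path. A minimal TypeScript sketch of that pattern, where closeCarousel and shouldShowOnboarding are hypothetical names standing in for the component's handlers:

// Key the carousel writes once the user opts out (from the patch above).
const STORAGE_KEY = "proxmenux-onboarding-seen"

// Hypothetical close handler: persist the opt-out only at close time,
// so toggling the checkbox while browsing slides has no side effects.
function closeCarousel(dontShowAgain: boolean, setOpen: (open: boolean) => void): void {
  if (dontShowAgain) {
    localStorage.setItem(STORAGE_KEY, "true")
  }
  setOpen(false)
}

// Hypothetical mount check: the dialog opens only if the flag was never written.
function shouldShowOnboarding(): boolean {
  return localStorage.getItem(STORAGE_KEY) === null
}

This mirrors how both handleSkip and the last-slide branch of handleNext now write the same flag before calling setOpen(false).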
diff --git a/AppImage/components/proxmox-dashboard.tsx b/AppImage/components/proxmox-dashboard.tsx index f736556..c81974a 100644 --- a/AppImage/components/proxmox-dashboard.tsx +++ b/AppImage/components/proxmox-dashboard.tsx @@ -13,6 +13,7 @@ import { SystemLogs } from "./system-logs" import { OnboardingCarousel } from "./onboarding-carousel" import { AuthSetup } from "./auth-setup" import { Login } from "./login" +import { Settings } from "./settings" import { getApiUrl } from "../lib/api-config" import { RefreshCw, @@ -27,6 +28,7 @@ import { Box, Cpu, FileText, + SettingsIcon, } from "lucide-react" import Image from "next/image" import { ThemeToggle } from "./theme-toggle" @@ -220,6 +222,8 @@ export function ProxmoxDashboard() { return "Hardware" case "logs": return "System Logs" + case "settings": + return "Settings" default: return "Navigation Menu" } @@ -285,31 +289,47 @@ export function ProxmoxDashboard() { useEffect(() => { const checkAuth = async () => { + console.log("[v0] Checking authentication status...") try { const token = localStorage.getItem("proxmenux-auth-token") const headers: HeadersInit = { "Content-Type": "application/json" } if (token) { headers["Authorization"] = `Bearer ${token}` + console.log("[v0] Found token in localStorage") + } else { + console.log("[v0] No token found in localStorage") } - const response = await fetch(getApiUrl("/api/auth/status"), { + const apiUrl = getApiUrl("/api/auth/status") + console.log("[v0] Calling auth status API:", apiUrl) + + const response = await fetch(apiUrl, { headers, }) const data = await response.json() + console.log("[v0] Auth status response:", data) + + const authConfigured = data.auth_enabled || data.authenticated setAuthRequired(data.auth_enabled) setIsAuthenticated(data.authenticated) - setAuthSetupComplete(localStorage.getItem("proxmenux-auth-setup-complete") === "true") + setAuthSetupComplete(authConfigured) setAuthChecked(true) - // Setup token refresh if authenticated + console.log("[v0] Auth state:", { + authRequired: data.auth_enabled, + isAuthenticated: data.authenticated, + authSetupComplete: authConfigured, + }) + if (data.authenticated && token) { setupTokenRefresh() } } catch (error) { console.error("[v0] Failed to check auth status:", error) + setAuthSetupComplete(false) setAuthChecked(true) } } @@ -456,7 +476,7 @@ export function ProxmoxDashboard() { >
- + System Logs + + Settings + @@ -601,6 +627,21 @@ export function ProxmoxDashboard() { System Logs +
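The checkAuth effect added earlier in this proxmox-dashboard.tsx patch derives a single authConfigured flag from the /api/auth/status response instead of trusting a locally stored marker, and attaches the saved token as a Bearer header when present. A condensed sketch of that flow, assuming the getApiUrl helper the component already imports:

import { getApiUrl } from "../lib/api-config"

interface AuthStatus {
  authRequired: boolean
  isAuthenticated: boolean
  authSetupComplete: boolean
}

// Sketch of the status check: send the stored token if one exists and
// let the server report whether auth is enabled and the session valid.
async function checkAuthStatus(): Promise<AuthStatus> {
  const token = localStorage.getItem("proxmenux-auth-token")
  const headers: HeadersInit = { "Content-Type": "application/json" }
  if (token) headers["Authorization"] = `Bearer ${token}`

  const response = await fetch(getApiUrl("/api/auth/status"), { headers })
  const data = await response.json()

  // Auth counts as configured when it is enabled or a valid session
  // already exists, replacing the old localStorage-only flag.
  const authConfigured = data.auth_enabled || data.authenticated

  return {
    authRequired: data.auth_enabled,
    isAuthenticated: data.authenticated,
    authSetupComplete: authConfigured,
  }
}

When a token-backed session is confirmed, the component also calls setupTokenRefresh() so the session stays alive.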
@@ -633,6 +674,10 @@ export function ProxmoxDashboard() { + + + +
+ + ) } From e8e4b728cede2c87b0a3c08ea00421c5db5c767d Mon Sep 17 00:00:00 2001 From: MacRimi Date: Tue, 4 Nov 2025 23:00:37 +0100 Subject: [PATCH 34/41] Update proxmox-dashboard.tsx --- AppImage/components/proxmox-dashboard.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AppImage/components/proxmox-dashboard.tsx b/AppImage/components/proxmox-dashboard.tsx index 0798046..71651d1 100644 --- a/AppImage/components/proxmox-dashboard.tsx +++ b/AppImage/components/proxmox-dashboard.tsx @@ -556,7 +556,7 @@ export function ProxmoxDashboard() { - + ) } From 5b0d55c1a24ffcb3380b6a4493ccaf6a1e6a216f Mon Sep 17 00:00:00 2001 From: MacRimi Date: Wed, 5 Nov 2025 18:30:31 +0100 Subject: [PATCH 35/41] Update health_monitor.py --- AppImage/scripts/health_monitor.py | 1479 +++++++++++++++++++++------- 1 file changed, 1124 insertions(+), 355 deletions(-) diff --git a/AppImage/scripts/health_monitor.py b/AppImage/scripts/health_monitor.py index 39f72be..7eea8f1 100644 --- a/AppImage/scripts/health_monitor.py +++ b/AppImage/scripts/health_monitor.py @@ -1,407 +1,1176 @@ """ -Health Monitor Module -Provides comprehensive health checks for the Proxmox system including: -- CPU and Memory usage -- Storage health (pools, disks, remote storage) -- Network health (interface errors) -- VM status -- System events/logs errors +ProxMenux Health Monitor Module +Provides comprehensive, lightweight health checks for Proxmox systems. +Optimized for minimal system impact with intelligent thresholds and hysteresis. + +Author: MacRimi +Version: 1.0 (Light Health Logic) """ import psutil import subprocess import json -from typing import Dict, List, Any +import time +import os +from typing import Dict, List, Any, Tuple +from datetime import datetime, timedelta +from collections import defaultdict class HealthMonitor: - """Monitors system health across multiple components""" + """ + Monitors system health across multiple components with minimal impact. + Implements hysteresis, intelligent caching, and progressive escalation. 
+ """ - # Thresholds - CPU_WARNING = 75 - CPU_CRITICAL = 90 - MEMORY_WARNING = 75 - MEMORY_CRITICAL = 90 + # CPU Thresholds + CPU_WARNING = 85 + CPU_CRITICAL = 95 + CPU_RECOVERY = 75 + CPU_WARNING_DURATION = 60 # seconds + CPU_CRITICAL_DURATION = 120 # seconds + CPU_RECOVERY_DURATION = 120 # seconds + + # Memory Thresholds + MEMORY_WARNING = 85 + MEMORY_CRITICAL = 95 + MEMORY_DURATION = 60 # seconds + SWAP_WARNING_DURATION = 300 # 5 minutes + SWAP_CRITICAL_PERCENT = 5 # 5% of RAM + SWAP_CRITICAL_DURATION = 120 # 2 minutes + + # Storage Thresholds + STORAGE_WARNING = 85 + STORAGE_CRITICAL = 95 + + # Temperature Thresholds + TEMP_WARNING = 80 + TEMP_CRITICAL = 90 + + # Network Thresholds + NETWORK_LATENCY_WARNING = 100 # ms + NETWORK_LATENCY_CRITICAL = 300 # ms + NETWORK_TIMEOUT = 0.9 # seconds + NETWORK_INACTIVE_DURATION = 600 # 10 minutes + + # Log Thresholds + LOG_ERRORS_WARNING = 5 + LOG_ERRORS_CRITICAL = 6 + LOG_WARNINGS_WARNING = 10 + LOG_WARNINGS_CRITICAL = 30 + LOG_CHECK_INTERVAL = 300 # 5 minutes + + # Critical keywords for immediate escalation + CRITICAL_LOG_KEYWORDS = [ + 'I/O error', 'EXT4-fs error', 'XFS', 'LVM activation failed', + 'md/raid: device failed', 'Out of memory', 'kernel panic', + 'filesystem read-only', 'cannot mount' + ] + + # PVE Critical Services + PVE_SERVICES = ['pveproxy', 'pvedaemon', 'pvestatd', 'pve-cluster'] def __init__(self): - self.checks = [] - + """Initialize health monitor with state tracking""" + self.state_history = defaultdict(list) # For hysteresis + self.last_check_times = {} # Cache check times + self.cached_results = {} # Cache results + self.network_baseline = {} # Network traffic baseline + self.io_error_history = defaultdict(list) # I/O error tracking + def get_overall_status(self) -> Dict[str, Any]: - """Get overall health status summary""" - checks = self.run_all_checks() + """Get overall health status summary with minimal overhead""" + details = self.get_detailed_status() - # Determine overall status - critical_count = sum(1 for c in checks if c['status'] == 'critical') - warning_count = sum(1 for c in checks if c['status'] == 'warning') + overall_status = details.get('overall', 'OK') + summary = details.get('summary', '') - if critical_count > 0: - overall_status = 'critical' - elif warning_count > 0: - overall_status = 'warning' - else: - overall_status = 'healthy' + # Count statuses + critical_count = 0 + warning_count = 0 + ok_count = 0 + + for category, data in details.get('details', {}).items(): + if isinstance(data, dict): + status = data.get('status', 'OK') + if status == 'CRITICAL': + critical_count += 1 + elif status == 'WARNING': + warning_count += 1 + elif status == 'OK': + ok_count += 1 return { 'status': overall_status, + 'summary': summary, 'critical_count': critical_count, 'warning_count': warning_count, - 'healthy_count': len(checks) - critical_count - warning_count, - 'total_checks': len(checks), - 'timestamp': psutil.boot_time() + 'ok_count': ok_count, + 'timestamp': datetime.now().isoformat() } def get_detailed_status(self) -> Dict[str, Any]: - """Get detailed health status with all checks""" - checks = self.run_all_checks() - overall = self.get_overall_status() + """ + Get comprehensive health status with all checks. + Returns JSON structure matching the specification. 
+ """ + details = {} + critical_issues = [] + warning_issues = [] + + # Priority 1: Services PVE / FS / Storage + services_status = self._check_pve_services() + details['services'] = services_status + if services_status['status'] == 'CRITICAL': + critical_issues.append(services_status.get('reason', 'Service failure')) + elif services_status['status'] == 'WARNING': + warning_issues.append(services_status.get('reason', 'Service issue')) + + storage_status = self._check_storage_comprehensive() + details['storage'] = storage_status + for storage_name, storage_data in storage_status.items(): + if isinstance(storage_data, dict): + if storage_data.get('status') == 'CRITICAL': + critical_issues.append(f"{storage_name}: {storage_data.get('reason', 'Storage failure')}") + elif storage_data.get('status') == 'WARNING': + warning_issues.append(f"{storage_name}: {storage_data.get('reason', 'Storage issue')}") + + # Priority 2: Disks / I/O + disks_status = self._check_disks_io() + details['disks'] = disks_status + for disk_name, disk_data in disks_status.items(): + if isinstance(disk_data, dict): + if disk_data.get('status') == 'CRITICAL': + critical_issues.append(f"{disk_name}: {disk_data.get('reason', 'Disk failure')}") + elif disk_data.get('status') == 'WARNING': + warning_issues.append(f"{disk_name}: {disk_data.get('reason', 'Disk issue')}") + + # Priority 3: VM/CT + vms_status = self._check_vms_cts() + details['vms'] = vms_status + if vms_status.get('status') == 'CRITICAL': + critical_issues.append(vms_status.get('reason', 'VM/CT failure')) + elif vms_status.get('status') == 'WARNING': + warning_issues.append(vms_status.get('reason', 'VM/CT issue')) + + # Priority 4: Network + network_status = self._check_network_comprehensive() + details['network'] = network_status + if network_status.get('status') == 'CRITICAL': + critical_issues.append(network_status.get('reason', 'Network failure')) + elif network_status.get('status') == 'WARNING': + warning_issues.append(network_status.get('reason', 'Network issue')) + + # Priority 5: CPU/RAM + cpu_status = self._check_cpu_with_hysteresis() + details['cpu'] = cpu_status + if cpu_status.get('status') == 'WARNING': + warning_issues.append(cpu_status.get('reason', 'CPU high')) + + memory_status = self._check_memory_comprehensive() + details['memory'] = memory_status + if memory_status.get('status') == 'CRITICAL': + critical_issues.append(memory_status.get('reason', 'Memory critical')) + elif memory_status.get('status') == 'WARNING': + warning_issues.append(memory_status.get('reason', 'Memory high')) + + # Priority 6: Logs + logs_status = self._check_logs_lightweight() + details['logs'] = logs_status + if logs_status.get('status') == 'CRITICAL': + critical_issues.append(logs_status.get('reason', 'Critical log errors')) + elif logs_status.get('status') == 'WARNING': + warning_issues.append(logs_status.get('reason', 'Log warnings')) + + # Priority 7: Extras (Security, Certificates, Uptime) + security_status = self._check_security() + details['security'] = security_status + if security_status.get('status') == 'WARNING': + warning_issues.append(security_status.get('reason', 'Security issue')) + + # Determine overall status + if critical_issues: + overall = 'CRITICAL' + summary = '; '.join(critical_issues[:3]) # Top 3 critical issues + elif warning_issues: + overall = 'WARNING' + summary = '; '.join(warning_issues[:3]) # Top 3 warnings + else: + overall = 'OK' + summary = 'All systems operational' return { 'overall': overall, - 'checks': checks + 'summary': summary, + 
'details': details, + 'timestamp': datetime.now().isoformat() } - def run_all_checks(self) -> List[Dict[str, Any]]: - """Run all health checks and return results""" - checks = [] - - # CPU Check - checks.append(self.check_cpu()) - - # Memory Check - checks.append(self.check_memory()) - - # Storage Checks - checks.extend(self.check_storage()) - - # Network Checks - checks.extend(self.check_network()) - - # VM Checks - checks.extend(self.check_vms()) - - # Events/Logs Check - checks.append(self.check_events()) - - return checks - - def check_cpu(self) -> Dict[str, Any]: - """Check CPU usage""" - cpu_percent = psutil.cpu_percent(interval=1) - - if cpu_percent >= self.CPU_CRITICAL: - status = 'critical' - message = f'CPU usage is critically high at {cpu_percent:.1f}%' - elif cpu_percent >= self.CPU_WARNING: - status = 'warning' - message = f'CPU usage is elevated at {cpu_percent:.1f}%' - else: - status = 'healthy' - message = f'CPU usage is normal at {cpu_percent:.1f}%' - - return { - 'category': 'System', - 'name': 'CPU Usage', - 'status': status, - 'value': f'{cpu_percent:.1f}%', - 'message': message, - 'details': { - 'usage': cpu_percent, - 'cores': psutil.cpu_count(), - 'warning_threshold': self.CPU_WARNING, - 'critical_threshold': self.CPU_CRITICAL - } - } - - def check_memory(self) -> Dict[str, Any]: - """Check memory usage""" - memory = psutil.virtual_memory() - mem_percent = memory.percent - - if mem_percent >= self.MEMORY_CRITICAL: - status = 'critical' - message = f'Memory usage is critically high at {mem_percent:.1f}%' - elif mem_percent >= self.MEMORY_WARNING: - status = 'warning' - message = f'Memory usage is elevated at {mem_percent:.1f}%' - else: - status = 'healthy' - message = f'Memory usage is normal at {mem_percent:.1f}%' - - return { - 'category': 'System', - 'name': 'Memory Usage', - 'status': status, - 'value': f'{mem_percent:.1f}%', - 'message': message, - 'details': { - 'usage': mem_percent, - 'total': memory.total, - 'available': memory.available, - 'used': memory.used, - 'warning_threshold': self.MEMORY_WARNING, - 'critical_threshold': self.MEMORY_CRITICAL - } - } - - def check_storage(self) -> List[Dict[str, Any]]: - """Check storage health including ZFS pools and disks""" - checks = [] - - # Check ZFS pools + def _check_cpu_with_hysteresis(self) -> Dict[str, Any]: + """ + Check CPU with hysteresis to avoid flapping alerts. + Requires sustained high usage before triggering. 
+ """ try: - result = subprocess.run(['zpool', 'status'], capture_output=True, text=True, timeout=5) - if result.returncode == 0: - output = result.stdout - - # Parse pool status - pools = self._parse_zpool_status(output) - for pool in pools: - if pool['state'] == 'DEGRADED': - status = 'critical' - message = f"Pool '{pool['name']}' is degraded" - elif pool['state'] == 'FAULTED': - status = 'critical' - message = f"Pool '{pool['name']}' is faulted" - elif pool['state'] == 'OFFLINE': - status = 'critical' - message = f"Pool '{pool['name']}' is offline" - elif pool['errors'] > 0: - status = 'warning' - message = f"Pool '{pool['name']}' has {pool['errors']} errors" - else: - status = 'healthy' - message = f"Pool '{pool['name']}' is healthy" - - checks.append({ - 'category': 'Storage', - 'name': f"ZFS Pool: {pool['name']}", - 'status': status, - 'value': pool['state'], - 'message': message, - 'details': pool - }) - except Exception as e: - checks.append({ - 'category': 'Storage', - 'name': 'ZFS Pools', - 'status': 'warning', - 'value': 'Unknown', - 'message': f'Could not check ZFS pools: {str(e)}', - 'details': {'error': str(e)} - }) - - # Check disk partitions - partitions = psutil.disk_partitions() - for partition in partitions: - try: - usage = psutil.disk_usage(partition.mountpoint) - percent = usage.percent - - if percent >= 95: - status = 'critical' - message = f"Disk '{partition.mountpoint}' is critically full at {percent:.1f}%" - elif percent >= 85: - status = 'warning' - message = f"Disk '{partition.mountpoint}' is getting full at {percent:.1f}%" - else: - status = 'healthy' - message = f"Disk '{partition.mountpoint}' has sufficient space ({percent:.1f}% used)" - - checks.append({ - 'category': 'Storage', - 'name': f"Disk: {partition.mountpoint}", - 'status': status, - 'value': f'{percent:.1f}%', - 'message': message, - 'details': { - 'device': partition.device, - 'mountpoint': partition.mountpoint, - 'fstype': partition.fstype, - 'total': usage.total, - 'used': usage.used, - 'free': usage.free, - 'percent': percent - } - }) - except PermissionError: - continue - - return checks - - def check_network(self) -> List[Dict[str, Any]]: - """Check network interface health (errors, not inactive interfaces)""" - checks = [] - - # Get network interface stats - net_io = psutil.net_io_counters(pernic=True) - net_if_stats = psutil.net_if_stats() - - for interface, stats in net_io.items(): - # Skip loopback - if interface == 'lo': - continue + # Get CPU usage (1 second sample to minimize impact) + cpu_percent = psutil.cpu_percent(interval=1) + current_time = time.time() - # Only check active interfaces - if interface in net_if_stats and net_if_stats[interface].isup: - errors = stats.errin + stats.errout - drops = stats.dropin + stats.dropout - - if errors > 100 or drops > 100: - status = 'critical' - message = f"Interface '{interface}' has {errors} errors and {drops} dropped packets" - elif errors > 10 or drops > 10: - status = 'warning' - message = f"Interface '{interface}' has {errors} errors and {drops} dropped packets" - else: - status = 'healthy' - message = f"Interface '{interface}' is operating normally" - - checks.append({ - 'category': 'Network', - 'name': f"Interface: {interface}", - 'status': status, - 'value': 'Active', - 'message': message, - 'details': { - 'errors_in': stats.errin, - 'errors_out': stats.errout, - 'drops_in': stats.dropin, - 'drops_out': stats.dropout, - 'bytes_sent': stats.bytes_sent, - 'bytes_recv': stats.bytes_recv - } - }) - - return checks - - def check_vms(self) 
-> List[Dict[str, Any]]: - """Check VM status""" - checks = [] - - try: - # Get VM list from qm - result = subprocess.run(['qm', 'list'], capture_output=True, text=True, timeout=5) - if result.returncode == 0: - lines = result.stdout.strip().split('\n')[1:] # Skip header - - running_count = 0 - stopped_count = 0 - error_count = 0 - - for line in lines: - if line.strip(): - parts = line.split() - if len(parts) >= 3: - vm_status = parts[2] - if vm_status == 'running': - running_count += 1 - elif vm_status == 'stopped': - stopped_count += 1 - else: - error_count += 1 - - if error_count > 0: - status = 'warning' - message = f'{error_count} VMs in unexpected state' - else: - status = 'healthy' - message = f'{running_count} running, {stopped_count} stopped' - - checks.append({ - 'category': 'Virtual Machines', - 'name': 'VM Status', - 'status': status, - 'value': f'{running_count + stopped_count} total', - 'message': message, - 'details': { - 'running': running_count, - 'stopped': stopped_count, - 'errors': error_count - } - }) - except Exception as e: - checks.append({ - 'category': 'Virtual Machines', - 'name': 'VM Status', - 'status': 'warning', - 'value': 'Unknown', - 'message': f'Could not check VM status: {str(e)}', - 'details': {'error': str(e)} + # Track state history + state_key = 'cpu_usage' + self.state_history[state_key].append({ + 'value': cpu_percent, + 'time': current_time }) - - return checks + + # Keep only recent history (last 5 minutes) + self.state_history[state_key] = [ + entry for entry in self.state_history[state_key] + if current_time - entry['time'] < 300 + ] + + # Check for sustained high usage + critical_duration = sum( + 1 for entry in self.state_history[state_key] + if entry['value'] >= self.CPU_CRITICAL and + current_time - entry['time'] <= self.CPU_CRITICAL_DURATION + ) + + warning_duration = sum( + 1 for entry in self.state_history[state_key] + if entry['value'] >= self.CPU_WARNING and + current_time - entry['time'] <= self.CPU_WARNING_DURATION + ) + + recovery_duration = sum( + 1 for entry in self.state_history[state_key] + if entry['value'] < self.CPU_RECOVERY and + current_time - entry['time'] <= self.CPU_RECOVERY_DURATION + ) + + # Determine status with hysteresis + if critical_duration >= 2: # 2+ readings in critical range + status = 'CRITICAL' + reason = f'CPU >{self.CPU_CRITICAL}% for {self.CPU_CRITICAL_DURATION}s' + elif warning_duration >= 2 and recovery_duration < 2: + status = 'WARNING' + reason = f'CPU >{self.CPU_WARNING}% for {self.CPU_WARNING_DURATION}s' + else: + status = 'OK' + reason = None + + # Get temperature if available (checked once per minute max) + temp_status = self._check_cpu_temperature() + + result = { + 'status': status, + 'usage': round(cpu_percent, 1), + 'cores': psutil.cpu_count() + } + + if reason: + result['reason'] = reason + + if temp_status: + result['temperature'] = temp_status + if temp_status.get('status') == 'CRITICAL': + result['status'] = 'CRITICAL' + result['reason'] = temp_status.get('reason') + elif temp_status.get('status') == 'WARNING' and status == 'OK': + result['status'] = 'WARNING' + result['reason'] = temp_status.get('reason') + + return result + + except Exception as e: + return {'status': 'UNKNOWN', 'reason': f'CPU check failed: {str(e)}'} - def check_events(self) -> Dict[str, Any]: - """Check system events/logs for errors""" + def _check_cpu_temperature(self) -> Dict[str, Any]: + """Check CPU temperature (cached, max 1 check per minute)""" + cache_key = 'cpu_temp' + current_time = time.time() + + # Check 
cache + if cache_key in self.last_check_times: + if current_time - self.last_check_times[cache_key] < 60: + return self.cached_results.get(cache_key, {}) + try: - # Check journalctl for recent errors + # Try lm-sensors first result = subprocess.run( - ['journalctl', '-p', 'err', '-n', '100', '--no-pager'], + ['sensors', '-A', '-u'], capture_output=True, text=True, - timeout=5 + timeout=2 ) if result.returncode == 0: - error_lines = [line for line in result.stdout.split('\n') if line.strip()] - error_count = len(error_lines) + temps = [] + for line in result.stdout.split('\n'): + if 'temp' in line.lower() and '_input' in line: + try: + temp = float(line.split(':')[1].strip()) + temps.append(temp) + except: + continue - if error_count > 50: - status = 'critical' - message = f'{error_count} errors in recent logs' - elif error_count > 10: - status = 'warning' - message = f'{error_count} errors in recent logs' - else: - status = 'healthy' - message = f'{error_count} errors in recent logs (normal)' - - return { - 'category': 'System Events', - 'name': 'Error Logs', - 'status': status, - 'value': f'{error_count} errors', - 'message': message, - 'details': { - 'error_count': error_count, - 'recent_errors': error_lines[:5] # Last 5 errors + if temps: + max_temp = max(temps) + + if max_temp >= self.TEMP_CRITICAL: + status = 'CRITICAL' + reason = f'CPU temperature {max_temp}°C ≥{self.TEMP_CRITICAL}°C' + elif max_temp >= self.TEMP_WARNING: + status = 'WARNING' + reason = f'CPU temperature {max_temp}°C ≥{self.TEMP_WARNING}°C' + else: + status = 'OK' + reason = None + + temp_result = { + 'status': status, + 'value': round(max_temp, 1), + 'unit': '°C' } + if reason: + temp_result['reason'] = reason + + self.cached_results[cache_key] = temp_result + self.last_check_times[cache_key] = current_time + return temp_result + + # If sensors not available, return UNKNOWN (doesn't penalize) + unknown_result = {'status': 'UNKNOWN', 'reason': 'No temperature sensors available'} + self.cached_results[cache_key] = unknown_result + self.last_check_times[cache_key] = current_time + return unknown_result + + except Exception: + unknown_result = {'status': 'UNKNOWN', 'reason': 'Temperature check unavailable'} + self.cached_results[cache_key] = unknown_result + self.last_check_times[cache_key] = current_time + return unknown_result + + def _check_memory_comprehensive(self) -> Dict[str, Any]: + """Check memory including RAM and swap with sustained thresholds""" + try: + memory = psutil.virtual_memory() + swap = psutil.swap_memory() + current_time = time.time() + + mem_percent = memory.percent + swap_percent = swap.percent if swap.total > 0 else 0 + swap_vs_ram = (swap.used / memory.total * 100) if memory.total > 0 else 0 + + # Track memory state + state_key = 'memory_usage' + self.state_history[state_key].append({ + 'mem_percent': mem_percent, + 'swap_percent': swap_percent, + 'swap_vs_ram': swap_vs_ram, + 'time': current_time + }) + + # Keep only recent history + self.state_history[state_key] = [ + entry for entry in self.state_history[state_key] + if current_time - entry['time'] < 600 + ] + + # Check sustained high memory + mem_critical = sum( + 1 for entry in self.state_history[state_key] + if entry['mem_percent'] >= self.MEMORY_CRITICAL and + current_time - entry['time'] <= self.MEMORY_DURATION + ) + + mem_warning = sum( + 1 for entry in self.state_history[state_key] + if entry['mem_percent'] >= self.MEMORY_WARNING and + current_time - entry['time'] <= self.MEMORY_DURATION + ) + + # Check swap usage + swap_critical = 
sum( + 1 for entry in self.state_history[state_key] + if entry['swap_vs_ram'] > self.SWAP_CRITICAL_PERCENT and + current_time - entry['time'] <= self.SWAP_CRITICAL_DURATION + ) + + swap_warning = sum( + 1 for entry in self.state_history[state_key] + if entry['swap_percent'] > 0 and + current_time - entry['time'] <= self.SWAP_WARNING_DURATION + ) + + # Determine status + if mem_critical >= 2: + status = 'CRITICAL' + reason = f'RAM >{self.MEMORY_CRITICAL}% for {self.MEMORY_DURATION}s' + elif swap_critical >= 2: + status = 'CRITICAL' + reason = f'Swap >{self.SWAP_CRITICAL_PERCENT}% of RAM for {self.SWAP_CRITICAL_DURATION}s' + elif mem_warning >= 2: + status = 'WARNING' + reason = f'RAM >{self.MEMORY_WARNING}% for {self.MEMORY_DURATION}s' + elif swap_warning >= 2: + status = 'WARNING' + reason = f'Swap active for >{self.SWAP_WARNING_DURATION}s' + else: + status = 'OK' + reason = None + + result = { + 'status': status, + 'ram_percent': round(mem_percent, 1), + 'ram_available_gb': round(memory.available / (1024**3), 2), + 'swap_percent': round(swap_percent, 1), + 'swap_used_gb': round(swap.used / (1024**3), 2) + } + + if reason: + result['reason'] = reason + + return result + + except Exception as e: + return {'status': 'UNKNOWN', 'reason': f'Memory check failed: {str(e)}'} + + def _check_storage_comprehensive(self) -> Dict[str, Any]: + """ + Comprehensive storage check including filesystems, mount points, + LVM, and Proxmox storages. + """ + storage_results = {} + + # Check critical filesystems + critical_mounts = ['/', '/var', '/var/lib/vz'] + + for mount_point in critical_mounts: + if os.path.exists(mount_point): + fs_status = self._check_filesystem(mount_point) + storage_results[mount_point] = fs_status + + # Check all mounted filesystems + try: + partitions = psutil.disk_partitions() + for partition in partitions: + if partition.mountpoint not in critical_mounts: + try: + fs_status = self._check_filesystem(partition.mountpoint) + storage_results[partition.mountpoint] = fs_status + except PermissionError: + continue + except Exception as e: + storage_results['partitions_error'] = { + 'status': 'WARNING', + 'reason': f'Could not enumerate partitions: {str(e)}' + } + + # Check LVM (especially local-lvm) + lvm_status = self._check_lvm() + if lvm_status: + storage_results['lvm'] = lvm_status + + # Check Proxmox storages + pve_storages = self._check_proxmox_storages() + if pve_storages: + storage_results.update(pve_storages) + + return storage_results + + def _check_filesystem(self, mount_point: str) -> Dict[str, Any]: + """Check individual filesystem for space and mount status""" + try: + # Check if mounted + result = subprocess.run( + ['mountpoint', '-q', mount_point], + capture_output=True, + timeout=2 + ) + + if result.returncode != 0: + return { + 'status': 'CRITICAL', + 'reason': f'Not mounted' } + + # Check if read-only + with open('/proc/mounts', 'r') as f: + for line in f: + parts = line.split() + if len(parts) >= 4 and parts[1] == mount_point: + options = parts[3].split(',') + if 'ro' in options: + return { + 'status': 'CRITICAL', + 'reason': 'Mounted read-only' + } + + # Check disk usage + usage = psutil.disk_usage(mount_point) + percent = usage.percent + + if percent >= self.STORAGE_CRITICAL: + status = 'CRITICAL' + reason = f'{percent:.1f}% full (≥{self.STORAGE_CRITICAL}%)' + elif percent >= self.STORAGE_WARNING: + status = 'WARNING' + reason = f'{percent:.1f}% full (≥{self.STORAGE_WARNING}%)' + else: + status = 'OK' + reason = None + + result = { + 'status': status, + 
'usage_percent': round(percent, 1), + 'free_gb': round(usage.free / (1024**3), 2), + 'total_gb': round(usage.total / (1024**3), 2) + } + + if reason: + result['reason'] = reason + + return result + except Exception as e: return { - 'category': 'System Events', - 'name': 'Error Logs', - 'status': 'warning', - 'value': 'Unknown', - 'message': f'Could not check system logs: {str(e)}', - 'details': {'error': str(e)} + 'status': 'WARNING', + 'reason': f'Check failed: {str(e)}' } - def _parse_zpool_status(self, output: str) -> List[Dict[str, Any]]: - """Parse zpool status output""" - pools = [] - current_pool = None - - for line in output.split('\n'): - line = line.strip() + def _check_lvm(self) -> Dict[str, Any]: + """Check LVM volumes, especially local-lvm""" + try: + result = subprocess.run( + ['lvs', '--noheadings', '--options', 'lv_name,vg_name,lv_attr'], + capture_output=True, + text=True, + timeout=3 + ) - if line.startswith('pool:'): - if current_pool: - pools.append(current_pool) - current_pool = {'name': line.split(':')[1].strip(), 'state': 'UNKNOWN', 'errors': 0} - elif line.startswith('state:') and current_pool: - current_pool['state'] = line.split(':')[1].strip() - elif 'errors:' in line.lower() and current_pool: + if result.returncode != 0: + return { + 'status': 'WARNING', + 'reason': 'LVM not available or no volumes' + } + + volumes = [] + local_lvm_found = False + + for line in result.stdout.strip().split('\n'): + if line.strip(): + parts = line.split() + if len(parts) >= 2: + lv_name = parts[0].strip() + vg_name = parts[1].strip() + volumes.append(f'{vg_name}/{lv_name}') + + if 'local-lvm' in lv_name or 'local-lvm' in vg_name: + local_lvm_found = True + + if not local_lvm_found and volumes: + return { + 'status': 'CRITICAL', + 'reason': 'local-lvm volume not found', + 'volumes': volumes + } + + return { + 'status': 'OK', + 'volumes': volumes + } + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'LVM check failed: {str(e)}' + } + + def _check_proxmox_storages(self) -> Dict[str, Any]: + """Check Proxmox-specific storages (NFS, CIFS, PBS)""" + storages = {} + + try: + # Read Proxmox storage configuration + if os.path.exists('/etc/pve/storage.cfg'): + with open('/etc/pve/storage.cfg', 'r') as f: + current_storage = None + storage_type = None + + for line in f: + line = line.strip() + + if line.startswith('dir:') or line.startswith('nfs:') or \ + line.startswith('cifs:') or line.startswith('pbs:'): + parts = line.split(':', 1) + storage_type = parts[0] + current_storage = parts[1].strip() + elif line.startswith('path ') and current_storage: + path = line.split(None, 1)[1] + + if storage_type == 'dir': + if os.path.exists(path): + storages[f'storage_{current_storage}'] = { + 'status': 'OK', + 'type': 'dir', + 'path': path + } + else: + storages[f'storage_{current_storage}'] = { + 'status': 'CRITICAL', + 'reason': 'Directory does not exist', + 'type': 'dir', + 'path': path + } + + current_storage = None + storage_type = None + except Exception as e: + storages['pve_storage_config'] = { + 'status': 'WARNING', + 'reason': f'Could not read storage config: {str(e)}' + } + + return storages + + def _check_disks_io(self) -> Dict[str, Any]: + """Check disk I/O errors from dmesg (lightweight)""" + disks = {} + current_time = time.time() + + try: + # Only check dmesg for recent errors (last 2 seconds of kernel log) + result = subprocess.run( + ['dmesg', '-T', '--level=err,warn', '--since', '5 minutes ago'], + capture_output=True, + text=True, + timeout=2 + ) + + if 
result.returncode == 0: + io_errors = defaultdict(int) + + for line in result.stdout.split('\n'): + line_lower = line.lower() + if any(keyword in line_lower for keyword in ['i/o error', 'ata error', 'scsi error']): + # Extract disk name + for part in line.split(): + if part.startswith('sd') or part.startswith('nvme') or part.startswith('hd'): + disk_name = part.rstrip(':,') + io_errors[disk_name] += 1 + + # Track in history + self.io_error_history[disk_name].append(current_time) + + # Clean old history (keep last 5 minutes) + for disk in list(self.io_error_history.keys()): + self.io_error_history[disk] = [ + t for t in self.io_error_history[disk] + if current_time - t < 300 + ] + + error_count = len(self.io_error_history[disk]) + + if error_count >= 3: + disks[f'/dev/{disk}'] = { + 'status': 'CRITICAL', + 'reason': f'{error_count} I/O errors in 5 minutes' + } + elif error_count >= 1: + disks[f'/dev/{disk}'] = { + 'status': 'WARNING', + 'reason': f'{error_count} I/O error(s) in 5 minutes' + } + + # If no errors found, report OK + if not disks: + disks['status'] = 'OK' + + return disks + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'Disk I/O check failed: {str(e)}' + } + + def _check_network_comprehensive(self) -> Dict[str, Any]: + """Check network interfaces, bridges, and connectivity""" + try: + issues = [] + interface_details = {} + + # Check interface status + net_if_stats = psutil.net_if_stats() + net_io = psutil.net_io_counters(pernic=True) + current_time = time.time() + + for interface, stats in net_if_stats.items(): + if interface == 'lo': + continue + + # Check if interface is down (excluding administratively down) + if not stats.isup: + # Check if it's a bridge or important interface + if interface.startswith('vmbr') or interface.startswith('eth') or interface.startswith('ens'): + issues.append(f'{interface} is DOWN') + interface_details[interface] = { + 'status': 'CRITICAL', + 'reason': 'Interface DOWN' + } + continue + + # Check bridge traffic (if no traffic for 10 minutes) + if interface.startswith('vmbr') and interface in net_io: + io_stats = net_io[interface] + + # Initialize baseline if not exists + if interface not in self.network_baseline: + self.network_baseline[interface] = { + 'rx_bytes': io_stats.bytes_recv, + 'tx_bytes': io_stats.bytes_sent, + 'time': current_time + } + else: + baseline = self.network_baseline[interface] + time_diff = current_time - baseline['time'] + + if time_diff >= self.NETWORK_INACTIVE_DURATION: + rx_diff = io_stats.bytes_recv - baseline['rx_bytes'] + tx_diff = io_stats.bytes_sent - baseline['tx_bytes'] + + if rx_diff == 0 and tx_diff == 0: + issues.append(f'{interface} no traffic for 10+ minutes') + interface_details[interface] = { + 'status': 'WARNING', + 'reason': 'No traffic for 10+ minutes' + } + + # Update baseline + self.network_baseline[interface] = { + 'rx_bytes': io_stats.bytes_recv, + 'tx_bytes': io_stats.bytes_sent, + 'time': current_time + } + + # Check gateway/DNS latency (lightweight, cached) + latency_status = self._check_network_latency() + if latency_status.get('status') != 'OK': + issues.append(latency_status.get('reason', 'Network latency issue')) + interface_details['connectivity'] = latency_status + + # Determine overall network status + if any('CRITICAL' in str(detail.get('status')) for detail in interface_details.values()): + status = 'CRITICAL' + reason = '; '.join(issues[:2]) + elif issues: + status = 'WARNING' + reason = '; '.join(issues[:2]) + else: + status = 'OK' + reason = None + + result = 
{'status': status} + if reason: + result['reason'] = reason + if interface_details: + result['interfaces'] = interface_details + + return result + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'Network check failed: {str(e)}' + } + + def _check_network_latency(self) -> Dict[str, Any]: + """Check network latency to gateway/DNS (cached, max 1 check per minute)""" + cache_key = 'network_latency' + current_time = time.time() + + # Check cache + if cache_key in self.last_check_times: + if current_time - self.last_check_times[cache_key] < 60: + return self.cached_results.get(cache_key, {'status': 'OK'}) + + try: + # Ping default gateway or 1.1.1.1 + result = subprocess.run( + ['ping', '-c', '1', '-W', '1', '1.1.1.1'], + capture_output=True, + text=True, + timeout=self.NETWORK_TIMEOUT + ) + + if result.returncode == 0: + # Extract latency + for line in result.stdout.split('\n'): + if 'time=' in line: + try: + latency_str = line.split('time=')[1].split()[0] + latency = float(latency_str) + + if latency > self.NETWORK_LATENCY_CRITICAL: + status = 'CRITICAL' + reason = f'Latency {latency:.1f}ms >{self.NETWORK_LATENCY_CRITICAL}ms' + elif latency > self.NETWORK_LATENCY_WARNING: + status = 'WARNING' + reason = f'Latency {latency:.1f}ms >{self.NETWORK_LATENCY_WARNING}ms' + else: + status = 'OK' + reason = None + + latency_result = { + 'status': status, + 'latency_ms': round(latency, 1) + } + if reason: + latency_result['reason'] = reason + + self.cached_results[cache_key] = latency_result + self.last_check_times[cache_key] = current_time + return latency_result + except: + pass + + # Ping failed + packet_loss_result = { + 'status': 'CRITICAL', + 'reason': 'Packet loss or timeout' + } + self.cached_results[cache_key] = packet_loss_result + self.last_check_times[cache_key] = current_time + return packet_loss_result + + except Exception as e: + error_result = { + 'status': 'WARNING', + 'reason': f'Latency check failed: {str(e)}' + } + self.cached_results[cache_key] = error_result + self.last_check_times[cache_key] = current_time + return error_result + + def _check_vms_cts(self) -> Dict[str, Any]: + """Check VM and CT status for unexpected stops""" + try: + issues = [] + vm_details = {} + + # Check VMs + try: + result = subprocess.run( + ['qm', 'list'], + capture_output=True, + text=True, + timeout=3 + ) + + if result.returncode == 0: + for line in result.stdout.strip().split('\n')[1:]: + if line.strip(): + parts = line.split() + if len(parts) >= 3: + vmid = parts[0] + vm_status = parts[2] + + if vm_status == 'stopped': + # Check if unexpected (this is simplified, would need autostart config) + vm_details[f'vm_{vmid}'] = { + 'status': 'WARNING', + 'reason': 'VM stopped' + } + issues.append(f'VM {vmid} stopped') + except Exception as e: + vm_details['vms_check'] = { + 'status': 'WARNING', + 'reason': f'Could not check VMs: {str(e)}' + } + + # Check CTs + try: + result = subprocess.run( + ['pct', 'list'], + capture_output=True, + text=True, + timeout=3 + ) + + if result.returncode == 0: + for line in result.stdout.strip().split('\n')[1:]: + if line.strip(): + parts = line.split() + if len(parts) >= 2: + ctid = parts[0] + ct_status = parts[1] + + if ct_status == 'stopped': + vm_details[f'ct_{ctid}'] = { + 'status': 'WARNING', + 'reason': 'CT stopped' + } + issues.append(f'CT {ctid} stopped') + except Exception as e: + vm_details['cts_check'] = { + 'status': 'WARNING', + 'reason': f'Could not check CTs: {str(e)}' + } + + # Determine overall status + if issues: + status = 'WARNING' + 
reason = '; '.join(issues[:3]) + else: + status = 'OK' + reason = None + + result = {'status': status} + if reason: + result['reason'] = reason + if vm_details: + result['details'] = vm_details + + return result + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'VM/CT check failed: {str(e)}' + } + + def _check_pve_services(self) -> Dict[str, Any]: + """Check critical Proxmox services""" + try: + failed_services = [] + + for service in self.PVE_SERVICES: try: - error_part = line.split(':')[1].strip() - if error_part.lower() != 'no known data errors': - current_pool['errors'] = int(error_part.split()[0]) - except: - pass + result = subprocess.run( + ['systemctl', 'is-active', service], + capture_output=True, + text=True, + timeout=2 + ) + + if result.returncode != 0 or result.stdout.strip() != 'active': + failed_services.append(service) + except Exception: + failed_services.append(service) + + if failed_services: + return { + 'status': 'CRITICAL', + 'reason': f'Services inactive: {", ".join(failed_services)}', + 'failed': failed_services + } + + return {'status': 'OK'} + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'Service check failed: {str(e)}' + } + + def _check_logs_lightweight(self) -> Dict[str, Any]: + """Lightweight log analysis (cached, checked every 5 minutes)""" + cache_key = 'logs_analysis' + current_time = time.time() - if current_pool: - pools.append(current_pool) + # Check cache + if cache_key in self.last_check_times: + if current_time - self.last_check_times[cache_key] < self.LOG_CHECK_INTERVAL: + return self.cached_results.get(cache_key, {'status': 'OK'}) - return pools + try: + # Check journalctl for recent errors and warnings + result = subprocess.run( + ['journalctl', '--since', '5 minutes ago', '--no-pager', '-p', 'warning'], + capture_output=True, + text=True, + timeout=3 + ) + + if result.returncode == 0: + lines = result.stdout.strip().split('\n') + + errors_5m = 0 + warnings_5m = 0 + critical_keywords_found = [] + + for line in lines: + line_lower = line.lower() + + # Check for critical keywords + for keyword in self.CRITICAL_LOG_KEYWORDS: + if keyword.lower() in line_lower: + critical_keywords_found.append(keyword) + errors_5m += 1 + break + else: + # Count errors and warnings + if 'error' in line_lower or 'critical' in line_lower or 'fatal' in line_lower: + errors_5m += 1 + elif 'warning' in line_lower or 'warn' in line_lower: + warnings_5m += 1 + + # Determine status + if critical_keywords_found: + status = 'CRITICAL' + reason = f'Critical errors: {", ".join(set(critical_keywords_found[:3]))}' + elif errors_5m >= self.LOG_ERRORS_CRITICAL: + status = 'CRITICAL' + reason = f'{errors_5m} errors in 5 minutes (≥{self.LOG_ERRORS_CRITICAL})' + elif warnings_5m >= self.LOG_WARNINGS_CRITICAL: + status = 'CRITICAL' + reason = f'{warnings_5m} warnings in 5 minutes (≥{self.LOG_WARNINGS_CRITICAL})' + elif errors_5m >= self.LOG_ERRORS_WARNING: + status = 'WARNING' + reason = f'{errors_5m} errors in 5 minutes' + elif warnings_5m >= self.LOG_WARNINGS_WARNING: + status = 'WARNING' + reason = f'{warnings_5m} warnings in 5 minutes' + else: + status = 'OK' + reason = None + + log_result = { + 'status': status, + 'errors_5m': errors_5m, + 'warnings_5m': warnings_5m + } + if reason: + log_result['reason'] = reason + + self.cached_results[cache_key] = log_result + self.last_check_times[cache_key] = current_time + return log_result + + ok_result = {'status': 'OK'} + self.cached_results[cache_key] = ok_result + 
self.last_check_times[cache_key] = current_time + return ok_result + + except Exception as e: + error_result = { + 'status': 'WARNING', + 'reason': f'Log check failed: {str(e)}' + } + self.cached_results[cache_key] = error_result + self.last_check_times[cache_key] = current_time + return error_result + + def _check_security(self) -> Dict[str, Any]: + """Check security-related items (fail2ban, certificates, uptime)""" + try: + issues = [] + + # Check fail2ban + try: + result = subprocess.run( + ['systemctl', 'is-active', 'fail2ban'], + capture_output=True, + text=True, + timeout=2 + ) + + if result.returncode != 0 or result.stdout.strip() != 'active': + issues.append('fail2ban inactive') + except Exception: + pass + + # Check uptime (warning if >180 days) + try: + uptime_seconds = time.time() - psutil.boot_time() + uptime_days = uptime_seconds / 86400 + + if uptime_days > 180: + issues.append(f'Uptime {int(uptime_days)} days (>180)') + except Exception: + pass + + # Check SSL certificates (cached, checked once per day) + cert_status = self._check_certificates() + if cert_status.get('status') != 'OK': + issues.append(cert_status.get('reason', 'Certificate issue')) + + if issues: + return { + 'status': 'WARNING', + 'reason': '; '.join(issues[:2]) + } + + return {'status': 'OK'} + + except Exception as e: + return { + 'status': 'WARNING', + 'reason': f'Security check failed: {str(e)}' + } + + def _check_certificates(self) -> Dict[str, Any]: + """Check SSL certificate expiration (cached, checked once per day)""" + cache_key = 'certificates' + current_time = time.time() + + # Check cache (24 hours) + if cache_key in self.last_check_times: + if current_time - self.last_check_times[cache_key] < 86400: + return self.cached_results.get(cache_key, {'status': 'OK'}) + + try: + # Check PVE certificate + cert_path = '/etc/pve/local/pve-ssl.pem' + + if os.path.exists(cert_path): + result = subprocess.run( + ['openssl', 'x509', '-enddate', '-noout', '-in', cert_path], + capture_output=True, + text=True, + timeout=2 + ) + + if result.returncode == 0: + # Parse expiration date + date_str = result.stdout.strip().replace('notAfter=', '') + + try: + from datetime import datetime + exp_date = datetime.strptime(date_str, '%b %d %H:%M:%S %Y %Z') + days_until_expiry = (exp_date - datetime.now()).days + + if days_until_expiry < 0: + status = 'CRITICAL' + reason = 'Certificate expired' + elif days_until_expiry < 15: + status = 'WARNING' + reason = f'Certificate expires in {days_until_expiry} days' + else: + status = 'OK' + reason = None + + cert_result = {'status': status} + if reason: + cert_result['reason'] = reason + + self.cached_results[cache_key] = cert_result + self.last_check_times[cache_key] = current_time + return cert_result + except Exception: + pass + + ok_result = {'status': 'OK'} + self.cached_results[cache_key] = ok_result + self.last_check_times[cache_key] = current_time + return ok_result + + except Exception: + ok_result = {'status': 'OK'} + self.cached_results[cache_key] = ok_result + self.last_check_times[cache_key] = current_time + return ok_result + # Global instance health_monitor = HealthMonitor() From 876d51b009078531d58e85c91f1625be94eca746 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Wed, 5 Nov 2025 18:38:29 +0100 Subject: [PATCH 36/41] Update health-status-modal.tsx --- AppImage/components/health-status-modal.tsx | 23 ++++++++++++--------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/AppImage/components/health-status-modal.tsx 
b/AppImage/components/health-status-modal.tsx index 74b064b..4a033bd 100644 --- a/AppImage/components/health-status-modal.tsx +++ b/AppImage/components/health-status-modal.tsx @@ -87,16 +87,19 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu } } - const groupedChecks = healthData?.checks.reduce( - (acc, check) => { - if (!acc[check.category]) { - acc[check.category] = [] - } - acc[check.category].push(check) - return acc - }, - {} as Record, - ) + const groupedChecks = + healthData?.checks && Array.isArray(healthData.checks) + ? healthData.checks.reduce( + (acc, check) => { + if (!acc[check.category]) { + acc[check.category] = [] + } + acc[check.category].push(check) + return acc + }, + {} as Record, + ) + : {} return ( From 4ea5890e92dbeed698fc1df397c655751b6a16f2 Mon Sep 17 00:00:00 2001 From: MacRimi Date: Wed, 5 Nov 2025 18:46:19 +0100 Subject: [PATCH 37/41] Update health-status-modal.tsx --- AppImage/components/health-status-modal.tsx | 210 +++++++++++++------- 1 file changed, 142 insertions(+), 68 deletions(-) diff --git a/AppImage/components/health-status-modal.tsx b/AppImage/components/health-status-modal.tsx index 4a033bd..3496648 100644 --- a/AppImage/components/health-status-modal.tsx +++ b/AppImage/components/health-status-modal.tsx @@ -6,24 +6,19 @@ import { Badge } from "@/components/ui/badge" import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card" import { Loader2, CheckCircle2, AlertTriangle, XCircle, Activity } from "lucide-react" -interface HealthCheck { - category: string - name: string - status: "healthy" | "warning" | "critical" - value: string - message: string - details: any +interface HealthDetail { + status: string + reason?: string + [key: string]: any } interface HealthDetails { - overall: { - status: "healthy" | "warning" | "critical" - critical_count: number - warning_count: number - healthy_count: number - total_checks: number + overall: string + summary: string + details: { + [category: string]: HealthDetail | { [key: string]: HealthDetail } } - checks: HealthCheck[] + timestamp: string } interface HealthStatusModalProps { @@ -53,21 +48,92 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu throw new Error("Failed to fetch health details") } const data = await response.json() + console.log("[v0] Health data received:", data) setHealthData(data) } catch (err) { + console.error("[v0] Error fetching health data:", err) setError(err instanceof Error ? 
err.message : "Unknown error") } finally { setLoading(false) } } + const getHealthStats = () => { + if (!healthData?.details) { + return { total: 0, healthy: 0, warnings: 0, critical: 0 } + } + + let healthy = 0 + let warnings = 0 + let critical = 0 + let total = 0 + + const countStatus = (detail: any) => { + if (detail && typeof detail === "object" && detail.status) { + total++ + const status = detail.status.toUpperCase() + if (status === "OK") healthy++ + else if (status === "WARNING") warnings++ + else if (status === "CRITICAL") critical++ + } + } + + Object.values(healthData.details).forEach((categoryData) => { + if (categoryData && typeof categoryData === "object") { + if ("status" in categoryData) { + countStatus(categoryData) + } else { + Object.values(categoryData).forEach(countStatus) + } + } + }) + + return { total, healthy, warnings, critical } + } + + const getGroupedChecks = () => { + if (!healthData?.details) return {} + + const grouped: { [key: string]: Array<{ name: string; status: string; reason?: string; details?: any }> } = {} + + Object.entries(healthData.details).forEach(([category, categoryData]) => { + if (!categoryData || typeof categoryData !== "object") return + + const categoryName = category.charAt(0).toUpperCase() + category.slice(1) + grouped[categoryName] = [] + + if ("status" in categoryData) { + grouped[categoryName].push({ + name: categoryName, + status: categoryData.status, + reason: categoryData.reason, + details: categoryData, + }) + } else { + Object.entries(categoryData).forEach(([subKey, subData]: [string, any]) => { + if (subData && typeof subData === "object" && "status" in subData) { + grouped[categoryName].push({ + name: subKey, + status: subData.status, + reason: subData.reason, + details: subData, + }) + } + }) + } + }) + + return grouped + } + const getStatusIcon = (status: string) => { - switch (status) { - case "healthy": + const statusUpper = status?.toUpperCase() + switch (statusUpper) { + case "OK": return - case "warning": + case "WARNING": return - case "critical": + case "CRITICAL": return default: return @@ -75,31 +141,21 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu } const getStatusBadge = (status: string) => { - switch (status) { - case "healthy": + const statusUpper = status?.toUpperCase() + switch (statusUpper) { + case "OK": return Healthy - case "warning": + case "WARNING": return Warning - case "critical": + case "CRITICAL": return Critical default: return Unknown } } - const groupedChecks = - healthData?.checks && Array.isArray(healthData.checks) - ? healthData.checks.reduce( - (acc, check) => { - if (!acc[check.category]) { - acc[check.category] = [] - } - acc[check.category].push(check) - return acc - }, - {} as Record, - ) - : {} + const stats = getHealthStats() + const groupedChecks = getGroupedChecks() return ( @@ -119,7 +175,7 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu )} {error && ( -
+

Error loading health status

{error}

@@ -132,25 +188,26 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu Overall Status - {getStatusBadge(healthData.overall.status)} + {getStatusBadge(healthData.overall)} + {healthData.summary &&

{healthData.summary}

}
-
{healthData.overall.total_checks}
+
{stats.total}
Total Checks
-
{healthData.overall.healthy_count}
+
{stats.healthy}
Healthy
-
{healthData.overall.warning_count}
+
{stats.warnings}
Warnings
-
{healthData.overall.critical_count}
+
{stats.critical}
Critical
@@ -158,35 +215,52 @@ export function HealthStatusModal({ open, onOpenChange, getApiUrl }: HealthStatu {/* Grouped Health Checks */} - {groupedChecks && - Object.entries(groupedChecks).map(([category, checks]) => ( - - - {category} - - -
- {checks.map((check, index) => ( -
-
{getStatusIcon(check.status)}
-
-
-

{check.name}

- - {check.value} - -
-

{check.message}

+ {Object.entries(groupedChecks).map(([category, checks]) => ( + + + {category} + + +
+ {checks.map((check, index) => ( +
+
{getStatusIcon(check.status)}
+
+
+

{check.name}

+ + {check.status} +
+ {check.reason &&

{check.reason}

} + {check.details && ( +
+ {Object.entries(check.details).map(([key, value]) => { + if (key === "status" || key === "reason" || typeof value === "object") return null + return ( +
+ {key}: {String(value)} +
+ ) + })} +
+ )}
- ))} -
- - - ))} +
+ ))} +
+ + + ))} + + {healthData.timestamp && ( +
+ Last updated: {new Date(healthData.timestamp).toLocaleString()} +
+ )}
)}
)}

)
}

From 5864de7deac840d99f5d52afdb9dd15a6ab3d373 Mon Sep 17 00:00:00 2001
From: ProxMenuxBot
Date: Wed, 5 Nov 2025 18:19:51 +0000
Subject: [PATCH 38/41] Update helpers_cache.json

---
 json/helpers_cache.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/json/helpers_cache.json b/json/helpers_cache.json
index cc956c6..374ffe0 100644
--- a/json/helpers_cache.json
+++ b/json/helpers_cache.json
@@ -3111,8 +3111,7 @@
         21
       ],
       "notes": [
-        "Since there are hundreds of Certbot instances, it's necessary to install the specific Certbot of your preference. Running `/app/scripts/install-certbot-plugins` within the nginxproxymanager LXC shell will install many additional plugins.",
-        "WARNING: Installation sources scripts outside of Community Scripts repo. Please check the source before installing."
+        "You can install the specific Certbot plugin you prefer, or run /app/scripts/install-certbot-plugins within the Nginx Proxy Manager (NPM) LXC shell to install many common plugins. Important: this script does not install all Certbot plugins, as some require additional external system dependencies (such as specific packages for certain DNS providers). Those dependencies must be installed manually inside the LXC container before the corresponding Certbot plugin can be installed and used. Consult the plugin's documentation for the required packages."
       ],
       "type": "ct",
       "default_credentials": {
@@ -3475,7 +3474,8 @@
         20
       ],
       "notes": [
-        "Script contains optional installation of Ollama."
+        "Script contains optional installation of Ollama.",
+        "Initial run of the application/container can take some time, depending on your host speed, as the application is installed/updated at runtime. Please be patient!"
       ],
       "type": "ct"
     },

From 22709dac361e87b3961799e0daa66198e590ceb7 Mon Sep 17 00:00:00 2001
From: JFC <47096567+MrCaringi@users.noreply.github.com>
Date: Wed, 5 Nov 2025 12:35:11 -0600
Subject: [PATCH 39/41] Add issue template configuration for GitHub

---
 .github/ISSUE_TEMPLATE/config.yml | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml

diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..9354ec1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+  - name: General Support
+    url: https://github.com/MacRimi/ProxMenux/discussions
+    about: If your request is not a bug report or a feature request, use the discussions.

From 143cb4cbab74661654e40364e662cf72c6e0fc39 Mon Sep 17 00:00:00 2001
From: JFC <47096567+MrCaringi@users.noreply.github.com>
Date: Wed, 5 Nov 2025 12:36:50 -0600
Subject: [PATCH 40/41] Modify bug report template and assign to MacRimi

Updated bug report template to include mandatory screenshots and
assigned to 'MacRimi'.
---
 .github/ISSUE_TEMPLATE/bug_report.md | 29 ++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..91bed5e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,29 @@
+---
+name: Bug Report
+about: Report a problem in the project
+title: "[BUG] Describe the problem"
+labels: bug
+assignees: 'MacRimi'
+---
+
+## Description
+Describe the error clearly and concisely.
+
+## Steps to reproduce
+1. ...
+2. ...
+3. ...
+
+## Expected behavior
+What should happen?
+
+## Screenshots (Required)
+Add images to help explain the problem.
+
+## Environment
+- Operating system:
+- Software version:
+- Other relevant details:
+
+## Additional information
+Add any other context about the problem here.

From 61d87b46d99e057a0b7a345be3fffe3c78d6c894 Mon Sep 17 00:00:00 2001
From: JFC <47096567+MrCaringi@users.noreply.github.com>
Date: Wed, 5 Nov 2025 12:37:45 -0600
Subject: [PATCH 41/41] Create feature request issue template

Adds a feature request template for GitHub issues.
---
 .github/ISSUE_TEMPLATE/feature_request.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md

diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..68dd603
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,19 @@
+---
+name: Feature Request
+about: Suggest a new feature or improvement
+title: "[FEATURE] Describe the proposal"
+labels: enhancement
+assignees: 'MacRimi'
+---
+
+## Description
+Explain the feature you are proposing.
+
+## Motivation
+Why is this improvement important? What problem does it solve?
+
+## Alternatives considered
+Are there other solutions you have considered?
+
+## Additional information
+Add any extra details that help clarify the proposal.
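Taken together, patch 35 (health_monitor.py) and patches 36 and 37 (health-status-modal.tsx) change the health API contract from a flat checks array to a nested details object keyed by category, where each entry either carries a status field directly or holds a map of sub-entries (for example, one per mount point). A minimal TypeScript sketch of a consumer of that shape; the interfaces mirror the modal's types, while fetchHealth and its /api/health/details path are assumptions, since the exact endpoint URL does not appear in the patch:

// Shape produced by HealthMonitor.get_detailed_status() in patch 35.
interface HealthDetail {
  status: string // "OK" | "WARNING" | "CRITICAL" | "UNKNOWN"
  reason?: string
  [key: string]: unknown
}

interface HealthDetails {
  overall: string
  summary: string
  details: Record<string, HealthDetail | Record<string, HealthDetail>>
  timestamp: string
}

// Assumed endpoint path; the patch does not show the URL the modal calls.
async function fetchHealth(): Promise<HealthDetails> {
  const response = await fetch("/api/health/details")
  if (!response.ok) throw new Error("Failed to fetch health details")
  return response.json()
}

// Counts statuses the same way the modal's getHealthStats() does:
// a top-level entry with its own `status` field counts once, otherwise
// every nested entry (e.g. one per mount point or disk) is counted.
function countStatuses(data: HealthDetails): { ok: number; warning: number; critical: number } {
  let ok = 0
  let warning = 0
  let critical = 0

  const tally = (entry: unknown): void => {
    if (entry && typeof entry === "object" && "status" in entry) {
      const status = String((entry as HealthDetail).status).toUpperCase()
      if (status === "OK") ok++
      else if (status === "WARNING") warning++
      else if (status === "CRITICAL") critical++
    }
  }

  for (const category of Object.values(data.details)) {
    if (category && typeof category === "object" && "status" in category) {
      tally(category) // flat entry such as cpu, memory or network
    } else if (category && typeof category === "object") {
      Object.values(category).forEach(tally) // nested map such as storage
    }
  }

  return { ok, warning, critical }
}

The modal's getGroupedChecks() applies the same two-level walk to build per-category lists, so a new check added on the Python side appears in the UI without further frontend changes.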