diff --git a/AppImage/components/system-logs.tsx b/AppImage/components/system-logs.tsx
index 5ef052b..b0dc7cb 100644
--- a/AppImage/components/system-logs.tsx
+++ b/AppImage/components/system-logs.tsx
@@ -6,88 +6,165 @@ import { Button } from "./ui/button"
 import { Input } from "./ui/input"
 import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "./ui/select"
 import { ScrollArea } from "./ui/scroll-area"
-import { FileText, Search, Download, AlertTriangle, Info, CheckCircle, XCircle } from "lucide-react"
-import { useState } from "react"
+import { Tabs, TabsContent, TabsList, TabsTrigger } from "./ui/tabs"
+import {
+  FileText,
+  Search,
+  Download,
+  AlertTriangle,
+  Info,
+  CheckCircle,
+  XCircle,
+  Database,
+  Activity,
+  HardDrive,
+  Calendar,
+  RefreshCw,
+} from "lucide-react"
+import { useState, useEffect } from "react"
 
-const systemLogs = [
-  {
-    timestamp: "2024-01-15 14:32:15",
-    level: "info",
-    service: "pveproxy",
-    message: "User root@pam authenticated successfully",
-    source: "auth.log",
-  },
-  {
-    timestamp: "2024-01-15 14:31:45",
-    level: "warning",
-    service: "pvedaemon",
-    message: "VM 101 high memory usage detected (85%)",
-    source: "syslog",
-  },
-  {
-    timestamp: "2024-01-15 14:30:22",
-    level: "error",
-    service: "pve-cluster",
-    message: "Failed to connect to cluster node pve-02",
-    source: "cluster.log",
-  },
-  {
-    timestamp: "2024-01-15 14:29:18",
-    level: "info",
-    service: "pvestatd",
-    message: "Storage local: 1.25TB used, 750GB available",
-    source: "syslog",
-  },
-  {
-    timestamp: "2024-01-15 14:28:33",
-    level: "info",
-    service: "pve-firewall",
-    message: "Blocked connection attempt from 192.168.1.50",
-    source: "firewall.log",
-  },
-  {
-    timestamp: "2024-01-15 14:27:45",
-    level: "warning",
-    service: "smartd",
-    message: "SMART warning: /dev/nvme0n1 temperature high (55°C)",
-    source: "smart.log",
-  },
-  {
-    timestamp: "2024-01-15 14:26:12",
-    level: "info",
-    service: "pveproxy",
-    message: "Started backup job for VM 100",
-    source: "backup.log",
-  },
-  {
-    timestamp: "2024-01-15 14:25:38",
-    level: "error",
-    service: "qemu-server",
-    message: "VM 102 failed to start: insufficient memory",
-    source: "qemu.log",
-  },
-  {
-    timestamp: "2024-01-15 14:24:55",
-    level: "info",
-    service: "pvedaemon",
-    message: "VM 103 migrated successfully to node pve-01",
-    source: "migration.log",
-  },
-  {
-    timestamp: "2024-01-15 14:23:17",
-    level: "warning",
-    service: "pve-ha-lrm",
-    message: "Resource VM:104 state changed to error",
-    source: "ha.log",
-  },
-]
+interface Log {
+  timestamp: string
+  level: string
+  service: string
+  message: string
+  source: string
+  pid?: string
+  hostname?: string
+}
+
+interface Backup {
+  volid: string
+  storage: string
+  vmid: string | null
+  type: string | null
+  size: number
+  size_human: string
+  created: string
+  timestamp: number
+}
+
+interface Event {
+  upid: string
+  type: string
+  status: string
+  level: string
+  node: string
+  user: string
+  vmid: string
+  starttime: string
+  endtime: string
+  duration: string
+}
+
+interface SystemLog {
+  timestamp: string
+  level: string
+  service: string
+  message: string
+  source: string
+  pid?: string
+  hostname?: string
+}
 
 export function SystemLogs() {
+  const [logs, setLogs] = useState<Log[]>([])
+  const [backups, setBackups] = useState<Backup[]>([])
+  const [events, setEvents] = useState<Event[]>([])
+  const [loading, setLoading] = useState(true)
+  const [error, setError] = useState<string | null>(null)
+
   const [searchTerm, setSearchTerm] = useState("")
   const [levelFilter, setLevelFilter] = useState("all")
useState("all") const [serviceFilter, setServiceFilter] = useState("all") + const [activeTab, setActiveTab] = useState("logs") - const filteredLogs = systemLogs.filter((log) => { + // Fetch data + useEffect(() => { + fetchAllData() + // Refresh every 30 seconds + const interval = setInterval(fetchAllData, 30000) + return () => clearInterval(interval) + }, []) + + const fetchAllData = async () => { + try { + setLoading(true) + setError(null) + + // Fetch logs, backups, and events in parallel + const [logsRes, backupsRes, eventsRes] = await Promise.all([ + fetchSystemLogs(), + fetch("http://localhost:8008/api/backups"), + fetch("http://localhost:8008/api/events?limit=50"), + ]) + + setLogs(logsRes) + + if (backupsRes.ok) { + const backupsData = await backupsRes.json() + setBackups(backupsData.backups || []) + } + + if (eventsRes.ok) { + const eventsData = await eventsRes.json() + setEvents(eventsData.events || []) + } + } catch (err) { + console.error("[v0] Error fetching system logs data:", err) + setError("Failed to connect to server") + } finally { + setLoading(false) + } + } + + const fetchSystemLogs = async (): Promise => { + try { + const baseUrl = + typeof window !== "undefined" ? `${window.location.protocol}//${window.location.hostname}:8008` : "" + const apiUrl = `${baseUrl}/api/logs` + + const response = await fetch(apiUrl, { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + cache: "no-store", + }) + + if (!response.ok) { + throw new Error(`Flask server responded with status: ${response.status}`) + } + + const data = await response.json() + return Array.isArray(data) ? data : data.logs || [] + } catch (error) { + console.error("[v0] Failed to fetch system logs:", error) + return [] + } + } + + const handleDownloadLogs = async (type = "system") => { + try { + const response = await fetch(`http://localhost:8008/api/logs/download?type=${type}&lines=1000`) + if (response.ok) { + const blob = await response.blob() + const url = window.URL.createObjectURL(blob) + const a = document.createElement("a") + a.href = url + a.download = `proxmox_${type}.log` + document.body.appendChild(a) + a.click() + window.URL.revokeObjectURL(url) + document.body.removeChild(a) + } + } catch (err) { + console.error("[v0] Error downloading logs:", err) + } + } + + // Filter logs + const filteredLogs = logs.filter((log) => { const matchesSearch = log.message.toLowerCase().includes(searchTerm.toLowerCase()) || log.service.toLowerCase().includes(searchTerm.toLowerCase()) @@ -100,10 +177,14 @@ export function SystemLogs() { const getLevelColor = (level: string) => { switch (level) { case "error": + case "critical": + case "emergency": + case "alert": return "bg-red-500/10 text-red-500 border-red-500/20" case "warning": return "bg-yellow-500/10 text-yellow-500 border-yellow-500/20" case "info": + case "notice": return "bg-blue-500/10 text-blue-500 border-blue-500/20" default: return "bg-gray-500/10 text-gray-500 border-gray-500/20" @@ -113,10 +194,14 @@ export function SystemLogs() { const getLevelIcon = (level: string) => { switch (level) { case "error": + case "critical": + case "emergency": + case "alert": return case "warning": return case "info": + case "notice": return default: return @@ -124,17 +209,41 @@ export function SystemLogs() { } const logCounts = { - total: systemLogs.length, - error: systemLogs.filter((log) => log.level === "error").length, - warning: systemLogs.filter((log) => log.level === "warning").length, - info: systemLogs.filter((log) => log.level === 
"info").length, + total: logs.length, + error: logs.filter((log) => ["error", "critical", "emergency", "alert"].includes(log.level)).length, + warning: logs.filter((log) => log.level === "warning").length, + info: logs.filter((log) => ["info", "notice", "debug"].includes(log.level)).length, } - const uniqueServices = [...new Set(systemLogs.map((log) => log.service))] + const uniqueServices = [...new Set(logs.map((log) => log.service))] + + // Calculate backup statistics + const backupStats = { + total: backups.length, + totalSize: backups.reduce((sum, b) => sum + b.size, 0), + qemu: backups.filter((b) => b.type === "qemu").length, + lxc: backups.filter((b) => b.type === "lxc").length, + } + + const formatBytes = (bytes: number) => { + if (bytes === 0) return "0 B" + const k = 1024 + const sizes = ["B", "KB", "MB", "GB", "TB"] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` + } + + if (loading && logs.length === 0) { + return ( +
+ +
+ ) + } return (
-      {/* Log Statistics */}
+      {/* Statistics Cards */}
       <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
@@ -143,7 +252,7 @@ export function SystemLogs() {
           </CardHeader>
           <CardContent>
             <div className="text-2xl font-bold">{logCounts.total}</div>
-            <p className="text-xs text-muted-foreground mt-1">Last 24 hours</p>
+            <p className="text-xs text-muted-foreground mt-1">Last 200 entries</p>
           </CardContent>
         </Card>
 
@@ -171,103 +280,235 @@ export function SystemLogs() {
         <Card>
           <CardHeader className="flex flex-row items-center justify-between space-y-0 pb-2">
-            <CardTitle className="text-sm font-medium">Info</CardTitle>
-            <Info className="h-4 w-4 text-blue-500" />
+            <CardTitle className="text-sm font-medium">Backups</CardTitle>
+            <Database className="h-4 w-4 text-muted-foreground" />
           </CardHeader>
           <CardContent>
-            <div className="text-2xl font-bold">{logCounts.info}</div>
-            <p className="text-xs text-muted-foreground mt-1">Normal operations</p>
+            <div className="text-2xl font-bold">{backupStats.total}</div>
+            <p className="text-xs text-muted-foreground mt-1">{formatBytes(backupStats.totalSize)}</p>
           </CardContent>
         </Card>
       </div>
 
-      {/* Log Filters and Search */}
-      <Card>
-        <CardHeader>
-          <CardTitle className="flex items-center gap-2">
-            <FileText className="h-5 w-5" />
-            System Logs
-          </CardTitle>
-        </CardHeader>
-        <CardContent>
-          <div className="flex flex-col md:flex-row gap-4 mb-4">
-            <div className="relative flex-1">
-              <Search className="absolute left-3 top-1/2 transform -translate-y-1/2 text-muted-foreground h-4 w-4" />
-              <Input
-                placeholder="Search logs..."
-                value={searchTerm}
-                onChange={(e) => setSearchTerm(e.target.value)}
-                className="pl-10 bg-background border-border"
-              />
-            </div>
-            <Select value={levelFilter} onValueChange={setLevelFilter}>
-              <SelectTrigger className="w-full md:w-[150px]">
-                <SelectValue placeholder="Level" />
-              </SelectTrigger>
-              <SelectContent>
-                <SelectItem value="all">All Levels</SelectItem>
-                <SelectItem value="error">Error</SelectItem>
-                <SelectItem value="warning">Warning</SelectItem>
-                <SelectItem value="info">Info</SelectItem>
-              </SelectContent>
-            </Select>
-            <Select value={serviceFilter} onValueChange={setServiceFilter}>
-              <SelectTrigger className="w-full md:w-[180px]">
-                <SelectValue placeholder="Service" />
-              </SelectTrigger>
-              <SelectContent>
-                <SelectItem value="all">All Services</SelectItem>
-                {uniqueServices.map((service) => (
-                  <SelectItem key={service} value={service}>
-                    {service}
-                  </SelectItem>
-                ))}
-              </SelectContent>
-            </Select>
-            <Button variant="outline" size="icon">
-              <Download className="h-4 w-4" />
-            </Button>
-          </div>
-
-          <ScrollArea className="h-[500px]">
-            <div className="space-y-2">
-              {filteredLogs.map((log, index) => (
-                <div key={index} className="flex items-start gap-3 p-3 rounded-lg border border-border">
-                  <Badge variant="outline" className={`flex items-center gap-1 ${getLevelColor(log.level)}`}>
-                    {getLevelIcon(log.level)}
-                    {log.level.toUpperCase()}
-                  </Badge>
-                  <div className="flex-1 min-w-0">
-                    <div className="flex items-center justify-between gap-2">
-                      <span className="text-sm font-medium">{log.service}</span>
-                      <span className="text-xs text-muted-foreground">{log.timestamp}</span>
-                    </div>
-                    <p className="text-sm">{log.message}</p>
-                    <p className="text-xs text-muted-foreground">Source: {log.source}</p>
-                  </div>
-                </div>
-              ))}
-
-              {filteredLogs.length === 0 && (
-                <div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
-                  <FileText className="h-12 w-12 mb-2" />
-                  <p>No logs found matching your criteria</p>
-                </div>
-              )}
-            </div>
-          </ScrollArea>
-        </CardContent>
-      </Card>
+      {/* Main Content with Tabs */}
+      <Card>
+        <CardContent className="pt-6">
+          <Tabs value={activeTab} onValueChange={setActiveTab}>
+            <TabsList className="grid w-full grid-cols-3">
+              <TabsTrigger value="logs" className="flex items-center gap-2">
+                <FileText className="h-4 w-4" />
+                System Logs
+              </TabsTrigger>
+              <TabsTrigger value="events" className="flex items-center gap-2">
+                <Activity className="h-4 w-4" />
+                Recent Events
+              </TabsTrigger>
+              <TabsTrigger value="backups" className="flex items-center gap-2">
+                <Database className="h-4 w-4" />
+                Backups
+              </TabsTrigger>
+            </TabsList>
+
+            {/* System Logs Tab */}
+            <TabsContent value="logs" className="space-y-4">
+              <div className="flex flex-col md:flex-row gap-4">
+                <div className="relative flex-1">
+                  <Search className="absolute left-3 top-1/2 transform -translate-y-1/2 text-muted-foreground h-4 w-4" />
+                  <Input
+                    placeholder="Search logs..."
+                    value={searchTerm}
+                    onChange={(e) => setSearchTerm(e.target.value)}
+                    className="pl-10 bg-background border-border"
+                  />
+                </div>
+                <Select value={levelFilter} onValueChange={setLevelFilter}>
+                  <SelectTrigger className="w-full md:w-[150px]">
+                    <SelectValue placeholder="Level" />
+                  </SelectTrigger>
+                  <SelectContent>
+                    <SelectItem value="all">All Levels</SelectItem>
+                    <SelectItem value="error">Error</SelectItem>
+                    <SelectItem value="warning">Warning</SelectItem>
+                    <SelectItem value="info">Info</SelectItem>
+                  </SelectContent>
+                </Select>
+                <Select value={serviceFilter} onValueChange={setServiceFilter}>
+                  <SelectTrigger className="w-full md:w-[180px]">
+                    <SelectValue placeholder="Service" />
+                  </SelectTrigger>
+                  <SelectContent>
+                    <SelectItem value="all">All Services</SelectItem>
+                    {uniqueServices.map((service) => (
+                      <SelectItem key={service} value={service}>
+                        {service}
+                      </SelectItem>
+                    ))}
+                  </SelectContent>
+                </Select>
+                <Button variant="outline" size="icon" onClick={fetchAllData}>
+                  <RefreshCw className="h-4 w-4" />
+                </Button>
+                <Button variant="outline" size="icon" onClick={() => handleDownloadLogs("system")}>
+                  <Download className="h-4 w-4" />
+                </Button>
+              </div>
+
+              <ScrollArea className="h-[500px]">
+                <div className="space-y-2">
+                  {filteredLogs.map((log, index) => (
+                    <div key={index} className="flex items-start gap-3 p-3 rounded-lg border border-border">
+                      <Badge variant="outline" className={`flex items-center gap-1 ${getLevelColor(log.level)}`}>
+                        {getLevelIcon(log.level)}
+                        {log.level.toUpperCase()}
+                      </Badge>
+                      <div className="flex-1 min-w-0">
+                        <div className="flex items-center justify-between gap-2">
+                          <span className="text-sm font-medium">{log.service}</span>
+                          <span className="text-xs text-muted-foreground">{log.timestamp}</span>
+                        </div>
+                        <p className="text-sm">{log.message}</p>
+                        <p className="text-xs text-muted-foreground">
+                          Source: {log.source}
+                          {log.pid && ` • PID: ${log.pid}`}
+                          {log.hostname && ` • Host: ${log.hostname}`}
+                        </p>
+                      </div>
+                    </div>
+                  ))}
+
+                  {filteredLogs.length === 0 && (
+                    <div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
+                      <FileText className="h-12 w-12 mb-2" />
+                      <p>No logs found matching your criteria</p>
+                    </div>
+                  )}
+                </div>
+              </ScrollArea>
+            </TabsContent>
+
+            {/* Recent Events Tab */}
+            <TabsContent value="events" className="space-y-4">
+              <ScrollArea className="h-[500px]">
+                <div className="space-y-2">
+                  {events.map((event, index) => (
+                    <div key={index} className="flex items-start gap-3 p-3 rounded-lg border border-border">
+                      <Badge variant="outline" className={`flex items-center gap-1 ${getLevelColor(event.level)}`}>
+                        {getLevelIcon(event.level)}
+                        {event.status}
+                      </Badge>
+                      <div className="flex-1 min-w-0">
+                        <div className="flex items-center justify-between gap-2">
+                          <span className="text-sm font-medium">
+                            {event.type}
+                            {event.vmid && ` (VM/CT ${event.vmid})`}
+                          </span>
+                          <span className="text-xs text-muted-foreground">{event.duration}</span>
+                        </div>
+                        <p className="text-xs text-muted-foreground">
+                          Node: {event.node} • User: {event.user}
+                        </p>
+                        <p className="text-xs text-muted-foreground">
+                          Started: {event.starttime} • Ended: {event.endtime}
+                        </p>
+                      </div>
+                    </div>
+                  ))}
+
+                  {events.length === 0 && (
+                    <div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
+                      <Activity className="h-12 w-12 mb-2" />
+                      <p>No recent events found</p>
+                    </div>
+                  )}
+                </div>
+              </ScrollArea>
+            </TabsContent>
+
+            {/* Backups Tab */}
+            <TabsContent value="backups" className="space-y-4">
+              <div className="grid grid-cols-1 md:grid-cols-3 gap-4">
+                <Card>
+                  <CardContent className="pt-6">
+                    <div className="text-2xl font-bold">{backupStats.qemu}</div>
+                    <p className="text-xs text-muted-foreground">QEMU Backups</p>
+                  </CardContent>
+                </Card>
+                <Card>
+                  <CardContent className="pt-6">
+                    <div className="text-2xl font-bold">{backupStats.lxc}</div>
+                    <p className="text-xs text-muted-foreground">LXC Backups</p>
+                  </CardContent>
+                </Card>
+                <Card>
+                  <CardContent className="pt-6">
+                    <div className="text-2xl font-bold">{formatBytes(backupStats.totalSize)}</div>
+                    <p className="text-xs text-muted-foreground">Total Size</p>
+                  </CardContent>
+                </Card>
+              </div>
+
+              <ScrollArea className="h-[400px]">
+                <div className="space-y-2">
+                  {backups.map((backup, index) => (
+                    <div key={index} className="flex items-start gap-3 p-3 rounded-lg border border-border">
+                      <Database className="h-5 w-5 text-muted-foreground mt-0.5" />
+                      <div className="flex-1 min-w-0">
+                        <div className="flex items-center justify-between gap-2">
+                          <span className="text-sm font-medium">
+                            {backup.type?.toUpperCase()} {backup.vmid && `VM ${backup.vmid}`}
+                          </span>
+                          <Badge variant="outline" className="text-xs">
+                            {backup.size_human}
+                          </Badge>
+                        </div>
+                        <p className="text-xs text-muted-foreground">Storage: {backup.storage}</p>
+                        <p className="text-xs text-muted-foreground flex items-center gap-1">
+                          <Calendar className="h-3 w-3" />
+                          {backup.created}
+                        </p>
+                        <p className="text-xs text-muted-foreground truncate">{backup.volid}</p>
+                      </div>
+                    </div>
+                  ))}
+
+                  {backups.length === 0 && (
+                    <div className="flex flex-col items-center justify-center py-8 text-muted-foreground">
+                      <HardDrive className="h-12 w-12 mb-2" />
+                      <p>No backups found</p>
+                    </div>
+                  )}
+                </div>
+              </ScrollArea>
+            </TabsContent>
+          </Tabs>
+        </CardContent>
+      </Card>
     </div>
   )
 }
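Review note: `fetchSystemLogs` derives the API origin from `window.location.hostname`, but `fetchAllData` and `handleDownloadLogs` hardcode `http://localhost:8008`, so the Events and Backups tabs and log downloads will fail when the dashboard is served from any other host. A possible follow-up — this helper is hypothetical, not part of the diff — would centralize the origin using the same hostname-based logic:

// Hypothetical shared helper (not in this PR): resolve the Flask API origin once.
export function apiUrl(path: string): string {
  const base =
    typeof window !== "undefined"
      ? `${window.location.protocol}//${window.location.hostname}:8008`
      : "http://localhost:8008" // SSR fallback; assumes the default Flask port
  return `${base}${path}`
}

// Usage inside the component would then be e.g.:
//   fetch(apiUrl("/api/backups"))
//   fetch(apiUrl("/api/events?limit=50"))
//   fetch(apiUrl(`/api/logs/download?type=${type}&lines=1000`))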
diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py
index 5c211d7..844bbb1 100644
--- a/AppImage/scripts/flask_server.py
+++ b/AppImage/scripts/flask_server.py
@@ -19,10 +19,24 @@ import re  # Added for regex matching
 import select  # Added for non-blocking read
 import shutil  # Added for shutil.which
 import xml.etree.ElementTree as ET  # Added for XML parsing
+import math  # Used by the format_bytes helper below
 
 app = Flask(__name__)
 CORS(app)  # Enable CORS for Next.js frontend
 
+# Helper function to format bytes into a human-readable string
+def format_bytes(size_in_bytes):
+    """Converts bytes to a human-readable string (KB, MB, GB, TB)."""
+    if size_in_bytes is None:
+        return "N/A"
+    if size_in_bytes == 0:
+        return "0 B"
+    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+    i = int(math.floor(math.log(size_in_bytes, 1024)))
+    p = math.pow(1024, i)
+    s = round(size_in_bytes / p, 2)
+    return f"{s} {size_name[i]}"
+
 # ADDING A FUNCTION TO PARSE INTEL_GPU_TOP PROCESSES (WITHOUT -J)
 def get_intel_gpu_processes_from_text():
     """Parse processes from intel_gpu_top text output (more reliable than JSON)"""
@@ -989,11 +1003,6 @@ def get_proxmox_storage():
         storage_list = []
         lines = result.stdout.strip().split('\n')
 
-        # Skip header line
-        if len(lines) < 2:
-            print("[v0] No storage found in pvesm output")
-            return {'storage': []}
-
         # Parse each storage line
         for line in lines[1:]:  # Skip header
             parts = line.split()
@@ -3300,9 +3309,21 @@ def api_vms():
 def api_logs():
     """Get system logs"""
     try:
-        # Get recent system logs
-        result = subprocess.run(['journalctl', '-n', '100', '--output', 'json'],
-                                capture_output=True, text=True, timeout=10)
+        limit = request.args.get('limit', '200')
+        priority = request.args.get('priority', None)  # 0-7 (0=emerg, 3=err, 4=warning, 6=info)
+        service = request.args.get('service', None)
+
+        cmd = ['journalctl', '-n', limit, '--output', 'json', '--no-pager']
+
+        # Add priority filter if specified
+        if priority:
+            cmd.extend(['-p', priority])
+
+        # Add service filter if specified
+        if service:
+            cmd.extend(['-u', service])
+
+        result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
 
         if result.returncode == 0:
             logs = []
             for line in result.stdout.strip().split('\n'):
@@ -3310,26 +3331,193 @@ def api_logs():
                 if line:
                     try:
                         log_entry = json.loads(line)
+                        # Convert timestamp from microseconds to a readable format
+                        timestamp_us = int(log_entry.get('__REALTIME_TIMESTAMP', '0'))
+                        timestamp = datetime.fromtimestamp(timestamp_us / 1000000).strftime('%Y-%m-%d %H:%M:%S')
+
+                        # Map journald priority to a level name
+                        priority_map = {
+                            '0': 'emergency', '1': 'alert', '2': 'critical', '3': 'error',
+                            '4': 'warning', '5': 'notice', '6': 'info', '7': 'debug'
+                        }
+                        priority_num = str(log_entry.get('PRIORITY', '6'))
+                        level = priority_map.get(priority_num, 'info')
+
                         logs.append({
-                            'timestamp': log_entry.get('__REALTIME_TIMESTAMP', ''),
-                            'level': log_entry.get('PRIORITY', '6'),
-                            'service': log_entry.get('_SYSTEMD_UNIT', 'system'),
+                            'timestamp': timestamp,
+                            'level': level,
+                            'service': log_entry.get('_SYSTEMD_UNIT', log_entry.get('SYSLOG_IDENTIFIER', 'system')),
                             'message': log_entry.get('MESSAGE', ''),
-                            'source': 'journalctl'
+                            'source': 'journalctl',
+                            'pid': log_entry.get('_PID', ''),
+                            'hostname': log_entry.get('_HOSTNAME', '')
                         })
-                    except json.JSONDecodeError:
+                    except (json.JSONDecodeError, ValueError):
                         continue
 
-            return jsonify(logs)
+            return jsonify({'logs': logs, 'total': len(logs)})
         else:
             return jsonify({
                 'error': 'journalctl not available or failed',
-                'logs': []
+                'logs': [],
+                'total': 0
             })
 
     except Exception as e:
         print(f"Error getting logs: {e}")
         return jsonify({
             'error': f'Unable to access system logs: {str(e)}',
-            'logs': []
+            'logs': [],
+            'total': 0
+        })
+
+@app.route('/api/logs/download', methods=['GET'])
+def api_logs_download():
+    """Download system logs as a text file"""
+    try:
+        log_type = request.args.get('type', 'system')  # system, kernel, auth
+        lines = request.args.get('lines', '1000')
+
+        if log_type == 'kernel':
+            cmd = ['journalctl', '-k', '-n', lines, '--no-pager']
+            filename = 'kernel.log'
+        elif log_type == 'auth':
+            cmd = ['journalctl', '-u', 'ssh', '-u', 'sshd', '-n', lines, '--no-pager']
+            filename = 'auth.log'
+        else:
+            cmd = ['journalctl', '-n', lines, '--no-pager']
+            filename = 'system.log'
+
+        result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)
+
+        if result.returncode == 0:
+            # Write the output to a temporary file and stream it back
+            import tempfile
+            with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.log') as f:
+                f.write(result.stdout)
+                temp_path = f.name
+
+            return send_file(
+                temp_path,
+                mimetype='text/plain',
+                as_attachment=True,
+                download_name=f'proxmox_{filename}'
+            )
+        else:
+            return jsonify({'error': 'Failed to generate log file'}), 500
+
+    except Exception as e:
+        print(f"Error downloading logs: {e}")
+        return jsonify({'error': str(e)}), 500
+
+@app.route('/api/notifications', methods=['GET'])
+def api_notifications():
+    """Get Proxmox notification history"""
+    try:
+        notifications = []
+
+        # 1. Get notifications from journalctl (Proxmox notification services)
+        try:
+            cmd = [
+                'journalctl',
+                '-u', 'pve-ha-lrm',
+                '-u', 'pve-ha-crm',
+                '-u', 'pvedaemon',
+                '-u', 'pveproxy',
+                '-u', 'pvestatd',
+                '--grep', 'notification|email|webhook|alert|notify',
+                '-n', '100',
+                '--output', 'json',
+                '--no-pager'
+            ]
+            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
+
+            if result.returncode == 0:
+                for line in result.stdout.strip().split('\n'):
+                    if line:
+                        try:
+                            log_entry = json.loads(line)
+                            timestamp_us = int(log_entry.get('__REALTIME_TIMESTAMP', '0'))
+                            timestamp = datetime.fromtimestamp(timestamp_us / 1000000).strftime('%Y-%m-%d %H:%M:%S')
+
+                            message = log_entry.get('MESSAGE', '')
+
+                            # Determine notification type from the message
+                            notif_type = 'info'
+                            if 'email' in message.lower():
+                                notif_type = 'email'
+                            elif 'webhook' in message.lower():
+                                notif_type = 'webhook'
+                            elif 'alert' in message.lower() or 'warning' in message.lower():
+                                notif_type = 'alert'
+                            elif 'error' in message.lower() or 'fail' in message.lower():
+                                notif_type = 'error'
+
+                            notifications.append({
+                                'timestamp': timestamp,
+                                'type': notif_type,
+                                'service': log_entry.get('_SYSTEMD_UNIT', 'proxmox'),
+                                'message': message,
+                                'source': 'journal'
+                            })
+                        except (json.JSONDecodeError, ValueError):
+                            continue
+        except Exception as e:
+            print(f"Error reading notification logs: {e}")
+
+        # 2. Try to read the Proxmox notification configuration
+        try:
+            notif_config_path = '/etc/pve/notifications.cfg'
+            if os.path.exists(notif_config_path):
+                with open(notif_config_path, 'r') as f:
+                    config_content = f.read()
+                # Parse notification targets (emails, webhooks, etc.)
+                for line in config_content.split('\n'):
+                    if line.strip() and not line.startswith('#'):
+                        notifications.append({
+                            'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                            'type': 'config',
+                            'service': 'notification-config',
+                            'message': f'Notification target configured: {line.strip()}',
+                            'source': 'config'
+                        })
+        except Exception as e:
+            print(f"Error reading notification config: {e}")
+
+        # 3. Get backup notifications from the task log
+        try:
+            cmd = ['pvesh', 'get', '/cluster/tasks', '--output-format', 'json']
+            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
+
+            if result.returncode == 0:
+                tasks = json.loads(result.stdout)
+                for task in tasks:
+                    if task.get('type') in ['vzdump', 'backup']:
+                        status = task.get('status', 'unknown')
+                        notif_type = 'success' if status == 'OK' else 'error' if status == 'stopped' else 'info'
+
+                        notifications.append({
+                            'timestamp': datetime.fromtimestamp(task.get('starttime', 0)).strftime('%Y-%m-%d %H:%M:%S'),
+                            'type': notif_type,
+                            'service': 'backup',
+                            'message': f"Backup task {task.get('upid', 'unknown')}: {status}",
+                            'source': 'task-log'
+                        })
+        except Exception as e:
+            print(f"Error reading task notifications: {e}")
+
+        # Sort by timestamp (newest first)
+        notifications.sort(key=lambda x: x['timestamp'], reverse=True)
+
+        return jsonify({
+            'notifications': notifications[:100],  # Limit to the 100 most recent
+            'total': len(notifications)
+        })
+
+    except Exception as e:
+        print(f"Error getting notifications: {e}")
+        return jsonify({
+            'error': str(e),
+            'notifications': [],
+            'total': 0
+        })
 
 @app.route('/api/health', methods=['GET'])
@@ -3408,7 +3596,10 @@ def api_info():
             '/api/logs',
             '/api/health',
             '/api/hardware',
-            '/api/gpu/<gpu_id>/realtime'  # Added endpoint for GPU monitoring
+            '/api/gpu/<gpu_id>/realtime',  # Added endpoint for GPU monitoring
+            '/api/backups',  # Added backup endpoint
+            '/api/events',  # Added events endpoint
+            '/api/notifications'  # Added notifications endpoint
         ]
     })
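For manual verification of the new and changed endpoints, a minimal client-side smoke test — a sketch assuming the Flask server is reachable on localhost:8008 (the port the frontend uses) and Node 18+ for the global fetch; run with e.g. `npx tsx smoke.ts`:

// Smoke-test the endpoints touched by this PR.
const BASE = "http://localhost:8008"

async function main() {
  // /api/logs now returns { logs, total } instead of a bare array,
  // and forwards limit/priority/service to journalctl.
  const logsRes = await fetch(`${BASE}/api/logs?limit=50&priority=3`)
  const { logs, total } = await logsRes.json()
  console.log(`fetched ${logs.length} of ${total} error-level entries`)

  // /api/notifications aggregates journal entries, notifications.cfg targets,
  // and vzdump task results, newest first.
  const notifRes = await fetch(`${BASE}/api/notifications`)
  const { notifications } = await notifRes.json()
  console.log("latest notification:", notifications[0])

  // /api/logs/download streams a plain-text journalctl dump (system/kernel/auth).
  const dl = await fetch(`${BASE}/api/logs/download?type=kernel&lines=200`)
  console.log("download content-type:", dl.headers.get("content-type"))
}

main().catch(console.error)

One caveat worth keeping in mind: the download endpoint writes each dump to a `NamedTemporaryFile` with `delete=False` and never removes it, so repeated downloads accumulate files in the temp directory; cleaning up after `send_file` would be a reasonable follow-up.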