Mirror of https://github.com/MacRimi/ProxMenux.git, synced 2025-11-18 03:26:17 +00:00
Update AppImage
@@ -674,7 +674,7 @@ export default function Hardware() {
const utilizationNum =
typeof utilization === "string" ? Number.parseFloat(utilization) : utilization

-if (utilizationNum === 0) return null
+if (utilizationNum === 0 || isNaN(utilizationNum)) return null

return (
<div key={engineName} className="space-y-1">
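For context, this guard matters because Number.parseFloat returns NaN for values such as "N/A". The same defensive parsing can be done on the Python side before the value ever reaches the frontend; the following is a minimal sketch, not code from this commit, and parse_utilization plus the sample inputs are illustrative assumptions:

```python
import math

def parse_utilization(value):
    """Return a float utilization, or None when the value cannot be parsed,
    so callers can skip the entry instead of propagating NaN."""
    if isinstance(value, (int, float)):
        number = float(value)
    else:
        try:
            number = float(str(value).rstrip('%').strip())
        except ValueError:
            return None
    return None if math.isnan(number) else number

# Illustrative inputs: "37.5%" -> 37.5, "N/A" -> None, nan -> None
for sample in ("37.5%", "N/A", 0, float("nan")):
    print(repr(sample), "->", parse_utilization(sample))
```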
@@ -532,9 +532,11 @@ def get_storage_info():
else:
size_str = f"{disk_size_gb:.1f}G"

+disk_size_kb = disk_size_bytes / 1024
+
physical_disks[disk_name] = {
'name': disk_name,
-'size': size_str,
+'size': disk_size_kb,  # Now in KB instead of string with units
'size_bytes': disk_size_bytes,
'temperature': smart_data.get('temperature', 0),
'health': smart_data.get('health', 'unknown'),
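Since 'size' is now a plain KB number rather than a pre-formatted string like "465.8G", any consumer has to format it back for display. A minimal sketch of that conversion; format_size_kb is a hypothetical helper, not part of this commit:

```python
def format_size_kb(size_kb):
    """Render a size given in KB as a short human-readable string, e.g. '465.8G'."""
    value = float(size_kb)
    for suffix in ('K', 'M', 'G', 'T'):
        if value < 1024 or suffix == 'T':
            return f"{value:.1f}{suffix}"
        value /= 1024

# Example: a 500 GB disk reported in bytes, converted as in the diff above
disk_size_bytes = 500_107_862_016
disk_size_kb = disk_size_bytes / 1024
print(format_size_kb(disk_size_kb))  # -> "465.8G"
```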
@@ -1310,10 +1312,6 @@ def get_network_info():
network_data['vm_lxc_active_count'] = vm_lxc_active_count
network_data['vm_lxc_total_count'] = vm_lxc_total_count

-# Keep old counters for backward compatibility
-network_data['active_count'] = physical_active_count + bridge_active_count
-network_data['total_count'] = physical_total_count + bridge_total_count
-
print(f"[v0] Physical interfaces: {physical_active_count} active out of {physical_total_count} total")
print(f"[v0] Bridge interfaces: {bridge_active_count} active out of {bridge_total_count} total")
print(f"[v0] VM/LXC interfaces: {vm_lxc_active_count} active out of {vm_lxc_total_count} total")
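The hunk only shows the aggregation step, so here is a minimal sketch of how interfaces can be split into the physical, bridge, and VM/LXC buckets that feed these counters. The naming heuristics (vmbr* bridges, veth*/tap* guest interfaces) follow common Proxmox conventions and are an assumption, not this commit's code:

```python
def count_interfaces(interfaces):
    """Count active/total interfaces per bucket.

    `interfaces` is assumed to be a list of dicts with 'name' and 'is_up' keys.
    """
    counts = {
        'physical': {'active': 0, 'total': 0},
        'bridge': {'active': 0, 'total': 0},
        'vm_lxc': {'active': 0, 'total': 0},
    }
    for iface in interfaces:
        name = iface['name']
        if name.startswith('vmbr'):
            bucket = 'bridge'
        elif name.startswith(('veth', 'tap')):
            bucket = 'vm_lxc'
        else:
            bucket = 'physical'
        counts[bucket]['total'] += 1
        if iface.get('is_up'):
            counts[bucket]['active'] += 1
    return counts

print(count_interfaces([
    {'name': 'enp3s0', 'is_up': True},
    {'name': 'vmbr0', 'is_up': True},
    {'name': 'tap100i0', 'is_up': False},
]))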
@@ -2188,20 +2186,14 @@ def get_detailed_gpu_info(gpu):
process_info = {
'pid': pid,
'name': name,
-'memory_used_mb': memory_mb,
-'type': proc_type,
-'engines': {}
+'memory': memory_mb,  # Changed key from memory_used_mb to memory for consistency
+'engines': {}  # Leave engines empty for NVIDIA since we don't have per-process utilization
}

-# For NVIDIA, we don't have per-process engine utilization
-# But we can indicate the type of workload
-if proc_type == 'G':
-process_info['engines']['Graphics'] = 'Active'
-elif proc_type == 'C':
-process_info['engines']['Compute'] = 'Active'
+# The process type (C/G) is informational only

processes.append(process_info)
-print(f"[v0] Process: {name} (PID: {pid}) - {memory_mb} MB - Type: {proc_type}", flush=True)
+print(f"[v0] Found process: {name} (PID: {pid}, Memory: {memory_mb} MB)", flush=True)
except (ValueError, AttributeError) as e:
print(f"[v0] Error parsing process: {e}", flush=True)
continue
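A minimal sketch of how such a process list can be built from nvidia-smi's query mode, producing dicts shaped like the new process_info (renamed 'memory' key, empty 'engines'). The exact command the script uses is not shown in this hunk; --query-compute-apps only lists compute processes, while the diff also handles the G type, so treat the query below as an assumption:

```python
import subprocess

def list_nvidia_processes():
    """Return GPU processes as dicts shaped like process_info in the diff above."""
    processes = []
    try:
        result = subprocess.run(
            ['nvidia-smi', '--query-compute-apps=pid,process_name,used_memory',
             '--format=csv,noheader,nounits'],
            capture_output=True, text=True, timeout=5)
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return processes
    if result.returncode != 0:
        return processes
    for line in result.stdout.strip().split('\n'):
        if not line:
            continue
        try:
            pid, name, memory_mb = [part.strip() for part in line.split(',')]
            processes.append({
                'pid': int(pid),
                'name': name,
                'memory': float(memory_mb),  # MB, matching the renamed key
                'engines': {},               # no per-process engine utilization on NVIDIA
            })
        except (ValueError, AttributeError):
            continue
    return processes

if __name__ == '__main__':
    print(list_nvidia_processes())
```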
@@ -2657,112 +2649,6 @@ def get_network_hardware_info(pci_slot):
return net_info

-def get_gpu_info():
-"""Detect and return information about GPUs in the system"""
-gpus = []
-
-try:
-result = subprocess.run(['lspci'], capture_output=True, text=True, timeout=5)
-if result.returncode == 0:
-for line in result.stdout.split('\n'):
-# Match VGA, 3D, Display controllers
-if any(keyword in line for keyword in ['VGA compatible controller', '3D controller', 'Display controller']):
-
-parts = line.split(' ', 1)
-if len(parts) >= 2:
-slot = parts[0].strip()
-remaining = parts[1]
-
-if ':' in remaining:
-class_and_name = remaining.split(':', 1)
-gpu_name = class_and_name[1].strip() if len(class_and_name) > 1 else remaining.strip()
-else:
-gpu_name = remaining.strip()
-
-# Determine vendor
-vendor = 'Unknown'
-if 'NVIDIA' in gpu_name or 'nVidia' in gpu_name:
-vendor = 'NVIDIA'
-elif 'AMD' in gpu_name or 'ATI' in gpu_name or 'Radeon' in gpu_name:
-vendor = 'AMD'
-elif 'Intel' in gpu_name:
-vendor = 'Intel'
-
-gpu = {
-'slot': slot,
-'name': gpu_name,
-'vendor': vendor,
-'type': 'Discrete' if vendor in ['NVIDIA', 'AMD'] else 'Integrated'
-}
-
-pci_info = get_pci_device_info(slot)
-if pci_info:
-gpu['pci_class'] = pci_info.get('class', '')
-gpu['pci_driver'] = pci_info.get('driver', '')
-gpu['pci_kernel_module'] = pci_info.get('kernel_module', '')
-
-# detailed_info = get_detailed_gpu_info(gpu) # Removed this call here
-# gpu.update(detailed_info) # It will be called later in api_gpu_realtime
-
-gpus.append(gpu)
-print(f"[v0] Found GPU: {gpu_name} ({vendor}) at slot {slot}")
-
-except Exception as e:
-print(f"[v0] Error detecting GPUs from lspci: {e}")
-
-try:
-result = subprocess.run(['sensors'], capture_output=True, text=True, timeout=5)
-if result.returncode == 0:
-current_adapter = None
-
-for line in result.stdout.split('\n'):
-line = line.strip()
-if not line:
-continue
-
-# Detect adapter line
-if line.startswith('Adapter:'):
-current_adapter = line.replace('Adapter:', '').strip()
-continue
-
-# Look for GPU-related sensors (nouveau, amdgpu, radeon, i915)
-if ':' in line and not line.startswith(' '):
-parts = line.split(':', 1)
-sensor_name = parts[0].strip()
-value_part = parts[1].strip()
-
-# Check if this is a GPU sensor
-gpu_sensor_keywords = ['nouveau', 'amdgpu', 'radeon', 'i915']
-is_gpu_sensor = any(keyword in current_adapter.lower() if current_adapter else False for keyword in gpu_sensor_keywords)
-
-if is_gpu_sensor:
-# Try to match this sensor to a GPU
-for gpu in gpus:
-# Match nouveau to NVIDIA, amdgpu/radeon to AMD, i915 to Intel
-if (('nouveau' in current_adapter.lower() and gpu['vendor'] == 'NVIDIA') or
-(('amdgpu' in current_adapter.lower() or 'radeon' in current_adapter.lower()) and gpu['vendor'] == 'AMD') or
-('i915' in current_adapter.lower() and gpu['vendor'] == 'Intel')):
-
-# Parse temperature (only if not already set by nvidia-smi)
-if 'temperature' not in gpu or gpu['temperature'] is None:
-if '°C' in value_part or 'C' in value_part:
-temp_match = re.search(r'([+-]?[\d.]+)\s*°?C', value_part)
-if temp_match:
-gpu['temperature'] = float(temp_match.group(1))
-print(f"[v0] GPU {gpu['name']}: Temperature = {gpu['temperature']}°C")
-
-# Parse fan speed
-elif 'RPM' in value_part:
-rpm_match = re.search(r'([\d.]+)\s*RPM', value_part)
-if rpm_match:
-gpu['fan_speed'] = int(float(rpm_match.group(1)))
-gpu['fan_unit'] = 'RPM'
-print(f"[v0] GPU {gpu['name']}: Fan = {gpu['fan_speed']} RPM")
-except Exception as e:
-print(f"[v0] Error enriching GPU data from sensors: {e}")
-
-return gpus

def get_disk_hardware_info(disk_name):
"""Get detailed hardware information for a disk"""
disk_info = {}
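The surviving comment in an earlier hunk says the detailed enrichment is deferred and "will be called later in api_gpu_realtime". A minimal sketch of that deferred pattern, assuming GPU enumeration is still available through a get_gpu_info-style helper and that get_detailed_gpu_info remains the script's expensive per-GPU call; the route path and index handling here are illustrative assumptions, not this commit's code:

```python
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/api/gpu/<int:index>/realtime')
def api_gpu_realtime(index):
    """Attach expensive per-GPU details only when the realtime endpoint is hit."""
    gpus = get_gpu_info()                   # cheap enumeration (lspci + sensors)
    if index >= len(gpus):
        return jsonify({'error': 'GPU not found'}), 404
    gpu = dict(gpus[index])                 # copy of the cheap record
    gpu.update(get_detailed_gpu_info(gpu))  # deferred: nvidia-smi / driver-level detail
    return jsonify(gpu)
```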
@@ -3644,7 +3530,7 @@ def api_vm_control(vmid):
'error': control_result.stderr
}), 500
else:
-return jsonify({'error': 'Failed to control VM'}), 500
+return jsonify({'error': 'Failed to get VM details'}), 500
except Exception as e:
print(f"Error controlling VM: {e}")
return jsonify({'error': str(e)}), 500
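For context, these error paths wrap a VM control command whose failures are mapped to HTTP 500 JSON responses. A minimal sketch of that flow with Flask and the Proxmox qm CLI; the route path, request body shape, and command construction are illustrative assumptions, not this commit's exact code:

```python
import subprocess
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/api/vms/<int:vmid>/control', methods=['POST'])
def api_vm_control(vmid):
    """Run a qm action against a VM and map failures to JSON errors."""
    try:
        action = (request.get_json(silent=True) or {}).get('action', 'start')
        if action not in ('start', 'stop', 'shutdown', 'reset'):
            return jsonify({'error': f'Unsupported action: {action}'}), 400
        control_result = subprocess.run(['qm', action, str(vmid)],
                                        capture_output=True, text=True, timeout=30)
        if control_result.returncode != 0:
            # Same shape as the diff above: surface stderr with a 500
            return jsonify({'success': False, 'error': control_result.stderr}), 500
        return jsonify({'success': True, 'vmid': vmid, 'action': action})
    except Exception as e:
        print(f"Error controlling VM: {e}")
        return jsonify({'error': str(e)}), 500
```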