From 2344935357d62e9f2324c1fbc468fd7c675e428f Mon Sep 17 00:00:00 2001 From: MacRimi Date: Mon, 13 Apr 2026 14:49:48 +0200 Subject: [PATCH] update storage-overview.tsx --- AppImage/components/storage-overview.tsx | 787 +++++- AppImage/scripts/flask_server.py | 364 ++- .../backup_restore/apply_pending_restore.sh | 166 ++ scripts/backup_restore/backup_host.sh | 2428 +++++++++++------ scripts/backup_restore/backup_scheduler.sh | 387 +++ .../backup_restore/lib_host_backup_common.sh | 770 ++++++ .../backup_restore/run_scheduled_backup.sh | 243 ++ scripts/backup_restore/test_backup_restore.sh | 284 ++ scripts/storage/smart-disk-test.sh | 58 +- scripts/storage/smart-scheduled-test.sh | 195 ++ 10 files changed, 4798 insertions(+), 884 deletions(-) create mode 100644 scripts/backup_restore/apply_pending_restore.sh create mode 100644 scripts/backup_restore/backup_scheduler.sh create mode 100644 scripts/backup_restore/lib_host_backup_common.sh create mode 100644 scripts/backup_restore/run_scheduled_backup.sh create mode 100644 scripts/backup_restore/test_backup_restore.sh create mode 100644 scripts/storage/smart-scheduled-test.sh diff --git a/AppImage/components/storage-overview.tsx b/AppImage/components/storage-overview.tsx index 6153d5dd..15ecd0b0 100644 --- a/AppImage/components/storage-overview.tsx +++ b/AppImage/components/storage-overview.tsx @@ -2,7 +2,7 @@ import { useEffect, useState } from "react" import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card" -import { HardDrive, Database, AlertTriangle, CheckCircle2, XCircle, Square, Thermometer, Archive, Info, Clock, Usb, Server, Activity, FileText, Play, Loader2, Download } from "lucide-react" +import { HardDrive, Database, AlertTriangle, CheckCircle2, XCircle, Square, Thermometer, Archive, Info, Clock, Usb, Server, Activity, FileText, Play, Loader2, Download, Plus, Trash2, Settings } from "lucide-react" import { Badge } from "@/components/ui/badge" import { Progress } from 
"@/components/ui/progress" import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog" @@ -121,7 +121,15 @@ export function StorageOverview() { const [detailsOpen, setDetailsOpen] = useState(false) const [diskObservations, setDiskObservations] = useState([]) const [loadingObservations, setLoadingObservations] = useState(false) - const [activeModalTab, setActiveModalTab] = useState<"overview" | "smart">("overview") + const [activeModalTab, setActiveModalTab] = useState<"overview" | "smart" | "schedule">("overview") + const [smartJsonData, setSmartJsonData] = useState<{ + has_data: boolean + data?: Record + timestamp?: string + test_type?: string + history?: Array<{ filename: string; timestamp: string; test_type: string; date_readable: string }> + } | null>(null) + const [loadingSmartJson, setLoadingSmartJson] = useState(false) const fetchStorageData = async () => { try { @@ -269,21 +277,46 @@ export function StorageOverview() { setSelectedDisk(disk) setDetailsOpen(true) setDiskObservations([]) + setSmartJsonData(null) - // Always attempt to fetch observations -- the count enrichment may lag - // behind the actual observation recording (especially for USB disks). 
+ // Fetch observations and SMART JSON data in parallel setLoadingObservations(true) - try { - const params = new URLSearchParams() - if (disk.name) params.set('device', disk.name) - if (disk.serial && disk.serial !== 'Unknown') params.set('serial', disk.serial) - const data = await fetchApi<{ observations: DiskObservation[] }>(`/api/storage/observations?${params.toString()}`) - setDiskObservations(data.observations || []) - } catch { - setDiskObservations([]) - } finally { - setLoadingObservations(false) + setLoadingSmartJson(true) + + // Fetch observations + const fetchObservations = async () => { + try { + const params = new URLSearchParams() + if (disk.name) params.set('device', disk.name) + if (disk.serial && disk.serial !== 'Unknown') params.set('serial', disk.serial) + const data = await fetchApi<{ observations: DiskObservation[] }>(`/api/storage/observations?${params.toString()}`) + setDiskObservations(data.observations || []) + } catch { + setDiskObservations([]) + } finally { + setLoadingObservations(false) + } } + + // Fetch SMART JSON data from real test if available + const fetchSmartJson = async () => { + try { + const data = await fetchApi<{ + has_data: boolean + data?: Record + timestamp?: string + test_type?: string + }>(`/api/storage/smart/${disk.name}/latest`) + setSmartJsonData(data) + } catch { + setSmartJsonData({ has_data: false }) + } finally { + setLoadingSmartJson(false) + } + } + + // Run both in parallel + await Promise.all([fetchObservations(), fetchSmartJson()]) } const formatObsDate = (iso: string) => { @@ -1205,7 +1238,10 @@ export function StorageOverview() { {/* Disk Details Dialog */} { setDetailsOpen(open) - if (!open) setActiveModalTab("overview") + if (!open) { + setActiveModalTab("overview") + setSmartJsonData(null) + } }}> @@ -1255,6 +1291,17 @@ export function StorageOverview() { SMART Test + {/* Tab Content */} @@ -1389,6 +1436,153 @@ export function StorageOverview() { + {/* SMART Test Data Section (from real test JSON) 
*/} + {(loadingSmartJson || smartJsonData?.has_data) && ( +
+

+ + SMART Test Data + {smartJsonData?.has_data && ( + + Real Test + + )} +

+ {loadingSmartJson ? ( +
+
+ Loading SMART test data... +
+ ) : smartJsonData?.has_data && smartJsonData.data ? ( +
+ {/* Last Test Info */} +
+
+

Last Test Date

+

+ {smartJsonData.timestamp + ? new Date(smartJsonData.timestamp).toLocaleString() + : 'Unknown'} +

+
+
+

Test Type

+

{smartJsonData.test_type || 'Unknown'}

+
+
+ + {/* SSD Life Estimation from JSON (if available) */} + {(() => { + const data = smartJsonData.data as Record + const ataAttrs = data?.ata_smart_attributes as { table?: Array<{ id: number; name: string; value: number; raw?: { value: number } }> } + const table = ataAttrs?.table || [] + + // Look for wear-related attributes + const wearAttr = table.find(a => + a.name?.toLowerCase().includes('wear_leveling') || + a.name?.toLowerCase().includes('media_wearout') || + a.name?.toLowerCase().includes('percent_lifetime') || + a.id === 177 || a.id === 231 || a.id === 233 + ) + + // Look for total LBAs written + const lbasAttr = table.find(a => + a.name?.toLowerCase().includes('total_lbas_written') || + a.id === 241 + ) + + if (wearAttr || lbasAttr) { + return ( +
+

From Real SMART Test

+
+ {wearAttr && ( +
+

{wearAttr.name?.replace(/_/g, ' ')}

+

+ {wearAttr.value}% +

+
+ )} + {lbasAttr && lbasAttr.raw?.value && ( +
+

Total Data Written

+

+ {(() => { + const tbWritten = (lbasAttr.raw.value * 512) / (1024 ** 4) + return tbWritten >= 1 + ? `${tbWritten.toFixed(2)} TB` + : `${(tbWritten * 1024).toFixed(2)} GB` + })()} +

+
+ )} +
+
+ ) + } + + // For NVMe, check nvme_smart_health_information_log + const nvmeHealth = data?.nvme_smart_health_information_log as Record + if (nvmeHealth) { + const percentUsed = nvmeHealth.percentage_used as number + const dataUnitsWritten = nvmeHealth.data_units_written as number + const availableSpare = nvmeHealth.available_spare as number + + return ( +
+

From Real SMART Test (NVMe)

+
+ {percentUsed !== undefined && ( +
+

Percent Used

+

80 ? 'text-red-400' : percentUsed > 50 ? 'text-yellow-400' : 'text-green-400'}`}> + {percentUsed}% +

+
+ )} + {availableSpare !== undefined && ( +
+

Available Spare

+

+ {availableSpare}% +

+
+ )} + {dataUnitsWritten !== undefined && ( +
+

Total Data Written

+

+ {(() => { + const tbWritten = (dataUnitsWritten * 512000) / (1024 ** 4) + return tbWritten >= 1 + ? `${tbWritten.toFixed(2)} TB` + : `${(tbWritten * 1024).toFixed(2)} GB` + })()} +

+
+ )} +
+
+ ) + } + + return null + })()} + +

+ Run a SMART test in the SMART Test tab for more detailed analysis. +

+
+ ) : ( +
+

No SMART test data available for this disk.

+

Run a SMART test in the SMART Test tab to get detailed health information.

+
+ )} +
+ )} + {/* Observations Section */} {(diskObservations.length > 0 || loadingObservations) && (
@@ -1463,6 +1657,11 @@ export function StorageOverview() { {selectedDisk && activeModalTab === "smart" && ( )} + + {/* Schedule Tab */} + {selectedDisk && activeModalTab === "schedule" && ( + + )}
@@ -1670,40 +1869,40 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
${errorTypeLabel} ${severityLabel} - ID: #${obs.id} - Occurrences: ${obs.occurrence_count} + ID: #${obs.id} + Occurrences: ${obs.occurrence_count} ${dismissedBadge}
-
Error Signature:
+
Error Signature:
${obs.error_signature}
-
Raw Message:
+
Raw Message:
${obs.raw_message || 'N/A'}
- Device: + Device: ${obs.device_name || disk.name}
- Serial: + Serial: ${obs.serial || disk.serial || 'N/A'}
- Model: + Model: ${obs.model || disk.model || 'N/A'}
- First Seen: + First Seen: ${firstDate}
- Last Seen: + Last Seen: ${lastDate}
@@ -1715,7 +1914,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
${typeLabel} - ${obsList.length} unique, ${groupOccurrences} total + ${obsList.length} unique, ${groupOccurrences} total
${obsItemsHtml} @@ -1729,7 +1928,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
${obsSecNum}. Observations & Events (${observations.length} recorded, ${totalOccurrences} total occurrences)
-

The following events have been detected and logged for this disk. These observations may indicate potential issues that require attention.

+

The following events have been detected and logged for this disk. These observations may indicate potential issues that require attention.

${groupsHtml}
` @@ -1898,7 +2097,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
${isHealthy ? '✓' : '✗'}
${healthLabel}
-
SMART Status
+
SMART Status

Disk Health Assessment

@@ -1910,6 +2109,51 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri

+ + +
+
+ ${isHealthy ? 'What does this mean? Your disk is healthy!' : (hasCritical ? 'ATTENTION REQUIRED: Problems detected' : 'Some issues need monitoring')} +
+

+ ${isHealthy + ? 'In simple terms: This disk is working properly. You can continue using it normally. We recommend running periodic SMART tests (monthly) to catch any issues early.' + : (hasCritical + ? 'In simple terms: This disk has problems that could cause data loss. You should back up your important files immediately and consider replacing the disk soon.' + : 'In simple terms: The disk is working but shows some signs of wear. It is not critical yet, but you should monitor it closely and ensure your backups are up to date.' + ) + } +

+ ${!isHealthy && criticalAttrs.length > 0 ? ` +
+
Issues found:
+
    + ${criticalAttrs.slice(0, 3).map(a => `
  • ${a.name.replace(/_/g, ' ')}: ${a.status === 'critical' ? 'Critical - requires immediate attention' : 'Warning - should be monitored'}
  • `).join('')} + ${criticalAttrs.length > 3 ? `
  • ...and ${criticalAttrs.length - 3} more issues (see details below)
  • ` : ''} +
+
+ ` : ''} +
+ + +
+
+
Report Generated
+
${now}
+
+
+
Last Test Type
+
${testStatus.last_test?.type || 'N/A'}
+
+
+
Test Result
+
${testStatus.last_test?.status || 'N/A'}
+
+
+
Attributes Checked
+
${smartAttributes.length}
+
+
@@ -1937,12 +2181,12 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
${disk.temperature > 0 ? disk.temperature + '°C' : 'N/A'}
Temperature
-
Optimal: ${tempThresholds.optimal}
+
Optimal: ${tempThresholds.optimal}
${powerOnHours.toLocaleString()}h
Power On Time
-
${powerOnYears}y ${powerOnDays}d
+
${powerOnYears}y ${powerOnDays}d
${(disk.power_cycles ?? 0).toLocaleString()}
@@ -1980,7 +2224,7 @@ ${isNvmeDisk ? `
-
LIFE REMAINING
+
LIFE REMAINING
@@ -1996,11 +2240,11 @@ ${isNvmeDisk ? `
-
USAGE STATISTICS
+
USAGE STATISTICS
- Percentage Used + Percentage Used ${nvmePercentUsed}%
@@ -2010,7 +2254,7 @@ ${isNvmeDisk ? `
- Available Spare + Available Spare ${nvmeAvailSpare}%
@@ -2020,11 +2264,11 @@ ${isNvmeDisk ? `
-
Data Written
+
Data Written
${nvmeDataWrittenTB >= 1 ? nvmeDataWrittenTB.toFixed(2) + ' TB' : (nvmeDataWrittenTB * 1024).toFixed(1) + ' GB'}
-
Power Cycles
+
Power Cycles
${testStatus.smart_data?.nvme_raw?.power_cycles?.toLocaleString() ?? disk.power_cycles ?? 'N/A'}
@@ -2033,9 +2277,104 @@ ${isNvmeDisk ? `
` : ''} - +${!isNvmeDisk && diskType === 'SSD' ? (() => { + // Try to find SSD wear indicators from SMART attributes + const wearAttr = smartAttributes.find(a => + a.name?.toLowerCase().includes('wear_leveling') || + a.name?.toLowerCase().includes('media_wearout') || + a.name?.toLowerCase().includes('percent_lifetime') || + a.name?.toLowerCase().includes('ssd_life_left') || + a.id === 177 || a.id === 231 || a.id === 233 + ) + + const lbasWrittenAttr = smartAttributes.find(a => + a.name?.toLowerCase().includes('total_lbas_written') || + a.id === 241 + ) + + // Also check disk properties + const wearValue = wearAttr?.value ?? disk.wear_leveling_count ?? disk.ssd_life_left + + if (wearValue !== undefined && wearValue !== null) { + const lifeRemaining = wearValue // Usually this is percentage remaining + const lifeUsed = 100 - lifeRemaining + + // Calculate data written from LBAs (LBA = 512 bytes) + let dataWrittenTB = 0 + if (lbasWrittenAttr?.raw_value) { + const rawValue = parseInt(lbasWrittenAttr.raw_value.replace(/[^0-9]/g, '')) + if (!isNaN(rawValue)) { + dataWrittenTB = (rawValue * 512) / (1024 ** 4) + } + } else if (disk.total_lbas_written) { + dataWrittenTB = disk.total_lbas_written / 1024 // Already in GB + } + + return ` +
-
${isNvmeDisk ? '4' : '3'}. ${isNvmeDisk ? 'NVMe Health Metrics' : 'SMART Attributes'} (${smartAttributes.length} total${hasCritical ? `, ${criticalAttrs.length} warning(s)` : ''})
+
3. SSD Wear & Lifetime
+
+ +
+
LIFE REMAINING
+
+ + + + +
+
${lifeRemaining}%
+
+
+
+ Source: ${wearAttr?.name?.replace(/_/g, ' ') || 'SSD Life Indicator'} +
+
+ + +
+
USAGE STATISTICS
+ +
+
+ Wear Level + ${lifeUsed}% +
+
+
+
+
+ + ${dataWrittenTB > 0 ? ` +
+
+
Data Written
+
${dataWrittenTB >= 1 ? dataWrittenTB.toFixed(2) + ' TB' : (dataWrittenTB * 1024).toFixed(1) + ' GB'}
+
+
+
Power On Hours
+
${powerOnHours.toLocaleString()}h
+
+
+ ` : ''} + +
+ Note: SSD life estimates are based on manufacturer-reported wear indicators. + Actual lifespan may vary based on workload and usage patterns. +
+
+
+
+` + } + return '' +})() : ''} + + +
+
${isNvmeDisk ? '4' : (diskType === 'SSD' && (disk.wear_leveling_count !== undefined || disk.ssd_life_left !== undefined || smartAttributes.some(a => a.name?.toLowerCase().includes('wear'))) ? '4' : '3')}. ${isNvmeDisk ? 'NVMe Health Metrics' : 'SMART Attributes'} (${smartAttributes.length} total${hasCritical ? `, ${criticalAttrs.length} warning(s)` : ''})
@@ -2049,7 +2388,7 @@ ${isNvmeDisk ? ` - ${attributeRows || ''} + ${attributeRows || ''}
No ' + (isNvmeDisk ? 'NVMe metrics' : 'SMART attributes') + ' available
No ' + (isNvmeDisk ? 'NVMe metrics' : 'SMART attributes') + ' available
@@ -2077,7 +2416,7 @@ ${isNvmeDisk ? `
` : ` -
+
No self-test history available. Run a SMART self-test to see results here.
`} @@ -2508,3 +2847,373 @@ function SmartTestTab({ disk, observations = [] }: SmartTestTabProps) {
) } + +// ─── Schedule Tab Component ───────────────────────────────────────────────────── + +interface SmartSchedule { + id: string + active: boolean + test_type: 'short' | 'long' + frequency: 'daily' | 'weekly' | 'monthly' + hour: number + minute: number + day_of_week: number + day_of_month: number + disks: string[] + retention: number + notify_on_complete: boolean + notify_only_on_failure: boolean +} + +interface ScheduleConfig { + enabled: boolean + schedules: SmartSchedule[] +} + +function ScheduleTab({ disk }: { disk: DiskInfo }) { + const [config, setConfig] = useState({ enabled: true, schedules: [] }) + const [loading, setLoading] = useState(true) + const [saving, setSaving] = useState(false) + const [showForm, setShowForm] = useState(false) + const [editingSchedule, setEditingSchedule] = useState(null) + + // Form state + const [formData, setFormData] = useState>({ + test_type: 'short', + frequency: 'weekly', + hour: 3, + minute: 0, + day_of_week: 0, + day_of_month: 1, + disks: ['all'], + retention: 10, + active: true, + notify_on_complete: true, + notify_only_on_failure: false + }) + + const fetchSchedules = async () => { + try { + setLoading(true) + const data = await fetchApi('/api/storage/smart/schedules') + setConfig(data) + } catch { + console.error('Failed to load schedules') + } finally { + setLoading(false) + } + } + + useEffect(() => { + fetchSchedules() + }, []) + + const handleToggleGlobal = async () => { + try { + setSaving(true) + await fetchApi('/api/storage/smart/schedules/toggle', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ enabled: !config.enabled }) + }) + setConfig(prev => ({ ...prev, enabled: !prev.enabled })) + } catch { + console.error('Failed to toggle schedules') + } finally { + setSaving(false) + } + } + + const handleSaveSchedule = async () => { + try { + setSaving(true) + const scheduleData = { + ...formData, + id: editingSchedule?.id || undefined + } + + await 
fetchApi('/api/storage/smart/schedules', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(scheduleData) + }) + + await fetchSchedules() + setShowForm(false) + setEditingSchedule(null) + resetForm() + } catch { + console.error('Failed to save schedule') + } finally { + setSaving(false) + } + } + + const handleDeleteSchedule = async (id: string) => { + try { + setSaving(true) + await fetchApi(`/api/storage/smart/schedules/${id}`, { + method: 'DELETE' + }) + await fetchSchedules() + } catch { + console.error('Failed to delete schedule') + } finally { + setSaving(false) + } + } + + const resetForm = () => { + setFormData({ + test_type: 'short', + frequency: 'weekly', + hour: 3, + minute: 0, + day_of_week: 0, + day_of_month: 1, + disks: ['all'], + retention: 10, + active: true, + notify_on_complete: true, + notify_only_on_failure: false + }) + } + + const editSchedule = (schedule: SmartSchedule) => { + setEditingSchedule(schedule) + setFormData(schedule) + setShowForm(true) + } + + const dayNames = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'] + + const formatScheduleTime = (schedule: SmartSchedule) => { + const time = `${schedule.hour.toString().padStart(2, '0')}:${schedule.minute.toString().padStart(2, '0')}` + if (schedule.frequency === 'daily') return `Daily at ${time}` + if (schedule.frequency === 'weekly') return `${dayNames[schedule.day_of_week]}s at ${time}` + return `Day ${schedule.day_of_month} of month at ${time}` + } + + if (loading) { + return ( +
+
+ Loading schedules... +
+ ) + } + + return ( +
+ {/* Global Toggle */} +
+
+

Automatic SMART Tests

+

Enable or disable all scheduled tests

+
+ +
+ + {/* Schedules List */} + {config.schedules.length > 0 ? ( +
+

Configured Schedules

+ {config.schedules.map(schedule => ( +
+
+
+
+ + {schedule.test_type} + + {formatScheduleTime(schedule)} +
+
+ Disks: {schedule.disks.includes('all') ? 'All disks' : schedule.disks.join(', ')} | + Keep {schedule.retention} results +
+
+
+ + +
+
+
+ ))} +
+ ) : ( +
+ +

No scheduled tests configured

+

Create a schedule to automatically run SMART tests

+
+ )} + + {/* Add/Edit Form */} + {showForm ? ( +
+

{editingSchedule ? 'Edit Schedule' : 'New Schedule'}

+ +
+
+ + +
+ +
+ + +
+ + {formData.frequency === 'weekly' && ( +
+ + +
+ )} + + {formData.frequency === 'monthly' && ( +
+ + +
+ )} + +
+ + +
+ +
+ + +
+
+ +
+ +
+ +
+ + +
+
+ ) : ( + + )} + +

+ Scheduled tests run automatically via cron. Results are saved to the SMART history. +

+
+ ) +} diff --git a/AppImage/scripts/flask_server.py b/AppImage/scripts/flask_server.py index 209c1d9e..241f95e4 100644 --- a/AppImage/scripts/flask_server.py +++ b/AppImage/scripts/flask_server.py @@ -6353,14 +6353,103 @@ def api_proxmox_storage(): # ─── SMART Disk Testing API ─────────────────────────────────────────────────── SMART_DIR = '/usr/local/share/proxmenux/smart' +SMART_CONFIG_DIR = '/usr/local/share/proxmenux/smart/config' +DEFAULT_SMART_RETENTION = 10 # Keep last 10 JSON files per disk by default def _is_nvme(disk_name): """Check if disk is NVMe (supports names like nvme0n1, nvme0n1p1).""" return 'nvme' in disk_name -def _get_smart_json_path(disk_name): - """Get path to SMART JSON file for a disk.""" - return os.path.join(SMART_DIR, f"{disk_name}.json") +def _get_smart_disk_dir(disk_name): + """Get directory path for a disk's SMART JSON files.""" + return os.path.join(SMART_DIR, disk_name) + +def _get_smart_json_path(disk_name, test_type='short'): + """Get path to a new SMART JSON file for a disk with timestamp.""" + disk_dir = _get_smart_disk_dir(disk_name) + os.makedirs(disk_dir, exist_ok=True) + timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S') + return os.path.join(disk_dir, f"{timestamp}_{test_type}.json") + +def _get_latest_smart_json(disk_name): + """Get the most recent SMART JSON file for a disk.""" + disk_dir = _get_smart_disk_dir(disk_name) + if not os.path.exists(disk_dir): + return None + + json_files = sorted( + [f for f in os.listdir(disk_dir) if f.endswith('.json')], + reverse=True # Most recent first (timestamp-based naming) + ) + + if json_files: + return os.path.join(disk_dir, json_files[0]) + return None + +def _get_smart_history(disk_name, limit=None): + """Get list of all SMART JSON files for a disk, sorted by date (newest first).""" + disk_dir = _get_smart_disk_dir(disk_name) + if not os.path.exists(disk_dir): + return [] + + json_files = sorted( + [f for f in os.listdir(disk_dir) if f.endswith('.json')], + reverse=True + 
) + + if limit: + json_files = json_files[:limit] + + result = [] + for filename in json_files: + # Parse timestamp and test type from filename: 2026-04-13T10-30-00_short.json + parts = filename.replace('.json', '').split('_') + if len(parts) >= 2: + timestamp_str = parts[0] + test_type = parts[1] + try: + # Convert back to readable format + dt = datetime.strptime(timestamp_str, '%Y-%m-%dT%H-%M-%S') + result.append({ + 'filename': filename, + 'path': os.path.join(disk_dir, filename), + 'timestamp': dt.isoformat(), + 'test_type': test_type, + 'date_readable': dt.strftime('%Y-%m-%d %H:%M:%S') + }) + except ValueError: + # Filename doesn't match expected format, skip + pass + + return result + +def _cleanup_old_smart_jsons(disk_name, retention=None): + """Remove old SMART JSON files, keeping only the most recent ones.""" + if retention is None: + retention = DEFAULT_SMART_RETENTION + + if retention <= 0: # 0 or negative means keep all + return 0 + + disk_dir = _get_smart_disk_dir(disk_name) + if not os.path.exists(disk_dir): + return 0 + + json_files = sorted( + [f for f in os.listdir(disk_dir) if f.endswith('.json')], + reverse=True # Most recent first + ) + + removed = 0 + # Keep first 'retention' files, delete the rest + for old_file in json_files[retention:]: + try: + os.remove(os.path.join(disk_dir, old_file)) + removed += 1 + except Exception: + pass + + return removed def _ensure_smart_tools(install_if_missing=False): """Check if SMART tools are installed and optionally install them.""" @@ -6483,14 +6572,17 @@ def api_smart_status(disk_name): result['error'] = 'smartmontools not installed' return jsonify(result) - # Check for existing JSON file (from previous test) - json_path = _get_smart_json_path(disk_name) - if os.path.exists(json_path): + # Check for existing JSON file (from previous test) - get most recent + json_path = _get_latest_smart_json(disk_name) + if json_path and os.path.exists(json_path): try: with open(json_path, 'r') as f: saved_data = 
json.load(f) result['saved_data'] = saved_data result['saved_timestamp'] = os.path.getmtime(json_path) + result['saved_path'] = json_path + # Get test history + result['test_history'] = _get_smart_history(disk_name, limit=10) except (json.JSONDecodeError, IOError): pass @@ -6712,6 +6804,74 @@ def api_smart_status(disk_name): return jsonify({'error': str(e)}), 500 +@app.route('/api/storage/smart//history', methods=['GET']) +@require_auth +def api_smart_history(disk_name): + """Get SMART test history for a disk.""" + try: + # Validate disk name (security) + if not re.match(r'^[a-zA-Z0-9]+$', disk_name): + return jsonify({'error': 'Invalid disk name'}), 400 + + limit = request.args.get('limit', 20, type=int) + history = _get_smart_history(disk_name, limit=limit) + + return jsonify({ + 'disk': disk_name, + 'history': history, + 'total': len(history) + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 + + +@app.route('/api/storage/smart//latest', methods=['GET']) +@require_auth +def api_smart_latest(disk_name): + """Get the most recent SMART JSON data for a disk.""" + try: + # Validate disk name (security) + if not re.match(r'^[a-zA-Z0-9]+$', disk_name): + return jsonify({'error': 'Invalid disk name'}), 400 + + json_path = _get_latest_smart_json(disk_name) + if not json_path or not os.path.exists(json_path): + return jsonify({ + 'disk': disk_name, + 'has_data': False, + 'message': 'No SMART test data available. Run a SMART test first.' 
+ }) + + with open(json_path, 'r') as f: + smart_data = json.load(f) + + # Extract timestamp from filename + filename = os.path.basename(json_path) + parts = filename.replace('.json', '').split('_') + timestamp = None + test_type = 'unknown' + if len(parts) >= 2: + try: + dt = datetime.strptime(parts[0], '%Y-%m-%dT%H-%M-%S') + timestamp = dt.isoformat() + test_type = parts[1] + except ValueError: + pass + + return jsonify({ + 'disk': disk_name, + 'has_data': True, + 'data': smart_data, + 'timestamp': timestamp, + 'test_type': test_type, + 'path': json_path + }) + except json.JSONDecodeError: + return jsonify({'error': 'Invalid JSON data in saved file'}), 500 + except Exception as e: + return jsonify({'error': str(e)}), 500 + + @app.route('/api/storage/smart//test', methods=['POST']) @require_auth def api_smart_run_test(disk_name): @@ -6736,9 +6896,12 @@ def api_smart_run_test(disk_name): # Check tools and auto-install if missing tools = _ensure_smart_tools(install_if_missing=True) - # Ensure SMART directory exists + # Ensure SMART directory exists and get path for new JSON file os.makedirs(SMART_DIR, exist_ok=True) - json_path = _get_smart_json_path(disk_name) + json_path = _get_smart_json_path(disk_name, test_type) + + # Cleanup old JSON files based on retention policy + _cleanup_old_smart_jsons(disk_name) if is_nvme: if not tools['nvme']: @@ -6832,7 +6995,7 @@ def api_smart_run_test(disk_name): while smartctl -c {device} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do sleep {sleep_interval} done - smartctl --json=c {device} > {json_path} 2>/dev/null + smartctl -a --json=c {device} > {json_path} 2>/dev/null ''', shell=True, start_new_session=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL @@ -6850,6 +7013,189 @@ def api_smart_run_test(disk_name): return jsonify({'error': str(e)}), 500 +# ─── SMART Schedule API ─────────────────────────────────────────────────────── + +SMART_SCHEDULE_FILE = 
os.path.join(SMART_CONFIG_DIR, 'smart-schedule.json') +SMART_CRON_FILE = '/etc/cron.d/proxmenux-smart' + +def _load_smart_schedules(): + """Load SMART test schedules from config file.""" + os.makedirs(SMART_CONFIG_DIR, exist_ok=True) + if os.path.exists(SMART_SCHEDULE_FILE): + try: + with open(SMART_SCHEDULE_FILE, 'r') as f: + return json.load(f) + except (json.JSONDecodeError, IOError): + pass + return {'enabled': True, 'schedules': []} + +def _save_smart_schedules(config): + """Save SMART test schedules to config file.""" + os.makedirs(SMART_CONFIG_DIR, exist_ok=True) + with open(SMART_SCHEDULE_FILE, 'w') as f: + json.dump(config, f, indent=2) + +def _update_smart_cron(): + """Update cron file based on current schedules.""" + config = _load_smart_schedules() + + if not config.get('enabled') or not config.get('schedules'): + # Remove cron file if disabled or no schedules + if os.path.exists(SMART_CRON_FILE): + os.remove(SMART_CRON_FILE) + return + + cron_lines = [ + '# ProxMenux SMART Scheduled Tests', + '# Auto-generated - do not edit manually', + 'SHELL=/bin/bash', + 'PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin', + '' + ] + + for schedule in config['schedules']: + if not schedule.get('active', True): + continue + + schedule_id = schedule.get('id', 'unknown') + hour = schedule.get('hour', 3) + minute = schedule.get('minute', 0) + frequency = schedule.get('frequency', 'weekly') + + # Build cron time specification + if frequency == 'daily': + cron_time = f'{minute} {hour} * * *' + elif frequency == 'weekly': + dow = schedule.get('day_of_week', 0) # 0=Sunday + cron_time = f'{minute} {hour} * * {dow}' + elif frequency == 'monthly': + dom = schedule.get('day_of_month', 1) + cron_time = f'{minute} {hour} {dom} * *' + else: + continue + + # Build command + disks = schedule.get('disks', ['all']) + test_type = schedule.get('test_type', 'short') + retention = schedule.get('retention', 10) + + cmd = 
f'/usr/local/share/proxmenux/scripts/smart-scheduled-test.sh --schedule-id {schedule_id} --test-type {test_type} --retention {retention}' + if disks != ['all']: + cmd += f" --disks '{','.join(disks)}'" + + cron_lines.append(f'{cron_time} root {cmd} >> /var/log/proxmenux/smart-schedule.log 2>&1') + + cron_lines.append('') # Empty line at end + + with open(SMART_CRON_FILE, 'w') as f: + f.write('\n'.join(cron_lines)) + + # Set proper permissions + os.chmod(SMART_CRON_FILE, 0o644) + + +@app.route('/api/storage/smart/schedules', methods=['GET']) +@require_auth +def api_smart_schedules_list(): + """Get all SMART test schedules.""" + config = _load_smart_schedules() + return jsonify(config) + + +@app.route('/api/storage/smart/schedules', methods=['POST']) +@require_auth +def api_smart_schedules_create(): + """Create or update a SMART test schedule.""" + try: + data = request.get_json() + if not data: + return jsonify({'error': 'No data provided'}), 400 + + config = _load_smart_schedules() + + # Generate ID if not provided + schedule_id = data.get('id') or f"schedule-{datetime.now().strftime('%Y%m%d%H%M%S')}" + data['id'] = schedule_id + + # Set defaults + data.setdefault('active', True) + data.setdefault('test_type', 'short') + data.setdefault('frequency', 'weekly') + data.setdefault('hour', 3) + data.setdefault('minute', 0) + data.setdefault('day_of_week', 0) + data.setdefault('day_of_month', 1) + data.setdefault('disks', ['all']) + data.setdefault('retention', 10) + data.setdefault('notify_on_complete', True) + data.setdefault('notify_only_on_failure', False) + + # Update existing or add new + existing_idx = next((i for i, s in enumerate(config['schedules']) if s['id'] == schedule_id), None) + if existing_idx is not None: + config['schedules'][existing_idx] = data + else: + config['schedules'].append(data) + + _save_smart_schedules(config) + _update_smart_cron() + + return jsonify({ + 'success': True, + 'schedule': data, + 'message': 'Schedule saved successfully' + }) + 
except Exception as e: + return jsonify({'error': str(e)}), 500 + + +@app.route('/api/storage/smart/schedules/', methods=['DELETE']) +@require_auth +def api_smart_schedules_delete(schedule_id): + """Delete a SMART test schedule.""" + try: + config = _load_smart_schedules() + + original_len = len(config['schedules']) + config['schedules'] = [s for s in config['schedules'] if s['id'] != schedule_id] + + if len(config['schedules']) == original_len: + return jsonify({'error': 'Schedule not found'}), 404 + + _save_smart_schedules(config) + _update_smart_cron() + + return jsonify({ + 'success': True, + 'message': 'Schedule deleted successfully' + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 + + +@app.route('/api/storage/smart/schedules/toggle', methods=['POST']) +@require_auth +def api_smart_schedules_toggle(): + """Enable or disable all SMART test schedules.""" + try: + data = request.get_json() or {} + enabled = data.get('enabled', True) + + config = _load_smart_schedules() + config['enabled'] = enabled + + _save_smart_schedules(config) + _update_smart_cron() + + return jsonify({ + 'success': True, + 'enabled': enabled, + 'message': f'SMART schedules {"enabled" if enabled else "disabled"}' + }) + except Exception as e: + return jsonify({'error': str(e)}), 500 + + @app.route('/api/storage/smart/tools', methods=['GET']) @require_auth def api_smart_tools_status(): diff --git a/scripts/backup_restore/apply_pending_restore.sh b/scripts/backup_restore/apply_pending_restore.sh new file mode 100644 index 00000000..2e756be8 --- /dev/null +++ b/scripts/backup_restore/apply_pending_restore.sh @@ -0,0 +1,166 @@ +#!/bin/bash +# ========================================================== +# ProxMenux - Apply Pending Restore On Boot +# ========================================================== + +PENDING_BASE="${PMX_RESTORE_PENDING_BASE:-/var/lib/proxmenux/restore-pending}" +CURRENT_LINK="${PENDING_BASE}/current" 
+LOG_DIR="${PMX_RESTORE_LOG_DIR:-/var/log/proxmenux}" +DEST_PREFIX="${PMX_RESTORE_DEST_PREFIX:-/}" +PRE_BACKUP_BASE="${PMX_RESTORE_PRE_BACKUP_BASE:-/root/proxmenux-pre-restore}" +RECOVERY_BASE="${PMX_RESTORE_RECOVERY_BASE:-/root/proxmenux-recovery}" + +mkdir -p "$LOG_DIR" "$PENDING_BASE/completed" >/dev/null 2>&1 || true +LOG_FILE="${LOG_DIR}/proxmenux-restore-onboot-$(date +%Y%m%d_%H%M%S).log" + +exec >>"$LOG_FILE" 2>&1 + +echo "=== ProxMenux pending restore started at $(date -Iseconds) ===" + +if [[ ! -e "$CURRENT_LINK" ]]; then + echo "No pending restore link found. Nothing to do." + exit 0 +fi + +PENDING_DIR="$(readlink -f "$CURRENT_LINK" 2>/dev/null || echo "$CURRENT_LINK")" +if [[ ! -d "$PENDING_DIR" ]]; then + echo "Pending restore directory not found: $PENDING_DIR" + rm -f "$CURRENT_LINK" >/dev/null 2>&1 || true + exit 0 +fi + +APPLY_LIST="${PENDING_DIR}/apply-on-boot.list" +PLAN_ENV="${PENDING_DIR}/plan.env" +STATE_FILE="${PENDING_DIR}/state" + +if [[ -f "$PLAN_ENV" ]]; then + # shellcheck source=/dev/null + source "$PLAN_ENV" +fi + +: "${HB_RESTORE_INCLUDE_ZFS:=0}" + +if [[ ! -f "$APPLY_LIST" ]]; then + echo "Apply list missing: $APPLY_LIST" + echo "failed" >"$STATE_FILE" + exit 1 +fi + +echo "Pending dir: $PENDING_DIR" +echo "Apply list: $APPLY_LIST" +echo "Include ZFS: $HB_RESTORE_INCLUDE_ZFS" +echo "running" >"$STATE_FILE" + +backup_root="${PRE_BACKUP_BASE}/$(date +%Y%m%d_%H%M%S)-onboot" +mkdir -p "$backup_root" >/dev/null 2>&1 || true + +cluster_recovery_root="" +applied=0 +skipped=0 +failed=0 + +while IFS= read -r rel; do + [[ -z "$rel" ]] && continue + + src="${PENDING_DIR}/rootfs/${rel}" + dst="${DEST_PREFIX%/}/${rel}" + + if [[ ! -e "$src" ]]; then + ((skipped++)) + continue + fi + + # Never restore cluster virtual filesystem data live. 
+ if [[ "$rel" == etc/pve* ]] || [[ "$rel" == var/lib/pve-cluster* ]]; then + if [[ -z "$cluster_recovery_root" ]]; then + cluster_recovery_root="${RECOVERY_BASE}/$(date +%Y%m%d_%H%M%S)-onboot" + mkdir -p "$cluster_recovery_root" >/dev/null 2>&1 || true + fi + mkdir -p "$cluster_recovery_root/$(dirname "$rel")" >/dev/null 2>&1 || true + cp -a "$src" "$cluster_recovery_root/$rel" >/dev/null 2>&1 || true + ((skipped++)) + continue + fi + + # /etc/zfs is opt-in. + if [[ "$rel" == etc/zfs || "$rel" == etc/zfs/* ]]; then + if [[ "$HB_RESTORE_INCLUDE_ZFS" != "1" ]]; then + ((skipped++)) + continue + fi + fi + + if [[ -e "$dst" ]]; then + mkdir -p "$backup_root/$(dirname "$rel")" >/dev/null 2>&1 || true + cp -a "$dst" "$backup_root/$rel" >/dev/null 2>&1 || true + fi + + if [[ -d "$src" ]]; then + mkdir -p "$dst" >/dev/null 2>&1 || true + if rsync -aAXH --delete "$src/" "$dst/" >/dev/null 2>&1; then + ((applied++)) + else + ((failed++)) + fi + else + mkdir -p "$(dirname "$dst")" >/dev/null 2>&1 || true + if cp -a "$src" "$dst" >/dev/null 2>&1; then + ((applied++)) + else + ((failed++)) + fi + fi +done <"$APPLY_LIST" + +systemctl daemon-reload >/dev/null 2>&1 || true +command -v update-initramfs >/dev/null 2>&1 && update-initramfs -u -k all >/dev/null 2>&1 || true +command -v update-grub >/dev/null 2>&1 && update-grub >/dev/null 2>&1 || true + +echo "Applied: $applied" +echo "Skipped: $skipped" +echo "Failed: $failed" +echo "Backup before restore: $backup_root" + +if [[ -n "$cluster_recovery_root" ]]; then + helper="${cluster_recovery_root}/apply-cluster-restore.sh" + cat > "$helper" </dev/null 2>&1 || true + + echo "Cluster paths extracted to: $cluster_recovery_root" + echo "Cluster recovery helper: $helper" +fi + +if [[ "$failed" -eq 0 ]]; then + echo "completed" >"$STATE_FILE" +else + echo "completed_with_errors" >"$STATE_FILE" +fi + +restore_id="$(basename "$PENDING_DIR")" +mv "$PENDING_DIR" "${PENDING_BASE}/completed/${restore_id}" >/dev/null 2>&1 || true +rm -f 
"$CURRENT_LINK" >/dev/null 2>&1 || true + +systemctl disable proxmenux-restore-onboot.service >/dev/null 2>&1 || true + +echo "=== ProxMenux pending restore finished at $(date -Iseconds) ===" +echo "Log file: $LOG_FILE" + +exit 0 diff --git a/scripts/backup_restore/backup_host.sh b/scripts/backup_restore/backup_host.sh index 97876e66..1c5e437b 100644 --- a/scripts/backup_restore/backup_host.sh +++ b/scripts/backup_restore/backup_host.sh @@ -1,917 +1,1689 @@ -#!/usr/bin/env bash - -# Configuration ============================================ -LOCAL_SCRIPTS="/usr/local/share/proxmenux/scripts" -BASE_DIR="/usr/local/share/proxmenux" -UTILS_FILE="$BASE_DIR/utils.sh" -VENV_PATH="/opt/googletrans-env" - -if [[ -f "$UTILS_FILE" ]]; then - source "$UTILS_FILE" -fi -load_language -initialize_cache +#!/bin/bash +# ========================================================== +# ProxMenux - Host Config Backup / Restore +# ========================================================== +# Author : MacRimi +# Copyright : (c) 2024 MacRimi +# License : MIT +# Version : 1.0 +# Last Updated: 08/04/2026 # ========================================================== +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)" +LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts" +LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT" +BASE_DIR="/usr/local/share/proxmenux" +UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" -get_external_backup_mount_point() { - local BACKUP_MOUNT_FILE="/usr/local/share/proxmenux/last_backup_mount.txt" - local STORAGE_REPO="$LOCAL_SCRIPTS/backup_restore" - local MOUNT_POINT +if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then + LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL" + UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" +elif [[ ! 
-f "$UTILS_FILE" ]]; then + UTILS_FILE="$BASE_DIR/utils.sh" +fi - if [[ -f "$BACKUP_MOUNT_FILE" ]]; then - MOUNT_POINT=$(head -n1 "$BACKUP_MOUNT_FILE" | tr -d '\r\n' | xargs) - >&2 echo "DEBUG: Valor MOUNT_POINT='$MOUNT_POINT'" - if [[ ! -d "$MOUNT_POINT" ]]; then - msg_error "Mount point does not exist: $MOUNT_POINT" - rm -f "$BACKUP_MOUNT_FILE" - return 1 - fi - if ! mountpoint -q "$MOUNT_POINT"; then - msg_error "Mount point is not mounted: $MOUNT_POINT" - rm -f "$BACKUP_MOUNT_FILE" - return 1 - fi - - echo "$MOUNT_POINT" - return 0 +if [[ -f "$UTILS_FILE" ]]; then + # shellcheck source=/dev/null + source "$UTILS_FILE" +else + echo "ERROR: utils.sh not found. Cannot continue." >&2 + exit 1 +fi + +# Source shared library +LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh" +[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh" +if [[ -f "$LIB_FILE" ]]; then + # shellcheck source=/dev/null + source "$LIB_FILE" +else + msg_error "$(translate "Cannot load backup library: lib_host_backup_common.sh")" + exit 1 +fi + +load_language +initialize_cache + +if ! 
command -v pveversion >/dev/null 2>&1; then + dialog --backtitle "ProxMenux" --title "$(translate "Error")" \ + --msgbox "$(translate "This script must be run on a Proxmox host.")" 8 60 + exit 1 +fi +if [[ $EUID -ne 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "Error")" \ + --msgbox "$(translate "This script must be run as root.")" 8 60 + exit 1 +fi + +# ========================================================== +# BACKUP — PBS +# ========================================================== +_bk_pbs() { + local profile_mode="$1" + local -a paths=() + local backup_id epoch log_file staging_root t_start elapsed staged_size + + hb_select_pbs_repository || return 1 + hb_ask_pbs_encryption + + hb_select_profile_paths "$profile_mode" paths || return 1 + + backup_id="hostcfg-$(hostname)" + backup_id=$(dialog --backtitle "ProxMenux" --title "PBS" \ + --inputbox "$(hb_translate "Backup ID (group name in PBS):")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$backup_id" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$backup_id" ]] && return 1 + # Sanitize: only alphanumeric, dash, underscore + backup_id=$(echo "$backup_id" | tr -cs '[:alnum:]_-' '-' | sed 's/-*$//') + + log_file="/tmp/proxmenux-pbs-backup-$(date +%Y%m%d_%H%M%S).log" + staging_root=$(mktemp -d /tmp/proxmenux-pbs-stage.XXXXXX) + # shellcheck disable=SC2064 + trap "rm -rf '$staging_root'" RETURN + + show_proxmenux_logo + msg_title "$(translate "Host Backup → PBS")" + echo -e "" + local _pbs_enc_label + if [[ -n "$HB_PBS_KEYFILE_OPT" ]]; then _pbs_enc_label=$(hb_translate "Enabled"); else _pbs_enc_label=$(hb_translate "Disabled"); fi + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${HB_PBS_REPOSITORY}${CL}" + echo -e "${TAB}${BGN}$(translate "Backup ID:")${CL} ${BL}${backup_id}${CL}" + echo -e "${TAB}${BGN}$(translate "Encryption:")${CL} ${BL}${_pbs_enc_label}${CL}" + echo -e "${TAB}${BGN}$(translate "Paths:")${CL}" + local p; for p in "${paths[@]}"; do echo -e "${TAB} ${BL}•${CL} $p"; done + echo 
-e "" + + msg_info "$(translate "Preparing files for backup...")" + hb_prepare_staging "$staging_root" "${paths[@]}" + staged_size=$(hb_file_size "$staging_root/rootfs") + msg_ok "$(translate "Staging ready.") $(translate "Data size:") $staged_size" + + echo -e "" + msg_info "$(translate "Connecting to PBS and starting backup...")" + stop_spinner + + epoch=$(date +%s) + t_start=$SECONDS + + local -a cmd=( + proxmox-backup-client backup + "hostcfg.pxar:$staging_root/rootfs" + --repository "$HB_PBS_REPOSITORY" + --backup-type host + --backup-id "$backup_id" + --backup-time "$epoch" + ) + # shellcheck disable=SC2086 # intentional word-split: HB_PBS_KEYFILE_OPT="--keyfile /path" + [[ -n "$HB_PBS_KEYFILE_OPT" ]] && cmd+=($HB_PBS_KEYFILE_OPT) + + : > "$log_file" + if env \ + PBS_PASSWORD="$HB_PBS_SECRET" \ + PBS_ENCRYPTION_PASSWORD="${HB_PBS_ENC_PASS:-}" \ + "${cmd[@]}" 2>&1 | tee -a "$log_file"; then + + elapsed=$((SECONDS - t_start)) + local snap_time + snap_time=$(date -d "@$epoch" '+%Y-%m-%dT%H:%M:%S' 2>/dev/null || date -r "$epoch" '+%Y-%m-%dT%H:%M:%S' 2>/dev/null || echo "$epoch") + echo -e "" + echo -e "${TAB}${BOLD}$(translate "Backup completed:")${CL}" + echo -e "${TAB}${BGN}$(translate "Method:")${CL} ${BL}Proxmox Backup Server (PBS)${CL}" + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${HB_PBS_REPOSITORY}${CL}" + echo -e "${TAB}${BGN}$(translate "Backup ID:")${CL} ${BL}${backup_id}${CL}" + echo -e "${TAB}${BGN}$(translate "Snapshot:")${CL} ${BL}host/${backup_id}/${snap_time}${CL}" + echo -e "${TAB}${BGN}$(translate "Data size:")${CL} ${BL}${staged_size}${CL}" + echo -e "${TAB}${BGN}$(translate "Duration:")${CL} ${BL}$(hb_human_elapsed "$elapsed")${CL}" + echo -e "${TAB}${BGN}$(translate "Encryption:")${CL} ${BL}${_pbs_enc_label}${CL}" + echo -e "${TAB}${BGN}$(translate "Log:")${CL} ${BL}${log_file}${CL}" + echo -e "" + msg_ok "$(translate "Backup completed successfully.")" else - source "$STORAGE_REPO/mount_disk_host_bk.sh" - 
MOUNT_POINT=$(mount_disk_host_bk) - [[ -z "$MOUNT_POINT" ]] && msg_error "$(translate "No disk mounted.")" && return 1 - echo "$MOUNT_POINT" - return 0 + echo -e "" + msg_error "$(translate "PBS backup failed.")" + hb_show_log "$log_file" "$(translate "PBS backup error log")" + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r + return 1 fi + + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r } +# ========================================================== +# BACKUP — BORG +# ========================================================== +_bk_borg() { + local profile_mode="$1" + local -a paths=() + local borg_bin repo staging_root log_file t_start elapsed staged_size archive_name + borg_bin=$(hb_ensure_borg) || return 1 + hb_select_borg_repo repo || return 1 + hb_prepare_borg_passphrase || return 1 + hb_select_profile_paths "$profile_mode" paths || return 1 -# === Host Backup Main Menu === -host_backup_menu() { + archive_name="hostcfg-$(hostname)-$(date +%Y%m%d_%H%M%S)" + log_file="/tmp/proxmenux-borg-backup-$(date +%Y%m%d_%H%M%S).log" + staging_root=$(mktemp -d /tmp/proxmenux-borg-stage.XXXXXX) + # shellcheck disable=SC2064 + trap "rm -rf '$staging_root'" RETURN + + show_proxmenux_logo + msg_title "$(translate "Host Backup → Borg")" + echo -e "" + local _borg_enc_label + if [[ "${BORG_ENCRYPT_MODE:-none}" == "repokey" ]]; then _borg_enc_label=$(hb_translate "Enabled (repokey)"); else _borg_enc_label=$(hb_translate "Disabled"); fi + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${repo}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive_name}${CL}" + echo -e "${TAB}${BGN}$(translate "Encryption:")${CL} ${BL}${_borg_enc_label}${CL}" + echo -e "${TAB}${BGN}$(translate "Paths:")${CL}" + local p; for p in "${paths[@]}"; do echo -e "${TAB} ${BL}•${CL} $p"; done + echo -e "" + + msg_info "$(translate "Preparing files for backup...")" + hb_prepare_staging "$staging_root" 
"${paths[@]}" + staged_size=$(hb_file_size "$staging_root/rootfs") + msg_ok "$(translate "Staging ready.") $(translate "Data size:") $staged_size" + + msg_info "$(translate "Initializing Borg repository if needed...")" + if ! hb_borg_init_if_needed "$borg_bin" "$repo" "${BORG_ENCRYPT_MODE:-none}" >/dev/null 2>&1; then + msg_error "$(translate "Failed to initialize Borg repository at:") $repo" + return 1 + fi + msg_ok "$(translate "Repository ready.")" + + echo -e "" + msg_info "$(translate "Starting Borg backup...")" + stop_spinner + + t_start=$SECONDS + : > "$log_file" + if (cd "$staging_root" && "$borg_bin" create --stats --progress \ + "$repo::$archive_name" rootfs metadata) 2>&1 | tee -a "$log_file"; then + + elapsed=$((SECONDS - t_start)) + # Extract compressed size from borg stats if available + local borg_compressed + borg_compressed=$(grep -i "this archive" "$log_file" | awk '{print $4, $5}' | tail -1) + [[ -z "$borg_compressed" ]] && borg_compressed="$staged_size" + echo -e "" + echo -e "${TAB}${BOLD}$(translate "Backup completed:")${CL}" + echo -e "${TAB}${BGN}$(translate "Method:")${CL} ${BL}BorgBackup${CL}" + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${repo}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive_name}${CL}" + echo -e "${TAB}${BGN}$(translate "Data size:")${CL} ${BL}${staged_size}${CL}" + echo -e "${TAB}${BGN}$(translate "Compressed size:")${CL} ${BL}${borg_compressed}${CL}" + echo -e "${TAB}${BGN}$(translate "Duration:")${CL} ${BL}$(hb_human_elapsed "$elapsed")${CL}" + echo -e "${TAB}${BGN}$(translate "Encryption:")${CL} ${BL}${_borg_enc_label}${CL}" + echo -e "${TAB}${BGN}$(translate "Log:")${CL} ${BL}${log_file}${CL}" + echo -e "" + msg_ok "$(translate "Backup completed successfully.")" + else + echo -e "" + msg_error "$(translate "Borg backup failed.")" + hb_show_log "$log_file" "$(translate "Borg backup error log")" + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read 
-r + return 1 + fi + + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r +} + +# ========================================================== +# BACKUP — LOCAL tar +# ========================================================== +_bk_local() { + local profile_mode="$1" + local -a paths=() + local dest_dir staging_root archive log_file t_start elapsed staged_size archive_size + + hb_require_cmd rsync rsync || return 1 + + dest_dir=$(hb_prompt_dest_dir) || return 1 + hb_select_profile_paths "$profile_mode" paths || return 1 + + archive="$dest_dir/hostcfg-$(hostname)-$(date +%Y%m%d_%H%M%S).tar.zst" + log_file="/tmp/proxmenux-local-backup-$(date +%Y%m%d_%H%M%S).log" + staging_root=$(mktemp -d /tmp/proxmenux-local-stage.XXXXXX) + # shellcheck disable=SC2064 + trap "rm -rf '$staging_root'" RETURN + + show_proxmenux_logo + msg_title "$(translate "Host Backup → Local archive")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Destination:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Paths:")${CL}" + local p; for p in "${paths[@]}"; do echo -e "${TAB} ${BL}•${CL} $p"; done + echo -e "" + + msg_info "$(translate "Preparing files for backup...")" + hb_prepare_staging "$staging_root" "${paths[@]}" + staged_size=$(hb_file_size "$staging_root/rootfs") + msg_ok "$(translate "Staging ready.") $(translate "Data size:") $staged_size" + + echo -e "" + msg_info "$(translate "Creating compressed archive...")" + stop_spinner + + t_start=$SECONDS + : > "$log_file" + local tar_ok=0 + + if command -v zstd >/dev/null 2>&1; then + if tar --zstd -cf "$archive" -C "$staging_root" . 
>>"$log_file" 2>&1; then + tar_ok=1 + fi + else + # Fallback: gzip (rename archive) + archive="${archive%.zst}" + archive="${archive%.tar}.tar.gz" + if command -v pv >/dev/null 2>&1; then + local stage_bytes + local pipefail_state + stage_bytes=$(du -sb "$staging_root" 2>/dev/null | awk '{print $1}') + pipefail_state=$(set -o | awk '$1=="pipefail" {print $2}') + set -o pipefail + if tar -cf - -C "$staging_root" . 2>>"$log_file" \ + | pv -s "$stage_bytes" | gzip > "$archive" 2>>"$log_file"; then + tar_ok=1 + fi + [[ "$pipefail_state" == "off" ]] && set +o pipefail + else + if tar -czf "$archive" -C "$staging_root" . >>"$log_file" 2>&1; then + tar_ok=1 + fi + fi + fi + + elapsed=$((SECONDS - t_start)) + + if [[ $tar_ok -eq 1 && -f "$archive" ]]; then + archive_size=$(hb_file_size "$archive") + echo -e "" + echo -e "${TAB}${BOLD}$(translate "Backup completed:")${CL}" + echo -e "${TAB}${BGN}$(translate "Method:")${CL} ${BL}Local archive (tar)${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Data size:")${CL} ${BL}${staged_size}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive size:")${CL} ${BL}${archive_size}${CL}" + echo -e "${TAB}${BGN}$(translate "Duration:")${CL} ${BL}$(hb_human_elapsed "$elapsed")${CL}" + echo -e "${TAB}${BGN}$(translate "Log:")${CL} ${BL}${log_file}${CL}" + echo -e "" + msg_ok "$(translate "Backup completed successfully.")" + else + echo -e "" + msg_error "$(translate "Local backup failed.")" + hb_show_log "$log_file" "$(translate "Local backup error log")" + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r + return 1 + fi + + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r +} + +# ========================================================== +# BACKUP MENU +# ========================================================== +_bk_scheduler() { + local scheduler="$LOCAL_SCRIPTS/backup_restore/backup_scheduler.sh" + [[ 
! -f "$scheduler" ]] && scheduler="$SCRIPT_DIR/backup_scheduler.sh" + + if [[ ! -f "$scheduler" ]]; then + show_proxmenux_logo + msg_error "$(translate "Scheduler script not found:") $scheduler" + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r + return 1 + fi + + bash "$scheduler" +} + +backup_menu() { while true; do - local CHOICE - CHOICE=$(dialog --backtitle "ProxMenux" \ - --title "$(translate 'Host Backup')" \ - --menu "\n$(translate 'Select backup option:')" 22 70 12 \ - "" "$(translate '--- FULL BACKUP ---')" \ - 1 "$(translate 'Full backup to Proxmox Backup Server (PBS)')" \ - 2 "$(translate 'Full backup with BorgBackup')" \ - 3 "$(translate 'Full backup to local .tar.gz')" \ - "" "$(translate '--- CUSTOM BACKUP ---')" \ - 4 "$(translate 'Custom backup to PBS')" \ - 5 "$(translate 'Custom backup with BorgBackup')" \ - 6 "$(translate 'Custom backup to local .tar.gz')" \ - 0 "$(translate 'Return')" \ + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Host Config Backup")" \ + --menu "\n$(translate "Select backup method and profile:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "" "$(translate "─── Default profile (all critical paths) ──────────")" \ + 1 "$(translate "Backup to Proxmox Backup Server (PBS)")" \ + 2 "$(translate "Backup to Borg repository")" \ + 3 "$(translate "Backup to local archive (.tar.zst)")" \ + "" "$(translate "─── Custom profile (choose paths manually) ────────")" \ + 4 "$(translate "Custom backup to PBS")" \ + 5 "$(translate "Custom backup to Borg")" \ + 6 "$(translate "Custom backup to local archive")" \ + "" "$(translate "─── Automation ─────────────────────────────────────")" \ + 7 "$(translate "Scheduled backups and retention policies")" \ + 0 "$(translate "Return")" \ 3>&1 1>&2 2>&3) || return 0 - case "$CHOICE" in - 1) backup_full_pbs_root ;; - 2) backup_with_borg "/boot/efi /etc/pve /etc/network /var/lib/pve-cluster /root /etc/ssh /home 
/usr/local/bin /etc/cron.d /etc/systemd/system /var/lib/vz" ;; - 3) backup_to_local_tar "/boot/efi /etc/pve /etc/network /var/lib/pve-cluster /root /etc/ssh /home /usr/local/bin /etc/cron.d /etc/systemd/system /var/lib/vz" ;; - 4) custom_backup_menu backup_to_pbs ;; - 5) custom_backup_menu backup_with_borg ;; - 6) custom_backup_menu backup_to_local_tar ;; + case "$choice" in + 1) _bk_pbs default ;; + 2) _bk_borg default ;; + 3) _bk_local default ;; + 4) _bk_pbs custom ;; + 5) _bk_borg custom ;; + 6) _bk_local custom ;; + 7) _bk_scheduler ;; 0) break ;; esac done } +# ========================================================== +# RESTORE — EXTRACT TO STAGING +# ========================================================== +_rs_extract_pbs() { + local staging_root="$1" + local log_file + log_file="/tmp/proxmenux-pbs-restore-$(date +%Y%m%d_%H%M%S).log" + local -a snapshots=() archives=() + local snapshot archive + hb_require_cmd proxmox-backup-client proxmox-backup-client || return 1 + hb_select_pbs_repository || return 1 -# === Menu checklist for custom backup === -custom_backup_menu() { - declare -A BACKUP_PATHS=( - [etc-pve]="/etc/pve" - [etc-network]="/etc/network" - [var-lib-pve-cluster]="/var/lib/pve-cluster" - [root-dir]="/root" - [etc-ssh]="/etc/ssh" - [home]="/home" - [local-bin]="/usr/local/bin" - [cron]="/etc/cron.d" - [custom-systemd]="/etc/systemd/system" - [var-lib-vz]="/var/lib/vz" + msg_info "$(translate "Listing snapshots from PBS...")" + mapfile -t snapshots < <( + PBS_PASSWORD="$HB_PBS_SECRET" \ + proxmox-backup-client snapshot list \ + --repository "$HB_PBS_REPOSITORY" 2>/dev/null \ + | awk '$2 ~ /^host\// {print $2}' \ + | sort -r | awk '!seen[$0]++' ) - local CHECKLIST_OPTIONS=() - for KEY in "${!BACKUP_PATHS[@]}"; do - DIR="${BACKUP_PATHS[$KEY]}" - CHECKLIST_OPTIONS+=("$KEY" "$DIR" "off") - done + msg_ok "$(translate "Snapshot list retrieved.")" - SELECTED_KEYS=$(dialog --separate-output --checklist \ - "$(translate 'Select directories to backup:')" 
22 70 12 \ - "${CHECKLIST_OPTIONS[@]}" \ - 3>&1 1>&2 2>&3) || return 1 - - local BACKUP_DIRS=() - for KEY in $SELECTED_KEYS; do - BACKUP_DIRS+=("${BACKUP_PATHS[$KEY]}") - done - - -# "$1" "${BACKUP_DIRS[*]}" - "$1" "${BACKUP_DIRS[@]}" - - -} - - -# === Configure PBS === -configure_pbs_repository() { - local PBS_REPO_FILE="/usr/local/share/proxmenux/pbs-repo.conf" - local PBS_PASS_FILE="/usr/local/share/proxmenux/pbs-pass.txt" - local PBS_MANUAL_CONFIGS="/usr/local/share/proxmenux/pbs-manual-configs.txt" - - - [[ ! -f "$PBS_MANUAL_CONFIGS" ]] && touch "$PBS_MANUAL_CONFIGS" - - local PBS_CONFIGS=() - local PBS_SOURCES=() - - - if [[ -f "/etc/pve/storage.cfg" ]]; then - local current_pbs="" server="" datastore="" username="" - - while IFS= read -r line; do - if [[ $line =~ ^pbs:\ (.+)$ ]]; then - if [[ -n "$current_pbs" && -n "$server" && -n "$datastore" && -n "$username" ]]; then - PBS_CONFIGS+=("$current_pbs|$username@$server:$datastore") - PBS_SOURCES+=("proxmox|$current_pbs") - fi - current_pbs="${BASH_REMATCH[1]}" - server="" datastore="" username="" - elif [[ -n "$current_pbs" ]]; then - if [[ $line =~ ^[[:space:]]*server[[:space:]]+(.+)$ ]]; then - server="${BASH_REMATCH[1]}" - elif [[ $line =~ ^[[:space:]]*datastore[[:space:]]+(.+)$ ]]; then - datastore="${BASH_REMATCH[1]}" - elif [[ $line =~ ^[[:space:]]*username[[:space:]]+(.+)$ ]]; then - username="${BASH_REMATCH[1]}" - elif [[ $line =~ ^[a-zA-Z]+: ]]; then - if [[ -n "$server" && -n "$datastore" && -n "$username" ]]; then - PBS_CONFIGS+=("$current_pbs|$username@$server:$datastore") - PBS_SOURCES+=("proxmox|$current_pbs") - fi - current_pbs="" - fi - fi - done < "/etc/pve/storage.cfg" - - - if [[ -n "$current_pbs" && -n "$server" && -n "$datastore" && -n "$username" ]]; then - PBS_CONFIGS+=("$current_pbs|$username@$server:$datastore") - PBS_SOURCES+=("proxmox|$current_pbs") - fi - fi - - - if [[ -f "$PBS_MANUAL_CONFIGS" ]]; then - while IFS= read -r line; do - if [[ -n "$line" ]]; then - 
PBS_CONFIGS+=("$line") - local name="${line%%|*}" - PBS_SOURCES+=("manual|$name") - fi - done < "$PBS_MANUAL_CONFIGS" - fi - - - local menu_options=() - local i=1 - - - for j in "${!PBS_CONFIGS[@]}"; do - local config="${PBS_CONFIGS[$j]}" - local source="${PBS_SOURCES[$j]}" - local name="${config%%|*}" - local repo="${config##*|}" - local source_type="${source%%|*}" - - - if [[ "$source_type" == "proxmox" ]]; then - menu_options+=("$i" " $name ($repo) [Proxmox]") - else - menu_options+=("$i" " $name ($repo) [Manual]") - fi - ((i++)) - done - - - menu_options+=("" "") - menu_options+=("$i" "\Z4\Zb $(translate 'Configure new PBS')\Zn") - local choice - choice=$(dialog --colors --backtitle "ProxMenux" --title "PBS Server Selection" \ - --menu "\n$(translate 'Select PBS server for this backup:')" 22 70 12 "${menu_options[@]}" 3>&1 1>&2 2>&3) - local dialog_result=$? - clear - - - if [[ $dialog_result -ne 0 ]]; then + if [[ ${#snapshots[@]} -eq 0 ]]; then + msg_error "$(translate "No host snapshots found in this PBS repository.")" return 1 fi - - if [[ $choice -eq $i ]]; then - configure_pbs_manually + local menu=() i=1 + for snapshot in "${snapshots[@]}"; do menu+=("$i" "$snapshot"); ((i++)); done + local sel + sel=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Select snapshot to restore")" \ + --menu "\n$(translate "Available host snapshots:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + snapshot="${snapshots[$((sel-1))]}" + + mapfile -t archives < <( + PBS_PASSWORD="$HB_PBS_SECRET" \ + proxmox-backup-client snapshot files "$snapshot" \ + --repository "$HB_PBS_REPOSITORY" 2>/dev/null \ + | awk '{print $1}' | grep '\.pxar$' || true + ) + if [[ ${#archives[@]} -eq 0 ]]; then + msg_error "$(translate "No .pxar archives found in selected snapshot.")" + return 1 + fi + + if printf '%s\n' "${archives[@]}" | grep -qx "hostcfg.pxar"; then + archive="hostcfg.pxar" else - - local 
selected_config="${PBS_CONFIGS[$((choice-1))]}" - local selected_source="${PBS_SOURCES[$((choice-1))]}" - local pbs_name="${selected_config%%|*}" - local source_type="${selected_source%%|*}" - PBS_REPO="${selected_config##*|}" - - - { - mkdir -p "$(dirname "$PBS_REPO_FILE")" - echo "$PBS_REPO" > "$PBS_REPO_FILE" - } >/dev/null 2>&1 - - - local password_found=false - if [[ "$source_type" == "proxmox" ]]; then - - local password_file="/etc/pve/priv/storage/${pbs_name}.pw" - if [[ -f "$password_file" ]]; then - { - cp "$password_file" "$PBS_PASS_FILE" - chmod 600 "$PBS_PASS_FILE" - } >/dev/null 2>&1 - password_found=true - - fi - else - - local manual_pass_file="/usr/local/share/proxmenux/pbs-pass-${pbs_name}.txt" - if [[ -f "$manual_pass_file" ]]; then - { - cp "$manual_pass_file" "$PBS_PASS_FILE" - chmod 600 "$PBS_PASS_FILE" - } >/dev/null 2>&1 - password_found=true - dialog --backtitle "ProxMenux" --title "PBS Selected" --msgbox "$(translate 'Using manual PBS:') $pbs_name\n\n$(translate 'Repository:') $PBS_REPO\n$(translate 'Password:') $(translate 'Previously saved')" 12 80 - fi - fi - - - if ! 
$password_found; then - dialog --backtitle "ProxMenux" --title "Password Required" --msgbox "$(translate 'Password not found for:') $pbs_name\n$(translate 'Please enter the password.')" 10 60 - get_pbs_password "$pbs_name" - fi - - clear - fi -} - - -configure_pbs_manually() { - local PBS_REPO_FILE="/usr/local/share/proxmenux/pbs-repo.conf" - local PBS_MANUAL_CONFIGS="/usr/local/share/proxmenux/pbs-manual-configs.txt" - - - local PBS_NAME - PBS_NAME=$(dialog --backtitle "ProxMenux" --title "New PBS Configuration" --inputbox "$(translate 'Enter a name for this PBS configuration:')" 10 60 "PBS-$(date +%m%d)" 3>&1 1>&2 2>&3) || return 1 - - PBS_USER=$(dialog --backtitle "ProxMenux" --title "New PBS Configuration" --inputbox "$(translate 'Enter PBS username:')" 10 50 "root@pam" 3>&1 1>&2 2>&3) || return 1 - PBS_HOST=$(dialog --backtitle "ProxMenux" --title "New PBS Configuration" --inputbox "$(translate 'Enter PBS host or IP:')" 10 50 "" 3>&1 1>&2 2>&3) || return 1 - PBS_DATASTORE=$(dialog --backtitle "ProxMenux" --title "New PBS Configuration" --inputbox "$(translate 'Enter PBS datastore name:')" 10 50 "" 3>&1 1>&2 2>&3) || return 1 - - - if [[ -z "$PBS_NAME" || -z "$PBS_USER" || -z "$PBS_HOST" || -z "$PBS_DATASTORE" ]]; then - dialog --backtitle "ProxMenux" --title "Error" --msgbox "$(translate 'All fields are required!')" 8 40 - return 1 - fi - - PBS_REPO="${PBS_USER}@${PBS_HOST}:${PBS_DATASTORE}" - - - { - mkdir -p "$(dirname "$PBS_REPO_FILE")" - echo "$PBS_REPO" > "$PBS_REPO_FILE" - } >/dev/null 2>&1 - - - local config_line="$PBS_NAME|$PBS_REPO" - if ! 
grep -Fxq "$config_line" "$PBS_MANUAL_CONFIGS" 2>/dev/null; then - echo "$config_line" >> "$PBS_MANUAL_CONFIGS" - fi - - - get_pbs_password "$PBS_NAME" - - dialog --backtitle "ProxMenux" --title "Success" --msgbox "$(translate 'PBS configuration saved:') $PBS_NAME\n\n$(translate 'Repository:') $PBS_REPO\n\n$(translate 'This configuration will appear in future backups.')" 12 80 -} - - -get_pbs_password() { - local PBS_NAME="$1" - local PBS_PASS_FILE="/usr/local/share/proxmenux/pbs-pass.txt" - local PBS_MANUAL_PASS_FILE="/usr/local/share/proxmenux/pbs-pass-${PBS_NAME}.txt" - - while true; do - PBS_REPO_PASS=$(dialog --backtitle "ProxMenux" --title "PBS Password" --insecure --passwordbox "$(translate 'Enter PBS repository password for:') $PBS_NAME" 10 70 "" 3>&1 1>&2 2>&3) || return 1 - PBS_REPO_PASS2=$(dialog --backtitle "ProxMenux" --title "PBS Password" --insecure --passwordbox "$(translate 'Confirm PBS repository password:')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - - if [[ "$PBS_REPO_PASS" == "$PBS_REPO_PASS2" ]]; then - break - else - dialog --backtitle "ProxMenux" --title "Error" --msgbox "$(translate 'Repository passwords do not match! Please try again.')" 8 50 - fi - done - - - { - echo "$PBS_REPO_PASS" > "$PBS_PASS_FILE" - chmod 600 "$PBS_PASS_FILE" - } >/dev/null 2>&1 - - - { - echo "$PBS_REPO_PASS" > "$PBS_MANUAL_PASS_FILE" - chmod 600 "$PBS_MANUAL_PASS_FILE" - } >/dev/null 2>&1 -} - -# =============================== - - - - - - - - -# ========== PBS BACKUP ========== -backup_full_pbs_root() { - local HOSTNAME PBS_REPO PBS_KEY_FILE PBS_PASS_FILE PBS_ENCRYPTION_PASS_FILE ENCRYPT_OPT="" - HOSTNAME=$(hostname) - - - local PBS_REPO_FILE="/usr/local/share/proxmenux/pbs-repo.conf" - PBS_KEY_FILE="/usr/local/share/proxmenux/pbs-key.conf" - PBS_PASS_FILE="/usr/local/share/proxmenux/pbs-pass.txt" - PBS_ENCRYPTION_PASS_FILE="/usr/local/share/proxmenux/pbs-encryption-pass.txt" - LOGFILE="/tmp/pbs-backup-${HOSTNAME}.log" - - - configure_pbs_repository - if [[ ! 
-f "$PBS_REPO_FILE" ]]; then - msg_error "$(translate "Failed to configure PBS connection")" - sleep 3 - return 1 - fi - PBS_REPO=$(<"$PBS_REPO_FILE") - - - if [[ ! -f "$PBS_PASS_FILE" ]]; then - msg_error "$(translate "PBS password not configured")" - sleep 3 - return 1 + menu=(); i=1 + for archive in "${archives[@]}"; do menu+=("$i" "$archive"); ((i++)); done + sel=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Select archive")" \ + --menu "\n$(translate "Available archives:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + archive="${archives[$((sel-1))]}" fi - - dialog --backtitle "ProxMenux" --title "Encryption" --yesno "$(translate 'Do you want to encrypt the backup?')" 8 60 - if [[ $? -eq 0 ]]; then - - if [[ ! -f "$PBS_ENCRYPTION_PASS_FILE" ]]; then - while true; do - PBS_KEY_PASS=$(dialog --backtitle "ProxMenux" --title "Encryption Password" --insecure --passwordbox "$(translate 'Enter encryption password (different from PBS login):')" 12 70 "" 3>&1 1>&2 2>&3) || return 1 - PBS_KEY_PASS2=$(dialog --backtitle "ProxMenux" --title "Encryption Password" --insecure --passwordbox "$(translate 'Confirm encryption password:')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - - if [[ "$PBS_KEY_PASS" == "$PBS_KEY_PASS2" ]]; then - break - else - dialog --backtitle "ProxMenux" --title "Error" --msgbox "$(translate 'Passwords do not match! Please try again.')" 8 50 - fi - done - - - { - echo "$PBS_KEY_PASS" > "$PBS_ENCRYPTION_PASS_FILE" - chmod 600 "$PBS_ENCRYPTION_PASS_FILE" - } >/dev/null 2>&1 - - dialog --backtitle "ProxMenux" --title "Success" --msgbox "$(translate 'Encryption password saved successfully!')" 8 50 - fi - - - if [[ ! 
-f "$PBS_KEY_FILE" ]]; then - PBS_ENCRYPTION_PASS=$(<"$PBS_ENCRYPTION_PASS_FILE") - - dialog --backtitle "ProxMenux" --title "Encryption" --infobox "$(translate 'Creating encryption key...')" 5 50 - - expect -c " - set timeout 30 - spawn proxmox-backup-client key create \"$PBS_KEY_FILE\" - expect { - \"Encryption Key Password:\" { - send \"$PBS_ENCRYPTION_PASS\r\" - exp_continue - } - \"Verify Password:\" { - send \"$PBS_ENCRYPTION_PASS\r\" - exp_continue - } - eof - } - " >/dev/null 2>&1 - - if [[ ! -f "$PBS_KEY_FILE" ]]; then - dialog --backtitle "ProxMenux" --title "Error" --msgbox "$(translate 'Error creating encryption key.')" 8 40 - return 1 - fi - - dialog --backtitle "ProxMenux" --title "Important" --msgbox "$(translate 'IMPORTANT: Save the key file. Without it you will not be able to restore your backups!')\n\n$(translate 'Key file location:') $PBS_KEY_FILE" 12 70 - fi - ENCRYPT_OPT="--keyfile $PBS_KEY_FILE" - else - ENCRYPT_OPT="" - fi - - - clear show_proxmenux_logo - echo -e - msg_info2 "$(translate "Starting backup to PBS")" - echo -e - echo -e "${BL}$(translate "PBS Repository:")${WHITE} $PBS_REPO${RESET}" - echo -e "${BL}$(translate "Backup ID:")${WHITE} $HOSTNAME${RESET}" - echo -e "${BL}$(translate "Included:")${WHITE} /boot/efi /etc/pve (all root)${RESET}" - echo -e "${BL}$(translate "Encryption:")${WHITE} $([[ -n "$ENCRYPT_OPT" ]] && echo "Enabled" || echo "Disabled")${RESET}" - echo -e "${BL}$(translate "Log file:")${WHITE} $LOGFILE${RESET}" - echo -e "${BOLD}${NEON_PURPLE_BLUE}-------------------------------${RESET}" - echo "" + msg_title "$(translate "Restore from PBS → staging")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${HB_PBS_REPOSITORY}${CL}" + echo -e "${TAB}${BGN}$(translate "Snapshot:")${CL} ${BL}${snapshot}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Staging directory:")${CL} ${BL}${staging_root}${CL}" + echo -e "" + msg_info 
"$(translate "Extracting data from PBS...")" + stop_spinner + local key_opt="" enc_pass="" + [[ -f "$HB_STATE_DIR/pbs-key.conf" ]] && key_opt="--keyfile $HB_STATE_DIR/pbs-key.conf" + [[ -f "$HB_STATE_DIR/pbs-encryption-pass.txt" ]] && \ + enc_pass="$(<"$HB_STATE_DIR/pbs-encryption-pass.txt")" - PBS_REPO_PASS=$(<"$PBS_PASS_FILE") - - if [[ -n "$ENCRYPT_OPT" ]]; then - - PBS_ENCRYPTION_PASS=$(<"$PBS_ENCRYPTION_PASS_FILE") - echo "$(translate "Starting encrypted full backup...")" - echo "" - - expect -c " - set timeout 3600 - log_file $LOGFILE - spawn proxmox-backup-client backup \ - --include-dev /boot/efi \ - --include-dev /etc/pve \ - root-${HOSTNAME}.pxar:/ \ - --repository \"$PBS_REPO\" \ - $ENCRYPT_OPT \ - --backup-type host \ - --backup-id \"$HOSTNAME\" \ - --backup-time \"$(date +%s)\" - expect { - -re \"Password for .*:\" { - send \"$PBS_REPO_PASS\r\" - exp_continue - } - \"Encryption Key Password:\" { - send \"$PBS_ENCRYPTION_PASS\r\" - exp_continue - } - eof - } - " | tee -a "$LOGFILE" + : > "$log_file" + # shellcheck disable=SC2086 + if env \ + PBS_PASSWORD="$HB_PBS_SECRET" \ + PBS_ENCRYPTION_PASSWORD="${enc_pass}" \ + proxmox-backup-client restore \ + "$snapshot" "$archive" "$staging_root" \ + --repository "$HB_PBS_REPOSITORY" \ + --allow-existing-dirs true \ + $key_opt \ + 2>&1 | tee -a "$log_file"; then + msg_ok "$(translate "Extraction completed.")" + return 0 else - - echo "$(translate "Starting unencrypted full backup...")" - echo "" - - expect -c " - set timeout 3600 - log_file $LOGFILE - spawn proxmox-backup-client backup \ - --include-dev /boot/efi \ - --include-dev /etc/pve \ - root-${HOSTNAME}.pxar:/ \ - --repository \"$PBS_REPO\" \ - --backup-type host \ - --backup-id \"$HOSTNAME\" \ - --backup-time \"$(date +%s)\" - expect { - -re \"Password for .*:\" { - send \"$PBS_REPO_PASS\r\" - exp_continue - } - eof - } - " | tee -a "$LOGFILE" + msg_error "$(translate "PBS extraction failed.")" + hb_show_log "$log_file" "$(translate "PBS restore error 
log")" + return 1 fi - local backup_result=$? - - echo -e "${BOLD}${NEON_PURPLE_BLUE}===============================${RESET}\n" - if [[ $backup_result -eq 0 ]]; then - msg_ok "$(translate "Full backup process completed successfully")" - else - msg_error "$(translate "Backup process finished with errors")" - fi - - echo "" - msg_success "$(translate "Press Enter to return to the main menu...")" - read -r } +_rs_extract_borg() { + local staging_root="$1" + local borg_bin repo log_file + log_file="/tmp/proxmenux-borg-restore-$(date +%Y%m%d_%H%M%S).log" + local -a archives=() + local archive + borg_bin=$(hb_ensure_borg) || return 1 + hb_select_borg_repo repo || return 1 - -backup_to_pbs() { - local HOSTNAME TIMESTAMP SNAPSHOT - HOSTNAME=$(hostname) - TIMESTAMP=$(date +%Y-%m-%d_%H-%M) - SNAPSHOT="${HOSTNAME}-${TIMESTAMP}" - - local PBS_REPO_FILE="/usr/local/share/proxmenux/pbs-repo.conf" - local PBS_KEY_FILE="/usr/local/share/proxmenux/pbs-key.conf" - local PBS_PASS_FILE="/usr/local/share/proxmenux/pbs-pass.txt" - local PBS_ENCRYPTION_PASS_FILE="/usr/local/share/proxmenux/pbs-encryption-pass.txt" - local PBS_REPO ENCRYPT_OPT USE_ENCRYPTION - local PBS_KEY_PASS PBS_REPO_PASS - - - configure_pbs_repository - PBS_REPO=$(<"$PBS_REPO_FILE") - - - USE_ENCRYPTION=false - dialog --backtitle "ProxMenux" --yesno "$(translate 'Do you want to encrypt the backup?')" 8 60 - [[ $? -eq 0 ]] && USE_ENCRYPTION=true - - - if $USE_ENCRYPTION && ! 
command -v expect >/dev/null 2>&1; then - apt-get update -qq >/dev/null 2>&1 - apt-get install -y expect >/dev/null 2>&1 + local pass_file="$HB_STATE_DIR/borg-pass.txt" + if [[ -f "$pass_file" ]]; then + BORG_PASSPHRASE="$(<"$pass_file")" + export BORG_PASSPHRASE + else + BORG_PASSPHRASE=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \ + "$(hb_translate "Borg passphrase (leave empty if not encrypted):")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1 + export BORG_PASSPHRASE fi - if [[ "$#" -lt 1 ]]; then - clear - show_proxmenux_logo - msg_error "$(translate "No directories specified for backup.")" - sleep 2 + mapfile -t archives < <( + "$borg_bin" list "$repo" --format '{archive}{NL}' 2>/dev/null | sort -r + ) + if [[ ${#archives[@]} -eq 0 ]]; then + msg_error "$(translate "No archives found in this Borg repository.")" return 1 fi - local TOTAL="$#" - local COUNT=1 + local menu=() i=1 + for archive in "${archives[@]}"; do menu+=("$i" "$archive"); ((i++)); done + local sel + sel=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Select archive to restore")" \ + --menu "\n$(translate "Available Borg archives:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + archive="${archives[$((sel-1))]}" - for dir in "$@"; do - local SAFE_NAME SAFE_ID PXAR_NAME - SAFE_NAME=$(basename "$dir" | tr '.-/' '_') - PXAR_NAME="root-custom-${SAFE_NAME}-${SNAPSHOT}.pxar" - SAFE_ID="custom-${HOSTNAME}-${SAFE_NAME}" + show_proxmenux_logo + msg_title "$(translate "Restore from Borg → staging")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Repository:")${CL} ${BL}${repo}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Staging directory:")${CL} ${BL}${staging_root}${CL}" + echo -e "" + msg_info "$(translate "Extracting data from Borg...")" + stop_spinner - msg_info2 "$(translate "[$COUNT/$TOTAL] Backing up") $dir $(translate 
"as") $PXAR_NAME" + : > "$log_file" + if (cd "$staging_root" && "$borg_bin" extract --progress \ + "$repo::$archive" 2>&1 | tee -a "$log_file"); then + msg_ok "$(translate "Extraction completed.")" + return 0 + else + msg_error "$(translate "Borg extraction failed.")" + hb_show_log "$log_file" "$(translate "Borg restore error log")" + return 1 + fi +} - ENCRYPT_OPT="" +_rs_extract_local() { + local staging_root="$1" + local log_file + log_file="/tmp/proxmenux-local-restore-$(date +%Y%m%d_%H%M%S).log" + local source_dir archive + hb_require_cmd tar tar || return 1 + source_dir=$(hb_prompt_restore_source_dir) || return 1 + archive=$(hb_prompt_local_archive "$source_dir" \ + "$(translate "Select backup archive to restore")") || return 1 - if $USE_ENCRYPTION; then - if [[ -f "$PBS_KEY_FILE" ]]; then - ENCRYPT_OPT="--keyfile $PBS_KEY_FILE" + show_proxmenux_logo + msg_title "$(translate "Restore from local archive → staging")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive size:")${CL} ${BL}$(hb_file_size "$archive")${CL}" + echo -e "${TAB}${BGN}$(translate "Staging directory:")${CL} ${BL}${staging_root}${CL}" + echo -e "" + msg_info "$(translate "Extracting archive...")" + stop_spinner + + : > "$log_file" + if [[ "$archive" == *.zst ]]; then + tar --zstd -xf "$archive" -C "$staging_root" >>"$log_file" 2>&1 + else + tar -xf "$archive" -C "$staging_root" >>"$log_file" 2>&1 + fi + local rc=$? 
+ + if [[ $rc -eq 0 ]]; then + msg_ok "$(translate "Extraction completed.")" + return 0 + else + msg_error "$(translate "Extraction failed.")" + hb_show_log "$log_file" "$(translate "Local restore error log")" + return 1 + fi +} + +# Ensure staging has rootfs/ layout (Borg may nest) +_rs_check_layout() { + local staging_root="$1" + + # Case 1: new format — rootfs/ already present + [[ -d "$staging_root/rootfs" ]] && return 0 + + # Case 2: nested format (old Borg archives may include absolute tmp paths) + local -a rootfs_hits=() + mapfile -t rootfs_hits < <(find "$staging_root" -mindepth 2 -maxdepth 6 -type d -name rootfs 2>/dev/null) + if [[ ${#rootfs_hits[@]} -gt 1 ]]; then + dialog --backtitle "ProxMenux" \ + --title "$(translate "Incompatible archive")" \ + --msgbox "$(translate "Multiple rootfs directories were found in this archive. Restore cannot continue automatically.")" \ + 9 76 || true + return 1 + fi + if [[ ${#rootfs_hits[@]} -eq 1 ]]; then + local rootfs_dir nested + rootfs_dir="${rootfs_hits[0]}" + nested="$(dirname "$rootfs_dir")" + mv "$rootfs_dir" "$staging_root/rootfs" + if [[ -d "$nested/metadata" ]]; then + mv "$nested/metadata" "$staging_root/metadata" + fi + mkdir -p "$staging_root/metadata" + return 0 + fi + + # Case 3: flat format — config dirs extracted directly at staging root + # (archives created by older scripts that didn't use staging layout) + if [[ -d "$staging_root/etc" || -d "$staging_root/var" || \ + -d "$staging_root/root" || -d "$staging_root/usr" ]]; then + local tmp + tmp=$(mktemp -d "$staging_root/.rootfs_wrap.XXXXXX") + local item + for item in "$staging_root"/*/; do + [[ "$item" == "$tmp/" ]] && continue + mv "$item" "$tmp/" 2>/dev/null || true + done + find "$staging_root" -maxdepth 1 -type f -exec mv {} "$tmp/" \; 2>/dev/null || true + mv "$tmp" "$staging_root/rootfs" + mkdir -p "$staging_root/metadata" + return 0 + fi + + local incompatible_msg + incompatible_msg="$(translate "This archive does not contain a recognized 
backup layout.")"$'\n\n'"$(translate "Expected: rootfs/ directory, or /etc /var /root at archive root.")"$'\n'"$(translate "Use 'Export to file' to save it and inspect manually.")" + dialog --backtitle "ProxMenux" \ + --title "$(translate "Incompatible archive")" \ + --msgbox "$incompatible_msg" 12 72 || true + return 1 +} + +# ========================================================== +# RESTORE — REVIEW & APPLY +# ========================================================== +_rs_show_metadata() { + local staging_root="$1" + local meta="$staging_root/metadata" + local tmp + tmp=$(mktemp) || return 1 + trap 'rm -f "$tmp"; trap - INT TERM; kill -s INT "$$"' INT TERM + { + echo "═══ $(hb_translate "Backup information") ═══" + echo "" + if [[ -f "$meta/run_info.env" ]]; then + while IFS='=' read -r k v; do + printf " %-20s %s\n" "$k:" "$v" + done < "$meta/run_info.env" + fi + echo "" + echo "═══ $(hb_translate "Paths included in backup") ═══" + if [[ -f "$meta/selected_paths.txt" ]]; then + sed 's/^/ \//' "$meta/selected_paths.txt" + fi + echo "" + if [[ -f "$meta/missing_paths.txt" && -s "$meta/missing_paths.txt" ]]; then + echo "═══ $(hb_translate "Paths not found at backup time") ═══" + sed 's/^/ /' "$meta/missing_paths.txt" + echo "" + fi + if [[ -f "$meta/pveversion.txt" ]]; then + echo "═══ Proxmox version ═══" + cat "$meta/pveversion.txt" + echo "" + fi + if [[ -f "$meta/lsblk.txt" ]]; then + echo "═══ Disk layout (lsblk -f) ═══" + cat "$meta/lsblk.txt" + echo "" + fi + } > "$tmp" + dialog --backtitle "ProxMenux" --exit-label "OK" \ + --title "$(translate "Backup metadata")" \ + --textbox "$tmp" 28 110 || true + rm -f "$tmp" + trap - INT TERM +} + +_rs_preview_diff() { + local staging_root="$1" + local -a paths=() + hb_load_restore_paths "$staging_root" paths + local tmp + tmp=$(mktemp) || return 1 + trap 'rm -f "$tmp"; trap - INT TERM; kill -s INT "$$"' INT TERM + { + echo "$(hb_translate "Diff: current system vs backup (--- system +++ backup)")" + echo "" + 
local rel src dst + for rel in "${paths[@]}"; do + src="$staging_root/rootfs/$rel" + dst="/$rel" + [[ -e "$src" ]] || continue + echo "══════ /$rel ══════" + if [[ -d "$src" ]]; then + diff -qr "$dst" "$src" 2>/dev/null || true else - - while true; do - PBS_KEY_PASS=$(dialog --backtitle "ProxMenux" --insecure --passwordbox "$(translate 'Enter encryption password (different from PBS login):')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - PBS_KEY_PASS2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox "$(translate 'Confirm encryption password:')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - - if [[ "$PBS_KEY_PASS" == "$PBS_KEY_PASS2" ]]; then - break - else - dialog --backtitle "ProxMenux" --msgbox "$(translate 'Passwords do not match! Please try again.')" 8 50 - fi - done - - - { - echo "$PBS_KEY_PASS" > "$PBS_ENCRYPTION_PASS_FILE" - chmod 600 "$PBS_ENCRYPTION_PASS_FILE" - } >/dev/null 2>&1 - - - expect -c " - set timeout 30 - spawn proxmox-backup-client key create \"$PBS_KEY_FILE\" - expect { - \"Encryption Key Password:\" { - send \"$PBS_KEY_PASS\r\" - exp_continue - } - \"Verify Password:\" { - send \"$PBS_KEY_PASS\r\" - exp_continue - } - eof - } - " >/dev/null 2>&1 - - if [[ ! -f "$PBS_KEY_FILE" ]]; then - dialog --backtitle "ProxMenux" --msgbox "$(translate 'Error creating encryption key.')" 8 40 - return 1 - fi - ENCRYPT_OPT="--keyfile $PBS_KEY_FILE" - dialog --backtitle "ProxMenux" --msgbox "$(translate 'Encryption key generated. 
Save it in a safe place!')" 10 60 + diff -u "$dst" "$src" 2>/dev/null || true fi - fi - - clear - show_proxmenux_logo - echo -e - msg_info2 "$(translate "Starting backup to PBS")" - TOTAL_SIZE=$(du -cb "$@" | awk '/total$/ {print $1}') - TOTAL_SIZE_GB=$(awk "BEGIN {printf \"%.2f\", $TOTAL_SIZE/1024/1024/1024}") - echo -e - echo -e "${BL}$(translate "PBS Repository:")${WHITE} $PBS_REPO${RESET}" - echo -e "${BL}$(translate "Backup ID:")${WHITE} $HOSTNAME${RESET}" - echo -e "${BL}$(translate "Encryption:")${WHITE} $([[ -n "$ENCRYPT_OPT" ]] && echo "Enabled" || echo "Disabled")${RESET}" - echo -e "${BL}$(translate "Included directories:")${WHITE} $*${RESET}" - echo -e "${BL}$(translate "Total size:")${WHITE} ${TOTAL_SIZE_GB} GB${RESET}" - echo -e "${BOLD}${NEON_PURPLE_BLUE}-------------------------------${RESET}" - - PBS_REPO_PASS=$(<"$PBS_PASS_FILE") - - if $USE_ENCRYPTION && [[ -f "$PBS_ENCRYPTION_PASS_FILE" ]]; then - PBS_KEY_PASS=$(<"$PBS_ENCRYPTION_PASS_FILE") - expect -c " - set timeout 300 - spawn proxmox-backup-client backup \"${PXAR_NAME}:$dir\" --repository \"$PBS_REPO\" $ENCRYPT_OPT --backup-type host --backup-id \"$SAFE_ID\" --backup-time \"$(date +%s)\" - expect { - -re \"Password for .*:\" { - send \"$PBS_REPO_PASS\r\" - exp_continue - } - \"Encryption Key Password:\" { - send \"$PBS_KEY_PASS\r\" - exp_continue - } - eof - } - " - else - - expect -c " - set timeout 300 - spawn proxmox-backup-client backup \"${PXAR_NAME}:$dir\" --repository \"$PBS_REPO\" $ENCRYPT_OPT --backup-type host --backup-id \"$SAFE_ID\" --backup-time \"$(date +%s)\" - expect { - -re \"Password for .*:\" { - send \"$PBS_REPO_PASS\r\" - exp_continue - } - eof - } - " - fi - - COUNT=$((COUNT+1)) - done - - echo -e "${BOLD}${NEON_PURPLE_BLUE}===============================${RESET}\n" - msg_ok "$(translate "Backup process finished.")" - echo "" - msg_success "$(translate "Press Enter to return to the main menu...")" - read -r - - + echo "" + done + } > "$tmp" + dialog --backtitle 
"ProxMenux" --exit-label "OK" \ + --title "$(translate "Preview: changes that would be applied")" \ + --textbox "$tmp" 28 130 || true + rm -f "$tmp" + trap - INT TERM } -# =============================== +_rs_export_to_file() { + local staging_root="$1" + local dest_dir archive archive_size t_start elapsed + dest_dir=$(hb_prompt_dest_dir) || return 1 + archive="$dest_dir/hostcfg-export-$(hostname)-$(date +%Y%m%d_%H%M%S).tar.gz" -# ========== BORGBACKUP ========== -backup_with_borg() { -# local SRC="$1" - local BORG_APPIMAGE="/usr/local/share/proxmenux/borg" - local LOGFILE="/tmp/borg-backup.log" - local DEST - local TYPE - local ENCRYPT_OPT="" - local BORG_KEY - - if [[ ! -x "$BORG_APPIMAGE" ]]; then - clear - show_proxmenux_logo - msg_info "$(translate "BorgBackup not found. Downloading AppImage...")" - mkdir -p /usr/local/share/proxmenux - wget -qO "$BORG_APPIMAGE" "https://github.com/borgbackup/borg/releases/download/1.2.8/borg-linux64" - chmod +x "$BORG_APPIMAGE" - msg_ok "$(translate "BorgBackup downloaded and ready.")" - fi - - - TYPE=$(dialog --backtitle "ProxMenux" --menu "$(translate 'Select Borg backup destination:')" 15 60 3 \ - "local" "$(translate 'Local directory')" \ - "usb" "$(translate 'Internal/External dedicated disk')" \ - "remote" "$(translate 'Remote server')" \ - 3>&1 1>&2 2>&3) || return 1 - - if [[ "$TYPE" == "local" ]]; then - DEST=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter local directory for backup:')" 10 60 "/backup/borgbackup" 3>&1 1>&2 2>&3) || return 1 - mkdir -p "$DEST" - elif [[ "$TYPE" == "usb" ]]; then - - while true; do - BASE_DEST=$(get_external_backup_mount_point) - if [[ -z "$BASE_DEST" ]]; then - dialog --backtitle "ProxMenux" --yesno "$(translate 'No external disk detected or mounted. Would you like to retry?')" 8 60 - [[ $? 
-eq 0 ]] && continue - return 1 - fi - - DEST="$BASE_DEST/borgbackup" - mkdir -p "$DEST" - - DISK_DEV=$(df "$BASE_DEST" | awk 'NR==2{print $1}') - PKNAME=$(lsblk -no PKNAME "$DISK_DEV" 2>/dev/null) - [[ -z "$PKNAME" ]] && PKNAME=$(basename "$DISK_DEV" | sed 's/[0-9]*$//') - if [[ -n "$PKNAME" && -b /dev/$PKNAME ]]; then - DISK_MODEL=$(lsblk -no MODEL "/dev/$PKNAME") - else - DISK_MODEL="(unknown)" - fi - FREE_SPACE=$(df -h "$BASE_DEST" | awk 'NR==2{print $4}') - - dialog --backtitle "ProxMenux" \ - --title "$(translate "Dedicated Backup Disk")" \ - --yesno "\n$(translate "Mount point:") $DEST\n\n\ - $(translate "Disk model:") $DISK_MODEL\n\ - $(translate "Available space:") $FREE_SPACE\n\n\ - $(translate "Use this disk for backup?")" 12 70 - - if [[ $? -eq 0 ]]; then - break - else - return 1 - fi - done - - - elif [[ "$TYPE" == "remote" ]]; then - REMOTE_USER=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter SSH user for remote:')" 10 60 "root" 3>&1 1>&2 2>&3) || return 1 - REMOTE_HOST=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter SSH host:')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - REMOTE_PATH=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter remote path:')" 10 60 "/backup/borgbackup" 3>&1 1>&2 2>&3) || return 1 - DEST="ssh://$REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH" - fi - - - dialog --backtitle "ProxMenux" --yesno "$(translate 'Do you want to encrypt the backup?')" 8 60 - if [[ $? -eq 0 ]]; then - BORG_KEY=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter Borg encryption passphrase (will be saved):')" 10 60 "" 3>&1 1>&2 2>&3) || return 1 - ENCRYPT_OPT="--encryption=repokey" - export BORG_PASSPHRASE="$BORG_KEY" - else - ENCRYPT_OPT="--encryption=none" - fi - - if [[ "$TYPE" == "local" || "$TYPE" == "usb" ]]; then - if [[ ! -f "$DEST/config" ]]; then - "$BORG_APPIMAGE" init $ENCRYPT_OPT "$DEST" - if [[ $? 
-ne 0 ]]; then - clear - show_proxmenux_logo - msg_error "$(translate "Failed to initialize Borg repo at") $DEST" - sleep 5 - return 1 - fi - fi - fi - - - dialog --backtitle "ProxMenux" --msgbox "$(translate 'Borg backup will start now. This may take a while.')" 8 40 - - clear show_proxmenux_logo - msg_info2 "$(translate "Starting backup with BorgBackup...")" - echo -e + msg_title "$(translate "Export backup data to file")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Staging source:")${CL} ${BL}${staging_root}${CL}" + echo -e "${TAB}${BGN}$(translate "Output archive:")${CL} ${BL}${archive}${CL}" + echo -e "" + echo -e "${TAB}$(translate "No changes will be made to the running system.")" + echo -e "" + msg_info "$(translate "Creating export archive...")" + stop_spinner - TOTAL_SIZE=$(du -cb "$@" | awk '/total$/ {print $1}') - TOTAL_SIZE_GB=$(awk "BEGIN {printf \"%.2f\", $TOTAL_SIZE/1024/1024/1024}") - - echo -e "${BL}$(translate "Included directories:")${WHITE} $*${RESET}" - echo -e "${BL}$(translate "Total size:")${WHITE} ${TOTAL_SIZE_GB} GB${RESET}" - - - # 6. Lanzar el backup y guardar log -# "$BORG_APPIMAGE" create --progress "$DEST"::"root-$(hostname)-$(date +%Y%m%d_%H%M)" $SRC 2>&1 | tee "$LOGFILE" - - "$BORG_APPIMAGE" create --progress "$DEST"::"root-$(hostname)-$(date +%Y%m%d_%H%M)" "$@" 2>&1 | tee "$LOGFILE" - - echo -e "${BOLD}${NEON_PURPLE_BLUE}===============================${RESET}\n" - msg_ok "$(translate "Backup process finished.")" - echo - msg_success "$(translate "Press Enter to return to the main menu...")" - read -r + t_start=$SECONDS + if tar -czf "$archive" -C "$staging_root" . 
2>/dev/null; then + elapsed=$((SECONDS - t_start)) + archive_size=$(hb_file_size "$archive") + echo -e "" + echo -e "${TAB}${BOLD}$(translate "Export completed:")${CL}" + echo -e "${TAB}${BGN}$(translate "Archive:")${CL} ${BL}${archive}${CL}" + echo -e "${TAB}${BGN}$(translate "Archive size:")${CL} ${BL}${archive_size}${CL}" + echo -e "${TAB}${BGN}$(translate "Duration:")${CL} ${BL}$(hb_human_elapsed "$elapsed")${CL}" + echo -e "" + msg_ok "$(translate "Export completed. The running system has not been modified.")" + else + msg_error "$(translate "Export failed.")" + return 1 + fi } -# =============================== +_rs_warn_dangerous() { + local staging_root="$1" + local -a paths=() + hb_load_restore_paths "$staging_root" paths + local -a warnings=() + local rel + for rel in "${paths[@]}"; do + local cls warn + cls=$(hb_classify_path "$rel") + if [[ "$cls" == "dangerous" ]]; then + warn=$(hb_path_warning "$rel") + [[ -n "$warn" ]] && warnings+=("/$rel") + fi + done + [[ ${#warnings[@]} -eq 0 ]] && return 0 + local tmp; tmp=$(mktemp) + { + echo "$(hb_translate "WARNING — This backup contains paths that are risky to restore on a running system:")" + echo "" + for w in "${warnings[@]}"; do + echo " ⚠ $w" + local detail; detail=$(hb_path_warning "${w#/}") + [[ -n "$detail" ]] && echo " $detail" + echo "" + done + echo "$(hb_translate "Recommendation: use 'Export to file' for these paths and apply manually during a maintenance window.")" + } > "$tmp" + dialog --backtitle "ProxMenux" \ + --title "$(translate "Security Warning — read before applying")" \ + --exit-label "$(translate "I have read this")" \ + --textbox "$tmp" 24 92 || true + rm -f "$tmp" +} +_rs_is_ssh_session() { + [[ -n "${SSH_CONNECTION:-}" || -n "${SSH_CLIENT:-}" || -n "${SSH_TTY:-}" ]] +} -# ========== LOCAL TAR ========== -backup_to_local_tar() { -# local SRC="$1" - local TYPE - local DEST - local LOGFILE="/tmp/tar-backup.log" +_rs_paths_include_network() { + local rel + for rel in "$@"; do + [[ 
"$rel" == etc/network || "$rel" == etc/network/* || "$rel" == etc/resolv.conf ]] && return 0 + done + return 1 +} +_rs_write_cluster_recovery_helper() { + local recovery_root="$1" + local helper="${recovery_root}/apply-cluster-restore.sh" + cat > "$helper" </dev/null; then - apt-get update -qq && apt-get install -y pv >/dev/null 2>&1 +RECOVERY_ROOT="${recovery_root}" + +echo "Cluster recovery helper" +echo "Source: \$RECOVERY_ROOT" +echo +echo "WARNING: run this only in a maintenance window." +echo "This script stops pve-cluster, copies extracted cluster data, and starts pve-cluster again." +echo +read -r -p "Type YES to continue: " ans +[[ "\$ans" == "YES" ]] || { echo "Aborted."; exit 1; } + +systemctl stop pve-cluster || true + +if [[ -d "\$RECOVERY_ROOT/etc/pve" ]]; then + mkdir -p /etc/pve + cp -a "\$RECOVERY_ROOT/etc/pve/." /etc/pve/ || true fi +if [[ -d "\$RECOVERY_ROOT/var/lib/pve-cluster" ]]; then + mkdir -p /var/lib/pve-cluster + cp -a "\$RECOVERY_ROOT/var/lib/pve-cluster/." /var/lib/pve-cluster/ || true +fi +systemctl start pve-cluster || true +echo "Cluster recovery script finished." +EOF + chmod +x "$helper" 2>/dev/null || true +} - TYPE=$(dialog --backtitle "ProxMenux" --menu "$(translate 'Select backup destination:')" 15 60 2 \ - "local" "$(translate 'Local directory')" \ - "usb" "$(translate 'Internal/External dedicated disk')" \ - 3>&1 1>&2 2>&3) || return 1 - - if [[ "$TYPE" == "local" ]]; then - DEST=$(dialog --backtitle "ProxMenux" --inputbox "$(translate 'Enter directory for backup:')" 10 60 "/backup" 3>&1 1>&2 2>&3) || return 1 - - mkdir -p "$DEST" - - -else - - -while true; do - DEST=$(get_external_backup_mount_point) - if [[ -z "$DEST" ]]; then - dialog --backtitle "ProxMenux" --yesno "No external disk detected or mounted. Would you like to retry?" 8 60 - [[ $? 
-eq 0 ]] && continue - return 1 - fi - - DISK_DEV=$(df "$DEST" | awk 'NR==2{print $1}') - PKNAME=$(lsblk -no PKNAME "$DISK_DEV" 2>/dev/null) - [[ -z "$PKNAME" ]] && PKNAME=$(basename "$DISK_DEV" | sed 's/[0-9]*$//') - if [[ -n "$PKNAME" && -b /dev/$PKNAME ]]; then - DISK_MODEL=$(lsblk -no MODEL "/dev/$PKNAME") +_rs_apply() { + local staging_root="$1" + local group="$2" # hot | reboot | all + shift 2 + local -a paths=() + if [[ $# -gt 0 ]]; then + paths=("$@") else - DISK_MODEL="(unknown)" + hb_load_restore_paths "$staging_root" paths fi - FREE_SPACE=$(df -h "$DEST" | awk 'NR==2{print $4}') + local backup_root + backup_root="/root/proxmenux-pre-restore/$(date +%Y%m%d_%H%M%S)" + mkdir -p "$backup_root" + local applied=0 skipped=0 t_start elapsed + local cluster_recovery_root="" CLUSTER_DATA_EXTRACTED="" + t_start=$SECONDS + + local rel src dst cls + for rel in "${paths[@]}"; do + src="$staging_root/rootfs/$rel" + dst="/$rel" + [[ -e "$src" ]] || { ((skipped++)); continue; } + + # Never restore cluster virtual filesystem data live. + # Extract it for manual recovery in maintenance mode. 
+ if [[ "$rel" == etc/pve* ]] || [[ "$rel" == var/lib/pve-cluster* ]]; then + if [[ -z "$cluster_recovery_root" ]]; then + cluster_recovery_root="/root/proxmenux-recovery/$(date +%Y%m%d_%H%M%S)" + mkdir -p "$cluster_recovery_root" + fi + mkdir -p "$cluster_recovery_root/$(dirname "$rel")" + cp -a "$src" "$cluster_recovery_root/$rel" 2>/dev/null || true + CLUSTER_DATA_EXTRACTED="$cluster_recovery_root" + ((skipped++)) + continue + fi + + cls=$(hb_classify_path "$rel") + case "$group" in + hot) [[ "$cls" != "hot" ]] && { ((skipped++)); continue; } ;; + reboot) [[ "$cls" != "reboot" ]] && { ((skipped++)); continue; } ;; + all) ;; # apply everything + esac + + # /etc/zfs: opt-in only + if [[ "$rel" == "etc/zfs" || "$rel" == "etc/zfs/"* ]]; then + [[ "${HB_RESTORE_INCLUDE_ZFS:-0}" != "1" ]] && { ((skipped++)); continue; } + fi + + # Save current before overwriting + if [[ -e "$dst" ]]; then + mkdir -p "$backup_root/$(dirname "$rel")" + cp -a "$dst" "$backup_root/$rel" 2>/dev/null || true + fi + + # Apply + if [[ -d "$src" ]]; then + mkdir -p "$dst" + rsync -aAXH --delete "$src/" "$dst/" 2>/dev/null && ((applied++)) || ((skipped++)) + else + mkdir -p "$(dirname "$dst")" + cp -a "$src" "$dst" 2>/dev/null && ((applied++)) || ((skipped++)) + fi + done + + elapsed=$((SECONDS - t_start)) + [[ "$group" == "hot" || "$group" == "all" ]] && \ + systemctl daemon-reload >/dev/null 2>&1 || true + + echo -e "" + echo -e "${TAB}${BOLD}$(translate "Restore applied:")${CL}" + echo -e "${TAB}${BGN}$(translate "Group:")${CL} ${BL}${group}${CL}" + echo -e "${TAB}${BGN}$(translate "Paths applied:")${CL} ${BL}${applied}${CL}" + echo -e "${TAB}${BGN}$(translate "Paths skipped:")${CL} ${BL}${skipped}${CL}" + echo -e "${TAB}${BGN}$(translate "Duration:")${CL} ${BL}$(hb_human_elapsed "$elapsed")${CL}" + echo -e "${TAB}${BGN}$(translate "Pre-restore backup:")${CL} ${BL}${backup_root}${CL}" + echo -e "" + + if [[ "$group" == "hot" ]]; then + msg_ok "$(translate "Hot changes applied. 
No reboot needed for these paths.")" + else + msg_warn "$(translate "Changes applied. A system reboot is recommended for them to take full effect.")" + fi + + if [[ -n "$CLUSTER_DATA_EXTRACTED" ]]; then + export HB_CLUSTER_DATA_EXTRACTED="$CLUSTER_DATA_EXTRACTED" + _rs_write_cluster_recovery_helper "$CLUSTER_DATA_EXTRACTED" + msg_warn "$(translate "Cluster data was extracted for safe manual recovery at:") $CLUSTER_DATA_EXTRACTED" + msg_warn "$(translate "Generated helper script:") $CLUSTER_DATA_EXTRACTED/apply-cluster-restore.sh" + msg_warn "$(translate "Run it only in a maintenance window.")" + else + unset HB_CLUSTER_DATA_EXTRACTED + fi +} + +_rs_collect_plan_stats() { + local staging_root="$1" + local -a paths=() + hb_load_restore_paths "$staging_root" paths + + RS_PLAN_TOTAL=0 + RS_PLAN_HOT=0 + RS_PLAN_REBOOT=0 + RS_PLAN_DANGEROUS=0 + RS_PLAN_HAS_CLUSTER=0 + RS_PLAN_HAS_NETWORK=0 + RS_PLAN_HAS_ZFS=0 + + local rel cls + RS_PLAN_TOTAL=${#paths[@]} + for rel in "${paths[@]}"; do + cls=$(hb_classify_path "$rel") + case "$cls" in + hot) ((RS_PLAN_HOT++)) ;; + reboot) ((RS_PLAN_REBOOT++)) ;; + dangerous) ((RS_PLAN_DANGEROUS++)) ;; + esac + + [[ "$rel" == etc/network* ]] && RS_PLAN_HAS_NETWORK=1 + [[ "$rel" == etc/pve* || "$rel" == var/lib/pve-cluster* ]] && RS_PLAN_HAS_CLUSTER=1 + [[ "$rel" == etc/zfs* ]] && RS_PLAN_HAS_ZFS=1 + done +} + +_rs_show_plan_summary() { + local staging_root="$1" + local meta="$staging_root/metadata" + local tmp + tmp=$(mktemp) || return 1 + + { + echo "═══ $(translate "Restore plan summary") ═══" + echo "" + if [[ -f "$meta/run_info.env" ]]; then + echo "$(translate "Backup origin metadata:")" + while IFS='=' read -r k v; do + [[ -n "$k" ]] && printf " %-20s %s\n" "${k}:" "$v" + done < "$meta/run_info.env" + echo "" + fi + + echo "$(translate "Detected paths in this backup:") ${RS_PLAN_TOTAL}" + echo " • $(translate "Safe to apply now"): ${RS_PLAN_HOT}" + echo " • $(translate "Require reboot"): ${RS_PLAN_REBOOT}" + echo " • $(translate 
"Risky on running system"): ${RS_PLAN_DANGEROUS}" + echo "" + + if [[ "$RS_PLAN_HAS_NETWORK" -eq 1 ]]; then + echo " • $(translate "Includes /etc/network (may drop SSH immediately)")" + fi + if [[ "$RS_PLAN_HAS_CLUSTER" -eq 1 ]]; then + echo " • $(translate "Includes cluster data (/etc/pve, /var/lib/pve-cluster)")" + echo " $(translate "These paths will not be restored live and will be extracted for manual recovery.")" + fi + if [[ "$RS_PLAN_HAS_ZFS" -eq 1 ]]; then + if [[ "${HB_RESTORE_INCLUDE_ZFS:-0}" == "1" ]]; then + echo " • $(translate "Includes /etc/zfs: ENABLED for restore")" + else + echo " • $(translate "Includes /etc/zfs: DISABLED unless you enable it")" + fi + fi + echo "" + echo "$(translate "Recommendation: start with Complete restore (guided — recommended).")" + } > "$tmp" dialog --backtitle "ProxMenux" \ - --title "$(translate "Dedicated Backup Disk")" \ - --yesno "\n$(translate "Mount point:") $DEST\n\n\ - $(translate "Disk model:") $DISK_MODEL\n\ - $(translate "Available space:") $FREE_SPACE\n\n\ - $(translate "Use this disk for backup?")" 12 70 + --title "$(translate "Restore plan")" \ + --exit-label "OK" \ + --textbox "$tmp" 24 94 || true + rm -f "$tmp" +} +_rs_prompt_zfs_opt_in() { + local staging_root="$1" + export HB_RESTORE_INCLUDE_ZFS=0 - if [[ $? -eq 0 ]]; then - mkdir -p "$DEST" - break - else + if [[ ! -d "$staging_root/rootfs/etc/zfs" ]]; then + return 0 + fi + + local zfs_confirm_msg + zfs_confirm_msg="$(translate "This backup includes /etc/zfs. 
Include it in restore?")"$'\n\n'"$(translate "Only enable this if the target host and ZFS pool names match exactly.")" + if whiptail --title "$(translate "ZFS configuration")" \ + --yesno "$zfs_confirm_msg" \ + 11 76; then + export HB_RESTORE_INCLUDE_ZFS=1 + fi +} + +_rs_finish_flow() { + echo -e "" + msg_success "$(translate "Press Enter to return to menu...")" + read -r +} + +_rs_collect_pending_paths() { + local mode="$1" + shift + local -a in_paths=("$@") + local -A seen=() + local -a out=() + local rel cls + + for rel in "${in_paths[@]}"; do + cls=$(hb_classify_path "$rel") + case "$mode" in + remaining_after_hot) + [[ "$cls" == "hot" ]] && continue + ;; + all_selected) + ;; + esac + [[ -z "$rel" || -n "${seen[$rel]}" ]] && continue + seen["$rel"]=1 + out+=("$rel") + done + + printf '%s\n' "${out[@]}" +} + +_rs_install_pending_service_unit() { + local onboot_script="$1" + local unit_file="/etc/systemd/system/proxmenux-restore-onboot.service" + + cat > "$unit_file" < "$pending_dir/apply-on-boot.list" + for rel in "${pending_paths[@]}"; do + src="$staging_root/rootfs/$rel" + [[ -e "$src" ]] || continue + dst="$pending_dir/rootfs/$rel" + mkdir -p "$(dirname "$dst")" + if [[ -d "$src" ]]; then + mkdir -p "$dst" + rsync -aAXH --delete "$src/" "$dst/" 2>/dev/null || true + else + cp -a "$src" "$dst" 2>/dev/null || true + fi + echo "$rel" >> "$pending_dir/apply-on-boot.list" + done -TAR_INPUT="" -TOTAL_SIZE=0 -for src in $SRC; do - sz=$(du -sb "$src" 2>/dev/null | awk '{print $1}') - TOTAL_SIZE=$((TOTAL_SIZE + sz)) - TAR_INPUT="$TAR_INPUT $src" -done + if [[ ! -s "$pending_dir/apply-on-boot.list" ]]; then + rm -rf "$pending_dir" + msg_warn "$(translate "Nothing to schedule for reboot from selected paths.")" + return 1 + fi -local FILENAME="root-$(hostname)-$(date +%Y%m%d_%H%M).tar.gz" -clear -show_proxmenux_logo -msg_info2 "$(translate "Starting backup with tar...")" -echo -e + [[ -d "$staging_root/metadata" ]] && cp -a "$staging_root/metadata/." 
"$pending_dir/metadata/" 2>/dev/null || true + cat > "$pending_dir/plan.env" < "$pending_dir/state" -TOTAL_SIZE=$(du -cb "$@" | awk '/total$/ {print $1}') -TOTAL_SIZE_GB=$(awk "BEGIN {printf \"%.2f\", $TOTAL_SIZE/1024/1024/1024}") + ln -sfn "$pending_dir" "$pending_base/current" -echo -e "${BL}$(translate "Included directories:")${WHITE} $*${RESET}" -echo -e "${BL}$(translate "Total size:")${WHITE} ${TOTAL_SIZE_GB} GB${RESET}" - -tar -cf - "$@" 2> >(grep -v "Removing leading \`/'" >&2) \ -| pv -s "$TOTAL_SIZE" \ -| gzip > "$DEST/$FILENAME" - - -echo -ne "\033[1A\r\033[K" - -echo -e "${BOLD}${NEON_PURPLE_BLUE}===============================${RESET}\n" -msg_ok "$(translate "Backup process finished. Review log above or in /tmp/tar-backup.log")" -echo -msg_success "$(translate "Press Enter to return to the main menu...")" -read -r + _rs_install_pending_service_unit "$onboot_script" + systemctl daemon-reload >/dev/null 2>&1 || true + if ! systemctl enable proxmenux-restore-onboot.service >/dev/null 2>&1; then + msg_error "$(translate "Could not enable on-boot restore service.")" + return 1 + fi + echo -e "" + echo -e "${TAB}${BGN}$(translate "Pending restore ID:")${CL} ${BL}${restore_id}${CL}" + echo -e "${TAB}${BGN}$(translate "Pending restore dir:")${CL} ${BL}${pending_dir}${CL}" + msg_ok "$(translate "Pending restore prepared. 
It will run automatically at next boot.")" + return 0 } -# =============================== +_rs_handle_ssh_network_risk() { + local staging_root="$1" + shift + local -a selected_paths=("$@") -host_backup_menu \ No newline at end of file + _rs_is_ssh_session || return 0 + _rs_paths_include_network "${selected_paths[@]}" || return 0 + + local schedule_msg + schedule_msg="$(translate "You are connected via SSH and selected network-related restore paths.")"$'\n\n'"$(translate "Recommended: schedule these paths for next boot to avoid immediate SSH disconnection.")"$'\n\n'"$(translate "Do you want to schedule selected paths for next boot now?")" + if whiptail --title "$(translate "SSH network risk")" \ + --yesno "$schedule_msg" \ + 12 86; then + local -a pending_paths=() + mapfile -t pending_paths < <(_rs_collect_pending_paths all_selected "${selected_paths[@]}") + show_proxmenux_logo + msg_title "$(translate "Preparing pending restore (network-safe)")" + if _rs_prepare_pending_restore "$staging_root" "${pending_paths[@]}"; then + msg_warn "$(translate "Reboot is required to apply the scheduled restore.")" + fi + _rs_finish_flow + return 2 + fi + + if ! whiptail --title "$(translate "High risk confirmation")" --defaultno \ + --yesno "$(translate "Continue with live apply now? 
SSH may disconnect immediately.")" \ + 10 80; then + return 1 + fi + return 0 +} + +_rs_run_complete_guided() { + local staging_root="$1" + local -a all_paths=() + hb_load_restore_paths "$staging_root" all_paths + + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Complete restore (guided)")" \ + --menu "\n$(translate "Choose strategy:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Apply safe + reboot-required now (skip risky live paths)")" \ + 2 "$(translate "Full now: apply all paths (advanced — may drop SSH)")" \ + 3 "$(translate "Apply safe now + schedule remaining for next boot (recommended for SSH)")" \ + 4 "$(translate "Schedule full restore for next boot (no live apply now)")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || return 1 + + case "$choice" in + 1) + if ! whiptail --title "$(translate "Confirm guided restore")" \ + --yesno "$(translate "Apply safe + reboot-required restore now?")"$'\n\n'"$(translate "Risky live paths (for example /etc/network) will NOT be applied in this mode.")" \ + 11 78; then + return 1 + fi + + show_proxmenux_logo + msg_title "$(translate "Applying guided complete restore")" + if [[ "$RS_PLAN_HOT" -gt 0 ]]; then + _rs_apply "$staging_root" hot + fi + if [[ "$RS_PLAN_REBOOT" -gt 0 ]]; then + _rs_apply "$staging_root" reboot + fi + if [[ "$RS_PLAN_DANGEROUS" -gt 0 ]]; then + msg_warn "$(translate "Risky live paths were skipped in guided mode. Use Custom restore if you need to apply them.")" + fi + _rs_finish_flow + return 0 + ;; + + 2) + local ssh_network_rc + _rs_handle_ssh_network_risk "$staging_root" "${all_paths[@]}" + ssh_network_rc=$? + [[ $ssh_network_rc -eq 2 ]] && return 0 + [[ $ssh_network_rc -ne 0 ]] && return 1 + + _rs_warn_dangerous "$staging_root" + if ! 
whiptail --title "$(translate "Final confirmation")" \ + --yesno "$(translate "You are about to apply ALL changes, including risky paths.")"$'\n\n'"$(translate "This may interrupt SSH immediately and a reboot is recommended.")"$'\n\n'"$(translate "Continue?")" \ + 12 80; then + return 1 + fi + + show_proxmenux_logo + msg_title "$(translate "Applying full restore")" + _rs_apply "$staging_root" all + _rs_finish_flow + return 0 + ;; + + 3) + if ! whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Apply safe paths now and schedule remaining paths for next boot?")"$'\n\n'"$(translate "This is recommended when connected by SSH.")" \ + 11 80; then + return 1 + fi + + show_proxmenux_logo + msg_title "$(translate "Applying safe paths and preparing pending restore")" + [[ "$RS_PLAN_HOT" -gt 0 ]] && _rs_apply "$staging_root" hot + + local -a pending_paths=() + mapfile -t pending_paths < <(_rs_collect_pending_paths remaining_after_hot "${all_paths[@]}") + if _rs_prepare_pending_restore "$staging_root" "${pending_paths[@]}"; then + msg_warn "$(translate "Reboot is required to complete the pending restore.")" + fi + _rs_finish_flow + return 0 + ;; + + 4) + if ! 
whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Schedule full restore for next boot without applying live changes now?")" \ + 10 80; then + return 1 + fi + + local -a pending_paths=() + mapfile -t pending_paths < <(_rs_collect_pending_paths all_selected "${all_paths[@]}") + show_proxmenux_logo + msg_title "$(translate "Preparing full pending restore")" + if _rs_prepare_pending_restore "$staging_root" "${pending_paths[@]}"; then + msg_warn "$(translate "Reboot is required to apply the scheduled restore.")" + fi + _rs_finish_flow + return 0 + ;; + esac + + return 1 +} + +_rs_component_paths() { + local comp_id="$1" + case "$comp_id" in + network) printf '%s\n' etc/network etc/resolv.conf ;; + ssh_access) printf '%s\n' etc/ssh root/.ssh ;; + host_identity) printf '%s\n' etc/hostname etc/hosts ;; + cron_jobs) printf '%s\n' etc/cron.d etc/cron.daily etc/cron.hourly etc/cron.weekly etc/cron.monthly etc/cron.allow etc/cron.deny var/spool/cron/crontabs ;; + apt_repos) printf '%s\n' etc/apt ;; + kernel_boot) printf '%s\n' etc/modules etc/modules-load.d etc/modprobe.d etc/default/grub etc/kernel etc/udev/rules.d etc/fstab etc/iscsi etc/multipath ;; + systemd_custom) printf '%s\n' etc/systemd/system ;; + scripts) printf '%s\n' usr/local/bin usr/local/share/proxmenux root/bin root/scripts ;; + root_config) printf '%s\n' root/.bashrc root/.profile root/.bash_aliases root/.config ;; + root_ssh) printf '%s\n' root/.ssh ;; + zfs_cfg) printf '%s\n' etc/zfs ;; + postfix_cfg) printf '%s\n' etc/postfix ;; + cluster_cfg) printf '%s\n' etc/pve var/lib/pve-cluster ;; + esac +} + +_rs_component_label() { + local comp_id="$1" + case "$comp_id" in + network) echo "$(translate "Network (interfaces, DNS)")" ;; + ssh_access) echo "$(translate "SSH access (host + root)")" ;; + host_identity) echo "$(translate "Host identity (hostname, hosts)")" ;; + cron_jobs) echo "$(translate "Scheduled tasks (cron)")" ;; + apt_repos) echo "$(translate "APT and repositories")" ;; + 
kernel_boot) echo "$(translate "Kernel, modules and boot config")" ;; + systemd_custom) echo "$(translate "Custom systemd units")" ;; + scripts) echo "$(translate "Custom scripts and ProxMenux files")" ;; + root_config) echo "$(translate "Root shell/profile config")" ;; + root_ssh) echo "$(translate "Root SSH keys/config")" ;; + zfs_cfg) echo "$(translate "ZFS configuration")" ;; + postfix_cfg) echo "$(translate "Postfix configuration")" ;; + cluster_cfg) echo "$(translate "Cluster configuration (advanced)")" ;; + *) echo "$comp_id" ;; + esac +} + +_rs_component_is_available() { + local staging_root="$1" + local comp_id="$2" + local rel + while IFS= read -r rel; do + [[ -n "$rel" && -e "$staging_root/rootfs/$rel" ]] && return 0 + done < <(_rs_component_paths "$comp_id") + return 1 +} + +_rs_unique_paths() { + local __out_var="$1" + shift + local -A seen=() + local -a uniq=() + local p + for p in "$@"; do + [[ -z "$p" || -n "${seen[$p]}" ]] && continue + seen["$p"]=1 + uniq+=("$p") + done + local -n __out_ref="$__out_var" + __out_ref=("${uniq[@]}") +} + +_rs_collect_stats_for_paths() { + RS_SEL_TOTAL=0 + RS_SEL_HOT=0 + RS_SEL_REBOOT=0 + RS_SEL_DANGEROUS=0 + + local rel cls + RS_SEL_TOTAL=$# + for rel in "$@"; do + cls=$(hb_classify_path "$rel") + case "$cls" in + hot) ((RS_SEL_HOT++)) ;; + reboot) ((RS_SEL_REBOOT++)) ;; + dangerous) ((RS_SEL_DANGEROUS++)) ;; + esac + done +} + +_rs_warn_dangerous_paths() { + local -a selected_paths=("$@") + local -a warnings=() + local rel + for rel in "${selected_paths[@]}"; do + [[ "$(hb_classify_path "$rel")" == "dangerous" ]] && warnings+=("$rel") + done + [[ ${#warnings[@]} -eq 0 ]] && return 0 + + local tmp + tmp=$(mktemp) || return 0 + { + echo "$(translate "WARNING — You selected risky paths for live restore:")" + echo "" + for rel in "${warnings[@]}"; do + echo " ⚠ /$rel" + local detail + detail=$(hb_path_warning "$rel") + [[ -n "$detail" ]] && echo " $detail" + echo "" + done + } > "$tmp" + + dialog --backtitle "ProxMenux" 
\ + --title "$(translate "Security Warning — read before applying")" \ + --exit-label "$(translate "I have read this")" \ + --textbox "$tmp" 24 92 || true + rm -f "$tmp" +} + +_rs_select_component_paths() { + local staging_root="$1" + local __out_var="$2" + local -n __out_ref="$__out_var" + + local -a component_ids=( + network ssh_access host_identity cron_jobs apt_repos kernel_boot + systemd_custom scripts root_config root_ssh zfs_cfg postfix_cfg cluster_cfg + ) + local -a checklist=() + local comp_id + for comp_id in "${component_ids[@]}"; do + _rs_component_is_available "$staging_root" "$comp_id" || continue + checklist+=("$comp_id" "$(_rs_component_label "$comp_id")" "off") + done + + if [[ ${#checklist[@]} -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "No components available")" \ + --msgbox "$(translate "No restorable components were detected in this backup.")" 8 68 + return 1 + fi + + local selected + selected=$(dialog --backtitle "ProxMenux" --separate-output \ + --title "$(translate "Custom restore by components")" \ + --checklist "\n$(translate "Select components to restore:")" \ + 24 94 14 "${checklist[@]}" 3>&1 1>&2 2>&3) || return 1 + + if [[ -z "$selected" ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "No components selected")" \ + --msgbox "$(translate "Select at least one component to continue.")" 8 66 + return 1 + fi + + local -a selected_paths=() + while IFS= read -r comp_id; do + [[ -z "$comp_id" ]] && continue + local rel + while IFS= read -r rel; do + [[ -n "$rel" && -e "$staging_root/rootfs/$rel" ]] && selected_paths+=("$rel") + done < <(_rs_component_paths "$comp_id") + done <<< "$selected" + + _rs_unique_paths "$__out_var" "${selected_paths[@]}" + + if [[ ${#__out_ref[@]} -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "No paths available")" \ + --msgbox "$(translate "Selected components have no matching paths in this backup.")" 8 72 + return 1 + fi + return 0 +} + 
+_rs_run_custom_restore() { + local staging_root="$1" + local -a selected_paths=() + + _rs_select_component_paths "$staging_root" selected_paths || return 1 + _rs_collect_stats_for_paths "${selected_paths[@]}" + + while true; do + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Custom restore")" \ + --menu "\n$(translate "Selected component paths:") ${RS_SEL_TOTAL}" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Apply safe changes now") (${RS_SEL_HOT})" \ + 2 "$(translate "Apply safe + reboot-required") ($((RS_SEL_HOT + RS_SEL_REBOOT)))" \ + 3 "$(translate "Apply all selected now (advanced)") (${RS_SEL_TOTAL})" \ + 4 "$(translate "Reselect components")" \ + 5 "$(translate "Apply safe now + schedule remaining for next boot")" \ + 6 "$(translate "Schedule selected components for next boot (no live apply)")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || return 1 + + case "$choice" in + 1) + if [[ "$RS_SEL_HOT" -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "Nothing to apply")" \ + --msgbox "$(translate "No safe-now paths in selected components.")" 8 60 + continue + fi + if ! whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Apply safe changes from selected components now?")" 9 72; then + continue + fi + show_proxmenux_logo + msg_title "$(translate "Applying selected safe changes")" + _rs_apply "$staging_root" hot "${selected_paths[@]}" + [[ "$RS_SEL_REBOOT" -gt 0 || "$RS_SEL_DANGEROUS" -gt 0 ]] && \ + msg_warn "$(translate "Some selected paths were not applied in safe mode.")" + _rs_finish_flow + return 0 + ;; + + 2) + if [[ $((RS_SEL_HOT + RS_SEL_REBOOT)) -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "Nothing to apply")" \ + --msgbox "$(translate "No safe/reboot paths in selected components.")" 8 64 + continue + fi + if ! 
whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Apply safe + reboot-required paths from selected components now?")"$'\n\n'"$(translate "Risky live paths will be skipped.")" \ + 11 78; then + continue + fi + show_proxmenux_logo + msg_title "$(translate "Applying selected safe + reboot changes")" + [[ "$RS_SEL_HOT" -gt 0 ]] && _rs_apply "$staging_root" hot "${selected_paths[@]}" + [[ "$RS_SEL_REBOOT" -gt 0 ]] && _rs_apply "$staging_root" reboot "${selected_paths[@]}" + [[ "$RS_SEL_DANGEROUS" -gt 0 ]] && \ + msg_warn "$(translate "Risky selected paths were skipped in this mode.")" + _rs_finish_flow + return 0 + ;; + + 3) + local ssh_network_rc + _rs_handle_ssh_network_risk "$staging_root" "${selected_paths[@]}" + ssh_network_rc=$? + [[ $ssh_network_rc -eq 2 ]] && return 0 + [[ $ssh_network_rc -ne 0 ]] && continue + + [[ "$RS_SEL_DANGEROUS" -gt 0 ]] && _rs_warn_dangerous_paths "${selected_paths[@]}" + if ! whiptail --title "$(translate "Final confirmation")" \ + --yesno "$(translate "Apply ALL selected component paths now? This can include risky paths.")" \ + 10 78; then + continue + fi + show_proxmenux_logo + msg_title "$(translate "Applying all selected component paths")" + _rs_apply "$staging_root" all "${selected_paths[@]}" + _rs_finish_flow + return 0 + ;; + + 4) + _rs_select_component_paths "$staging_root" selected_paths || continue + _rs_collect_stats_for_paths "${selected_paths[@]}" + ;; + + 5) + if ! 
whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Apply safe selected paths now and schedule remaining selected paths for next boot?")" \ + 10 82; then + continue + fi + show_proxmenux_logo + msg_title "$(translate "Applying safe selected paths and preparing pending restore")" + [[ "$RS_SEL_HOT" -gt 0 ]] && _rs_apply "$staging_root" hot "${selected_paths[@]}" + local -a pending_paths=() + mapfile -t pending_paths < <(_rs_collect_pending_paths remaining_after_hot "${selected_paths[@]}") + if _rs_prepare_pending_restore "$staging_root" "${pending_paths[@]}"; then + msg_warn "$(translate "Reboot is required to complete the pending restore.")" + fi + _rs_finish_flow + return 0 + ;; + + 6) + if ! whiptail --title "$(translate "Confirm")" \ + --yesno "$(translate "Schedule selected component paths for next boot without applying live changes now?")" \ + 10 82; then + continue + fi + local -a pending_paths=() + mapfile -t pending_paths < <(_rs_collect_pending_paths all_selected "${selected_paths[@]}") + show_proxmenux_logo + msg_title "$(translate "Preparing selected pending restore")" + if _rs_prepare_pending_restore "$staging_root" "${pending_paths[@]}"; then + msg_warn "$(translate "Reboot is required to apply the scheduled restore.")" + fi + _rs_finish_flow + return 0 + ;; + + 0) + return 1 + ;; + esac + done +} + +_rs_apply_menu() { + local staging_root="$1" + + _rs_collect_plan_stats "$staging_root" + _rs_prompt_zfs_opt_in "$staging_root" + _rs_show_plan_summary "$staging_root" + + while true; do + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Restore actions")" \ + --menu "\n$(translate "Choose how to continue:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Complete restore (guided — recommended)")" \ + 2 "$(translate "Custom restore by components")" \ + 3 "$(translate "Export to file (no system changes)")" \ + 4 "$(translate "Preview changes (diff)")" \ + 5 "$(translate "View backup 
metadata")" \ + 6 "$(translate "View restore plan")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || return 1 + + case "$choice" in + 1) + _rs_collect_plan_stats "$staging_root" + _rs_run_complete_guided "$staging_root" && return 0 + ;; + 2) + _rs_collect_plan_stats "$staging_root" + _rs_run_custom_restore "$staging_root" && return 0 + ;; + 3) + if _rs_export_to_file "$staging_root"; then + _rs_finish_flow + return 0 + fi + ;; + 4) _rs_preview_diff "$staging_root" ;; + 5) _rs_show_metadata "$staging_root" ;; + 6) + _rs_collect_plan_stats "$staging_root" + _rs_show_plan_summary "$staging_root" + ;; + 0) return 1 ;; + esac + done +} + +# ========================================================== +# RESTORE MENU +# ========================================================== +restore_menu() { + while true; do + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Host Config Restore")" \ + --menu "\n$(translate "Select restore source:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Restore from Proxmox Backup Server (PBS)")" \ + 2 "$(translate "Restore from Borg repository")" \ + 3 "$(translate "Restore from local archive (.tar.gz / .tar.zst)")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || break + [[ "$choice" == "0" ]] && break + + local staging_root + staging_root=$(mktemp -d /tmp/proxmenux-restore.XXXXXX) + + local ok=0 + case "$choice" in + 1) _rs_extract_pbs "$staging_root" && ok=1 ;; + 2) _rs_extract_borg "$staging_root" && ok=1 ;; + 3) _rs_extract_local "$staging_root" && ok=1 ;; + esac + + if [[ $ok -eq 1 ]] && _rs_check_layout "$staging_root"; then + if _rs_apply_menu "$staging_root"; then + rm -rf "$staging_root" + return 0 + fi + fi + + rm -rf "$staging_root" + done +} + +# ========================================================== +# MAIN MENU +# ========================================================== +main_menu() { + while true; do + show_proxmenux_logo + local choice + 
choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Host Config Backup / Restore")" \ + --menu "\n$(translate "Select operation:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Backup host configuration")" \ + 2 "$(translate "Restore host configuration")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || break + + case "$choice" in + 1) backup_menu ;; + 2) restore_menu ;; + 0) break ;; + esac + done +} + +main_menu diff --git a/scripts/backup_restore/backup_scheduler.sh b/scripts/backup_restore/backup_scheduler.sh new file mode 100644 index 00000000..26c87d35 --- /dev/null +++ b/scripts/backup_restore/backup_scheduler.sh @@ -0,0 +1,387 @@ +#!/bin/bash +# ========================================================== +# ProxMenux - Scheduled Backup Jobs +# ========================================================== + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)" +LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts" +LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT" +BASE_DIR="/usr/local/share/proxmenux" +UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" + +if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then + LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL" + UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" +elif [[ ! -f "$UTILS_FILE" ]]; then + UTILS_FILE="$BASE_DIR/utils.sh" +fi + +if [[ -f "$UTILS_FILE" ]]; then + # shellcheck source=/dev/null + source "$UTILS_FILE" +else + echo "ERROR: utils.sh not found." >&2 + exit 1 +fi + +LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh" +[[ ! 
-f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh" +if [[ -f "$LIB_FILE" ]]; then + # shellcheck source=/dev/null + source "$LIB_FILE" +else + msg_error "$(translate "Cannot load backup library: lib_host_backup_common.sh")" + exit 1 +fi + +load_language +initialize_cache + +JOBS_DIR="/var/lib/proxmenux/backup-jobs" +LOG_DIR="/var/log/proxmenux/backup-jobs" +mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true + +_job_file() { echo "${JOBS_DIR}/$1.env"; } +_job_paths_file() { echo "${JOBS_DIR}/$1.paths"; } +_service_file() { echo "/etc/systemd/system/proxmenux-backup-$1.service"; } +_timer_file() { echo "/etc/systemd/system/proxmenux-backup-$1.timer"; } + +_normalize_uint() { + local v="${1:-0}" + [[ "$v" =~ ^[0-9]+$ ]] || v=0 + echo "$v" +} + +_write_job_env() { + local file="$1" + shift + { + echo "# ProxMenux scheduled backup job" + local kv key val + for kv in "$@"; do + key="${kv%%=*}" + val="${kv#*=}" + printf '%s=%q\n' "$key" "$val" + done + } > "$file" +} + +_list_jobs() { + local f + for f in "$JOBS_DIR"/*.env; do + [[ -f "$f" ]] || continue + basename "$f" .env + done | sort +} + +_show_job_status() { + local id="$1" + local timer_state="disabled" + local service_state="unknown" + systemctl is-enabled --quiet "proxmenux-backup-${id}.timer" >/dev/null 2>&1 && timer_state="enabled" + service_state=$(systemctl is-active "proxmenux-backup-${id}.service" 2>/dev/null || echo "inactive") + echo "${timer_state}/${service_state}" +} + +_write_job_units() { + local id="$1" + local on_calendar="$2" + local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh" + [[ ! 
-f "$runner" ]] && runner="$SCRIPT_DIR/run_scheduled_backup.sh" + + cat > "$(_service_file "$id")" < "$(_timer_file "$id")" </dev/null 2>&1 || true +} + +_prompt_retention() { + local __out_var="$1" + local last hourly daily weekly monthly yearly + last=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-last (0 disables)")" 9 60 "7" 3>&1 1>&2 2>&3) || return 1 + hourly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-hourly (0 disables)")" 9 60 "0" 3>&1 1>&2 2>&3) || return 1 + daily=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-daily (0 disables)")" 9 60 "7" 3>&1 1>&2 2>&3) || return 1 + weekly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-weekly (0 disables)")" 9 60 "4" 3>&1 1>&2 2>&3) || return 1 + monthly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-monthly (0 disables)")" 9 60 "3" 3>&1 1>&2 2>&3) || return 1 + yearly=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \ + --inputbox "$(translate "keep-yearly (0 disables)")" 9 60 "0" 3>&1 1>&2 2>&3) || return 1 + + last=$(_normalize_uint "$last") + hourly=$(_normalize_uint "$hourly") + daily=$(_normalize_uint "$daily") + weekly=$(_normalize_uint "$weekly") + monthly=$(_normalize_uint "$monthly") + yearly=$(_normalize_uint "$yearly") + + local -n out="$__out_var" + out=( + "KEEP_LAST=$last" + "KEEP_HOURLY=$hourly" + "KEEP_DAILY=$daily" + "KEEP_WEEKLY=$weekly" + "KEEP_MONTHLY=$monthly" + "KEEP_YEARLY=$yearly" + ) +} + +_create_job() { + local id backend on_calendar profile_mode + id=$(dialog --backtitle "ProxMenux" --title "$(translate "New backup job")" \ + --inputbox "$(translate "Job ID (letters, numbers, - _)")" 9 68 "hostcfg-daily" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$id" ]] && return 1 + id=$(echo "$id" | tr -cs 
'[:alnum:]_-' '-' | sed 's/^-*//; s/-*$//') + [[ -z "$id" ]] && return 1 + [[ -f "$(_job_file "$id")" ]] && { + dialog --backtitle "ProxMenux" --title "$(translate "Error")" \ + --msgbox "$(translate "A job with this ID already exists.")" 8 62 + return 1 + } + + backend=$(dialog --backtitle "ProxMenux" --title "$(translate "Backend")" \ + --menu "\n$(translate "Select backup backend:")" 14 70 6 \ + "local" "Local archive" \ + "borg" "Borg repository" \ + "pbs" "Proxmox Backup Server" \ + 3>&1 1>&2 2>&3) || return 1 + + on_calendar=$(dialog --backtitle "ProxMenux" --title "$(translate "Schedule")" \ + --inputbox "$(translate "systemd OnCalendar expression")"$'\n'"$(translate "Example: daily or Mon..Fri 03:00")" \ + 11 72 "daily" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$on_calendar" ]] && return 1 + + profile_mode=$(dialog --backtitle "ProxMenux" --title "$(translate "Profile")" \ + --menu "\n$(translate "Select backup profile:")" 12 68 4 \ + "default" "Default critical paths" \ + "custom" "Custom selected paths" \ + 3>&1 1>&2 2>&3) || return 1 + + local -a paths=() + hb_select_profile_paths "$profile_mode" paths || return 1 + + local -a retention=() + _prompt_retention retention || return 1 + + local -a lines=( + "JOB_ID=$id" + "BACKEND=$backend" + "ON_CALENDAR=$on_calendar" + "PROFILE_MODE=$profile_mode" + "ENABLED=1" + ) + lines+=("${retention[@]}") + + case "$backend" in + local) + local dest_dir ext + dest_dir=$(hb_prompt_dest_dir) || return 1 + ext=$(dialog --backtitle "ProxMenux" --title "$(translate "Archive format")" \ + --menu "\n$(translate "Select local archive format:")" 12 62 4 \ + "tar.zst" "tar + zstd (preferred)" \ + "tar.gz" "tar + gzip" \ + 3>&1 1>&2 2>&3) || return 1 + lines+=("LOCAL_DEST_DIR=$dest_dir" "LOCAL_ARCHIVE_EXT=$ext") + ;; + borg) + local repo passphrase + hb_select_borg_repo repo || return 1 + hb_prepare_borg_passphrase || return 1 + passphrase="${BORG_PASSPHRASE:-}" + lines+=( + "BORG_REPO=$repo" + "BORG_PASSPHRASE=$passphrase" + 
"BORG_ENCRYPT_MODE=${BORG_ENCRYPT_MODE:-none}" + ) + ;; + pbs) + hb_select_pbs_repository || return 1 + hb_ask_pbs_encryption + local bid + bid="hostcfg-$(hostname)" + bid=$(dialog --backtitle "ProxMenux" --title "PBS" \ + --inputbox "$(translate "Backup ID for this job:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$bid" 3>&1 1>&2 2>&3) || return 1 + bid=$(echo "$bid" | tr -cs '[:alnum:]_-' '-' | sed 's/-*$//') + lines+=( + "PBS_REPOSITORY=${HB_PBS_REPOSITORY}" + "PBS_PASSWORD=${HB_PBS_SECRET}" + "PBS_BACKUP_ID=${bid}" + "PBS_KEYFILE=${HB_PBS_KEYFILE:-}" + "PBS_ENCRYPTION_PASSWORD=${HB_PBS_ENC_PASS:-}" + ) + ;; + esac + + _write_job_env "$(_job_file "$id")" "${lines[@]}" + + : > "$(_job_paths_file "$id")" + local p + for p in "${paths[@]}"; do + echo "$p" >> "$(_job_paths_file "$id")" + done + + _write_job_units "$id" "$on_calendar" + systemctl enable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true + + show_proxmenux_logo + msg_title "$(translate "Scheduled backup job created")" + echo -e "" + echo -e "${TAB}${BGN}$(translate "Job ID:")${CL} ${BL}${id}${CL}" + echo -e "${TAB}${BGN}$(translate "Backend:")${CL} ${BL}${backend}${CL}" + echo -e "${TAB}${BGN}$(translate "Schedule:")${CL} ${BL}${on_calendar}${CL}" + echo -e "${TAB}${BGN}$(translate "Status:")${CL} ${BL}$(_show_job_status "$id")${CL}" + echo -e "" + msg_success "$(translate "Press Enter to continue...")" + read -r + return 0 +} + +_pick_job() { + local title="$1" + local __out_var="$2" + + local -a ids=() + mapfile -t ids < <(_list_jobs) + if [[ ${#ids[@]} -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(translate "No jobs")" \ + --msgbox "$(translate "No scheduled backup jobs found.")" 8 62 + return 1 + fi + + local -a menu=() + local i=1 id + for id in "${ids[@]}"; do + menu+=("$i" "$id [$(_show_job_status "$id")]") + ((i++)) + done + local sel + sel=$(dialog --backtitle "ProxMenux" --title "$title" \ + --menu "\n$(translate "Select a job:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" 
"$HB_UI_MENU_LIST" \ + "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + + local picked="${ids[$((sel-1))]}" + local -n out="$__out_var" + out="$picked" + return 0 +} + +_job_run_now() { + local id="" + _pick_job "$(translate "Run job now")" id || return 1 + local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh" + [[ ! -f "$runner" ]] && runner="$SCRIPT_DIR/run_scheduled_backup.sh" + if "$runner" "$id"; then + msg_ok "$(translate "Job executed successfully.")" + else + msg_warn "$(translate "Job execution finished with errors. Check logs.")" + fi + msg_success "$(translate "Press Enter to continue...")" + read -r +} + +_job_toggle() { + local id="" + _pick_job "$(translate "Enable/Disable job")" id || return 1 + if systemctl is-enabled --quiet "proxmenux-backup-${id}.timer" >/dev/null 2>&1; then + systemctl disable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true + msg_warn "$(translate "Job timer disabled:") $id" + else + systemctl enable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true + msg_ok "$(translate "Job timer enabled:") $id" + fi + msg_success "$(translate "Press Enter to continue...")" + read -r +} + +_job_delete() { + local id="" + _pick_job "$(translate "Delete job")" id || return 1 + if ! 
whiptail --title "$(translate "Confirm delete")" \ + --yesno "$(translate "Delete scheduled backup job?")"$'\n\n'"ID: ${id}" 10 66; then + return 1 + fi + systemctl disable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true + rm -f "$(_service_file "$id")" "$(_timer_file "$id")" "$(_job_file "$id")" "$(_job_paths_file "$id")" + systemctl daemon-reload >/dev/null 2>&1 || true + msg_ok "$(translate "Job deleted:") $id" + msg_success "$(translate "Press Enter to continue...")" + read -r +} + +_show_jobs() { + local tmp + tmp=$(mktemp) || return + { + echo "=== $(translate "Scheduled backup jobs") ===" + echo "" + local id + while IFS= read -r id; do + [[ -z "$id" ]] && continue + echo "• $id [$(_show_job_status "$id")]" + if [[ -f "${LOG_DIR}/${id}-last.status" ]]; then + sed 's/^/ /' "${LOG_DIR}/${id}-last.status" + fi + echo "" + done < <(_list_jobs) + } > "$tmp" + dialog --backtitle "ProxMenux" --title "$(translate "Scheduled backup jobs")" \ + --textbox "$tmp" 28 100 || true + rm -f "$tmp" +} + +main_menu() { + while true; do + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(translate "Backup scheduler and retention")" \ + --menu "\n$(translate "Choose action:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + 1 "$(translate "Create scheduled backup job")" \ + 2 "$(translate "Show jobs and last run status")" \ + 3 "$(translate "Run a job now")" \ + 4 "$(translate "Enable / disable job timer")" \ + 5 "$(translate "Delete job")" \ + 0 "$(translate "Return")" \ + 3>&1 1>&2 2>&3) || return 0 + + case "$choice" in + 1) _create_job ;; + 2) _show_jobs ;; + 3) _job_run_now ;; + 4) _job_toggle ;; + 5) _job_delete ;; + 0) return 0 ;; + esac + done +} + +main_menu diff --git a/scripts/backup_restore/lib_host_backup_common.sh b/scripts/backup_restore/lib_host_backup_common.sh new file mode 100644 index 00000000..315a62d9 --- /dev/null +++ b/scripts/backup_restore/lib_host_backup_common.sh @@ -0,0 +1,770 @@ +#!/bin/bash +# 
========================================================== +# ProxMenux - Host Config Backup/Restore - Shared Library +# ========================================================== +# Author : MacRimi +# Copyright : (c) 2024 MacRimi +# License : MIT +# Version : 1.0 +# Last Updated: 08/04/2026 +# ========================================================== +# Do not execute directly — source from backup_host.sh + +# Library guard +[[ "${BASH_SOURCE[0]}" == "$0" ]] && { + echo "This file is a library. Source it, do not run it directly." >&2; exit 1 +} + +HB_STATE_DIR="/usr/local/share/proxmenux" +HB_BORG_VERSION="1.2.8" +HB_BORG_LINUX64_SHA256="cfa50fb704a93d3a4fa258120966345fddb394f960dca7c47fcb774d0172f40b" +HB_BORG_LINUX64_URL="https://github.com/borgbackup/borg/releases/download/${HB_BORG_VERSION}/borg-linux64" + +# Translation wrapper — safe fallback if translate not yet loaded +hb_translate() { + declare -f translate >/dev/null 2>&1 && translate "$1" || echo "$1" +} + +# ========================================================== +# UI SIZE CONSTANTS +# ========================================================== +HB_UI_MENU_H=22 +HB_UI_MENU_W=84 +HB_UI_MENU_LIST=10 +HB_UI_INPUT_H=10 +HB_UI_INPUT_W=72 +HB_UI_PASS_H=10 +HB_UI_PASS_W=72 +HB_UI_YESNO_H=10 +HB_UI_YESNO_W=78 + +# ========================================================== +# DEFAULT PROFILE PATHS +# ========================================================== +hb_default_profile_paths() { + local paths=( + "/etc/pve" + "/etc/network" + "/etc/hosts" + "/etc/hostname" + "/etc/ssh" + "/etc/systemd/system" + "/etc/modules" + "/etc/modules-load.d" + "/etc/modprobe.d" + "/etc/udev/rules.d" + "/etc/default/grub" + "/etc/fstab" + "/etc/kernel" + "/etc/apt" + "/etc/vzdump.conf" + "/etc/postfix" + "/etc/resolv.conf" + "/etc/timezone" + "/etc/iscsi" + "/etc/multipath" + "/usr/local/bin" + "/usr/local/share/proxmenux" + "/root" + "/etc/cron.d" + "/etc/cron.daily" + "/etc/cron.hourly" + "/etc/cron.weekly" + 
"/etc/cron.monthly" + "/etc/cron.allow" + "/etc/cron.deny" + "/var/spool/cron/crontabs" + "/var/lib/pve-cluster" + ) + if [[ -d /etc/zfs ]] || command -v zpool >/dev/null 2>&1; then + paths+=("/etc/zfs") + fi + printf '%s\n' "${paths[@]}" +} + +# ========================================================== +# PATH CLASSIFICATION (restore safety) +# Returns: dangerous | reboot | hot +# ========================================================== +hb_classify_path() { + local rel="$1" # without leading / + case "$rel" in + etc/pve|etc/pve/*|\ + var/lib/pve-cluster|var/lib/pve-cluster/*|\ + etc/network|etc/network/*) + echo "dangerous" ;; + etc/modules|etc/modules/*|\ + etc/modules-load.d|etc/modules-load.d/*|\ + etc/modprobe.d|etc/modprobe.d/*|\ + etc/udev/rules.d|etc/udev/rules.d/*|\ + etc/default/grub|\ + etc/fstab|\ + etc/kernel|etc/kernel/*|\ + etc/iscsi|etc/iscsi/*|\ + etc/multipath|etc/multipath/*|\ + etc/zfs|etc/zfs/*) + echo "reboot" ;; + *) + echo "hot" ;; + esac +} + +hb_path_warning() { + local rel="$1" + case "$rel" in + etc/pve|etc/pve/*) + hb_translate "/etc/pve is managed by pmxcfs (cluster filesystem). Applying this on a running node can corrupt cluster state. Use 'Export to file' and apply it manually during a maintenance window." ;; + var/lib/pve-cluster|var/lib/pve-cluster/*) + hb_translate "/var/lib/pve-cluster is live cluster data. Never restore this while the node is running. Use 'Export to file' for manual recovery only." ;; + etc/network|etc/network/*) + hb_translate "/etc/network controls active interfaces. Applying may immediately change or drop network connectivity, including active SSH sessions." 
;; + esac +} + +# ========================================================== +# PROFILE PATH SELECTION +# ========================================================== +hb_select_profile_paths() { + local mode="$1" + local __out_var="$2" + local -n __out_ref="$__out_var" + + mapfile -t __defaults < <(hb_default_profile_paths) + + if [[ "$mode" == "default" ]]; then + __out_ref=("${__defaults[@]}") + return 0 + fi + + local options=() idx=1 path + for path in "${__defaults[@]}"; do + options+=("$idx" "$path" "off") + ((idx++)) + done + + local selected + selected=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Custom backup profile")" \ + --separate-output --checklist \ + "$(hb_translate "Select paths to include:")" \ + 26 86 18 "${options[@]}" 3>&1 1>&2 2>&3) || return 1 + + __out_ref=() + local choice + while read -r choice; do + [[ -z "$choice" ]] && continue + __out_ref+=("${__defaults[$((choice-1))]}") + done <<< "$selected" + + if [[ ${#__out_ref[@]} -eq 0 ]]; then + dialog --backtitle "ProxMenux" --title "$(hb_translate "Error")" \ + --msgbox "$(hb_translate "No paths selected. 
Select at least one path.")" 8 60 + return 1 + fi +} + +# ========================================================== +# STAGING OPERATIONS +# ========================================================== +hb_prepare_staging() { + local staging_root="$1"; shift + local paths=("$@") + + rm -rf "$staging_root" + mkdir -p "$staging_root/rootfs" "$staging_root/metadata" + + local selected_file="$staging_root/metadata/selected_paths.txt" + local missing_file="$staging_root/metadata/missing_paths.txt" + : > "$selected_file" + : > "$missing_file" + + local p rel target + for p in "${paths[@]}"; do + rel="${p#/}" + echo "$rel" >> "$selected_file" + [[ -e "$p" ]] || { echo "$p" >> "$missing_file"; continue; } + target="$staging_root/rootfs/$rel" + if [[ -d "$p" ]]; then + mkdir -p "$target" + local -a rsync_opts=( + -aAXH --numeric-ids + --exclude "images/" + --exclude "dump/" + --exclude "tmp/" + --exclude "*.log" + ) + + # /root is included by default for easier recovery, but avoid volatile/sensitive noise. + if [[ "$rel" == "root" || "$rel" == "root/"* ]]; then + rsync_opts+=( + --exclude ".bash_history" + --exclude ".cache/" + --exclude "tmp/" + --exclude ".local/share/Trash/" + ) + fi + + # Runtime pending-restore data belongs in /var/lib/proxmenux, never in app code tree. 
+ if [[ "$rel" == "usr/local/share/proxmenux" || "$rel" == "usr/local/share/proxmenux/"* ]]; then + rsync_opts+=( + --exclude "restore-pending/" + ) + fi + + rsync "${rsync_opts[@]}" "$p/" "$target/" 2>/dev/null || true + else + mkdir -p "$(dirname "$target")" + cp -a "$p" "$target" 2>/dev/null || true + fi + done + + # Metadata snapshot + local meta="$staging_root/metadata" + { + echo "generated_at=$(date -Iseconds)" + echo "hostname=$(hostname)" + echo "kernel=$(uname -r)" + } > "$meta/run_info.env" + command -v pveversion >/dev/null 2>&1 && pveversion -v > "$meta/pveversion.txt" 2>&1 || true + command -v lsblk >/dev/null 2>&1 && lsblk -f > "$meta/lsblk.txt" 2>&1 || true + command -v qm >/dev/null 2>&1 && qm list > "$meta/qm-list.txt" 2>&1 || true + command -v pct >/dev/null 2>&1 && pct list > "$meta/pct-list.txt" 2>&1 || true + command -v zpool >/dev/null 2>&1 && zpool status > "$meta/zpool.txt" 2>&1 || true + + # Manifest + checksums + ( + cd "$staging_root/rootfs" || return 1 + find . -mindepth 1 -print | sort > "$meta/manifest.txt" + find . 
-type f -print0 | sort -z | xargs -0 sha256sum 2>/dev/null \ + > "$meta/checksums.sha256" || true + ) +} + +hb_load_restore_paths() { + local restore_root="$1" + local __out_var="$2" + local -n __out="$__out_var" + + __out=() + local selected="$restore_root/metadata/selected_paths.txt" + if [[ -f "$selected" ]]; then + while IFS= read -r line; do + [[ -n "$line" ]] && __out+=("$line") + done < "$selected" + fi + # Fallback: scan rootfs + if [[ ${#__out[@]} -eq 0 ]]; then + local p + while IFS= read -r p; do + [[ -n "$p" && -e "$restore_root/rootfs/${p#/}" ]] && __out+=("${p#/}") + done < <(hb_default_profile_paths) + fi +} + +# ========================================================== +# PBS CONFIG — auto-detect from storage.cfg + manual +# ========================================================== +hb_collect_pbs_configs() { + HB_PBS_NAMES=() + HB_PBS_REPOS=() + HB_PBS_SECRETS=() + HB_PBS_SOURCES=() + + if [[ -f /etc/pve/storage.cfg ]]; then + local current="" server="" datastore="" username="" pw_file pw_val + while IFS= read -r line; do + line="${line%%#*}" + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + if [[ $line =~ ^pbs:[[:space:]]*(.+)$ ]]; then + if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then + pw_file="/etc/pve/priv/storage/${current}.pw" + pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")" + HB_PBS_NAMES+=("$current") + HB_PBS_REPOS+=("${username}@${server}:${datastore}") + HB_PBS_SECRETS+=("$pw_val") + HB_PBS_SOURCES+=("proxmox") + fi + current="${BASH_REMATCH[1]}"; server="" datastore="" username="" + elif [[ -n "$current" ]]; then + [[ $line =~ ^[[:space:]]+server[[:space:]]+(.+)$ ]] && server="${BASH_REMATCH[1]}" + [[ $line =~ ^[[:space:]]+datastore[[:space:]]+(.+)$ ]] && datastore="${BASH_REMATCH[1]}" + [[ $line =~ ^[[:space:]]+username[[:space:]]+(.+)$ ]] && username="${BASH_REMATCH[1]}" + if [[ $line =~ ^[a-zA-Z]+:[[:space:]] && + -n 
"$server" && -n "$datastore" && -n "$username" ]]; then + pw_file="/etc/pve/priv/storage/${current}.pw" + pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")" + HB_PBS_NAMES+=("$current") + HB_PBS_REPOS+=("${username}@${server}:${datastore}") + HB_PBS_SECRETS+=("$pw_val") + HB_PBS_SOURCES+=("proxmox") + current="" server="" datastore="" username="" + fi + fi + done < /etc/pve/storage.cfg + # Last stanza + if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then + pw_file="/etc/pve/priv/storage/${current}.pw" + pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")" + HB_PBS_NAMES+=("$current") + HB_PBS_REPOS+=("${username}@${server}:${datastore}") + HB_PBS_SECRETS+=("$pw_val") + HB_PBS_SOURCES+=("proxmox") + fi + fi + + # Manual configs + local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt" + if [[ -f "$manual_cfg" ]]; then + local line name repo sf + while IFS= read -r line; do + line="${line%%#*}" + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" ]] && continue + name="${line%%|*}"; repo="${line##*|}" + sf="$HB_STATE_DIR/pbs-pass-${name}.txt" + HB_PBS_NAMES+=("$name"); HB_PBS_REPOS+=("$repo") + HB_PBS_SECRETS+=("$([[ -f "$sf" ]] && cat "$sf" || echo "")") + HB_PBS_SOURCES+=("manual") + done < "$manual_cfg" + fi +} + +hb_configure_pbs_manual() { + local name user host datastore repo secret + + name=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \ + --inputbox "$(hb_translate "Configuration name:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "PBS-$(date +%m%d)" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$name" ]] && return 1 + + user=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \ + --inputbox "$(hb_translate "Username (e.g. 
root@pam or user@pbs!token):")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root@pam" 3>&1 1>&2 2>&3) || return 1 + + host=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \ + --inputbox "$(hb_translate "PBS host or IP address:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$host" ]] && return 1 + + datastore=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \ + --inputbox "$(hb_translate "Datastore name:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1 + [[ -z "$datastore" ]] && return 1 + + secret=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \ + --insecure --passwordbox "$(hb_translate "Password or API token secret:")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1 + + repo="${user}@${host}:${datastore}" + mkdir -p "$HB_STATE_DIR" + local cfg_line="${name}|${repo}" + local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt" + touch "$manual_cfg" + grep -Fxq "$cfg_line" "$manual_cfg" || echo "$cfg_line" >> "$manual_cfg" + printf '%s' "$secret" > "$HB_STATE_DIR/pbs-pass-${name}.txt" + chmod 600 "$HB_STATE_DIR/pbs-pass-${name}.txt" + + HB_PBS_NAME="$name"; HB_PBS_REPOSITORY="$repo"; HB_PBS_SECRET="$secret" +} + +hb_select_pbs_repository() { + hb_collect_pbs_configs + + local menu=() i=1 idx + for idx in "${!HB_PBS_NAMES[@]}"; do + local src="${HB_PBS_SOURCES[$idx]}" + local label="${HB_PBS_NAMES[$idx]} — ${HB_PBS_REPOS[$idx]} [$src]" + [[ -z "${HB_PBS_SECRETS[$idx]}" ]] && label+=" ⚠ $(hb_translate "no password")" + menu+=("$i" "$label"); ((i++)) + done + menu+=("$i" "$(hb_translate "+ Add new PBS manually")") + + local choice + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Select PBS repository")" \ + --menu "\n$(hb_translate "Available PBS repositories:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + + if [[ "$choice" == "$i" ]]; then + 
hb_configure_pbs_manual || return 1 + else + local sel=$((choice-1)) + HB_PBS_NAME="${HB_PBS_NAMES[$sel]}" + export HB_PBS_REPOSITORY="${HB_PBS_REPOS[$sel]}" + HB_PBS_SECRET="${HB_PBS_SECRETS[$sel]}" + if [[ -z "$HB_PBS_SECRET" ]]; then + HB_PBS_SECRET=$(dialog --backtitle "ProxMenux" --title "PBS" \ + --insecure --passwordbox \ + "$(hb_translate "Password for:") $HB_PBS_NAME" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1 + mkdir -p "$HB_STATE_DIR" + printf '%s' "$HB_PBS_SECRET" > "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt" + chmod 600 "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt" + fi + fi +} + +hb_ask_pbs_encryption() { + local key_file="$HB_STATE_DIR/pbs-key.conf" + local enc_pass_file="$HB_STATE_DIR/pbs-encryption-pass.txt" + export HB_PBS_KEYFILE_OPT="" + export HB_PBS_ENC_PASS="" + + dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \ + --yesno "$(hb_translate "Encrypt this backup with a keyfile?")" \ + "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0 + + if [[ -f "$key_file" ]]; then + export HB_PBS_KEYFILE_OPT="--keyfile $key_file" + if [[ -f "$enc_pass_file" ]]; then + HB_PBS_ENC_PASS="$(<"$enc_pass_file")" + export HB_PBS_ENC_PASS + fi + msg_ok "$(hb_translate "Using existing encryption key:") $key_file" + return 0 + fi + + # No key — offer to create one + dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \ + --yesno "$(hb_translate "No encryption key found. 
Create one now?")" \ + "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0 + + local pass1 pass2 + while true; do + pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \ + "$(hb_translate "Encryption passphrase (separate from PBS password):")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0 + pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \ + "$(hb_translate "Confirm encryption passphrase:")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0 + [[ "$pass1" == "$pass2" ]] && break + dialog --backtitle "ProxMenux" \ + --msgbox "$(hb_translate "Passphrases do not match. Try again.")" 8 50 + done + + msg_info "$(hb_translate "Creating PBS encryption key...")" + if PBS_ENCRYPTION_PASSWORD="$pass1" \ + proxmox-backup-client key create "$key_file" >/dev/null 2>&1; then + printf '%s' "$pass1" > "$enc_pass_file" + chmod 600 "$enc_pass_file" + msg_ok "$(hb_translate "Encryption key created:") $key_file" + HB_PBS_KEYFILE_OPT="--keyfile $key_file" + HB_PBS_ENC_PASS="$pass1" + local key_warn_msg + key_warn_msg="$(hb_translate "IMPORTANT: Back up this key file. Without it the backup cannot be restored.")"$'\n\n'"$(hb_translate "Key:") $key_file" + dialog --backtitle "ProxMenux" --msgbox \ + "$key_warn_msg" \ + 10 74 + else + msg_error "$(hb_translate "Failed to create encryption key. Backup will proceed without encryption.")" + fi +} + +# ========================================================== +# BORG +# ========================================================== +hb_ensure_borg() { + command -v borg >/dev/null 2>&1 && { echo "borg"; return 0; } + local appimage="$HB_STATE_DIR/borg" + local tmp_file + [[ -x "$appimage" ]] && { echo "$appimage"; return 0; } + command -v sha256sum >/dev/null 2>&1 || { + msg_error "$(hb_translate "sha256sum not found. Cannot verify Borg binary.")" + return 1 + } + msg_info "$(hb_translate "Borg not found. Downloading borg") ${HB_BORG_VERSION}..." 
+ mkdir -p "$HB_STATE_DIR" + tmp_file=$(mktemp "$HB_STATE_DIR/.borg-download.XXXXXX") || return 1 + if wget -qO "$tmp_file" "$HB_BORG_LINUX64_URL"; then + if echo "${HB_BORG_LINUX64_SHA256} $tmp_file" | sha256sum -c - >/dev/null 2>&1; then + mv -f "$tmp_file" "$appimage" + else + rm -f "$tmp_file" + msg_error "$(hb_translate "Borg binary checksum verification failed.")" + return 1 + fi + chmod +x "$appimage" + msg_ok "$(hb_translate "Borg ready.")" + echo "$appimage"; return 0 + fi + rm -f "$tmp_file" + msg_error "$(hb_translate "Failed to download Borg.")" + return 1 +} + +hb_borg_init_if_needed() { + local borg_bin="$1" repo="$2" encrypt_mode="$3" + "$borg_bin" list "$repo" >/dev/null 2>&1 && return 0 + if "$borg_bin" help repo-create >/dev/null 2>&1; then + "$borg_bin" repo-create -e "$encrypt_mode" "$repo" + else + "$borg_bin" init --encryption="$encrypt_mode" "$repo" + fi +} + +hb_prepare_borg_passphrase() { + local pass_file="$HB_STATE_DIR/borg-pass.txt" + BORG_ENCRYPT_MODE="none" + unset BORG_PASSPHRASE + + if [[ -f "$pass_file" ]]; then + export BORG_PASSPHRASE + BORG_PASSPHRASE="$(<"$pass_file")" + BORG_ENCRYPT_MODE="repokey" + return 0 + fi + + dialog --backtitle "ProxMenux" --title "$(hb_translate "Borg encryption")" \ + --yesno "$(hb_translate "Encrypt this Borg repository?")" \ + "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0 + + local pass1 pass2 + while true; do + pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \ + "$(hb_translate "Borg passphrase:")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1 + pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \ + "$(hb_translate "Confirm Borg passphrase:")" \ + "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1 + [[ "$pass1" == "$pass2" ]] && break + dialog --backtitle "ProxMenux" \ + --msgbox "$(hb_translate "Passphrases do not match.")" 8 50 + done + + mkdir -p "$HB_STATE_DIR" + printf '%s' "$pass1" > "$pass_file" + chmod 600 "$pass_file" + export 
BORG_PASSPHRASE="$pass1" + export BORG_ENCRYPT_MODE="repokey" +} + +hb_select_borg_repo() { + local _borg_repo_var="$1" + local -n _borg_repo_ref="$_borg_repo_var" + local type + + type=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Borg repository location")" \ + --menu "\n$(hb_translate "Select repository destination:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "local" "$(hb_translate 'Local directory')" \ + "usb" "$(hb_translate 'Mounted external disk')" \ + "remote" "$(hb_translate 'Remote server via SSH')" \ + 3>&1 1>&2 2>&3) || return 1 + + unset BORG_RSH + case "$type" in + local) + _borg_repo_ref=$(dialog --backtitle "ProxMenux" \ + --inputbox "$(hb_translate "Borg repository path:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \ + 3>&1 1>&2 2>&3) || return 1 + mkdir -p "$_borg_repo_ref" 2>/dev/null || true + ;; + usb) + local mnt + mnt=$(hb_prompt_mounted_path "/mnt/backup") || return 1 + _borg_repo_ref="$mnt/borgbackup" + mkdir -p "$_borg_repo_ref" 2>/dev/null || true + ;; + remote) + local user host rpath ssh_key + user=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH user:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root" 3>&1 1>&2 2>&3) || return 1 + host=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH host or IP:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1 + rpath=$(dialog --backtitle "ProxMenux" \ + --inputbox "$(hb_translate "Remote repository path:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \ + 3>&1 1>&2 2>&3) || return 1 + if dialog --backtitle "ProxMenux" \ + --yesno "$(hb_translate "Use a custom SSH key?")" \ + "$HB_UI_YESNO_H" "$HB_UI_YESNO_W"; then + ssh_key=$(dialog --backtitle "ProxMenux" \ + --fselect "$HOME/.ssh/" 12 70 3>&1 1>&2 2>&3) || return 1 + export BORG_RSH="ssh -i $ssh_key -o StrictHostKeyChecking=accept-new" + fi + _borg_repo_ref="ssh://$user@$host/$rpath" + ;; + esac +} + +# 
========================================================== +# COMMON PROMPTS +# ========================================================== +hb_trim_dialog_value() { + local value="$1" + value="${value//$'\r'/}" + value="${value//$'\n'/}" + value="${value#"${value%%[![:space:]]*}"}" + value="${value%"${value##*[![:space:]]}"}" + printf '%s' "$value" +} + +hb_prompt_mounted_path() { + local default_path="${1:-/mnt/backup}" + local out + + out=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Mounted disk path")" \ + --inputbox "$(hb_translate "Path where the external disk is mounted:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$default_path" 3>&1 1>&2 2>&3) || return 1 + + out=$(hb_trim_dialog_value "$out") + [[ -n "$out" && -d "$out" ]] || { msg_error "$(hb_translate "Path does not exist.")"; return 1; } + if ! mountpoint -q "$out" 2>/dev/null; then + dialog --backtitle "ProxMenux" --title "$(hb_translate "Warning")" \ + --yesno "$(hb_translate "This path is not a registered mount point. 
Use it anyway?")" \ + "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 1 + fi + echo "$out" +} + +hb_prompt_dest_dir() { + local selection out + + selection=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Select destination")" \ + --menu "\n$(hb_translate "Choose where to save the backup:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default vzdump path)')" \ + "backup" "$(hb_translate '/backup')" \ + "local" "$(hb_translate 'Custom local directory')" \ + "usb" "$(hb_translate 'Mounted external disk')" \ + 3>&1 1>&2 2>&3) || return 1 + + case "$selection" in + vzdump) out="/var/lib/vz/dump" ;; + backup) out="/backup" ;; + local) + out=$(dialog --backtitle "ProxMenux" \ + --inputbox "$(hb_translate "Enter directory path:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1 + ;; + usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;; + esac + + out=$(hb_trim_dialog_value "$out") + [[ -n "$out" ]] || return 1 + mkdir -p "$out" || { msg_error "$(hb_translate "Cannot create:") $out"; return 1; } + echo "$out" +} + +hb_prompt_restore_source_dir() { + local choice out + + choice=$(dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "Restore source location")" \ + --menu "\n$(hb_translate "Where are the backup archives stored?")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \ + "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default)')" \ + "backup" "$(hb_translate '/backup')" \ + "usb" "$(hb_translate 'Mounted external disk')" \ + "custom" "$(hb_translate 'Custom path')" \ + 3>&1 1>&2 2>&3) || return 1 + + case "$choice" in + vzdump) out="/var/lib/vz/dump" ;; + backup) out="/backup" ;; + usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;; + custom) + out=$(dialog --backtitle "ProxMenux" \ + --inputbox "$(hb_translate "Enter path:")" \ + "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1 + ;; + esac + + 
out=$(hb_trim_dialog_value "$out") + [[ -n "$out" && -d "$out" ]] || { + msg_error "$(hb_translate "Directory does not exist.")" + return 1 + } + echo "$out" +} + +hb_prompt_local_archive() { + local base_dir="$1" + local title="${2:-$(hb_translate "Select backup archive")}" + local -a rows=() files=() menu=() + + # Single find pass using -printf: no per-file stat subprocesses. + # maxdepth 6 catches nested backup layouts commonly used in /var/lib/vz/dump. + mapfile -t rows < <( + find "$base_dir" -maxdepth 6 -type f \ + \( -name '*.tar.zst' -o -name '*.tar.gz' -o -name '*.tar' \) \ + -printf '%T@|%s|%p\n' 2>/dev/null \ + | sort -t'|' -k1,1nr \ + | head -200 + ) + + if [[ ${#rows[@]} -eq 0 ]]; then + local no_backups_msg + no_backups_msg="$(hb_translate "No backup archives were found in:") $base_dir"$'\n\n'"$(hb_translate "Select another source path and try again.")" + dialog --backtitle "ProxMenux" \ + --title "$(hb_translate "No backups found")" \ + --msgbox "$no_backups_msg" \ + 10 78 || true + return 1 + fi + + local i=1 row epoch size path date_str size_str label + for row in "${rows[@]}"; do + epoch="${row%%|*}"; row="${row#*|}" + size="${row%%|*}"; path="${row#*|}" + epoch="${epoch%%.*}" # drop sub-second fraction from %T@ + date_str=$(date -d "@$epoch" '+%Y-%m-%d %H:%M' 2>/dev/null || echo "-") + size_str=$(numfmt --to=iec-i --suffix=B "$size" 2>/dev/null || echo "${size}B") + label="${path#$base_dir/} $date_str $size_str" + files+=("$path"); menu+=("$i" "$label"); ((i++)) + done + + local choice + choice=$(dialog --backtitle "ProxMenux" --title "$title" \ + --menu "\n$(hb_translate "Detected backups — newest first:")" \ + "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1 + + echo "${files[$((choice-1))]}" +} + +# ========================================================== +# UTILITIES +# ========================================================== +hb_human_elapsed() { + local secs="$1" + if (( secs < 60 )); then 
printf '%ds' "$secs" + elif (( secs < 3600 )); then printf '%dm %ds' "$((secs/60))" "$((secs%60))" + else printf '%dh %dm' "$((secs/3600))" "$(( (secs%3600)/60 ))" + fi +} + +hb_file_size() { + local path="$1" + if [[ -f "$path" ]]; then + numfmt --to=iec-i --suffix=B "$(stat -c %s "$path" 2>/dev/null || echo 0)" 2>/dev/null \ + || du -sh "$path" 2>/dev/null | awk '{print $1}' + elif [[ -d "$path" ]]; then + du -sh "$path" 2>/dev/null | awk '{print $1}' + else + echo "-" + fi +} + +hb_show_log() { + local logfile="$1" title="${2:-$(hb_translate "Operation log")}" + [[ -f "$logfile" && -s "$logfile" ]] || return 0 + dialog --backtitle "ProxMenux" --exit-label "OK" \ + --title "$title" --textbox "$logfile" 26 110 || true +} + +hb_require_cmd() { + local cmd="$1" pkg="${2:-$1}" + command -v "$cmd" >/dev/null 2>&1 && return 0 + if command -v apt-get >/dev/null 2>&1; then + msg_warn "$(hb_translate "Installing dependency:") $pkg" + apt-get update -qq >/dev/null 2>&1 && apt-get install -y "$pkg" >/dev/null 2>&1 + fi + command -v "$cmd" >/dev/null 2>&1 +} diff --git a/scripts/backup_restore/run_scheduled_backup.sh b/scripts/backup_restore/run_scheduled_backup.sh new file mode 100644 index 00000000..73d86e7c --- /dev/null +++ b/scripts/backup_restore/run_scheduled_backup.sh @@ -0,0 +1,243 @@ +#!/bin/bash +# ========================================================== +# ProxMenux - Run Scheduled Host Backup Job +# ========================================================== + +set -u + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)" +LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts" +LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT" +BASE_DIR="/usr/local/share/proxmenux" +UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" + +if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then + LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL" + UTILS_FILE="$LOCAL_SCRIPTS/utils.sh" +elif [[ ! 
-f "$UTILS_FILE" ]]; then + UTILS_FILE="$BASE_DIR/utils.sh" +fi + +if [[ -f "$UTILS_FILE" ]]; then + # shellcheck source=/dev/null + source "$UTILS_FILE" +else + echo "ERROR: utils.sh not found" >&2 + exit 1 +fi + +LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh" +[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh" +if [[ -f "$LIB_FILE" ]]; then + # shellcheck source=/dev/null + source "$LIB_FILE" +else + echo "ERROR: lib_host_backup_common.sh not found" >&2 + exit 1 +fi + +JOBS_DIR="${PMX_BACKUP_JOBS_DIR:-/var/lib/proxmenux/backup-jobs}" +LOG_DIR="${PMX_BACKUP_LOG_DIR:-/var/log/proxmenux/backup-jobs}" +LOCK_DIR="${PMX_BACKUP_LOCK_DIR:-/var/lock}" +mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true + +_sb_prune_local() { + local job_id="$1" + local dest_dir="$2" + local ext="$3" # tar.zst or tar.gz + local keep_last="${KEEP_LAST:-0}" + + local -a files=() + mapfile -t files < <(find "$dest_dir" -maxdepth 1 -type f -name "${job_id}-*.${ext}" | sort -r) + [[ ${#files[@]} -eq 0 ]] && return 0 + + if [[ "$keep_last" =~ ^[0-9]+$ ]] && (( keep_last > 0 )); then + local idx=0 + for f in "${files[@]}"; do + idx=$((idx+1)) + (( idx <= keep_last )) && continue + rm -f "$f" || true + done + fi +} + +_sb_run_local() { + local stage_root="$1" + local job_id="$2" + local ts="$3" + local dest_dir="$4" + local archive_ext="${LOCAL_ARCHIVE_EXT:-tar.zst}" + local archive="${dest_dir}/${job_id}-${ts}.${archive_ext}" + + mkdir -p "$dest_dir" || return 1 + + if [[ "$archive_ext" == "tar.zst" ]] && command -v zstd >/dev/null 2>&1; then + tar --zstd -cf "$archive" -C "$stage_root" . >/dev/null 2>&1 || return 1 + else + archive="${dest_dir}/${job_id}-${ts}.tar.gz" + tar -czf "$archive" -C "$stage_root" . 
>/dev/null 2>&1 || return 1
+        archive_ext="tar.gz"
+    fi
+
+    _sb_prune_local "$job_id" "$dest_dir" "$archive_ext"
+    echo "LOCAL_ARCHIVE=$archive"
+    return 0
+}
+
+# Push the staged tree into a Borg repository and apply retention.
+# Requires BORG_REPO and BORG_PASSPHRASE from the job environment;
+# returns 1 (without output) when either is missing or any step fails.
+_sb_run_borg() {
+    local stage_root="$1"
+    local archive_name="$2"
+    local borg_bin repo passphrase
+
+    borg_bin=$(hb_ensure_borg) || return 1
+    repo="${BORG_REPO:-}"
+    passphrase="${BORG_PASSPHRASE:-}"
+    [[ -z "$repo" || -z "$passphrase" ]] && return 1
+
+    export BORG_PASSPHRASE="$passphrase"
+
+    if ! hb_borg_init_if_needed "$borg_bin" "$repo" "${BORG_ENCRYPT_MODE:-none}" >/dev/null 2>&1; then
+        return 1
+    fi
+
+    (cd "$stage_root" && "$borg_bin" create --stats \
+        "${repo}::${archive_name}" rootfs metadata) >/dev/null 2>&1 || return 1
+
+    # Retention is best effort: a failed prune must not fail the backup.
+    "$borg_bin" prune -v --list "$repo" \
+        ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \
+        ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \
+        ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \
+        ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \
+        ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \
+        ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \
+        >/dev/null 2>&1 || true
+
+    echo "BORG_ARCHIVE=${archive_name}"
+    return 0
+}
+
+# Push the staged rootfs to PBS as a host-type snapshot, then prune the group.
+_sb_run_pbs() {
+    local stage_root="$1"
+    local backup_id="$2"
+    local epoch="$3"
+
+    # Validate required environment BEFORE any unguarded expansion:
+    # this script runs under 'set -u', so referencing an unset
+    # PBS_REPOSITORY while building the command array below would abort
+    # the whole run instead of returning 1 gracefully.
+    [[ -z "${PBS_REPOSITORY:-}" || -z "${PBS_PASSWORD:-}" ]] && return 1
+
+    local -a cmd=(
+        proxmox-backup-client backup
+        "hostcfg.pxar:${stage_root}/rootfs"
+        --repository "$PBS_REPOSITORY"
+        --backup-type host
+        --backup-id "$backup_id"
+        --backup-time "$epoch"
+    )
+    if [[ -n "${PBS_KEYFILE:-}" ]]; then
+        cmd+=(--keyfile "$PBS_KEYFILE")
+    fi
+
+    env PBS_PASSWORD="$PBS_PASSWORD" PBS_ENCRYPTION_PASSWORD="${PBS_ENCRYPTION_PASSWORD:-}" \
+        "${cmd[@]}" >/dev/null 2>&1 || return 1
+
+    # Best effort prune for PBS group.
+ proxmox-backup-client prune "host/${backup_id}" --repository "$PBS_REPOSITORY" \ + ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \ + ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \ + ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \ + ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \ + ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \ + ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \ + >/dev/null 2>&1 || true + + echo "PBS_SNAPSHOT=host/${backup_id}/${epoch}" + return 0 +} + +main() { + local job_id="${1:-}" + [[ -z "$job_id" ]] && { echo "Usage: $0 " >&2; exit 1; } + + local job_file="${JOBS_DIR}/${job_id}.env" + [[ -f "$job_file" ]] || { echo "Job not found: $job_id" >&2; exit 1; } + + # shellcheck source=/dev/null + source "$job_file" + + local lock_file="${LOCK_DIR}/proxmenux-backup-${job_id}.lock" + if command -v flock >/dev/null 2>&1; then + exec 9>"$lock_file" || exit 1 + if ! flock -n 9; then + echo "Another run is active for job ${job_id}" >&2 + exit 1 + fi + fi + + local ts log_file stage_root summary_file + ts="$(date +%Y%m%d_%H%M%S)" + log_file="${LOG_DIR}/${job_id}-${ts}.log" + summary_file="${LOG_DIR}/${job_id}-last.status" + stage_root="$(mktemp -d /tmp/proxmenux-sched-stage.XXXXXX)" + + { + echo "JOB_ID=${job_id}" + echo "RUN_AT=$(date -Iseconds)" + echo "BACKEND=${BACKEND:-}" + echo "PROFILE_MODE=${PROFILE_MODE:-default}" + } >"$summary_file" + + { + echo "=== Scheduled backup job ${job_id} started at $(date -Iseconds) ===" + echo "Backend: ${BACKEND:-}" + } >"$log_file" + + local -a paths=() + if [[ "${PROFILE_MODE:-default}" == "custom" && -f "${JOBS_DIR}/${job_id}.paths" ]]; then + mapfile -t paths < "${JOBS_DIR}/${job_id}.paths" + else + mapfile -t paths < <(hb_default_profile_paths) + fi + + if [[ ${#paths[@]} -eq 0 ]]; then + echo "No paths configured for job" >>"$log_file" + echo "RESULT=failed" >>"$summary_file" + rm -rf "$stage_root" + exit 1 + fi + + hb_prepare_staging "$stage_root" "${paths[@]}" >>"$log_file" 2>&1 + + local rc=1 + case "${BACKEND:-}" in 
+ local) + _sb_run_local "$stage_root" "$job_id" "$ts" "${LOCAL_DEST_DIR:-/var/lib/vz/dump}" >>"$log_file" 2>&1 + rc=$? + ;; + borg) + _sb_run_borg "$stage_root" "${job_id}-${ts}" >>"$log_file" 2>&1 + rc=$? + ;; + pbs) + _sb_run_pbs "$stage_root" "${PBS_BACKUP_ID:-hostcfg-$(hostname)}" "$(date +%s)" >>"$log_file" 2>&1 + rc=$? + ;; + *) + echo "Unknown backend: ${BACKEND:-}" >>"$log_file" + rc=1 + ;; + esac + + rm -rf "$stage_root" + + if [[ $rc -eq 0 ]]; then + echo "RESULT=ok" >>"$summary_file" + echo "LOG_FILE=${log_file}" >>"$summary_file" + echo "=== Job finished OK at $(date -Iseconds) ===" >>"$log_file" + exit 0 + else + echo "RESULT=failed" >>"$summary_file" + echo "LOG_FILE=${log_file}" >>"$summary_file" + echo "=== Job finished with errors at $(date -Iseconds) ===" >>"$log_file" + exit 1 + fi +} + +main "$@" diff --git a/scripts/backup_restore/test_backup_restore.sh b/scripts/backup_restore/test_backup_restore.sh new file mode 100644 index 00000000..64fc7a29 --- /dev/null +++ b/scripts/backup_restore/test_backup_restore.sh @@ -0,0 +1,284 @@ +#!/bin/bash +# ========================================================== +# ProxMenux - Backup/Restore Test Matrix (non-destructive) +# ========================================================== + +set -u + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +RUNNER="${SCRIPT_DIR}/run_scheduled_backup.sh" +APPLY_ONBOOT="${SCRIPT_DIR}/apply_pending_restore.sh" +HOST_SCRIPT="${SCRIPT_DIR}/backup_host.sh" +LIB_SCRIPT="${SCRIPT_DIR}/lib_host_backup_common.sh" +SCHED_SCRIPT="${SCRIPT_DIR}/backup_scheduler.sh" + +KEEP_TMP=0 +if [[ "${1:-}" == "--keep-tmp" ]]; then + KEEP_TMP=1 +fi + +TMP_ROOT="$(mktemp -d /tmp/proxmenux-brtest.XXXXXX)" +REPORT_FILE="/tmp/proxmenux-backup-restore-test-$(date +%Y%m%d_%H%M%S).log" + +PASS=0 +FAIL=0 +SKIP=0 + +log() { + echo "$*" | tee -a "$REPORT_FILE" +} + +pass() { + PASS=$((PASS + 1)) + log "[PASS] $*" +} + +fail() { + FAIL=$((FAIL + 1)) + log "[FAIL] $*" +} + +skip() { + 
SKIP=$((SKIP + 1)) + log "[SKIP] $*" +} + +cleanup() { + if [[ "$KEEP_TMP" -eq 0 ]]; then + rm -rf "$TMP_ROOT" + else + log "[INFO] Temp root preserved: $TMP_ROOT" + fi +} +trap cleanup EXIT + +assert_file_contains() { + local file="$1" + local needle="$2" + if [[ -f "$file" ]] && grep -q "$needle" "$file"; then + return 0 + fi + return 1 +} + +run_cmd_expect_ok() { + local desc="$1" + shift + if "$@" >>"$REPORT_FILE" 2>&1; then + pass "$desc" + return 0 + fi + fail "$desc" + return 1 +} + +run_cmd_expect_fail() { + local desc="$1" + shift + if "$@" >>"$REPORT_FILE" 2>&1; then + fail "$desc" + return 1 + fi + pass "$desc" + return 0 +} + +syntax_tests() { + log "\n=== Syntax checks ===" + run_cmd_expect_ok "bash -n backup_host.sh" bash -n "$HOST_SCRIPT" + run_cmd_expect_ok "bash -n lib_host_backup_common.sh" bash -n "$LIB_SCRIPT" + run_cmd_expect_ok "bash -n backup_scheduler.sh" bash -n "$SCHED_SCRIPT" + run_cmd_expect_ok "bash -n run_scheduled_backup.sh" bash -n "$RUNNER" + run_cmd_expect_ok "bash -n apply_pending_restore.sh" bash -n "$APPLY_ONBOOT" +} + +scheduler_e2e_tests() { + log "\n=== Scheduler E2E (sandbox) ===" + if ! help mapfile >/dev/null 2>&1; then + skip "Scheduler E2E skipped: current bash does not provide mapfile (requires bash >= 4)." 
+ return + fi + + local jobs_dir="$TMP_ROOT/backup-jobs" + local logs_dir="$TMP_ROOT/backup-jobs-logs" + local lock_dir="$TMP_ROOT/locks" + local archives_dir="$TMP_ROOT/archives" + + mkdir -p "$jobs_dir" "$logs_dir" "$lock_dir" "$archives_dir" + + cat > "$jobs_dir/t1.env" < "$jobs_dir/t1.paths" <>"$REPORT_FILE" 2>&1; then + : + else + fail "Runner execution #$i for t1" + return + fi + sleep 1 + done + + local archive_count + archive_count="$(find "$archives_dir" -maxdepth 1 -type f -name 't1-*.tar.gz' | wc -l | tr -d ' ')" + if [[ "$archive_count" == "2" ]]; then + pass "Retention KEEP_LAST=2 keeps exactly 2 archives" + else + fail "Retention expected 2 archives, got $archive_count" + fi + + if assert_file_contains "$logs_dir/t1-last.status" "RESULT=ok"; then + pass "t1-last.status reports RESULT=ok" + else + fail "t1-last.status does not report RESULT=ok" + fi + + cat > "$jobs_dir/tbad.env" < "$jobs_dir/tbad.paths" + + run_cmd_expect_fail "Invalid backend fails" \ + env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \ + bash "$RUNNER" tbad + + if assert_file_contains "$logs_dir/tbad-last.status" "RESULT=failed"; then + pass "tbad-last.status reports RESULT=failed" + else + fail "tbad-last.status does not report RESULT=failed" + fi + + cat > "$jobs_dir/tempty.env" < "$jobs_dir/tempty.paths" + + run_cmd_expect_fail "Empty paths fails" \ + env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \ + bash "$RUNNER" tempty + + if assert_file_contains "$logs_dir/tempty-last.status" "RESULT=failed"; then + pass "tempty-last.status reports RESULT=failed" + else + fail "tempty-last.status does not report RESULT=failed" + fi +} + +pending_restore_tests() { + log "\n=== Pending restore E2E (sandbox) ===" + local pending_base="$TMP_ROOT/restore-pending" + local logs_dir="$TMP_ROOT/restore-logs" + local target_root="$TMP_ROOT/target" + local pre_backup_base="$TMP_ROOT/pre-restore" + 
local recovery_base="$TMP_ROOT/recovery" + + mkdir -p "$pending_base/r1/rootfs/etc/pve" "$pending_base/r1/rootfs/etc/zfs" "$pending_base/r1/rootfs/etc" "$target_root/etc" + + echo "new-value" > "$pending_base/r1/rootfs/etc/test.conf" + echo "cluster-data" > "$pending_base/r1/rootfs/etc/pve/cluster.cfg" + echo "zfs-data" > "$pending_base/r1/rootfs/etc/zfs/zpool.cache" + echo "old-value" > "$target_root/etc/test.conf" + + cat > "$pending_base/r1/apply-on-boot.list" < "$pending_base/r1/plan.env" <>"$REPORT_FILE" 2>&1; then + pass "apply_pending_restore completes" + else + fail "apply_pending_restore completes" + return + fi + + if assert_file_contains "$target_root/etc/test.conf" "new-value"; then + pass "Regular file restored into target prefix" + else + fail "Regular file was not restored" + fi + + if [[ -e "$target_root/etc/pve/cluster.cfg" ]]; then + fail "Cluster file should not be restored live" + else + pass "Cluster file skipped from live restore" + fi + + if find "$recovery_base" -type f -name cluster.cfg 2>/dev/null | grep -q .; then + pass "Cluster file extracted to recovery directory" + else + fail "Cluster file not found in recovery directory" + fi + + if assert_file_contains "$pending_base/completed/r1/state" "completed"; then + pass "Pending restore state marked completed" + else + fail "Pending restore state not marked completed" + fi + + if [[ -e "$pending_base/current" ]]; then + fail "current symlink should be removed" + else + pass "current symlink removed" + fi +} + +main() { + log "ProxMenux backup/restore test matrix" + log "Report: $REPORT_FILE" + log "Temp root: $TMP_ROOT" + + syntax_tests + scheduler_e2e_tests + pending_restore_tests + + log "\n=== Summary ===" + log "PASS=$PASS" + log "FAIL=$FAIL" + log "SKIP=$SKIP" + + if [[ "$FAIL" -eq 0 ]]; then + log "RESULT=OK" + exit 0 + else + log "RESULT=FAILED" + exit 1 + fi +} + +main "$@" diff --git a/scripts/storage/smart-disk-test.sh b/scripts/storage/smart-disk-test.sh index 80564ec9..5dd3b8f5 
100644 --- a/scripts/storage/smart-disk-test.sh +++ b/scripts/storage/smart-disk-test.sh @@ -70,7 +70,42 @@ _smart_disk_label() { _smart_json_path() { local disk="$1" - echo "${SMART_DIR}/$(basename "$disk").json" + local test_type="${2:-short}" + local disk_name + disk_name=$(basename "$disk") + local disk_dir="${SMART_DIR}/${disk_name}" + local timestamp + timestamp=$(date +%Y-%m-%dT%H-%M-%S) + + # Create disk directory if it doesn't exist + mkdir -p "$disk_dir" + + echo "${disk_dir}/${timestamp}_${test_type}.json" +} + +_smart_get_latest_json() { + local disk="$1" + local disk_name + disk_name=$(basename "$disk") + local disk_dir="${SMART_DIR}/${disk_name}" + + if [[ -d "$disk_dir" ]]; then + # Get most recent JSON file (sorted by name = sorted by timestamp) + ls -1 "${disk_dir}"/*.json 2>/dev/null | sort -r | head -1 + fi +} + +_smart_cleanup_old_jsons() { + local disk="$1" + local retention="${2:-10}" # Default: keep last 10 + local disk_name + disk_name=$(basename "$disk") + local disk_dir="${SMART_DIR}/${disk_name}" + + if [[ -d "$disk_dir" && "$retention" -gt 0 ]]; then + # List all JSON files sorted by name (oldest last), skip first $retention, delete rest + ls -1 "${disk_dir}"/*.json 2>/dev/null | sort -r | tail -n +$((retention + 1)) | xargs -r rm -f + fi } _smart_ensure_packages() { @@ -146,7 +181,7 @@ while true; do DISK_SIZE=$(lsblk -dn -o SIZE "$SELECTED_DISK" 2>/dev/null | xargs) if ! 
dialog --backtitle "$BACKTITLE" \ --title "$(translate 'Long Test — Background')" \ - --yesno "\n$(translate 'The long test runs directly on the disk hardware.')\n\n$(translate 'Disk:') $SELECTED_DISK ($DISK_SIZE)\n\n$(translate 'The test will continue even if you close this terminal.')\n$(translate 'Results will be saved automatically to:')\n$(_smart_json_path "$SELECTED_DISK")\n\n$(translate 'Start long test now?')" \ + --yesno "\n$(translate 'The long test runs directly on the disk hardware.')\n\n$(translate 'Disk:') $SELECTED_DISK ($DISK_SIZE)\n\n$(translate 'The test will continue even if you close this terminal.')\n$(translate 'Results will be saved automatically to:')\n$(_smart_json_path "$SELECTED_DISK" "long")\n\n$(translate 'Start long test now?')" \ 16 $UI_RESULT_W; then continue fi @@ -253,9 +288,10 @@ while true; do fi ;; - # ── Long test (background) ────────────────────────────── - long) - JSON_PATH=$(_smart_json_path "$SELECTED_DISK") + # ── Long test (background) ────────────────────────────── + long) + JSON_PATH=$(_smart_json_path "$SELECTED_DISK" "long") + _smart_cleanup_old_jsons "$SELECTED_DISK" DISK_SAFE=$(printf '%q' "$SELECTED_DISK") JSON_SAFE=$(printf '%q' "$JSON_PATH") @@ -309,7 +345,7 @@ while true; do while smartctl -c ${DISK_SAFE} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do sleep 60 done - smartctl --json=c ${DISK_SAFE} > ${JSON_SAFE} 2>/dev/null + smartctl -a --json=c ${DISK_SAFE} > ${JSON_SAFE} 2>/dev/null # Send notification when test completes if [[ -f \"${NOTIFY_SCRIPT}\" ]]; then @@ -380,11 +416,17 @@ while true; do # ── Auto-export JSON (except long — handled by background monitor) if [[ "$ACTION" != "long" && "$ACTION" != "report" ]]; then - JSON_PATH=$(_smart_json_path "$SELECTED_DISK") + # Determine test type from ACTION (short test or status check) + local json_test_type="short" + [[ "$ACTION" == "status" ]] && json_test_type="status" + + JSON_PATH=$(_smart_json_path 
"$SELECTED_DISK" "$json_test_type") + _smart_cleanup_old_jsons "$SELECTED_DISK" + if _smart_is_nvme "$SELECTED_DISK"; then nvme smart-log -o json "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null else - smartctl --json=c "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null + smartctl -a --json=c "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null fi [[ -s "$JSON_PATH" ]] || rm -f "$JSON_PATH" fi diff --git a/scripts/storage/smart-scheduled-test.sh b/scripts/storage/smart-scheduled-test.sh new file mode 100644 index 00000000..9e0dd169 --- /dev/null +++ b/scripts/storage/smart-scheduled-test.sh @@ -0,0 +1,195 @@ +#!/bin/bash + +# ========================================================== +# ProxMenux - SMART Scheduled Test Runner +# ========================================================== +# Author : MacRimi +# Copyright : (c) 2024 MacRimi +# License : GPL-3.0 +# Version : 1.0 +# Last Updated: 13/04/2026 +# ========================================================== +# Description: +# Runs scheduled SMART tests based on configuration. +# Called by cron jobs created by ProxMenux Monitor. 
+# ========================================================== + +# Configuration +SMART_DIR="/usr/local/share/proxmenux/smart" +LOG_DIR="/var/log/proxmenux" +SCRIPT_NAME="smart-scheduled-test" + +# Ensure log directory exists +mkdir -p "$LOG_DIR" + +# Logging function +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] [$SCRIPT_NAME] $1" +} + +# Parse arguments +SCHEDULE_ID="" +TEST_TYPE="short" +RETENTION=10 +DISKS="" + +while [[ $# -gt 0 ]]; do + case $1 in + --schedule-id) + SCHEDULE_ID="$2" + shift 2 + ;; + --test-type) + TEST_TYPE="$2" + shift 2 + ;; + --retention) + RETENTION="$2" + shift 2 + ;; + --disks) + DISKS="$2" + shift 2 + ;; + *) + shift + ;; + esac +done + +log "Starting scheduled SMART test: schedule=$SCHEDULE_ID, type=$TEST_TYPE, retention=$RETENTION" + +# Helper functions +_is_nvme() { + [[ "$1" == *nvme* ]] +} + +_get_json_path() { + local disk="$1" + local test_type="$2" + local disk_name + disk_name=$(basename "$disk") + local disk_dir="${SMART_DIR}/${disk_name}" + local timestamp + timestamp=$(date +%Y-%m-%dT%H-%M-%S) + + mkdir -p "$disk_dir" + echo "${disk_dir}/${timestamp}_${test_type}.json" +} + +_cleanup_old_jsons() { + local disk="$1" + local retention="$2" + local disk_name + disk_name=$(basename "$disk") + local disk_dir="${SMART_DIR}/${disk_name}" + + if [[ -d "$disk_dir" && "$retention" -gt 0 ]]; then + ls -1 "${disk_dir}"/*.json 2>/dev/null | sort -r | tail -n +$((retention + 1)) | xargs -r rm -f + fi +} + +_run_test() { + local disk="$1" + local test_type="$2" + local json_path="$3" + + log "Running $test_type test on $disk" + + if _is_nvme "$disk"; then + # NVMe test + local code=1 + [[ "$test_type" == "long" ]] && code=2 + + nvme device-self-test "$disk" --self-test-code=$code 2>/dev/null + if [[ $? 
-ne 0 ]]; then + log "ERROR: Failed to start NVMe test on $disk" + return 1 + fi + + # Wait for test to complete + local sleep_interval=10 + [[ "$test_type" == "long" ]] && sleep_interval=60 + + sleep 5 + while true; do + local op + op=$(nvme self-test-log "$disk" -o json 2>/dev/null | grep -o '"Current Device Self-Test Operation":[0-9]*' | grep -o '[0-9]*$') + [[ -z "$op" || "$op" -eq 0 ]] && break + sleep $sleep_interval + done + + # Save results + nvme smart-log -o json "$disk" > "$json_path" 2>/dev/null + else + # SATA/SAS test + local test_flag="-t short" + [[ "$test_type" == "long" ]] && test_flag="-t long" + + smartctl $test_flag "$disk" 2>/dev/null + if [[ $? -ne 0 && $? -ne 4 ]]; then + log "ERROR: Failed to start SMART test on $disk" + return 1 + fi + + # Wait for test to complete + local sleep_interval=10 + [[ "$test_type" == "long" ]] && sleep_interval=60 + + sleep 5 + while smartctl -c "$disk" 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do + sleep $sleep_interval + done + + # Save results + smartctl -a --json=c "$disk" > "$json_path" 2>/dev/null + fi + + log "Test completed on $disk, results saved to $json_path" + return 0 +} + +# Get list of disks to test +get_disk_list() { + if [[ -n "$DISKS" && "$DISKS" != "all" ]]; then + # Use specified disks + echo "$DISKS" | tr ',' '\n' + else + # Get all physical disks + lsblk -dpno NAME,TYPE 2>/dev/null | awk '$2=="disk"{print $1}' + fi +} + +# Main execution +DISK_LIST=$(get_disk_list) +TOTAL_DISKS=$(echo "$DISK_LIST" | wc -l) +SUCCESS_COUNT=0 +FAIL_COUNT=0 + +log "Found $TOTAL_DISKS disk(s) to test" + +for disk in $DISK_LIST; do + # Skip if disk doesn't exist + if [[ ! 
-b "$disk" ]]; then + log "WARNING: Disk $disk not found, skipping" + continue + fi + + # Get JSON path and cleanup old files + JSON_PATH=$(_get_json_path "$disk" "$TEST_TYPE") + _cleanup_old_jsons "$disk" "$RETENTION" + + # Run the test + if _run_test "$disk" "$TEST_TYPE" "$JSON_PATH"; then + ((SUCCESS_COUNT++)) + else + ((FAIL_COUNT++)) + fi +done + +log "Scheduled test complete: $SUCCESS_COUNT succeeded, $FAIL_COUNT failed" + +# TODO: Send notification if configured +# This would integrate with the notification system + +exit 0