update storage-overview.tsx

This commit is contained in:
MacRimi
2026-04-13 14:49:48 +02:00
parent 44aefb5d3b
commit 2344935357
10 changed files with 4798 additions and 884 deletions

View File

@@ -2,7 +2,7 @@
import { useEffect, useState } from "react" import { useEffect, useState } from "react"
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card" import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"
import { HardDrive, Database, AlertTriangle, CheckCircle2, XCircle, Square, Thermometer, Archive, Info, Clock, Usb, Server, Activity, FileText, Play, Loader2, Download } from "lucide-react" import { HardDrive, Database, AlertTriangle, CheckCircle2, XCircle, Square, Thermometer, Archive, Info, Clock, Usb, Server, Activity, FileText, Play, Loader2, Download, Plus, Trash2, Settings } from "lucide-react"
import { Badge } from "@/components/ui/badge" import { Badge } from "@/components/ui/badge"
import { Progress } from "@/components/ui/progress" import { Progress } from "@/components/ui/progress"
import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog" import { Dialog, DialogContent, DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
@@ -121,7 +121,15 @@ export function StorageOverview() {
const [detailsOpen, setDetailsOpen] = useState(false) const [detailsOpen, setDetailsOpen] = useState(false)
const [diskObservations, setDiskObservations] = useState<DiskObservation[]>([]) const [diskObservations, setDiskObservations] = useState<DiskObservation[]>([])
const [loadingObservations, setLoadingObservations] = useState(false) const [loadingObservations, setLoadingObservations] = useState(false)
const [activeModalTab, setActiveModalTab] = useState<"overview" | "smart">("overview") const [activeModalTab, setActiveModalTab] = useState<"overview" | "smart" | "schedule">("overview")
const [smartJsonData, setSmartJsonData] = useState<{
has_data: boolean
data?: Record<string, unknown>
timestamp?: string
test_type?: string
history?: Array<{ filename: string; timestamp: string; test_type: string; date_readable: string }>
} | null>(null)
const [loadingSmartJson, setLoadingSmartJson] = useState(false)
const fetchStorageData = async () => { const fetchStorageData = async () => {
try { try {
@@ -269,21 +277,46 @@ export function StorageOverview() {
setSelectedDisk(disk) setSelectedDisk(disk)
setDetailsOpen(true) setDetailsOpen(true)
setDiskObservations([]) setDiskObservations([])
setSmartJsonData(null)
// Always attempt to fetch observations -- the count enrichment may lag // Fetch observations and SMART JSON data in parallel
// behind the actual observation recording (especially for USB disks).
setLoadingObservations(true) setLoadingObservations(true)
try { setLoadingSmartJson(true)
const params = new URLSearchParams()
if (disk.name) params.set('device', disk.name) // Fetch observations
if (disk.serial && disk.serial !== 'Unknown') params.set('serial', disk.serial) const fetchObservations = async () => {
const data = await fetchApi<{ observations: DiskObservation[] }>(`/api/storage/observations?${params.toString()}`) try {
setDiskObservations(data.observations || []) const params = new URLSearchParams()
} catch { if (disk.name) params.set('device', disk.name)
setDiskObservations([]) if (disk.serial && disk.serial !== 'Unknown') params.set('serial', disk.serial)
} finally { const data = await fetchApi<{ observations: DiskObservation[] }>(`/api/storage/observations?${params.toString()}`)
setLoadingObservations(false) setDiskObservations(data.observations || [])
} catch {
setDiskObservations([])
} finally {
setLoadingObservations(false)
}
} }
// Fetch SMART JSON data from real test if available
const fetchSmartJson = async () => {
try {
const data = await fetchApi<{
has_data: boolean
data?: Record<string, unknown>
timestamp?: string
test_type?: string
}>(`/api/storage/smart/${disk.name}/latest`)
setSmartJsonData(data)
} catch {
setSmartJsonData({ has_data: false })
} finally {
setLoadingSmartJson(false)
}
}
// Run both in parallel
await Promise.all([fetchObservations(), fetchSmartJson()])
} }
const formatObsDate = (iso: string) => { const formatObsDate = (iso: string) => {
@@ -1205,7 +1238,10 @@ export function StorageOverview() {
{/* Disk Details Dialog */} {/* Disk Details Dialog */}
<Dialog open={detailsOpen} onOpenChange={(open) => { <Dialog open={detailsOpen} onOpenChange={(open) => {
setDetailsOpen(open) setDetailsOpen(open)
if (!open) setActiveModalTab("overview") if (!open) {
setActiveModalTab("overview")
setSmartJsonData(null)
}
}}> }}>
<DialogContent className="max-w-2xl max-h-[80vh] sm:max-h-[85vh] overflow-hidden flex flex-col p-0"> <DialogContent className="max-w-2xl max-h-[80vh] sm:max-h-[85vh] overflow-hidden flex flex-col p-0">
<DialogHeader className="px-6 pt-6 pb-0"> <DialogHeader className="px-6 pt-6 pb-0">
@@ -1255,6 +1291,17 @@ export function StorageOverview() {
<Activity className="h-4 w-4" /> <Activity className="h-4 w-4" />
SMART Test SMART Test
</button> </button>
<button
onClick={() => setActiveModalTab("schedule")}
className={`flex items-center gap-2 px-4 py-2.5 text-sm font-medium transition-colors border-b-2 -mb-px ${
activeModalTab === "schedule"
? "border-purple-500 text-purple-500"
: "border-transparent text-muted-foreground hover:text-foreground"
}`}
>
<Clock className="h-4 w-4" />
Schedule
</button>
</div> </div>
{/* Tab Content */} {/* Tab Content */}
@@ -1389,6 +1436,153 @@ export function StorageOverview() {
</div> </div>
</div> </div>
{/* SMART Test Data Section (from real test JSON) */}
{(loadingSmartJson || smartJsonData?.has_data) && (
<div className="border-t pt-4">
<h4 className="font-semibold mb-3 flex items-center gap-2">
<Activity className="h-4 w-4 text-green-400" />
SMART Test Data
{smartJsonData?.has_data && (
<Badge className="bg-green-500/10 text-green-400 border-green-500/20 text-[10px] px-1.5">
Real Test
</Badge>
)}
</h4>
{loadingSmartJson ? (
<div className="flex items-center gap-2 text-sm text-muted-foreground py-2">
<div className="h-4 w-4 rounded-full border-2 border-transparent border-t-green-400 animate-spin" />
Loading SMART test data...
</div>
) : smartJsonData?.has_data && smartJsonData.data ? (
<div className="space-y-3">
{/* Last Test Info */}
<div className="grid grid-cols-2 gap-4">
<div>
<p className="text-sm text-muted-foreground">Last Test Date</p>
<p className="font-medium">
{smartJsonData.timestamp
? new Date(smartJsonData.timestamp).toLocaleString()
: 'Unknown'}
</p>
</div>
<div>
<p className="text-sm text-muted-foreground">Test Type</p>
<p className="font-medium capitalize">{smartJsonData.test_type || 'Unknown'}</p>
</div>
</div>
{/* SSD Life Estimation from JSON (if available) */}
{(() => {
const data = smartJsonData.data as Record<string, unknown>
const ataAttrs = data?.ata_smart_attributes as { table?: Array<{ id: number; name: string; value: number; raw?: { value: number } }> }
const table = ataAttrs?.table || []
// Look for wear-related attributes
const wearAttr = table.find(a =>
a.name?.toLowerCase().includes('wear_leveling') ||
a.name?.toLowerCase().includes('media_wearout') ||
a.name?.toLowerCase().includes('percent_lifetime') ||
a.id === 177 || a.id === 231 || a.id === 233
)
// Look for total LBAs written
const lbasAttr = table.find(a =>
a.name?.toLowerCase().includes('total_lbas_written') ||
a.id === 241
)
if (wearAttr || lbasAttr) {
return (
<div className="bg-green-500/5 border border-green-500/20 rounded-lg p-3">
<p className="text-xs text-green-400 mb-2 font-medium">From Real SMART Test</p>
<div className="grid grid-cols-2 gap-4">
{wearAttr && (
<div>
<p className="text-sm text-muted-foreground">{wearAttr.name?.replace(/_/g, ' ')}</p>
<p className={`font-medium ${wearAttr.value < 50 ? 'text-red-400' : wearAttr.value < 80 ? 'text-yellow-400' : 'text-green-400'}`}>
{wearAttr.value}%
</p>
</div>
)}
{lbasAttr && lbasAttr.raw?.value && (
<div>
<p className="text-sm text-muted-foreground">Total Data Written</p>
<p className="font-medium">
{(() => {
const tbWritten = (lbasAttr.raw.value * 512) / (1024 ** 4)
return tbWritten >= 1
? `${tbWritten.toFixed(2)} TB`
: `${(tbWritten * 1024).toFixed(2)} GB`
})()}
</p>
</div>
)}
</div>
</div>
)
}
// For NVMe, check nvme_smart_health_information_log
const nvmeHealth = data?.nvme_smart_health_information_log as Record<string, unknown>
if (nvmeHealth) {
const percentUsed = nvmeHealth.percentage_used as number
const dataUnitsWritten = nvmeHealth.data_units_written as number
const availableSpare = nvmeHealth.available_spare as number
return (
<div className="bg-green-500/5 border border-green-500/20 rounded-lg p-3">
<p className="text-xs text-green-400 mb-2 font-medium">From Real SMART Test (NVMe)</p>
<div className="grid grid-cols-2 gap-4">
{percentUsed !== undefined && (
<div>
<p className="text-sm text-muted-foreground">Percent Used</p>
<p className={`font-medium ${percentUsed > 80 ? 'text-red-400' : percentUsed > 50 ? 'text-yellow-400' : 'text-green-400'}`}>
{percentUsed}%
</p>
</div>
)}
{availableSpare !== undefined && (
<div>
<p className="text-sm text-muted-foreground">Available Spare</p>
<p className={`font-medium ${availableSpare < 20 ? 'text-red-400' : availableSpare < 50 ? 'text-yellow-400' : 'text-green-400'}`}>
{availableSpare}%
</p>
</div>
)}
{dataUnitsWritten !== undefined && (
<div>
<p className="text-sm text-muted-foreground">Total Data Written</p>
<p className="font-medium">
{(() => {
const tbWritten = (dataUnitsWritten * 512000) / (1024 ** 4)
return tbWritten >= 1
? `${tbWritten.toFixed(2)} TB`
: `${(tbWritten * 1024).toFixed(2)} GB`
})()}
</p>
</div>
)}
</div>
</div>
)
}
return null
})()}
<p className="text-xs text-muted-foreground">
Run a SMART test in the SMART Test tab for more detailed analysis.
</p>
</div>
) : (
<div className="text-sm text-muted-foreground">
<p>No SMART test data available for this disk.</p>
<p className="text-xs mt-1">Run a SMART test in the SMART Test tab to get detailed health information.</p>
</div>
)}
</div>
)}
{/* Observations Section */} {/* Observations Section */}
{(diskObservations.length > 0 || loadingObservations) && ( {(diskObservations.length > 0 || loadingObservations) && (
<div className="border-t pt-4"> <div className="border-t pt-4">
@@ -1463,6 +1657,11 @@ export function StorageOverview() {
{selectedDisk && activeModalTab === "smart" && ( {selectedDisk && activeModalTab === "smart" && (
<SmartTestTab disk={selectedDisk} observations={diskObservations} /> <SmartTestTab disk={selectedDisk} observations={diskObservations} />
)} )}
{/* Schedule Tab */}
{selectedDisk && activeModalTab === "schedule" && (
<ScheduleTab disk={selectedDisk} />
)}
</div> </div>
</DialogContent> </DialogContent>
</Dialog> </Dialog>
@@ -1670,40 +1869,40 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
<div style="display:flex;flex-wrap:wrap;align-items:center;gap:8px;margin-bottom:10px;"> <div style="display:flex;flex-wrap:wrap;align-items:center;gap:8px;margin-bottom:10px;">
<span style="background:${infoColor}20;color:${infoColor};padding:2px 8px;border-radius:4px;font-size:11px;font-weight:600;">${errorTypeLabel}</span> <span style="background:${infoColor}20;color:${infoColor};padding:2px 8px;border-radius:4px;font-size:11px;font-weight:600;">${errorTypeLabel}</span>
<span style="background:${severityBadgeColor}20;color:${severityBadgeColor};padding:2px 8px;border-radius:4px;font-size:11px;font-weight:600;">${severityLabel}</span> <span style="background:${severityBadgeColor}20;color:${severityBadgeColor};padding:2px 8px;border-radius:4px;font-size:11px;font-weight:600;">${severityLabel}</span>
<span style="background:#64748b20;color:#64748b;padding:2px 8px;border-radius:4px;font-size:11px;">ID: #${obs.id}</span> <span style="background:#64748b20;color:#475569;padding:2px 8px;border-radius:4px;font-size:11px;">ID: #${obs.id}</span>
<span style="background:#64748b20;color:#64748b;padding:2px 8px;border-radius:4px;font-size:11px;">Occurrences: <strong>${obs.occurrence_count}</strong></span> <span style="background:#64748b20;color:#475569;padding:2px 8px;border-radius:4px;font-size:11px;">Occurrences: <strong>${obs.occurrence_count}</strong></span>
${dismissedBadge} ${dismissedBadge}
</div> </div>
<div style="margin-bottom:10px;"> <div style="margin-bottom:10px;">
<div style="font-size:10px;color:#64748b;margin-bottom:4px;">Error Signature:</div> <div style="font-size:10px;color:#475569;margin-bottom:4px;">Error Signature:</div>
<div style="font-family:monospace;font-size:11px;color:#1e293b;background:#f1f5f9;padding:8px;border-radius:4px;word-break:break-all;">${obs.error_signature}</div> <div style="font-family:monospace;font-size:11px;color:#1e293b;background:#f1f5f9;padding:8px;border-radius:4px;word-break:break-all;">${obs.error_signature}</div>
</div> </div>
<div style="margin-bottom:12px;"> <div style="margin-bottom:12px;">
<div style="font-size:10px;color:#64748b;margin-bottom:4px;">Raw Message:</div> <div style="font-size:10px;color:#475569;margin-bottom:4px;">Raw Message:</div>
<div style="font-family:monospace;font-size:11px;color:#1e293b;background:#f8fafc;padding:10px;border-radius:4px;white-space:pre-wrap;word-break:break-all;max-height:120px;overflow-y:auto;">${obs.raw_message || 'N/A'}</div> <div style="font-family:monospace;font-size:11px;color:#1e293b;background:#f8fafc;padding:10px;border-radius:4px;white-space:pre-wrap;word-break:break-all;max-height:120px;overflow-y:auto;">${obs.raw_message || 'N/A'}</div>
</div> </div>
<div style="display:grid;grid-template-columns:repeat(auto-fit, minmax(140px, 1fr));gap:10px;font-size:11px;padding-top:10px;border-top:1px solid ${infoColor}20;"> <div style="display:grid;grid-template-columns:repeat(auto-fit, minmax(140px, 1fr));gap:10px;font-size:11px;padding-top:10px;border-top:1px solid ${infoColor}20;">
<div> <div>
<span style="color:#64748b;">Device:</span> <span style="color:#475569;">Device:</span>
<strong style="color:#1e293b;margin-left:4px;">${obs.device_name || disk.name}</strong> <strong style="color:#1e293b;margin-left:4px;">${obs.device_name || disk.name}</strong>
</div> </div>
<div> <div>
<span style="color:#64748b;">Serial:</span> <span style="color:#475569;">Serial:</span>
<strong style="color:#1e293b;margin-left:4px;">${obs.serial || disk.serial || 'N/A'}</strong> <strong style="color:#1e293b;margin-left:4px;">${obs.serial || disk.serial || 'N/A'}</strong>
</div> </div>
<div> <div>
<span style="color:#64748b;">Model:</span> <span style="color:#475569;">Model:</span>
<strong style="color:#1e293b;margin-left:4px;">${obs.model || disk.model || 'N/A'}</strong> <strong style="color:#1e293b;margin-left:4px;">${obs.model || disk.model || 'N/A'}</strong>
</div> </div>
<div> <div>
<span style="color:#64748b;">First Seen:</span> <span style="color:#475569;">First Seen:</span>
<strong style="color:#1e293b;margin-left:4px;">${firstDate}</strong> <strong style="color:#1e293b;margin-left:4px;">${firstDate}</strong>
</div> </div>
<div> <div>
<span style="color:#64748b;">Last Seen:</span> <span style="color:#475569;">Last Seen:</span>
<strong style="color:#1e293b;margin-left:4px;">${lastDate}</strong> <strong style="color:#1e293b;margin-left:4px;">${lastDate}</strong>
</div> </div>
</div> </div>
@@ -1715,7 +1914,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
<div style="margin-bottom:20px;"> <div style="margin-bottom:20px;">
<div style="display:flex;align-items:center;gap:8px;margin-bottom:12px;padding-bottom:8px;border-bottom:1px solid #e2e8f0;"> <div style="display:flex;align-items:center;gap:8px;margin-bottom:12px;padding-bottom:8px;border-bottom:1px solid #e2e8f0;">
<span style="font-weight:600;color:#1e293b;">${typeLabel}</span> <span style="font-weight:600;color:#1e293b;">${typeLabel}</span>
<span style="background:#64748b15;color:#64748b;padding:2px 8px;border-radius:4px;font-size:11px;">${obsList.length} unique, ${groupOccurrences} total</span> <span style="background:#64748b15;color:#475569;padding:2px 8px;border-radius:4px;font-size:11px;">${obsList.length} unique, ${groupOccurrences} total</span>
</div> </div>
<div style="display:flex;flex-direction:column;gap:12px;"> <div style="display:flex;flex-direction:column;gap:12px;">
${obsItemsHtml} ${obsItemsHtml}
@@ -1729,7 +1928,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
<!-- ${obsSecNum}. Observations & Events --> <!-- ${obsSecNum}. Observations & Events -->
<div class="section"> <div class="section">
<div class="section-title">${obsSecNum}. Observations & Events (${observations.length} recorded, ${totalOccurrences} total occurrences)</div> <div class="section-title">${obsSecNum}. Observations & Events (${observations.length} recorded, ${totalOccurrences} total occurrences)</div>
<p style="color:#64748b;font-size:12px;margin-bottom:16px;">The following events have been detected and logged for this disk. These observations may indicate potential issues that require attention.</p> <p style="color:#475569;font-size:12px;margin-bottom:16px;">The following events have been detected and logged for this disk. These observations may indicate potential issues that require attention.</p>
${groupsHtml} ${groupsHtml}
</div> </div>
` `
@@ -1898,7 +2097,7 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
<div class="health-icon">${isHealthy ? '&#10003;' : '&#10007;'}</div> <div class="health-icon">${isHealthy ? '&#10003;' : '&#10007;'}</div>
<div class="health-lbl">${healthLabel}</div> <div class="health-lbl">${healthLabel}</div>
</div> </div>
<div style="font-size:10px;color:#64748b;font-weight:600;">SMART Status</div> <div style="font-size:10px;color:#475569;font-weight:600;">SMART Status</div>
</div> </div>
<div class="exec-text"> <div class="exec-text">
<h3>Disk Health Assessment</h3> <h3>Disk Health Assessment</h3>
@@ -1910,6 +2109,51 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
</p> </p>
</div> </div>
</div> </div>
<!-- Simple Explanation for Non-Technical Users -->
<div style="background:${isHealthy ? '#dcfce7' : (hasCritical ? '#fee2e2' : '#fef3c7')};border:1px solid ${isHealthy ? '#86efac' : (hasCritical ? '#fca5a5' : '#fcd34d')};border-radius:8px;padding:16px;margin-top:12px;">
<div style="font-weight:700;font-size:14px;color:${isHealthy ? '#166534' : (hasCritical ? '#991b1b' : '#92400e')};margin-bottom:8px;">
${isHealthy ? 'What does this mean? Your disk is healthy!' : (hasCritical ? 'ATTENTION REQUIRED: Problems detected' : 'Some issues need monitoring')}
</div>
<p style="color:${isHealthy ? '#166534' : (hasCritical ? '#991b1b' : '#92400e')};font-size:12px;margin:0 0 8px 0;">
${isHealthy
? 'In simple terms: This disk is working properly. You can continue using it normally. We recommend running periodic SMART tests (monthly) to catch any issues early.'
: (hasCritical
? 'In simple terms: This disk has problems that could cause data loss. You should back up your important files immediately and consider replacing the disk soon.'
: 'In simple terms: The disk is working but shows some signs of wear. It is not critical yet, but you should monitor it closely and ensure your backups are up to date.'
)
}
</p>
${!isHealthy && criticalAttrs.length > 0 ? `
<div style="margin-top:8px;padding-top:8px;border-top:1px solid ${hasCritical ? '#fca5a5' : '#fcd34d'};">
<div style="font-size:11px;font-weight:600;color:#475569;margin-bottom:4px;">Issues found:</div>
<ul style="margin:0;padding-left:20px;font-size:11px;color:${hasCritical ? '#991b1b' : '#92400e'};">
${criticalAttrs.slice(0, 3).map(a => `<li>${a.name.replace(/_/g, ' ')}: ${a.status === 'critical' ? 'Critical - requires immediate attention' : 'Warning - should be monitored'}</li>`).join('')}
${criticalAttrs.length > 3 ? `<li>...and ${criticalAttrs.length - 3} more issues (see details below)</li>` : ''}
</ul>
</div>
` : ''}
</div>
<!-- Test Information -->
<div style="display:grid;grid-template-columns:repeat(auto-fit, minmax(150px, 1fr));gap:8px;margin-top:12px;">
<div style="background:#f8fafc;border:1px solid #e2e8f0;border-radius:6px;padding:10px 12px;">
<div style="font-size:10px;color:#475569;font-weight:600;text-transform:uppercase;">Report Generated</div>
<div style="font-size:12px;font-weight:600;color:#1e293b;">${now}</div>
</div>
<div style="background:#f8fafc;border:1px solid #e2e8f0;border-radius:6px;padding:10px 12px;">
<div style="font-size:10px;color:#475569;font-weight:600;text-transform:uppercase;">Last Test Type</div>
<div style="font-size:12px;font-weight:600;color:#1e293b;">${testStatus.last_test?.type || 'N/A'}</div>
</div>
<div style="background:#f8fafc;border:1px solid #e2e8f0;border-radius:6px;padding:10px 12px;">
<div style="font-size:10px;color:#475569;font-weight:600;text-transform:uppercase;">Test Result</div>
<div style="font-size:12px;font-weight:600;color:${testStatus.last_test?.status?.toLowerCase() === 'passed' ? '#16a34a' : testStatus.last_test?.status?.toLowerCase() === 'failed' ? '#dc2626' : '#64748b'};">${testStatus.last_test?.status || 'N/A'}</div>
</div>
<div style="background:#f8fafc;border:1px solid #e2e8f0;border-radius:6px;padding:10px 12px;">
<div style="font-size:10px;color:#475569;font-weight:600;text-transform:uppercase;">Attributes Checked</div>
<div style="font-size:12px;font-weight:600;color:#1e293b;">${smartAttributes.length}</div>
</div>
</div>
</div> </div>
<!-- 2. Disk Information --> <!-- 2. Disk Information -->
@@ -1937,12 +2181,12 @@ function openSmartReport(disk: DiskInfo, testStatus: SmartTestStatus, smartAttri
<div class="card card-c"> <div class="card card-c">
<div class="card-value" style="color:${getTempColorForReport(disk.temperature)}">${disk.temperature > 0 ? disk.temperature + '°C' : 'N/A'}</div> <div class="card-value" style="color:${getTempColorForReport(disk.temperature)}">${disk.temperature > 0 ? disk.temperature + '°C' : 'N/A'}</div>
<div class="card-label">Temperature</div> <div class="card-label">Temperature</div>
<div style="font-size:9px;color:#64748b;margin-top:2px;">Optimal: ${tempThresholds.optimal}</div> <div style="font-size:9px;color:#475569;margin-top:2px;">Optimal: ${tempThresholds.optimal}</div>
</div> </div>
<div class="card card-c"> <div class="card card-c">
<div class="card-value">${powerOnHours.toLocaleString()}h</div> <div class="card-value">${powerOnHours.toLocaleString()}h</div>
<div class="card-label">Power On Time</div> <div class="card-label">Power On Time</div>
<div style="font-size:9px;color:#64748b;margin-top:2px;">${powerOnYears}y ${powerOnDays}d</div> <div style="font-size:9px;color:#475569;margin-top:2px;">${powerOnYears}y ${powerOnDays}d</div>
</div> </div>
<div class="card card-c"> <div class="card card-c">
<div class="card-value">${(disk.power_cycles ?? 0).toLocaleString()}</div> <div class="card-value">${(disk.power_cycles ?? 0).toLocaleString()}</div>
@@ -1980,7 +2224,7 @@ ${isNvmeDisk ? `
<div style="display:grid;grid-template-columns:1fr 1fr;gap:20px;margin-bottom:20px;"> <div style="display:grid;grid-template-columns:1fr 1fr;gap:20px;margin-bottom:20px;">
<!-- Life Remaining Gauge --> <!-- Life Remaining Gauge -->
<div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;text-align:center;"> <div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;text-align:center;">
<div style="font-size:12px;color:#64748b;margin-bottom:8px;font-weight:600;">LIFE REMAINING</div> <div style="font-size:12px;color:#475569;margin-bottom:8px;font-weight:600;">LIFE REMAINING</div>
<div style="position:relative;width:120px;height:120px;margin:0 auto;"> <div style="position:relative;width:120px;height:120px;margin:0 auto;">
<svg viewBox="0 0 120 120" style="transform:rotate(-90deg);"> <svg viewBox="0 0 120 120" style="transform:rotate(-90deg);">
<circle cx="60" cy="60" r="50" fill="none" stroke="#e2e8f0" stroke-width="12"/> <circle cx="60" cy="60" r="50" fill="none" stroke="#e2e8f0" stroke-width="12"/>
@@ -1996,11 +2240,11 @@ ${isNvmeDisk ? `
<!-- Usage Statistics --> <!-- Usage Statistics -->
<div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;"> <div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;">
<div style="font-size:12px;color:#64748b;margin-bottom:12px;font-weight:600;">USAGE STATISTICS</div> <div style="font-size:12px;color:#475569;margin-bottom:12px;font-weight:600;">USAGE STATISTICS</div>
<div style="margin-bottom:16px;"> <div style="margin-bottom:16px;">
<div style="display:flex;justify-content:space-between;margin-bottom:6px;"> <div style="display:flex;justify-content:space-between;margin-bottom:6px;">
<span style="font-size:12px;color:#64748b;">Percentage Used</span> <span style="font-size:12px;color:#475569;">Percentage Used</span>
<span style="font-size:14px;font-weight:600;color:${getWearColorHex(nvmePercentUsed)};">${nvmePercentUsed}%</span> <span style="font-size:14px;font-weight:600;color:${getWearColorHex(nvmePercentUsed)};">${nvmePercentUsed}%</span>
</div> </div>
<div style="background:#e2e8f0;border-radius:4px;height:8px;overflow:hidden;"> <div style="background:#e2e8f0;border-radius:4px;height:8px;overflow:hidden;">
@@ -2010,7 +2254,7 @@ ${isNvmeDisk ? `
<div style="margin-bottom:16px;"> <div style="margin-bottom:16px;">
<div style="display:flex;justify-content:space-between;margin-bottom:6px;"> <div style="display:flex;justify-content:space-between;margin-bottom:6px;">
<span style="font-size:12px;color:#64748b;">Available Spare</span> <span style="font-size:12px;color:#475569;">Available Spare</span>
<span style="font-size:14px;font-weight:600;color:${nvmeAvailSpare >= 50 ? '#16a34a' : nvmeAvailSpare >= 20 ? '#ca8a04' : '#dc2626'};">${nvmeAvailSpare}%</span> <span style="font-size:14px;font-weight:600;color:${nvmeAvailSpare >= 50 ? '#16a34a' : nvmeAvailSpare >= 20 ? '#ca8a04' : '#dc2626'};">${nvmeAvailSpare}%</span>
</div> </div>
<div style="background:#e2e8f0;border-radius:4px;height:8px;overflow:hidden;"> <div style="background:#e2e8f0;border-radius:4px;height:8px;overflow:hidden;">
@@ -2020,11 +2264,11 @@ ${isNvmeDisk ? `
<div style="display:grid;grid-template-columns:1fr 1fr;gap:12px;margin-top:16px;padding-top:12px;border-top:1px solid #e2e8f0;"> <div style="display:grid;grid-template-columns:1fr 1fr;gap:12px;margin-top:16px;padding-top:12px;border-top:1px solid #e2e8f0;">
<div> <div>
<div style="font-size:11px;color:#64748b;">Data Written</div> <div style="font-size:11px;color:#475569;">Data Written</div>
<div style="font-size:15px;font-weight:600;color:#1e293b;">${nvmeDataWrittenTB >= 1 ? nvmeDataWrittenTB.toFixed(2) + ' TB' : (nvmeDataWrittenTB * 1024).toFixed(1) + ' GB'}</div> <div style="font-size:15px;font-weight:600;color:#1e293b;">${nvmeDataWrittenTB >= 1 ? nvmeDataWrittenTB.toFixed(2) + ' TB' : (nvmeDataWrittenTB * 1024).toFixed(1) + ' GB'}</div>
</div> </div>
<div> <div>
<div style="font-size:11px;color:#64748b;">Power Cycles</div> <div style="font-size:11px;color:#475569;">Power Cycles</div>
<div style="font-size:15px;font-weight:600;color:#1e293b;">${testStatus.smart_data?.nvme_raw?.power_cycles?.toLocaleString() ?? disk.power_cycles ?? 'N/A'}</div> <div style="font-size:15px;font-weight:600;color:#1e293b;">${testStatus.smart_data?.nvme_raw?.power_cycles?.toLocaleString() ?? disk.power_cycles ?? 'N/A'}</div>
</div> </div>
</div> </div>
@@ -2033,9 +2277,104 @@ ${isNvmeDisk ? `
</div> </div>
` : ''} ` : ''}
<!-- 4. SMART Attributes / NVMe Health Metrics --> ${!isNvmeDisk && diskType === 'SSD' ? (() => {
// Try to find SSD wear indicators from SMART attributes
const wearAttr = smartAttributes.find(a =>
a.name?.toLowerCase().includes('wear_leveling') ||
a.name?.toLowerCase().includes('media_wearout') ||
a.name?.toLowerCase().includes('percent_lifetime') ||
a.name?.toLowerCase().includes('ssd_life_left') ||
a.id === 177 || a.id === 231 || a.id === 233
)
const lbasWrittenAttr = smartAttributes.find(a =>
a.name?.toLowerCase().includes('total_lbas_written') ||
a.id === 241
)
// Also check disk properties
const wearValue = wearAttr?.value ?? disk.wear_leveling_count ?? disk.ssd_life_left
if (wearValue !== undefined && wearValue !== null) {
const lifeRemaining = wearValue // Usually this is percentage remaining
const lifeUsed = 100 - lifeRemaining
// Calculate data written from LBAs (LBA = 512 bytes)
let dataWrittenTB = 0
if (lbasWrittenAttr?.raw_value) {
const rawValue = parseInt(lbasWrittenAttr.raw_value.replace(/[^0-9]/g, ''))
if (!isNaN(rawValue)) {
dataWrittenTB = (rawValue * 512) / (1024 ** 4)
}
} else if (disk.total_lbas_written) {
dataWrittenTB = disk.total_lbas_written / 1024 // Already in GB
}
return `
<!-- SSD Wear & Lifetime -->
<div class="section"> <div class="section">
<div class="section-title">${isNvmeDisk ? '4' : '3'}. ${isNvmeDisk ? 'NVMe Health Metrics' : 'SMART Attributes'} (${smartAttributes.length} total${hasCritical ? `, ${criticalAttrs.length} warning(s)` : ''})</div> <div class="section-title">3. SSD Wear & Lifetime</div>
<div style="display:grid;grid-template-columns:1fr 1fr;gap:20px;margin-bottom:20px;">
<!-- Life Remaining Gauge -->
<div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;text-align:center;">
<div style="font-size:12px;color:#475569;margin-bottom:8px;font-weight:600;">LIFE REMAINING</div>
<div style="position:relative;width:120px;height:120px;margin:0 auto;">
<svg viewBox="0 0 120 120" style="transform:rotate(-90deg);">
<circle cx="60" cy="60" r="50" fill="none" stroke="#e2e8f0" stroke-width="12"/>
<circle cx="60" cy="60" r="50" fill="none" stroke="${getLifeColorHex(lifeUsed)}" stroke-width="12"
stroke-dasharray="${lifeRemaining * 3.14} 314" stroke-linecap="round"/>
</svg>
<div style="position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);text-align:center;">
<div style="font-size:28px;font-weight:700;color:${getLifeColorHex(lifeUsed)};">${lifeRemaining}%</div>
</div>
</div>
<div style="margin-top:12px;font-size:11px;color:#475569;">
Source: ${wearAttr?.name?.replace(/_/g, ' ') || 'SSD Life Indicator'}
</div>
</div>
<!-- Usage Statistics -->
<div style="background:linear-gradient(135deg,#f8fafc 0%,#f1f5f9 100%);border:1px solid #e2e8f0;border-radius:12px;padding:20px;">
<div style="font-size:12px;color:#475569;margin-bottom:12px;font-weight:600;">USAGE STATISTICS</div>
<div style="margin-bottom:16px;">
<div style="display:flex;justify-content:space-between;margin-bottom:6px;">
<span style="font-size:12px;color:#475569;">Wear Level</span>
<span style="font-size:14px;font-weight:600;color:${getWearColorHex(lifeUsed)};">${lifeUsed}%</span>
</div>
<div style="background:#e2e8f0;border-radius:4px;height:8px;overflow:hidden;">
<div style="background:${getWearColorHex(lifeUsed)};height:100%;width:${Math.min(lifeUsed, 100)}%;border-radius:4px;"></div>
</div>
</div>
${dataWrittenTB > 0 ? `
<div style="display:grid;grid-template-columns:1fr 1fr;gap:12px;margin-top:16px;padding-top:12px;border-top:1px solid #e2e8f0;">
<div>
<div style="font-size:11px;color:#475569;">Data Written</div>
<div style="font-size:15px;font-weight:600;color:#1e293b;">${dataWrittenTB >= 1 ? dataWrittenTB.toFixed(2) + ' TB' : (dataWrittenTB * 1024).toFixed(1) + ' GB'}</div>
</div>
<div>
<div style="font-size:11px;color:#475569;">Power On Hours</div>
<div style="font-size:15px;font-weight:600;color:#1e293b;">${powerOnHours.toLocaleString()}h</div>
</div>
</div>
` : ''}
<div style="margin-top:12px;padding:8px;background:#f1f5f9;border-radius:6px;font-size:11px;color:#475569;">
<strong>Note:</strong> SSD life estimates are based on manufacturer-reported wear indicators.
Actual lifespan may vary based on workload and usage patterns.
</div>
</div>
</div>
</div>
`
}
return ''
})() : ''}
<!-- ${isNvmeDisk ? '4' : (diskType === 'SSD' && (disk.wear_leveling_count !== undefined || disk.ssd_life_left !== undefined || smartAttributes.some(a => a.name?.toLowerCase().includes('wear'))) ? '4' : '3')}. SMART Attributes / NVMe Health Metrics -->
<div class="section">
<div class="section-title">${isNvmeDisk ? '4' : (diskType === 'SSD' && (disk.wear_leveling_count !== undefined || disk.ssd_life_left !== undefined || smartAttributes.some(a => a.name?.toLowerCase().includes('wear'))) ? '4' : '3')}. ${isNvmeDisk ? 'NVMe Health Metrics' : 'SMART Attributes'} (${smartAttributes.length} total${hasCritical ? `, ${criticalAttrs.length} warning(s)` : ''})</div>
<table class="attr-tbl"> <table class="attr-tbl">
<thead> <thead>
<tr> <tr>
@@ -2049,7 +2388,7 @@ ${isNvmeDisk ? `
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
${attributeRows || '<tr><td colspan="' + (isNvmeDisk ? '3' : '7') + '" style="text-align:center;color:#94a3b8;padding:20px;">No ' + (isNvmeDisk ? 'NVMe metrics' : 'SMART attributes') + ' available</td></tr>'} ${attributeRows || '<tr><td colspan="' + (isNvmeDisk ? '3' : '7') + '" style="text-align:center;color:#64748b;padding:20px;">No ' + (isNvmeDisk ? 'NVMe metrics' : 'SMART attributes') + ' available</td></tr>'}
</tbody> </tbody>
</table> </table>
</div> </div>
@@ -2077,7 +2416,7 @@ ${isNvmeDisk ? `
</div> </div>
</div> </div>
` : ` ` : `
<div style="text-align:center;padding:20px;color:#94a3b8;background:#f8fafc;border:1px solid #e2e8f0;border-radius:8px;"> <div style="text-align:center;padding:20px;color:#64748b;background:#f8fafc;border:1px solid #e2e8f0;border-radius:8px;">
No self-test history available. Run a SMART self-test to see results here. No self-test history available. Run a SMART self-test to see results here.
</div> </div>
`} `}
@@ -2508,3 +2847,373 @@ function SmartTestTab({ disk, observations = [] }: SmartTestTabProps) {
</div> </div>
) )
} }
// ─── Schedule Tab Component ─────────────────────────────────────────────────────
/**
 * One scheduled SMART self-test, as stored and returned by the backend
 * (`/api/storage/smart/schedules`).
 */
interface SmartSchedule {
  id: string                                    // unique id (generated server-side when omitted on create)
  active: boolean                               // inactive schedules are kept but produce no cron entry
  test_type: 'short' | 'long'
  frequency: 'daily' | 'weekly' | 'monthly'
  hour: number                                  // 0-23
  minute: number                                // 0-59
  day_of_week: number                           // 0-6, 0 = Sunday; used when frequency === 'weekly'
  day_of_month: number                          // 1-28; used when frequency === 'monthly'
  disks: string[]                               // device names, or ['all'] for every disk
  retention: number                             // how many JSON results to keep; 0 = keep all
  notify_on_complete: boolean
  notify_only_on_failure: boolean
}
/** Backend schedule configuration: a global on/off switch plus the list of schedules. */
interface ScheduleConfig {
  enabled: boolean          // master switch; when false, no scheduled tests run
  schedules: SmartSchedule[]
}
function ScheduleTab({ disk }: { disk: DiskInfo }) {
const [config, setConfig] = useState<ScheduleConfig>({ enabled: true, schedules: [] })
const [loading, setLoading] = useState(true)
const [saving, setSaving] = useState(false)
const [showForm, setShowForm] = useState(false)
const [editingSchedule, setEditingSchedule] = useState<SmartSchedule | null>(null)
// Form state
const [formData, setFormData] = useState<Partial<SmartSchedule>>({
test_type: 'short',
frequency: 'weekly',
hour: 3,
minute: 0,
day_of_week: 0,
day_of_month: 1,
disks: ['all'],
retention: 10,
active: true,
notify_on_complete: true,
notify_only_on_failure: false
})
const fetchSchedules = async () => {
try {
setLoading(true)
const data = await fetchApi<ScheduleConfig>('/api/storage/smart/schedules')
setConfig(data)
} catch {
console.error('Failed to load schedules')
} finally {
setLoading(false)
}
}
useEffect(() => {
fetchSchedules()
}, [])
const handleToggleGlobal = async () => {
try {
setSaving(true)
await fetchApi('/api/storage/smart/schedules/toggle', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ enabled: !config.enabled })
})
setConfig(prev => ({ ...prev, enabled: !prev.enabled }))
} catch {
console.error('Failed to toggle schedules')
} finally {
setSaving(false)
}
}
const handleSaveSchedule = async () => {
try {
setSaving(true)
const scheduleData = {
...formData,
id: editingSchedule?.id || undefined
}
await fetchApi('/api/storage/smart/schedules', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(scheduleData)
})
await fetchSchedules()
setShowForm(false)
setEditingSchedule(null)
resetForm()
} catch {
console.error('Failed to save schedule')
} finally {
setSaving(false)
}
}
const handleDeleteSchedule = async (id: string) => {
try {
setSaving(true)
await fetchApi(`/api/storage/smart/schedules/${id}`, {
method: 'DELETE'
})
await fetchSchedules()
} catch {
console.error('Failed to delete schedule')
} finally {
setSaving(false)
}
}
const resetForm = () => {
setFormData({
test_type: 'short',
frequency: 'weekly',
hour: 3,
minute: 0,
day_of_week: 0,
day_of_month: 1,
disks: ['all'],
retention: 10,
active: true,
notify_on_complete: true,
notify_only_on_failure: false
})
}
const editSchedule = (schedule: SmartSchedule) => {
setEditingSchedule(schedule)
setFormData(schedule)
setShowForm(true)
}
const dayNames = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
const formatScheduleTime = (schedule: SmartSchedule) => {
const time = `${schedule.hour.toString().padStart(2, '0')}:${schedule.minute.toString().padStart(2, '0')}`
if (schedule.frequency === 'daily') return `Daily at ${time}`
if (schedule.frequency === 'weekly') return `${dayNames[schedule.day_of_week]}s at ${time}`
return `Day ${schedule.day_of_month} of month at ${time}`
}
if (loading) {
return (
<div className="flex items-center justify-center py-8">
<div className="h-6 w-6 rounded-full border-2 border-transparent border-t-purple-400 animate-spin" />
<span className="ml-2 text-muted-foreground">Loading schedules...</span>
</div>
)
}
return (
<div className="space-y-4">
{/* Global Toggle */}
<div className="flex items-center justify-between p-3 bg-muted/50 rounded-lg">
<div>
<p className="font-medium">Automatic SMART Tests</p>
<p className="text-xs text-muted-foreground">Enable or disable all scheduled tests</p>
</div>
<Button
variant={config.enabled ? "default" : "outline"}
size="sm"
onClick={handleToggleGlobal}
disabled={saving}
className={config.enabled ? "bg-purple-600 hover:bg-purple-700" : ""}
>
{config.enabled ? 'Enabled' : 'Disabled'}
</Button>
</div>
{/* Schedules List */}
{config.schedules.length > 0 ? (
<div className="space-y-2">
<h4 className="font-semibold text-sm">Configured Schedules</h4>
{config.schedules.map(schedule => (
<div
key={schedule.id}
className={`border rounded-lg p-3 ${schedule.active ? 'border-purple-500/30 bg-purple-500/5' : 'border-muted opacity-60'}`}
>
<div className="flex items-center justify-between">
<div>
<div className="flex items-center gap-2">
<Badge className={schedule.test_type === 'long' ? 'bg-orange-500/10 text-orange-400 border-orange-500/20' : 'bg-blue-500/10 text-blue-400 border-blue-500/20'}>
{schedule.test_type}
</Badge>
<span className="text-sm font-medium">{formatScheduleTime(schedule)}</span>
</div>
<div className="text-xs text-muted-foreground mt-1">
Disks: {schedule.disks.includes('all') ? 'All disks' : schedule.disks.join(', ')} |
Keep {schedule.retention} results
</div>
</div>
<div className="flex items-center gap-2">
<Button
variant="ghost"
size="sm"
onClick={() => editSchedule(schedule)}
className="h-8 w-8 p-0"
>
<Settings className="h-4 w-4" />
</Button>
<Button
variant="ghost"
size="sm"
onClick={() => handleDeleteSchedule(schedule.id)}
className="h-8 w-8 p-0 text-red-400 hover:text-red-300 hover:bg-red-500/10"
disabled={saving}
>
<Trash2 className="h-4 w-4" />
</Button>
</div>
</div>
</div>
))}
</div>
) : (
<div className="text-center py-6 text-muted-foreground">
<Clock className="h-8 w-8 mx-auto mb-2 opacity-50" />
<p>No scheduled tests configured</p>
<p className="text-xs mt-1">Create a schedule to automatically run SMART tests</p>
</div>
)}
{/* Add/Edit Form */}
{showForm ? (
<div className="border rounded-lg p-4 space-y-4">
<h4 className="font-semibold">{editingSchedule ? 'Edit Schedule' : 'New Schedule'}</h4>
<div className="grid grid-cols-2 gap-4">
<div>
<label className="text-sm text-muted-foreground">Test Type</label>
<select
value={formData.test_type}
onChange={e => setFormData(prev => ({ ...prev, test_type: e.target.value as 'short' | 'long' }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
<option value="short">Short Test (~2 min)</option>
<option value="long">Long Test (1-4 hours)</option>
</select>
</div>
<div>
<label className="text-sm text-muted-foreground">Frequency</label>
<select
value={formData.frequency}
onChange={e => setFormData(prev => ({ ...prev, frequency: e.target.value as 'daily' | 'weekly' | 'monthly' }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
<option value="daily">Daily</option>
<option value="weekly">Weekly</option>
<option value="monthly">Monthly</option>
</select>
</div>
{formData.frequency === 'weekly' && (
<div>
<label className="text-sm text-muted-foreground">Day of Week</label>
<select
value={formData.day_of_week}
onChange={e => setFormData(prev => ({ ...prev, day_of_week: parseInt(e.target.value) }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
{dayNames.map((day, i) => (
<option key={day} value={i}>{day}</option>
))}
</select>
</div>
)}
{formData.frequency === 'monthly' && (
<div>
<label className="text-sm text-muted-foreground">Day of Month</label>
<select
value={formData.day_of_month}
onChange={e => setFormData(prev => ({ ...prev, day_of_month: parseInt(e.target.value) }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
{Array.from({ length: 28 }, (_, i) => i + 1).map(day => (
<option key={day} value={day}>{day}</option>
))}
</select>
</div>
)}
<div>
<label className="text-sm text-muted-foreground">Time (Hour)</label>
<select
value={formData.hour}
onChange={e => setFormData(prev => ({ ...prev, hour: parseInt(e.target.value) }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
{Array.from({ length: 24 }, (_, i) => (
<option key={i} value={i}>{i.toString().padStart(2, '0')}:00</option>
))}
</select>
</div>
<div>
<label className="text-sm text-muted-foreground">Keep Results</label>
<select
value={formData.retention}
onChange={e => setFormData(prev => ({ ...prev, retention: parseInt(e.target.value) }))}
className="w-full mt-1 p-2 rounded-md bg-background border border-input text-sm"
>
<option value={5}>Last 5</option>
<option value={10}>Last 10</option>
<option value={20}>Last 20</option>
<option value={50}>Last 50</option>
<option value={0}>Keep All</option>
</select>
</div>
</div>
<div className="flex items-center gap-4">
<label className="flex items-center gap-2 text-sm">
<input
type="checkbox"
checked={formData.disks?.includes('all')}
onChange={e => setFormData(prev => ({
...prev,
disks: e.target.checked ? ['all'] : [disk.name]
}))}
className="rounded border-input"
/>
Test all disks
</label>
</div>
<div className="flex items-center gap-2 pt-2">
<Button
onClick={handleSaveSchedule}
disabled={saving}
className="bg-purple-600 hover:bg-purple-700"
>
{saving ? 'Saving...' : 'Save Schedule'}
</Button>
<Button
variant="outline"
onClick={() => {
setShowForm(false)
setEditingSchedule(null)
resetForm()
}}
>
Cancel
</Button>
</div>
</div>
) : (
<Button
onClick={() => setShowForm(true)}
variant="outline"
className="w-full"
>
<Plus className="h-4 w-4 mr-2" />
Add Schedule
</Button>
)}
<p className="text-xs text-muted-foreground text-center">
Scheduled tests run automatically via cron. Results are saved to the SMART history.
</p>
</div>
)
}

View File

@@ -6353,14 +6353,103 @@ def api_proxmox_storage():
# ─── SMART Disk Testing API ─────────────────────────────────────────────────── # ─── SMART Disk Testing API ───────────────────────────────────────────────────
SMART_DIR = '/usr/local/share/proxmenux/smart' SMART_DIR = '/usr/local/share/proxmenux/smart'
SMART_CONFIG_DIR = '/usr/local/share/proxmenux/smart/config'
DEFAULT_SMART_RETENTION = 10 # Keep last 10 JSON files per disk by default
def _is_nvme(disk_name): def _is_nvme(disk_name):
"""Check if disk is NVMe (supports names like nvme0n1, nvme0n1p1).""" """Check if disk is NVMe (supports names like nvme0n1, nvme0n1p1)."""
return 'nvme' in disk_name return 'nvme' in disk_name
def _get_smart_json_path(disk_name): def _get_smart_disk_dir(disk_name):
"""Get path to SMART JSON file for a disk.""" """Get directory path for a disk's SMART JSON files."""
return os.path.join(SMART_DIR, f"{disk_name}.json") return os.path.join(SMART_DIR, disk_name)
def _get_smart_json_path(disk_name, test_type='short'):
    """Build the path for a brand-new SMART JSON result file.

    The filename embeds the current timestamp plus the test type
    (e.g. ``2026-04-13T10-30-00_short.json``) so files sort
    chronologically by name alone. The per-disk directory is
    created on demand as a side effect.
    """
    target_dir = _get_smart_disk_dir(disk_name)
    os.makedirs(target_dir, exist_ok=True)
    stamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
    filename = f"{stamp}_{test_type}.json"
    return os.path.join(target_dir, filename)
def _get_latest_smart_json(disk_name):
    """Return the path of the newest SMART JSON file for a disk, or None.

    Filenames start with a zero-padded timestamp, so the lexicographic
    maximum is the most recent file.
    """
    disk_dir = _get_smart_disk_dir(disk_name)
    if not os.path.exists(disk_dir):
        return None
    candidates = [name for name in os.listdir(disk_dir) if name.endswith('.json')]
    if not candidates:
        return None
    return os.path.join(disk_dir, max(candidates))
def _get_smart_history(disk_name, limit=None):
    """List a disk's SMART JSON files, newest first, with parsed metadata.

    Each entry carries the filename, absolute path, ISO timestamp, test
    type and a human-readable date. Files whose names do not follow the
    ``<timestamp>_<test_type>.json`` convention are skipped silently.
    ``limit`` truncates the list when truthy (None/0 means no limit).
    """
    disk_dir = _get_smart_disk_dir(disk_name)
    if not os.path.exists(disk_dir):
        return []

    names = sorted(
        (n for n in os.listdir(disk_dir) if n.endswith('.json')),
        reverse=True  # timestamp-prefixed names => newest first
    )
    if limit:
        names = names[:limit]

    entries = []
    for name in names:
        # Expected shape: 2026-04-13T10-30-00_short.json
        pieces = name.replace('.json', '').split('_')
        if len(pieces) < 2:
            continue
        try:
            parsed = datetime.strptime(pieces[0], '%Y-%m-%dT%H-%M-%S')
        except ValueError:
            continue  # unexpected filename format
        entries.append({
            'filename': name,
            'path': os.path.join(disk_dir, name),
            'timestamp': parsed.isoformat(),
            'test_type': pieces[1],
            'date_readable': parsed.strftime('%Y-%m-%d %H:%M:%S')
        })
    return entries
def _cleanup_old_smart_jsons(disk_name, retention=None):
    """Prune old SMART JSON files, keeping only the newest ``retention``.

    Returns the number of files removed. ``retention`` of 0 or less
    means "keep everything"; None falls back to DEFAULT_SMART_RETENTION.
    Removal failures are ignored (best-effort cleanup).
    """
    if retention is None:
        retention = DEFAULT_SMART_RETENTION
    if retention <= 0:
        return 0

    disk_dir = _get_smart_disk_dir(disk_name)
    if not os.path.exists(disk_dir):
        return 0

    newest_first = sorted(
        (n for n in os.listdir(disk_dir) if n.endswith('.json')),
        reverse=True
    )

    removed = 0
    for stale in newest_first[retention:]:
        try:
            os.remove(os.path.join(disk_dir, stale))
        except Exception:
            continue  # best-effort: leave undeletable files in place
        removed += 1
    return removed
def _ensure_smart_tools(install_if_missing=False): def _ensure_smart_tools(install_if_missing=False):
"""Check if SMART tools are installed and optionally install them.""" """Check if SMART tools are installed and optionally install them."""
@@ -6483,14 +6572,17 @@ def api_smart_status(disk_name):
result['error'] = 'smartmontools not installed' result['error'] = 'smartmontools not installed'
return jsonify(result) return jsonify(result)
# Check for existing JSON file (from previous test) # Check for existing JSON file (from previous test) - get most recent
json_path = _get_smart_json_path(disk_name) json_path = _get_latest_smart_json(disk_name)
if os.path.exists(json_path): if json_path and os.path.exists(json_path):
try: try:
with open(json_path, 'r') as f: with open(json_path, 'r') as f:
saved_data = json.load(f) saved_data = json.load(f)
result['saved_data'] = saved_data result['saved_data'] = saved_data
result['saved_timestamp'] = os.path.getmtime(json_path) result['saved_timestamp'] = os.path.getmtime(json_path)
result['saved_path'] = json_path
# Get test history
result['test_history'] = _get_smart_history(disk_name, limit=10)
except (json.JSONDecodeError, IOError): except (json.JSONDecodeError, IOError):
pass pass
@@ -6712,6 +6804,74 @@ def api_smart_status(disk_name):
return jsonify({'error': str(e)}), 500 return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/<disk_name>/history', methods=['GET'])
@require_auth
def api_smart_history(disk_name):
    """Get SMART test history for a disk.

    Query params:
        limit: max number of history entries to return (default 20).

    Returns JSON with the disk name, the history list (newest first,
    as produced by _get_smart_history) and the entry count.
    """
    try:
        # Validate disk name (security) — only plain alphanumeric device
        # names (e.g. sda, nvme0n1) are accepted, blocking path traversal.
        if not re.match(r'^[a-zA-Z0-9]+$', disk_name):
            return jsonify({'error': 'Invalid disk name'}), 400

        limit = request.args.get('limit', 20, type=int)
        history = _get_smart_history(disk_name, limit=limit)

        return jsonify({
            'disk': disk_name,
            'history': history,
            'total': len(history)
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/<disk_name>/latest', methods=['GET'])
@require_auth
def api_smart_latest(disk_name):
    """Get the most recent SMART JSON data for a disk.

    Responds with ``has_data: False`` (HTTP 200) when no saved test
    exists yet; otherwise returns the parsed smartctl JSON plus the
    timestamp and test type recovered from the saved filename.
    """
    try:
        # Validate disk name (security) — reject anything that is not a
        # plain alphanumeric device name to prevent path traversal.
        if not re.match(r'^[a-zA-Z0-9]+$', disk_name):
            return jsonify({'error': 'Invalid disk name'}), 400

        json_path = _get_latest_smart_json(disk_name)

        if not json_path or not os.path.exists(json_path):
            return jsonify({
                'disk': disk_name,
                'has_data': False,
                'message': 'No SMART test data available. Run a SMART test first.'
            })

        with open(json_path, 'r') as f:
            smart_data = json.load(f)

        # Extract timestamp from filename (format: <timestamp>_<test_type>.json)
        filename = os.path.basename(json_path)
        parts = filename.replace('.json', '').split('_')
        timestamp = None
        test_type = 'unknown'
        if len(parts) >= 2:
            try:
                dt = datetime.strptime(parts[0], '%Y-%m-%dT%H-%M-%S')
                timestamp = dt.isoformat()
                test_type = parts[1]
            except ValueError:
                # Filename doesn't follow the convention; metadata stays unset.
                pass

        return jsonify({
            'disk': disk_name,
            'has_data': True,
            'data': smart_data,
            'timestamp': timestamp,
            'test_type': test_type,
            'path': json_path
        })
    except json.JSONDecodeError:
        return jsonify({'error': 'Invalid JSON data in saved file'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/<disk_name>/test', methods=['POST']) @app.route('/api/storage/smart/<disk_name>/test', methods=['POST'])
@require_auth @require_auth
def api_smart_run_test(disk_name): def api_smart_run_test(disk_name):
@@ -6736,9 +6896,12 @@ def api_smart_run_test(disk_name):
# Check tools and auto-install if missing # Check tools and auto-install if missing
tools = _ensure_smart_tools(install_if_missing=True) tools = _ensure_smart_tools(install_if_missing=True)
# Ensure SMART directory exists # Ensure SMART directory exists and get path for new JSON file
os.makedirs(SMART_DIR, exist_ok=True) os.makedirs(SMART_DIR, exist_ok=True)
json_path = _get_smart_json_path(disk_name) json_path = _get_smart_json_path(disk_name, test_type)
# Cleanup old JSON files based on retention policy
_cleanup_old_smart_jsons(disk_name)
if is_nvme: if is_nvme:
if not tools['nvme']: if not tools['nvme']:
@@ -6832,7 +6995,7 @@ def api_smart_run_test(disk_name):
while smartctl -c {device} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do while smartctl -c {device} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do
sleep {sleep_interval} sleep {sleep_interval}
done done
smartctl --json=c {device} > {json_path} 2>/dev/null smartctl -a --json=c {device} > {json_path} 2>/dev/null
''', ''',
shell=True, start_new_session=True, shell=True, start_new_session=True,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
@@ -6850,6 +7013,189 @@ def api_smart_run_test(disk_name):
return jsonify({'error': str(e)}), 500 return jsonify({'error': str(e)}), 500
# ─── SMART Schedule API ───────────────────────────────────────────────────────
SMART_SCHEDULE_FILE = os.path.join(SMART_CONFIG_DIR, 'smart-schedule.json')
SMART_CRON_FILE = '/etc/cron.d/proxmenux-smart'
def _load_smart_schedules():
    """Read the schedule config file, falling back to defaults.

    Any read/parse problem (missing file, bad JSON, I/O error) yields
    the default config: scheduling enabled, no schedules.
    """
    os.makedirs(SMART_CONFIG_DIR, exist_ok=True)
    try:
        with open(SMART_SCHEDULE_FILE, 'r') as fh:
            return json.load(fh)
    except (IOError, json.JSONDecodeError):
        return {'enabled': True, 'schedules': []}
def _save_smart_schedules(config):
    """Save SMART test schedules to the config file atomically.

    Writes to a temporary file in the same directory and then renames it
    over the real file with os.replace, so a crash mid-write can never
    leave a truncated/corrupt JSON config behind.
    """
    os.makedirs(SMART_CONFIG_DIR, exist_ok=True)
    tmp_path = SMART_SCHEDULE_FILE + '.tmp'
    with open(tmp_path, 'w') as f:
        json.dump(config, f, indent=2)
    # Atomic on POSIX: readers see either the old or the new file, never a partial one.
    os.replace(tmp_path, SMART_SCHEDULE_FILE)
def _update_smart_cron():
    """Regenerate the /etc/cron.d file from the saved schedule config.

    The cron file is removed entirely when scheduling is globally
    disabled or no schedules exist. Schedule ids, disk names and time
    fields originate from API clients and end up on a root cron command
    line, so they are validated/coerced here to prevent cron-spec or
    shell injection; entries that fail validation are skipped.
    """
    config = _load_smart_schedules()

    if not config.get('enabled') or not config.get('schedules'):
        # Remove cron file if disabled or no schedules
        if os.path.exists(SMART_CRON_FILE):
            os.remove(SMART_CRON_FILE)
        return

    cron_lines = [
        '# ProxMenux SMART Scheduled Tests',
        '# Auto-generated - do not edit manually',
        'SHELL=/bin/bash',
        'PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin',
        ''
    ]

    for schedule in config['schedules']:
        if not schedule.get('active', True):
            continue

        # Ids are embedded verbatim in the command line: restrict charset.
        schedule_id = str(schedule.get('id', 'unknown'))
        if not re.match(r'^[A-Za-z0-9_.-]+$', schedule_id):
            continue

        # Coerce time fields to sane integers (hour 0-23, minute 0-59).
        try:
            hour = int(schedule.get('hour', 3)) % 24
            minute = int(schedule.get('minute', 0)) % 60
        except (TypeError, ValueError):
            hour, minute = 3, 0

        frequency = schedule.get('frequency', 'weekly')

        # Build cron time specification
        if frequency == 'daily':
            cron_time = f'{minute} {hour} * * *'
        elif frequency == 'weekly':
            try:
                dow = int(schedule.get('day_of_week', 0)) % 7  # 0=Sunday
            except (TypeError, ValueError):
                dow = 0
            cron_time = f'{minute} {hour} * * {dow}'
        elif frequency == 'monthly':
            try:
                dom = int(schedule.get('day_of_month', 1))
            except (TypeError, ValueError):
                dom = 1
            if not 1 <= dom <= 28:
                dom = 1  # keep it valid for every month
            cron_time = f'{minute} {hour} {dom} * *'
        else:
            continue  # unknown frequency: skip entry

        test_type = schedule.get('test_type', 'short')
        if test_type not in ('short', 'long'):
            test_type = 'short'

        try:
            retention = int(schedule.get('retention', 10))
        except (TypeError, ValueError):
            retention = 10

        cmd = (f'/usr/local/share/proxmenux/scripts/smart-scheduled-test.sh '
               f'--schedule-id {schedule_id} --test-type {test_type} --retention {retention}')

        disks = schedule.get('disks', ['all'])
        if disks != ['all']:
            # Only plain device names (e.g. sda, nvme0n1) may be embedded.
            safe_disks = [d for d in disks if re.match(r'^[a-zA-Z0-9]+$', str(d))]
            if not safe_disks:
                continue
            cmd += f" --disks '{','.join(safe_disks)}'"

        cron_lines.append(f'{cron_time} root {cmd} >> /var/log/proxmenux/smart-schedule.log 2>&1')

    cron_lines.append('')  # cron.d files must end with a newline

    with open(SMART_CRON_FILE, 'w') as f:
        f.write('\n'.join(cron_lines))

    # World-readable, root-writable — the standard mode for /etc/cron.d files.
    os.chmod(SMART_CRON_FILE, 0o644)
@app.route('/api/storage/smart/schedules', methods=['GET'])
@require_auth
def api_smart_schedules_list():
    """Get all SMART test schedules.

    Returns the full schedule config: the global ``enabled`` flag plus
    the ``schedules`` list, exactly as stored on disk.
    """
    config = _load_smart_schedules()
    return jsonify(config)
@app.route('/api/storage/smart/schedules', methods=['POST'])
@require_auth
def api_smart_schedules_create():
    """Create or update a SMART test schedule.

    The JSON body is a (partial) schedule; missing fields get defaults.
    When an ``id`` is supplied and already exists the schedule is
    replaced in place, otherwise a new one is appended. The cron file
    is regenerated after saving. Ids are restricted to a safe charset
    because they end up on a root cron command line.
    """
    try:
        data = request.get_json()
        if not data:
            return jsonify({'error': 'No data provided'}), 400

        config = _load_smart_schedules()
        config.setdefault('schedules', [])  # tolerate a hand-edited config file

        # Generate ID if not provided; validate the charset either way.
        schedule_id = data.get('id') or f"schedule-{datetime.now().strftime('%Y%m%d%H%M%S')}"
        if not re.match(r'^[A-Za-z0-9_.-]+$', str(schedule_id)):
            return jsonify({'error': 'Invalid schedule id'}), 400
        data['id'] = schedule_id

        # Set defaults
        data.setdefault('active', True)
        data.setdefault('test_type', 'short')
        data.setdefault('frequency', 'weekly')
        data.setdefault('hour', 3)
        data.setdefault('minute', 0)
        data.setdefault('day_of_week', 0)
        data.setdefault('day_of_month', 1)
        data.setdefault('disks', ['all'])
        data.setdefault('retention', 10)
        data.setdefault('notify_on_complete', True)
        data.setdefault('notify_only_on_failure', False)

        # Coerce numeric fields so malformed input cannot corrupt the
        # cron time specification generated later.
        for key, default in (('hour', 3), ('minute', 0),
                             ('day_of_week', 0), ('day_of_month', 1),
                             ('retention', 10)):
            try:
                data[key] = int(data[key])
            except (TypeError, ValueError):
                data[key] = default

        # Update existing or add new
        existing_idx = next((i for i, s in enumerate(config['schedules']) if s.get('id') == schedule_id), None)
        if existing_idx is not None:
            config['schedules'][existing_idx] = data
        else:
            config['schedules'].append(data)

        _save_smart_schedules(config)
        _update_smart_cron()

        return jsonify({
            'success': True,
            'schedule': data,
            'message': 'Schedule saved successfully'
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/schedules/<schedule_id>', methods=['DELETE'])
@require_auth
def api_smart_schedules_delete(schedule_id):
    """Delete a SMART test schedule by id.

    Returns 404 when no schedule with that id exists. On success both
    the config file and the generated cron file are updated.
    """
    try:
        config = _load_smart_schedules()
        # .get() guards against a hand-edited config missing the key,
        # and s.get('id') against entries without an id — the original
        # raised KeyError (-> 500) in both cases.
        schedules = config.get('schedules', [])
        remaining = [s for s in schedules if s.get('id') != schedule_id]

        if len(remaining) == len(schedules):
            return jsonify({'error': 'Schedule not found'}), 404

        config['schedules'] = remaining
        _save_smart_schedules(config)
        _update_smart_cron()

        return jsonify({
            'success': True,
            'message': 'Schedule deleted successfully'
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/schedules/toggle', methods=['POST'])
@require_auth
def api_smart_schedules_toggle():
    """Enable or disable all SMART test schedules.

    Body: ``{"enabled": bool}`` (defaults to true when absent). The flag
    is persisted and the cron file regenerated — disabling removes the
    cron file entirely, while individual schedule definitions are kept.
    """
    try:
        data = request.get_json() or {}
        enabled = data.get('enabled', True)

        config = _load_smart_schedules()
        config['enabled'] = enabled
        _save_smart_schedules(config)
        _update_smart_cron()

        return jsonify({
            'success': True,
            'enabled': enabled,
            'message': f'SMART schedules {"enabled" if enabled else "disabled"}'
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/api/storage/smart/tools', methods=['GET']) @app.route('/api/storage/smart/tools', methods=['GET'])
@require_auth @require_auth
def api_smart_tools_status(): def api_smart_tools_status():

View File

@@ -0,0 +1,166 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Apply Pending Restore On Boot
# ==========================================================
# Applies a restore plan that was staged under $PENDING_BASE by the
# restore tooling. All paths can be overridden through PMX_RESTORE_*
# environment variables (presumably for testing — confirm with callers).

PENDING_BASE="${PMX_RESTORE_PENDING_BASE:-/var/lib/proxmenux/restore-pending}"
CURRENT_LINK="${PENDING_BASE}/current"
LOG_DIR="${PMX_RESTORE_LOG_DIR:-/var/log/proxmenux}"
DEST_PREFIX="${PMX_RESTORE_DEST_PREFIX:-/}"
PRE_BACKUP_BASE="${PMX_RESTORE_PRE_BACKUP_BASE:-/root/proxmenux-pre-restore}"
RECOVERY_BASE="${PMX_RESTORE_RECOVERY_BASE:-/root/proxmenux-recovery}"

mkdir -p "$LOG_DIR" "$PENDING_BASE/completed" >/dev/null 2>&1 || true
LOG_FILE="${LOG_DIR}/proxmenux-restore-onboot-$(date +%Y%m%d_%H%M%S).log"
# From here on, everything this script prints goes to the log file.
exec >>"$LOG_FILE" 2>&1

echo "=== ProxMenux pending restore started at $(date -Iseconds) ==="

# No "current" link => nothing was staged; exit quietly.
if [[ ! -e "$CURRENT_LINK" ]]; then
  echo "No pending restore link found. Nothing to do."
  exit 0
fi

# Resolve the link; fall back to the raw link path if readlink fails.
PENDING_DIR="$(readlink -f "$CURRENT_LINK" 2>/dev/null || echo "$CURRENT_LINK")"
# Stale link (target removed): clean it up and exit without error.
if [[ ! -d "$PENDING_DIR" ]]; then
  echo "Pending restore directory not found: $PENDING_DIR"
  rm -f "$CURRENT_LINK" >/dev/null 2>&1 || true
  exit 0
fi

APPLY_LIST="${PENDING_DIR}/apply-on-boot.list"   # relative paths to restore, one per line
PLAN_ENV="${PENDING_DIR}/plan.env"               # optional restore options (e.g. HB_RESTORE_INCLUDE_ZFS)
STATE_FILE="${PENDING_DIR}/state"                # progress marker: running/failed/completed[_with_errors]

if [[ -f "$PLAN_ENV" ]]; then
  # shellcheck source=/dev/null
  source "$PLAN_ENV"
fi
: "${HB_RESTORE_INCLUDE_ZFS:=0}"

# A staged restore without an apply list is broken: mark failed and abort.
if [[ ! -f "$APPLY_LIST" ]]; then
  echo "Apply list missing: $APPLY_LIST"
  echo "failed" >"$STATE_FILE"
  exit 1
fi
echo "Pending dir: $PENDING_DIR"
echo "Apply list: $APPLY_LIST"
echo "Include ZFS: $HB_RESTORE_INCLUDE_ZFS"

echo "running" >"$STATE_FILE"

# Anything we are about to overwrite is first copied here, so the restore
# can be rolled back by hand if needed.
backup_root="${PRE_BACKUP_BASE}/$(date +%Y%m%d_%H%M%S)-onboot"
mkdir -p "$backup_root" >/dev/null 2>&1 || true

cluster_recovery_root=""
applied=0
skipped=0
failed=0

# Each line of APPLY_LIST is a path relative to the staged rootfs; copy it
# from ${PENDING_DIR}/rootfs to the live system under $DEST_PREFIX.
while IFS= read -r rel; do
  [[ -z "$rel" ]] && continue
  src="${PENDING_DIR}/rootfs/${rel}"
  dst="${DEST_PREFIX%/}/${rel}"

  if [[ ! -e "$src" ]]; then
    ((skipped++))
    continue
  fi

  # Never restore cluster virtual filesystem data live.
  # It is siphoned off to a recovery directory instead (see below).
  if [[ "$rel" == etc/pve* ]] || [[ "$rel" == var/lib/pve-cluster* ]]; then
    if [[ -z "$cluster_recovery_root" ]]; then
      cluster_recovery_root="${RECOVERY_BASE}/$(date +%Y%m%d_%H%M%S)-onboot"
      mkdir -p "$cluster_recovery_root" >/dev/null 2>&1 || true
    fi
    mkdir -p "$cluster_recovery_root/$(dirname "$rel")" >/dev/null 2>&1 || true
    cp -a "$src" "$cluster_recovery_root/$rel" >/dev/null 2>&1 || true
    ((skipped++))
    continue
  fi

  # /etc/zfs is opt-in (restored only when HB_RESTORE_INCLUDE_ZFS=1).
  if [[ "$rel" == etc/zfs || "$rel" == etc/zfs/* ]]; then
    if [[ "$HB_RESTORE_INCLUDE_ZFS" != "1" ]]; then
      ((skipped++))
      continue
    fi
  fi

  # Snapshot the current version before overwriting it.
  if [[ -e "$dst" ]]; then
    mkdir -p "$backup_root/$(dirname "$rel")" >/dev/null 2>&1 || true
    cp -a "$dst" "$backup_root/$rel" >/dev/null 2>&1 || true
  fi

  # Directories are synced (with --delete, so removed files disappear);
  # single files are copied with attributes preserved.
  if [[ -d "$src" ]]; then
    mkdir -p "$dst" >/dev/null 2>&1 || true
    if rsync -aAXH --delete "$src/" "$dst/" >/dev/null 2>&1; then
      ((applied++))
    else
      ((failed++))
    fi
  else
    mkdir -p "$(dirname "$dst")" >/dev/null 2>&1 || true
    if cp -a "$src" "$dst" >/dev/null 2>&1; then
      ((applied++))
    else
      ((failed++))
    fi
  fi
done <"$APPLY_LIST"
# Restored files may include unit files / boot config: refresh them all,
# best-effort (none of these failures should abort the restore).
systemctl daemon-reload >/dev/null 2>&1 || true
command -v update-initramfs >/dev/null 2>&1 && update-initramfs -u -k all >/dev/null 2>&1 || true
command -v update-grub >/dev/null 2>&1 && update-grub >/dev/null 2>&1 || true

echo "Applied: $applied"
echo "Skipped: $skipped"
echo "Failed: $failed"
echo "Backup before restore: $backup_root"

# If cluster paths were siphoned off, generate an interactive helper script
# the admin can run manually during a maintenance window. Note the heredoc
# is unquoted: ${cluster_recovery_root} expands NOW, while \$-escaped
# variables are left for the generated script's runtime.
if [[ -n "$cluster_recovery_root" ]]; then
  helper="${cluster_recovery_root}/apply-cluster-restore.sh"
  cat > "$helper" <<EOF
#!/bin/bash
set -euo pipefail
RECOVERY_ROOT="${cluster_recovery_root}"
echo "Cluster recovery helper"
echo "Source: \$RECOVERY_ROOT"
echo
echo "WARNING: run this only in a maintenance window."
echo
read -r -p "Type YES to continue: " ans
[[ "\$ans" == "YES" ]] || { echo "Aborted."; exit 1; }
systemctl stop pve-cluster || true
[[ -d "\$RECOVERY_ROOT/etc/pve" ]] && mkdir -p /etc/pve && cp -a "\$RECOVERY_ROOT/etc/pve/." /etc/pve/ || true
[[ -d "\$RECOVERY_ROOT/var/lib/pve-cluster" ]] && mkdir -p /var/lib/pve-cluster && cp -a "\$RECOVERY_ROOT/var/lib/pve-cluster/." /var/lib/pve-cluster/ || true
systemctl start pve-cluster || true
echo "Cluster recovery finished."
EOF
  chmod +x "$helper" >/dev/null 2>&1 || true
  echo "Cluster paths extracted to: $cluster_recovery_root"
  echo "Cluster recovery helper: $helper"
fi

# Record the final state in the (soon to be archived) pending dir.
if [[ "$failed" -eq 0 ]]; then
  echo "completed" >"$STATE_FILE"
else
  echo "completed_with_errors" >"$STATE_FILE"
fi

# Archive the pending dir, drop the link, and disarm the one-shot service
# so the restore does not run again on the next boot.
restore_id="$(basename "$PENDING_DIR")"
mv "$PENDING_DIR" "${PENDING_BASE}/completed/${restore_id}" >/dev/null 2>&1 || true
rm -f "$CURRENT_LINK" >/dev/null 2>&1 || true
systemctl disable proxmenux-restore-onboot.service >/dev/null 2>&1 || true

echo "=== ProxMenux pending restore finished at $(date -Iseconds) ==="
echo "Log file: $LOG_FILE"
exit 0

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,387 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Scheduled Backup Jobs
# ==========================================================
# Resolve the scripts directory: prefer a sibling checkout next to this file
# (development layout), otherwise fall back to the system-wide install path.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)"
LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts"
LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then
    LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL"
    UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
elif [[ ! -f "$UTILS_FILE" ]]; then
    UTILS_FILE="$BASE_DIR/utils.sh"
fi
# utils.sh provides translate(), the msg_* helpers and cache bootstrap.
if [[ -f "$UTILS_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$UTILS_FILE"
else
    echo "ERROR: utils.sh not found." >&2
    exit 1
fi
# Shared backup/restore library (hb_* helpers used by _create_job).
LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh"
[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh"
if [[ -f "$LIB_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$LIB_FILE"
else
    msg_error "$(translate "Cannot load backup library: lib_host_backup_common.sh")"
    exit 1
fi
load_language
initialize_cache
# Job definitions (<id>.env + <id>.paths) and per-job run logs.
JOBS_DIR="/var/lib/proxmenux/backup-jobs"
LOG_DIR="/var/log/proxmenux/backup-jobs"
mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true
# Path helpers: map a job id to its on-disk artifacts.
_job_file() { printf '%s/%s.env\n' "$JOBS_DIR" "$1"; }
_job_paths_file() { printf '%s/%s.paths\n' "$JOBS_DIR" "$1"; }
_service_file() { printf '/etc/systemd/system/proxmenux-backup-%s.service\n' "$1"; }
_timer_file() { printf '/etc/systemd/system/proxmenux-backup-%s.timer\n' "$1"; }
# Coerce $1 to a non-negative integer: empty or non-numeric input becomes 0.
_normalize_uint() {
    local value="${1:-0}"
    if [[ ! "$value" =~ ^[0-9]+$ ]]; then
        value=0
    fi
    printf '%s\n' "$value"
}
# Persist a job definition as shell-quoted KEY=VALUE lines.
# $1: destination file; remaining args: "KEY=VALUE" pairs.
_write_job_env() {
    local out_file="$1"
    shift
    local entry name value
    {
        echo "# ProxMenux scheduled backup job"
        for entry in "$@"; do
            name="${entry%%=*}"
            value="${entry#*=}"
            # %q quotes the value so the file can be safely sourced later.
            printf '%s=%q\n' "$name" "$value"
        done
    } > "$out_file"
}
# Emit all job ids (one per line, sorted) found under $JOBS_DIR.
_list_jobs() {
    local job_env
    for job_env in "$JOBS_DIR"/*.env; do
        # Glob may stay literal when no job exists; the -f guard skips it.
        [[ -f "$job_env" ]] || continue
        basename "$job_env" .env
    done | sort
}
# Report "<timer>/<service>" state for a job, e.g. "enabled/inactive".
# $1: job id.
_show_job_status() {
    local id="$1"
    local timer_state="disabled"
    local service_state
    systemctl is-enabled --quiet "proxmenux-backup-${id}.timer" >/dev/null 2>&1 && timer_state="enabled"
    # Fix: 'systemctl is-active' prints the state AND exits non-zero when the
    # unit is not active, so the old '|| echo inactive' captured BOTH lines
    # ("inactive\ninactive"). Only fall back when stdout was empty (e.g.
    # systemctl unavailable).
    service_state=$(systemctl is-active "proxmenux-backup-${id}.service" 2>/dev/null)
    [[ -z "$service_state" ]] && service_state="inactive"
    echo "${timer_state}/${service_state}"
}
# Generate the systemd .service/.timer unit pair for a job, then reload systemd.
# $1: job id; $2: systemd OnCalendar expression.
_write_job_units() {
    local id="$1"
    local on_calendar="$2"
    # Prefer the installed runner; fall back to this script's own directory
    # (development checkout).
    local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh"
    [[ ! -f "$runner" ]] && runner="$SCRIPT_DIR/run_scheduled_backup.sh"
    # Oneshot service niced and IO-throttled so backups do not starve guests.
    cat > "$(_service_file "$id")" <<EOF
[Unit]
Description=ProxMenux Scheduled Backup Job (${id})
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=${runner} ${id}
Nice=10
IOSchedulingClass=best-effort
IOSchedulingPriority=7
EOF
    # Persistent=true catches up runs missed while the host was powered off;
    # RandomizedDelaySec spreads simultaneous jobs by up to 2 minutes.
    cat > "$(_timer_file "$id")" <<EOF
[Unit]
Description=ProxMenux Scheduled Backup Timer (${id})
[Timer]
OnCalendar=${on_calendar}
Persistent=true
RandomizedDelaySec=120
Unit=proxmenux-backup-${id}.service
[Install]
WantedBy=timers.target
EOF
    systemctl daemon-reload >/dev/null 2>&1 || true
}
# Prompt for the six retention counters (keep-last/hourly/daily/weekly/
# monthly/yearly) and fill the named array with "KEY=VALUE" strings.
# $1: name of the output array variable. Returns 1 if any prompt is cancelled.
_prompt_retention() {
    local __out_var="$1"
    local -a keys=(KEEP_LAST KEEP_HOURLY KEEP_DAILY KEEP_WEEKLY KEEP_MONTHLY KEEP_YEARLY)
    local -a labels=(
        "keep-last (0 disables)"
        "keep-hourly (0 disables)"
        "keep-daily (0 disables)"
        "keep-weekly (0 disables)"
        "keep-monthly (0 disables)"
        "keep-yearly (0 disables)"
    )
    local -a defaults=(7 0 7 4 3 0)
    local -a collected=()
    local i answer
    for i in "${!keys[@]}"; do
        answer=$(dialog --backtitle "ProxMenux" --title "$(translate "Retention")" \
            --inputbox "$(translate "${labels[$i]}")" 9 60 "${defaults[$i]}" 3>&1 1>&2 2>&3) || return 1
        # Empty or non-numeric input falls back to 0.
        collected+=("${keys[$i]}=$(_normalize_uint "$answer")")
    done
    local -n out="$__out_var"
    out=("${collected[@]}")
}
# Interactively create a scheduled backup job: ask for id, backend, schedule,
# profile paths and retention; persist the .env/.paths files; write and enable
# the systemd units. Returns 1 on any cancelled prompt or validation failure.
_create_job() {
    local id backend on_calendar profile_mode
    id=$(dialog --backtitle "ProxMenux" --title "$(translate "New backup job")" \
        --inputbox "$(translate "Job ID (letters, numbers, - _)")" 9 68 "hostcfg-daily" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$id" ]] && return 1
    # Sanitize: collapse disallowed characters to '-' and strip edge dashes
    # (the id is embedded in systemd unit names and file names).
    id=$(echo "$id" | tr -cs '[:alnum:]_-' '-' | sed 's/^-*//; s/-*$//')
    [[ -z "$id" ]] && return 1
    [[ -f "$(_job_file "$id")" ]] && {
        dialog --backtitle "ProxMenux" --title "$(translate "Error")" \
            --msgbox "$(translate "A job with this ID already exists.")" 8 62
        return 1
    }
    backend=$(dialog --backtitle "ProxMenux" --title "$(translate "Backend")" \
        --menu "\n$(translate "Select backup backend:")" 14 70 6 \
        "local" "Local archive" \
        "borg" "Borg repository" \
        "pbs" "Proxmox Backup Server" \
        3>&1 1>&2 2>&3) || return 1
    on_calendar=$(dialog --backtitle "ProxMenux" --title "$(translate "Schedule")" \
        --inputbox "$(translate "systemd OnCalendar expression")"$'\n'"$(translate "Example: daily or Mon..Fri 03:00")" \
        11 72 "daily" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$on_calendar" ]] && return 1
    profile_mode=$(dialog --backtitle "ProxMenux" --title "$(translate "Profile")" \
        --menu "\n$(translate "Select backup profile:")" 12 68 4 \
        "default" "Default critical paths" \
        "custom" "Custom selected paths" \
        3>&1 1>&2 2>&3) || return 1
    # Resolve paths and retention via the shared library / local helper.
    local -a paths=()
    hb_select_profile_paths "$profile_mode" paths || return 1
    local -a retention=()
    _prompt_retention retention || return 1
    local -a lines=(
        "JOB_ID=$id"
        "BACKEND=$backend"
        "ON_CALENDAR=$on_calendar"
        "PROFILE_MODE=$profile_mode"
        "ENABLED=1"
    )
    lines+=("${retention[@]}")
    # Backend-specific settings appended to the job env.
    case "$backend" in
        local)
            local dest_dir ext
            dest_dir=$(hb_prompt_dest_dir) || return 1
            ext=$(dialog --backtitle "ProxMenux" --title "$(translate "Archive format")" \
                --menu "\n$(translate "Select local archive format:")" 12 62 4 \
                "tar.zst" "tar + zstd (preferred)" \
                "tar.gz" "tar + gzip" \
                3>&1 1>&2 2>&3) || return 1
            lines+=("LOCAL_DEST_DIR=$dest_dir" "LOCAL_ARCHIVE_EXT=$ext")
            ;;
        borg)
            local repo passphrase
            hb_select_borg_repo repo || return 1
            # May export BORG_PASSPHRASE / BORG_ENCRYPT_MODE as side effects.
            hb_prepare_borg_passphrase || return 1
            passphrase="${BORG_PASSPHRASE:-}"
            lines+=(
                "BORG_REPO=$repo"
                "BORG_PASSPHRASE=$passphrase"
                "BORG_ENCRYPT_MODE=${BORG_ENCRYPT_MODE:-none}"
            )
            ;;
        pbs)
            # Sets HB_PBS_REPOSITORY / HB_PBS_SECRET; encryption vars optional.
            hb_select_pbs_repository || return 1
            hb_ask_pbs_encryption
            local bid
            bid="hostcfg-$(hostname)"
            bid=$(dialog --backtitle "ProxMenux" --title "PBS" \
                --inputbox "$(translate "Backup ID for this job:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$bid" 3>&1 1>&2 2>&3) || return 1
            bid=$(echo "$bid" | tr -cs '[:alnum:]_-' '-' | sed 's/-*$//')
            lines+=(
                "PBS_REPOSITORY=${HB_PBS_REPOSITORY}"
                "PBS_PASSWORD=${HB_PBS_SECRET}"
                "PBS_BACKUP_ID=${bid}"
                "PBS_KEYFILE=${HB_PBS_KEYFILE:-}"
                "PBS_ENCRYPTION_PASSWORD=${HB_PBS_ENC_PASS:-}"
            )
            ;;
    esac
    # Persist job definition + selected paths, then install and arm the timer.
    _write_job_env "$(_job_file "$id")" "${lines[@]}"
    : > "$(_job_paths_file "$id")"
    local p
    for p in "${paths[@]}"; do
        echo "$p" >> "$(_job_paths_file "$id")"
    done
    _write_job_units "$id" "$on_calendar"
    systemctl enable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true
    # Summary screen.
    show_proxmenux_logo
    msg_title "$(translate "Scheduled backup job created")"
    echo -e ""
    echo -e "${TAB}${BGN}$(translate "Job ID:")${CL} ${BL}${id}${CL}"
    echo -e "${TAB}${BGN}$(translate "Backend:")${CL} ${BL}${backend}${CL}"
    echo -e "${TAB}${BGN}$(translate "Schedule:")${CL} ${BL}${on_calendar}${CL}"
    echo -e "${TAB}${BGN}$(translate "Status:")${CL} ${BL}$(_show_job_status "$id")${CL}"
    echo -e ""
    msg_success "$(translate "Press Enter to continue...")"
    read -r
    return 0
}
# Show a menu of existing jobs and return the chosen id via nameref.
# $1: dialog title; $2: name of the output variable. Returns 1 when there are
# no jobs or the user cancels.
_pick_job() {
    local title="$1"
    local __out_var="$2"
    local -a ids=()
    mapfile -t ids < <(_list_jobs)
    if (( ${#ids[@]} == 0 )); then
        dialog --backtitle "ProxMenux" --title "$(translate "No jobs")" \
            --msgbox "$(translate "No scheduled backup jobs found.")" 8 62
        return 1
    fi
    local -a menu_items=()
    local idx
    for idx in "${!ids[@]}"; do
        menu_items+=("$((idx + 1))" "${ids[$idx]} [$(_show_job_status "${ids[$idx]}")]")
    done
    local selection
    selection=$(dialog --backtitle "ProxMenux" --title "$title" \
        --menu "\n$(translate "Select a job:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "${menu_items[@]}" 3>&1 1>&2 2>&3) || return 1
    local -n out="$__out_var"
    out="${ids[$((selection - 1))]}"
    return 0
}
# Run a selected job immediately in the foreground and report the result.
_job_run_now() {
    local job_id=""
    _pick_job "$(translate "Run job now")" job_id || return 1
    # Prefer the installed runner; fall back to the development checkout.
    local runner="$LOCAL_SCRIPTS/backup_restore/run_scheduled_backup.sh"
    [[ -f "$runner" ]] || runner="$SCRIPT_DIR/run_scheduled_backup.sh"
    if "$runner" "$job_id"; then
        msg_ok "$(translate "Job executed successfully.")"
    else
        msg_warn "$(translate "Job execution finished with errors. Check logs.")"
    fi
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Flip the enabled state of a selected job's systemd timer.
_job_toggle() {
    local job_id=""
    _pick_job "$(translate "Enable/Disable job")" job_id || return 1
    local timer_unit="proxmenux-backup-${job_id}.timer"
    if systemctl is-enabled --quiet "$timer_unit" >/dev/null 2>&1; then
        systemctl disable --now "$timer_unit" >/dev/null 2>&1 || true
        msg_warn "$(translate "Job timer disabled:") $job_id"
    else
        systemctl enable --now "$timer_unit" >/dev/null 2>&1 || true
        msg_ok "$(translate "Job timer enabled:") $job_id"
    fi
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Delete a scheduled job: stop/disable its timer and remove all its artifacts
# (service/timer units, .env definition, .paths list).
_job_delete() {
    local id=""
    _pick_job "$(translate "Delete job")" id || return 1
    # Consistency fix: use dialog like every other prompt in this script
    # (whiptail is a separate package and may not be installed).
    if ! dialog --backtitle "ProxMenux" --title "$(translate "Confirm delete")" \
        --yesno "$(translate "Delete scheduled backup job?")"$'\n\n'"ID: ${id}" 10 66; then
        return 1
    fi
    systemctl disable --now "proxmenux-backup-${id}.timer" >/dev/null 2>&1 || true
    rm -f "$(_service_file "$id")" "$(_timer_file "$id")" "$(_job_file "$id")" "$(_job_paths_file "$id")"
    systemctl daemon-reload >/dev/null 2>&1 || true
    msg_ok "$(translate "Job deleted:") $id"
    msg_success "$(translate "Press Enter to continue...")"
    read -r
}
# Render every job with its timer/service state and last-run summary into a
# temporary file, then page it with a dialog textbox.
_show_jobs() {
    local report
    report=$(mktemp) || return
    {
        echo "=== $(translate "Scheduled backup jobs") ==="
        echo ""
        local job_id
        while IFS= read -r job_id; do
            [[ -n "$job_id" ]] || continue
            echo "$job_id [$(_show_job_status "$job_id")]"
            # Indent the runner's last status block under the job line.
            if [[ -f "${LOG_DIR}/${job_id}-last.status" ]]; then
                sed 's/^/ /' "${LOG_DIR}/${job_id}-last.status"
            fi
            echo ""
        done < <(_list_jobs)
    } > "$report"
    dialog --backtitle "ProxMenux" --title "$(translate "Scheduled backup jobs")" \
        --textbox "$report" 28 100 || true
    rm -f "$report"
}
# Top-level interactive loop; returns when the user picks "Return" or
# cancels the menu dialog.
main_menu() {
    local choice
    while true; do
        choice=$(dialog --backtitle "ProxMenux" \
            --title "$(translate "Backup scheduler and retention")" \
            --menu "\n$(translate "Choose action:")" "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
            1 "$(translate "Create scheduled backup job")" \
            2 "$(translate "Show jobs and last run status")" \
            3 "$(translate "Run a job now")" \
            4 "$(translate "Enable / disable job timer")" \
            5 "$(translate "Delete job")" \
            0 "$(translate "Return")" \
            3>&1 1>&2 2>&3) || return 0
        case "$choice" in
            1) _create_job ;;
            2) _show_jobs ;;
            3) _job_run_now ;;
            4) _job_toggle ;;
            5) _job_delete ;;
            0) return 0 ;;
        esac
    done
}
# Script entry point.
main_menu

View File

@@ -0,0 +1,770 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Host Config Backup/Restore - Shared Library
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : MIT
# Version : 1.0
# Last Updated: 08/04/2026
# ==========================================================
# Do not execute directly — source from backup_host.sh
# Library guard: refuse to run as a standalone script.
[[ "${BASH_SOURCE[0]}" == "$0" ]] && {
    echo "This file is a library. Source it, do not run it directly." >&2; exit 1
}
# Shared state directory for keys, passphrases and manual PBS configs.
HB_STATE_DIR="/usr/local/share/proxmenux"
# Pinned standalone Borg release used when borg is not installed system-wide;
# downloads are verified against the SHA256 below before use.
HB_BORG_VERSION="1.2.8"
HB_BORG_LINUX64_SHA256="cfa50fb704a93d3a4fa258120966345fddb394f960dca7c47fcb774d0172f40b"
HB_BORG_LINUX64_URL="https://github.com/borgbackup/borg/releases/download/${HB_BORG_VERSION}/borg-linux64"
# Translation wrapper — safe fallback if translate not yet loaded
# Translate $1 via utils.sh's translate() when it is loaded; otherwise echo
# the raw string. Fix: the previous `A && B || C` form also ran the fallback
# echo whenever translate() existed but exited non-zero, which could emit the
# string twice (translated output followed by the raw string).
hb_translate() {
    if declare -f translate >/dev/null 2>&1; then
        translate "$1"
    else
        echo "$1"
    fi
}
# ==========================================================
# UI SIZE CONSTANTS
# ==========================================================
# Shared dialog(1) geometry used across all hb_* prompts:
# *_H = height, *_W = width, MENU_LIST = visible menu rows.
HB_UI_MENU_H=22
HB_UI_MENU_W=84
HB_UI_MENU_LIST=10
HB_UI_INPUT_H=10
HB_UI_INPUT_W=72
HB_UI_PASS_H=10
HB_UI_PASS_W=72
HB_UI_YESNO_H=10
HB_UI_YESNO_W=78
# ==========================================================
# DEFAULT PROFILE PATHS
# ==========================================================
# Print the default set of host-config paths to back up, one per line.
# /etc/zfs is appended only on hosts that appear to use ZFS.
hb_default_profile_paths() {
    local -a profile=(
        "/etc/pve"
        "/etc/network"
        "/etc/hosts"
        "/etc/hostname"
        "/etc/ssh"
        "/etc/systemd/system"
        "/etc/modules"
        "/etc/modules-load.d"
        "/etc/modprobe.d"
        "/etc/udev/rules.d"
        "/etc/default/grub"
        "/etc/fstab"
        "/etc/kernel"
        "/etc/apt"
        "/etc/vzdump.conf"
        "/etc/postfix"
        "/etc/resolv.conf"
        "/etc/timezone"
        "/etc/iscsi"
        "/etc/multipath"
        "/usr/local/bin"
        "/usr/local/share/proxmenux"
        "/root"
        "/etc/cron.d"
        "/etc/cron.daily"
        "/etc/cron.hourly"
        "/etc/cron.weekly"
        "/etc/cron.monthly"
        "/etc/cron.allow"
        "/etc/cron.deny"
        "/var/spool/cron/crontabs"
        "/var/lib/pve-cluster"
    )
    if command -v zpool >/dev/null 2>&1 || [[ -d /etc/zfs ]]; then
        profile+=("/etc/zfs")
    fi
    printf '%s\n' "${profile[@]}"
}
# ==========================================================
# PATH CLASSIFICATION (restore safety)
# Returns: dangerous | reboot | hot
# ==========================================================
# Classify a restore path by safety impact.
# $1: path relative to / (no leading slash).
# Prints: "dangerous" (never auto-apply), "reboot" (needs reboot to take
# effect) or "hot" (safe to apply live).
hb_classify_path() {
    local path="$1"
    case "$path" in
        etc/pve|etc/pve/*|var/lib/pve-cluster|var/lib/pve-cluster/*|etc/network|etc/network/*)
            echo "dangerous"
            ;;
        etc/modules|etc/modules/*|etc/modules-load.d|etc/modules-load.d/*|etc/modprobe.d|etc/modprobe.d/*|etc/udev/rules.d|etc/udev/rules.d/*|etc/default/grub|etc/fstab|etc/kernel|etc/kernel/*|etc/iscsi|etc/iscsi/*|etc/multipath|etc/multipath/*|etc/zfs|etc/zfs/*)
            echo "reboot"
            ;;
        *)
            echo "hot"
            ;;
    esac
}
# Print the human-readable warning for a "dangerous" path, or nothing for
# paths without one. $1: path relative to / (no leading slash).
hb_path_warning() {
    local rel="$1"
    case "$rel" in
        etc/pve|etc/pve/*)
            hb_translate "/etc/pve is managed by pmxcfs (cluster filesystem). Applying this on a running node can corrupt cluster state. Use 'Export to file' and apply it manually during a maintenance window." ;;
        var/lib/pve-cluster|var/lib/pve-cluster/*)
            hb_translate "/var/lib/pve-cluster is live cluster data. Never restore this while the node is running. Use 'Export to file' for manual recovery only." ;;
        etc/network|etc/network/*)
            hb_translate "/etc/network controls active interfaces. Applying may immediately change or drop network connectivity, including active SSH sessions." ;;
    esac
}
# ==========================================================
# PROFILE PATH SELECTION
# ==========================================================
# Resolve the list of backup paths for a profile.
# $1: "default" (all default paths) or any other value (interactive checklist).
# $2: name of an array variable that receives the selected absolute paths.
# Returns 1 on cancel or when nothing was selected.
hb_select_profile_paths() {
    local mode="$1"
    local __out_var="$2"
    local -n __out_ref="$__out_var"
    # Fix: declare the scratch array locally — previously mapfile created a
    # global __defaults that leaked into the caller's scope.
    local -a __defaults=()
    mapfile -t __defaults < <(hb_default_profile_paths)
    if [[ "$mode" == "default" ]]; then
        __out_ref=("${__defaults[@]}")
        return 0
    fi
    # Build a numbered checklist (all entries unchecked).
    local options=() idx=1 path
    for path in "${__defaults[@]}"; do
        options+=("$idx" "$path" "off")
        ((idx++))
    done
    local selected
    selected=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Custom backup profile")" \
        --separate-output --checklist \
        "$(hb_translate "Select paths to include:")" \
        26 86 18 "${options[@]}" 3>&1 1>&2 2>&3) || return 1
    # Map the chosen 1-based indexes back to paths.
    __out_ref=()
    local choice
    while read -r choice; do
        [[ -z "$choice" ]] && continue
        __out_ref+=("${__defaults[$((choice-1))]}")
    done <<< "$selected"
    if [[ ${#__out_ref[@]} -eq 0 ]]; then
        dialog --backtitle "ProxMenux" --title "$(hb_translate "Error")" \
            --msgbox "$(hb_translate "No paths selected. Select at least one path.")" 8 60
        return 1
    fi
}
# ==========================================================
# STAGING OPERATIONS
# ==========================================================
# Build a fresh staging tree for a backup run.
# $1: staging root (recreated from scratch); remaining args: absolute paths.
# Layout produced:
#   <root>/rootfs/...    copies of the selected paths (leading / stripped)
#   <root>/metadata/...  selected/missing path lists, host snapshot, manifest
hb_prepare_staging() {
    local staging_root="$1"; shift
    local paths=("$@")
    rm -rf "$staging_root"
    mkdir -p "$staging_root/rootfs" "$staging_root/metadata"
    local selected_file="$staging_root/metadata/selected_paths.txt"
    local missing_file="$staging_root/metadata/missing_paths.txt"
    : > "$selected_file"
    : > "$missing_file"
    local p rel target
    for p in "${paths[@]}"; do
        rel="${p#/}"
        # Record every requested path; non-existent ones go to missing_paths.
        echo "$rel" >> "$selected_file"
        [[ -e "$p" ]] || { echo "$p" >> "$missing_file"; continue; }
        target="$staging_root/rootfs/$rel"
        if [[ -d "$p" ]]; then
            mkdir -p "$target"
            # -aAXH --numeric-ids preserves ACLs/xattrs/hardlinks and raw ids;
            # bulk data (images/dumps/tmp/logs) is excluded everywhere.
            local -a rsync_opts=(
                -aAXH --numeric-ids
                --exclude "images/"
                --exclude "dump/"
                --exclude "tmp/"
                --exclude "*.log"
            )
            # /root is included by default for easier recovery, but avoid volatile/sensitive noise.
            if [[ "$rel" == "root" || "$rel" == "root/"* ]]; then
                rsync_opts+=(
                    --exclude ".bash_history"
                    --exclude ".cache/"
                    --exclude "tmp/"
                    --exclude ".local/share/Trash/"
                )
            fi
            # Runtime pending-restore data belongs in /var/lib/proxmenux, never in app code tree.
            if [[ "$rel" == "usr/local/share/proxmenux" || "$rel" == "usr/local/share/proxmenux/"* ]]; then
                rsync_opts+=(
                    --exclude "restore-pending/"
                )
            fi
            # Best-effort copy: unreadable files must not abort the whole run.
            rsync "${rsync_opts[@]}" "$p/" "$target/" 2>/dev/null || true
        else
            mkdir -p "$(dirname "$target")"
            cp -a "$p" "$target" 2>/dev/null || true
        fi
    done
    # Metadata snapshot: host identity plus VM/storage inventory for context.
    local meta="$staging_root/metadata"
    {
        echo "generated_at=$(date -Iseconds)"
        echo "hostname=$(hostname)"
        echo "kernel=$(uname -r)"
    } > "$meta/run_info.env"
    command -v pveversion >/dev/null 2>&1 && pveversion -v > "$meta/pveversion.txt" 2>&1 || true
    command -v lsblk >/dev/null 2>&1 && lsblk -f > "$meta/lsblk.txt" 2>&1 || true
    command -v qm >/dev/null 2>&1 && qm list > "$meta/qm-list.txt" 2>&1 || true
    command -v pct >/dev/null 2>&1 && pct list > "$meta/pct-list.txt" 2>&1 || true
    command -v zpool >/dev/null 2>&1 && zpool status > "$meta/zpool.txt" 2>&1 || true
    # Manifest + checksums (relative to rootfs) for restore-time verification.
    (
        cd "$staging_root/rootfs" || return 1
        find . -mindepth 1 -print | sort > "$meta/manifest.txt"
        find . -type f -print0 | sort -z | xargs -0 sha256sum 2>/dev/null \
            > "$meta/checksums.sha256" || true
    )
}
# Load the relative paths recorded in a restore tree's metadata.
# $1: restore root; $2: name of the output array variable.
# Falls back to scanning rootfs against the default profile when the
# metadata file is missing or empty.
hb_load_restore_paths() {
    local restore_root="$1"
    local __out_var="$2"
    local -n __out="$__out_var"
    __out=()
    local selected_file="$restore_root/metadata/selected_paths.txt"
    if [[ -f "$selected_file" ]]; then
        local entry
        while IFS= read -r entry; do
            [[ -n "$entry" ]] && __out+=("$entry")
        done < "$selected_file"
    fi
    # Fallback: keep only default paths that actually exist in the staged rootfs.
    if [[ ${#__out[@]} -eq 0 ]]; then
        local candidate
        while IFS= read -r candidate; do
            [[ -n "$candidate" && -e "$restore_root/rootfs/${candidate#/}" ]] && __out+=("${candidate#/}")
        done < <(hb_default_profile_paths)
    fi
}
# ==========================================================
# PBS CONFIG — auto-detect from storage.cfg + manual
# ==========================================================
# Discover all known PBS configurations and fill four parallel globals:
#   HB_PBS_NAMES[]   storage/config name
#   HB_PBS_REPOS[]   "user@server:datastore" repository string
#   HB_PBS_SECRETS[] password/token secret ("" when unknown)
#   HB_PBS_SOURCES[] "proxmox" (from storage.cfg) or "manual"
hb_collect_pbs_configs() {
    HB_PBS_NAMES=()
    HB_PBS_REPOS=()
    HB_PBS_SECRETS=()
    HB_PBS_SOURCES=()
    # Pass 1: parse pbs: stanzas from Proxmox's storage.cfg. The parser keeps
    # one stanza's fields in current/server/datastore/username and flushes
    # them when the next stanza header begins.
    if [[ -f /etc/pve/storage.cfg ]]; then
        local current="" server="" datastore="" username="" pw_file pw_val
        while IFS= read -r line; do
            # Strip comments, then leading and trailing whitespace.
            line="${line%%#*}"
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" ]] && continue
            if [[ $line =~ ^pbs:[[:space:]]*(.+)$ ]]; then
                # A new pbs: header — flush the previous stanza if complete.
                if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then
                    # Stored storage password lives in /etc/pve/priv/storage/<name>.pw.
                    pw_file="/etc/pve/priv/storage/${current}.pw"
                    pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
                    HB_PBS_NAMES+=("$current")
                    HB_PBS_REPOS+=("${username}@${server}:${datastore}")
                    HB_PBS_SECRETS+=("$pw_val")
                    HB_PBS_SOURCES+=("proxmox")
                fi
                current="${BASH_REMATCH[1]}"; server="" datastore="" username=""
            elif [[ -n "$current" ]]; then
                [[ $line =~ ^[[:space:]]+server[[:space:]]+(.+)$ ]] && server="${BASH_REMATCH[1]}"
                [[ $line =~ ^[[:space:]]+datastore[[:space:]]+(.+)$ ]] && datastore="${BASH_REMATCH[1]}"
                [[ $line =~ ^[[:space:]]+username[[:space:]]+(.+)$ ]] && username="${BASH_REMATCH[1]}"
                # A non-pbs storage header also terminates the stanza.
                # NOTE(review): whitespace was stripped above, so the
                # ^[[:space:]]+ prefixes here look unmatchable — verify the
                # parser against a real storage.cfg.
                if [[ $line =~ ^[a-zA-Z]+:[[:space:]] &&
                    -n "$server" && -n "$datastore" && -n "$username" ]]; then
                    pw_file="/etc/pve/priv/storage/${current}.pw"
                    pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
                    HB_PBS_NAMES+=("$current")
                    HB_PBS_REPOS+=("${username}@${server}:${datastore}")
                    HB_PBS_SECRETS+=("$pw_val")
                    HB_PBS_SOURCES+=("proxmox")
                    current="" server="" datastore="" username=""
                fi
            fi
        done < /etc/pve/storage.cfg
        # Last stanza (file ended without another header).
        if [[ -n "$current" && -n "$server" && -n "$datastore" && -n "$username" ]]; then
            pw_file="/etc/pve/priv/storage/${current}.pw"
            pw_val="$([[ -f "$pw_file" ]] && cat "$pw_file" || echo "")"
            HB_PBS_NAMES+=("$current")
            HB_PBS_REPOS+=("${username}@${server}:${datastore}")
            HB_PBS_SECRETS+=("$pw_val")
            HB_PBS_SOURCES+=("proxmox")
        fi
    fi
    # Pass 2: manually added configs ("name|repo" lines; secret in a
    # per-name pbs-pass-<name>.txt file under HB_STATE_DIR).
    local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt"
    if [[ -f "$manual_cfg" ]]; then
        local line name repo sf
        while IFS= read -r line; do
            line="${line%%#*}"
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" ]] && continue
            name="${line%%|*}"; repo="${line##*|}"
            sf="$HB_STATE_DIR/pbs-pass-${name}.txt"
            HB_PBS_NAMES+=("$name"); HB_PBS_REPOS+=("$repo")
            HB_PBS_SECRETS+=("$([[ -f "$sf" ]] && cat "$sf" || echo "")")
            HB_PBS_SOURCES+=("manual")
        done < "$manual_cfg"
    fi
}
# Interactively add a manual PBS configuration: prompts for name, user, host,
# datastore and secret; persists "name|repo" in pbs-manual-configs.txt and the
# secret in a mode-600 pbs-pass-<name>.txt; sets HB_PBS_NAME, HB_PBS_REPOSITORY
# and HB_PBS_SECRET for the caller. Returns 1 on cancel/empty required input.
hb_configure_pbs_manual() {
    local name user host datastore repo secret
    name=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Configuration name:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "PBS-$(date +%m%d)" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$name" ]] && return 1
    user=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Username (e.g. root@pam or user@pbs!token):")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root@pam" 3>&1 1>&2 2>&3) || return 1
    host=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "PBS host or IP address:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$host" ]] && return 1
    datastore=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --inputbox "$(hb_translate "Datastore name:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
    [[ -z "$datastore" ]] && return 1
    secret=$(dialog --backtitle "ProxMenux" --title "$(hb_translate "Add PBS")" \
        --insecure --passwordbox "$(hb_translate "Password or API token secret:")" \
        "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
    # proxmox-backup-client repository syntax: user@realm@host:datastore.
    repo="${user}@${host}:${datastore}"
    mkdir -p "$HB_STATE_DIR"
    local cfg_line="${name}|${repo}"
    local manual_cfg="$HB_STATE_DIR/pbs-manual-configs.txt"
    touch "$manual_cfg"
    # De-duplicate: only append when this exact line is not already present.
    grep -Fxq "$cfg_line" "$manual_cfg" || echo "$cfg_line" >> "$manual_cfg"
    printf '%s' "$secret" > "$HB_STATE_DIR/pbs-pass-${name}.txt"
    chmod 600 "$HB_STATE_DIR/pbs-pass-${name}.txt"
    HB_PBS_NAME="$name"; HB_PBS_REPOSITORY="$repo"; HB_PBS_SECRET="$secret"
}
# Let the user pick a PBS repository (auto-detected + manual) or add one.
# Sets HB_PBS_NAME, HB_PBS_REPOSITORY (exported) and HB_PBS_SECRET.
# Returns 1 on cancel.
hb_select_pbs_repository() {
    hb_collect_pbs_configs
    local menu=() i=1 idx
    for idx in "${!HB_PBS_NAMES[@]}"; do
        local src="${HB_PBS_SOURCES[$idx]}"
        # Fix: separate name and repository with a space and parenthesize the
        # "no password" hint — previously the pieces were concatenated with no
        # separator, producing an unreadable label like "namerepo [src]".
        local label="${HB_PBS_NAMES[$idx]} ${HB_PBS_REPOS[$idx]} [$src]"
        [[ -z "${HB_PBS_SECRETS[$idx]}" ]] && label+=" ($(hb_translate "no password"))"
        menu+=("$i" "$label"); ((i++))
    done
    menu+=("$i" "$(hb_translate "+ Add new PBS manually")")
    local choice
    choice=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Select PBS repository")" \
        --menu "\n$(hb_translate "Available PBS repositories:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1
    if [[ "$choice" == "$i" ]]; then
        hb_configure_pbs_manual || return 1
    else
        local sel=$((choice-1))
        HB_PBS_NAME="${HB_PBS_NAMES[$sel]}"
        export HB_PBS_REPOSITORY="${HB_PBS_REPOS[$sel]}"
        HB_PBS_SECRET="${HB_PBS_SECRETS[$sel]}"
        # Config without a stored password: prompt once and cache it.
        if [[ -z "$HB_PBS_SECRET" ]]; then
            HB_PBS_SECRET=$(dialog --backtitle "ProxMenux" --title "PBS" \
                --insecure --passwordbox \
                "$(hb_translate "Password for:") $HB_PBS_NAME" \
                "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
            mkdir -p "$HB_STATE_DIR"
            printf '%s' "$HB_PBS_SECRET" > "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt"
            chmod 600 "$HB_STATE_DIR/pbs-pass-${HB_PBS_NAME}.txt"
        fi
    fi
}
# Optionally enable client-side encryption for PBS backups.
# On return, HB_PBS_KEYFILE_OPT holds "--keyfile <path>" (or "") and
# HB_PBS_ENC_PASS the key passphrase (or ""). Always returns 0 — declining
# or failing any step simply leaves encryption disabled.
# NOTE(review): the job creator reads ${HB_PBS_KEYFILE:-} while this function
# sets HB_PBS_KEYFILE_OPT — verify the variable names match across callers.
hb_ask_pbs_encryption() {
    local key_file="$HB_STATE_DIR/pbs-key.conf"
    local enc_pass_file="$HB_STATE_DIR/pbs-encryption-pass.txt"
    export HB_PBS_KEYFILE_OPT=""
    export HB_PBS_ENC_PASS=""
    # User declined encryption — keep the empty defaults.
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \
        --yesno "$(hb_translate "Encrypt this backup with a keyfile?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    # Reuse an existing key (and its cached passphrase, if stored).
    if [[ -f "$key_file" ]]; then
        export HB_PBS_KEYFILE_OPT="--keyfile $key_file"
        if [[ -f "$enc_pass_file" ]]; then
            HB_PBS_ENC_PASS="$(<"$enc_pass_file")"
            export HB_PBS_ENC_PASS
        fi
        msg_ok "$(hb_translate "Using existing encryption key:") $key_file"
        return 0
    fi
    # No key — offer to create one
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Encryption")" \
        --yesno "$(hb_translate "No encryption key found. Create one now?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    # Ask for the passphrase twice until both entries match.
    local pass1 pass2
    while true; do
        pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Encryption passphrase (separate from PBS password):")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0
        pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Confirm encryption passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 0
        [[ "$pass1" == "$pass2" ]] && break
        dialog --backtitle "ProxMenux" \
            --msgbox "$(hb_translate "Passphrases do not match. Try again.")" 8 50
    done
    msg_info "$(hb_translate "Creating PBS encryption key...")"
    # proxmox-backup-client reads the passphrase from PBS_ENCRYPTION_PASSWORD.
    if PBS_ENCRYPTION_PASSWORD="$pass1" \
        proxmox-backup-client key create "$key_file" >/dev/null 2>&1; then
        # Cache the passphrase (mode 600) so scheduled jobs can reuse it.
        printf '%s' "$pass1" > "$enc_pass_file"
        chmod 600 "$enc_pass_file"
        msg_ok "$(hb_translate "Encryption key created:") $key_file"
        HB_PBS_KEYFILE_OPT="--keyfile $key_file"
        HB_PBS_ENC_PASS="$pass1"
        local key_warn_msg
        key_warn_msg="$(hb_translate "IMPORTANT: Back up this key file. Without it the backup cannot be restored.")"$'\n\n'"$(hb_translate "Key:") $key_file"
        dialog --backtitle "ProxMenux" --msgbox \
            "$key_warn_msg" \
            10 74
    else
        msg_error "$(hb_translate "Failed to create encryption key. Backup will proceed without encryption.")"
    fi
}
# ==========================================================
# BORG
# ==========================================================
# Ensure a usable borg binary and print its path on stdout.
# Order: system borg -> previously downloaded standalone binary -> download
# the pinned release and verify its SHA256. Returns 1 on any failure.
# NOTE(review): msg_info/msg_ok output is interleaved with the echoed path —
# presumably those helpers write to stderr; verify against utils.sh, since
# callers capture stdout via command substitution.
hb_ensure_borg() {
    command -v borg >/dev/null 2>&1 && { echo "borg"; return 0; }
    local appimage="$HB_STATE_DIR/borg"
    local tmp_file
    # Reuse a prior verified download.
    [[ -x "$appimage" ]] && { echo "$appimage"; return 0; }
    command -v sha256sum >/dev/null 2>&1 || {
        msg_error "$(hb_translate "sha256sum not found. Cannot verify Borg binary.")"
        return 1
    }
    msg_info "$(hb_translate "Borg not found. Downloading borg") ${HB_BORG_VERSION}..."
    mkdir -p "$HB_STATE_DIR"
    # Download into a temp file first so a failed/corrupt fetch never leaves
    # a partial binary at the final path.
    tmp_file=$(mktemp "$HB_STATE_DIR/.borg-download.XXXXXX") || return 1
    if wget -qO "$tmp_file" "$HB_BORG_LINUX64_URL"; then
        if echo "${HB_BORG_LINUX64_SHA256} $tmp_file" | sha256sum -c - >/dev/null 2>&1; then
            mv -f "$tmp_file" "$appimage"
        else
            rm -f "$tmp_file"
            msg_error "$(hb_translate "Borg binary checksum verification failed.")"
            return 1
        fi
        chmod +x "$appimage"
        msg_ok "$(hb_translate "Borg ready.")"
        echo "$appimage"; return 0
    fi
    rm -f "$tmp_file"
    msg_error "$(hb_translate "Failed to download Borg.")"
    return 1
}
# Initialize a Borg repository unless it already exists.
# $1: borg binary; $2: repository URL/path; $3: encryption mode.
hb_borg_init_if_needed() {
    local borg_bin="$1"
    local repo="$2"
    local encrypt_mode="$3"
    # Repository already listable — nothing to do.
    if "$borg_bin" list "$repo" >/dev/null 2>&1; then
        return 0
    fi
    # Newer borg exposes 'repo-create'; older releases only have 'init'.
    if "$borg_bin" help repo-create >/dev/null 2>&1; then
        "$borg_bin" repo-create -e "$encrypt_mode" "$repo"
    else
        "$borg_bin" init --encryption="$encrypt_mode" "$repo"
    fi
}
# Decide the Borg encryption mode and export BORG_PASSPHRASE when needed.
# Reuses a cached passphrase from borg-pass.txt; otherwise offers to create
# one (declining keeps mode "none" and returns 0; cancelling a passphrase
# prompt returns 1). Sets BORG_ENCRYPT_MODE to "none" or "repokey".
hb_prepare_borg_passphrase() {
    local pass_file="$HB_STATE_DIR/borg-pass.txt"
    BORG_ENCRYPT_MODE="none"
    # Clear any stale passphrase from the environment first.
    unset BORG_PASSPHRASE
    # Cached passphrase present: reuse it silently.
    if [[ -f "$pass_file" ]]; then
        export BORG_PASSPHRASE
        BORG_PASSPHRASE="$(<"$pass_file")"
        BORG_ENCRYPT_MODE="repokey"
        return 0
    fi
    # Declining encryption is a success path (mode stays "none").
    dialog --backtitle "ProxMenux" --title "$(hb_translate "Borg encryption")" \
        --yesno "$(hb_translate "Encrypt this Borg repository?")" \
        "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 0
    # Ask twice until both entries match; cancelling here aborts (return 1).
    local pass1 pass2
    while true; do
        pass1=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Borg passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
        pass2=$(dialog --backtitle "ProxMenux" --insecure --passwordbox \
            "$(hb_translate "Confirm Borg passphrase:")" \
            "$HB_UI_PASS_H" "$HB_UI_PASS_W" "" 3>&1 1>&2 2>&3) || return 1
        [[ "$pass1" == "$pass2" ]] && break
        dialog --backtitle "ProxMenux" \
            --msgbox "$(hb_translate "Passphrases do not match.")" 8 50
    done
    # Cache the passphrase (mode 600) so scheduled jobs can reuse it.
    mkdir -p "$HB_STATE_DIR"
    printf '%s' "$pass1" > "$pass_file"
    chmod 600 "$pass_file"
    export BORG_PASSPHRASE="$pass1"
    export BORG_ENCRYPT_MODE="repokey"
}
# Interactively choose a Borg repository location (local dir, mounted external
# disk, or remote over SSH) and store the repo string in the caller's variable.
# $1: name of the output variable (nameref). May export BORG_RSH for a custom
# SSH key; clears it otherwise. Returns 1 on cancel.
hb_select_borg_repo() {
    local _borg_repo_var="$1"
    local -n _borg_repo_ref="$_borg_repo_var"
    local type
    type=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Borg repository location")" \
        --menu "\n$(hb_translate "Select repository destination:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "local" "$(hb_translate 'Local directory')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        "remote" "$(hb_translate 'Remote server via SSH')" \
        3>&1 1>&2 2>&3) || return 1
    # Drop any SSH override from a previous selection.
    unset BORG_RSH
    case "$type" in
        local)
            _borg_repo_ref=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Borg repository path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \
                3>&1 1>&2 2>&3) || return 1
            mkdir -p "$_borg_repo_ref" 2>/dev/null || true
            ;;
        usb)
            # Validate the mount point, then use a fixed subdirectory on it.
            local mnt
            mnt=$(hb_prompt_mounted_path "/mnt/backup") || return 1
            _borg_repo_ref="$mnt/borgbackup"
            mkdir -p "$_borg_repo_ref" 2>/dev/null || true
            ;;
        remote)
            local user host rpath ssh_key
            user=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH user:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "root" 3>&1 1>&2 2>&3) || return 1
            host=$(dialog --backtitle "ProxMenux" --inputbox "$(hb_translate "SSH host or IP:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "" 3>&1 1>&2 2>&3) || return 1
            rpath=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Remote repository path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup/borgbackup" \
                3>&1 1>&2 2>&3) || return 1
            # Optional dedicated key: borg honors BORG_RSH as its ssh command.
            if dialog --backtitle "ProxMenux" \
                --yesno "$(hb_translate "Use a custom SSH key?")" \
                "$HB_UI_YESNO_H" "$HB_UI_YESNO_W"; then
                ssh_key=$(dialog --backtitle "ProxMenux" \
                    --fselect "$HOME/.ssh/" 12 70 3>&1 1>&2 2>&3) || return 1
                export BORG_RSH="ssh -i $ssh_key -o StrictHostKeyChecking=accept-new"
            fi
            _borg_repo_ref="ssh://$user@$host/$rpath"
            ;;
    esac
}
# ==========================================================
# COMMON PROMPTS
# ==========================================================
# Normalize a value captured from a dialog widget: strip every CR/LF
# character, then trim leading and trailing whitespace.
# Prints the cleaned value on stdout (no trailing newline).
hb_trim_dialog_value() {
    local cleaned
    # Delete carriage returns and newlines in one pass.
    cleaned=${1//[$'\r\n']/}
    # Trim: chop the longest leading run of whitespace, then the
    # longest trailing run.
    local lead=${cleaned%%[![:space:]]*}
    cleaned=${cleaned#"$lead"}
    local tail=${cleaned##*[![:space:]]}
    cleaned=${cleaned%"$tail"}
    printf '%s' "$cleaned"
}
# Ask for the mount point of an external disk ($1 is the suggested
# default, /mnt/backup when omitted). The path must exist; if it is not a
# registered mountpoint the user may still confirm and use it.
# Prints the validated path on stdout; returns 1 on cancel or bad path.
hb_prompt_mounted_path() {
    local default_path="${1:-/mnt/backup}"
    local out
    out=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Mounted disk path")" \
        --inputbox "$(hb_translate "Path where the external disk is mounted:")" \
        "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "$default_path" 3>&1 1>&2 2>&3) || return 1
    out=$(hb_trim_dialog_value "$out")
    [[ -n "$out" && -d "$out" ]] || { msg_error "$(hb_translate "Path does not exist.")"; return 1; }
    # A directory that is not a mountpoint usually means the disk is not
    # attached; backups would land on the root filesystem, so confirm.
    if ! mountpoint -q "$out" 2>/dev/null; then
        dialog --backtitle "ProxMenux" --title "$(hb_translate "Warning")" \
            --yesno "$(hb_translate "This path is not a registered mount point. Use it anyway?")" \
            "$HB_UI_YESNO_H" "$HB_UI_YESNO_W" || return 1
    fi
    echo "$out"
}
# Ask where a new backup should be written: the Proxmox vzdump default,
# /backup, a custom directory, or a mounted external disk. Creates the
# chosen directory if needed.
# Prints the path on stdout; returns 1 on cancel or mkdir failure.
hb_prompt_dest_dir() {
    local selection out
    selection=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Select destination")" \
        --menu "\n$(hb_translate "Choose where to save the backup:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default vzdump path)')" \
        "backup" "$(hb_translate '/backup')" \
        "local" "$(hb_translate 'Custom local directory')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        3>&1 1>&2 2>&3) || return 1
    case "$selection" in
        vzdump) out="/var/lib/vz/dump" ;;
        backup) out="/backup" ;;
        local)
            out=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Enter directory path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1
            ;;
        usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;;
    esac
    # Normalize dialog output before validating/creating the directory.
    out=$(hb_trim_dialog_value "$out")
    [[ -n "$out" ]] || return 1
    mkdir -p "$out" || { msg_error "$(hb_translate "Cannot create:") $out"; return 1; }
    echo "$out"
}
# Ask where existing backup archives are stored (restore source).
# Unlike hb_prompt_dest_dir this never creates the directory — it must
# already exist. Prints the path; returns 1 on cancel or missing dir.
hb_prompt_restore_source_dir() {
    local choice out
    choice=$(dialog --backtitle "ProxMenux" \
        --title "$(hb_translate "Restore source location")" \
        --menu "\n$(hb_translate "Where are the backup archives stored?")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" \
        "vzdump" "$(hb_translate '/var/lib/vz/dump (Proxmox default)')" \
        "backup" "$(hb_translate '/backup')" \
        "usb" "$(hb_translate 'Mounted external disk')" \
        "custom" "$(hb_translate 'Custom path')" \
        3>&1 1>&2 2>&3) || return 1
    case "$choice" in
        vzdump) out="/var/lib/vz/dump" ;;
        backup) out="/backup" ;;
        usb) out=$(hb_prompt_mounted_path "/mnt/backup") || return 1 ;;
        custom)
            out=$(dialog --backtitle "ProxMenux" \
                --inputbox "$(hb_translate "Enter path:")" \
                "$HB_UI_INPUT_H" "$HB_UI_INPUT_W" "/backup" 3>&1 1>&2 2>&3) || return 1
            ;;
    esac
    out=$(hb_trim_dialog_value "$out")
    # Restore source must exist; never create it implicitly.
    [[ -n "$out" && -d "$out" ]] || {
        msg_error "$(hb_translate "Directory does not exist.")"
        return 1
    }
    echo "$out"
}
# Let the user pick a backup archive found under $1 (base directory).
# $2 is an optional dialog title. Archives are listed newest-first with
# date and human-readable size.
# Prints the selected archive's path on stdout; returns 1 when nothing
# is found or the user cancels.
hb_prompt_local_archive() {
    local base_dir="$1"
    local title="${2:-$(hb_translate "Select backup archive")}"
    local -a rows=() files=() menu=()
    # Single find pass using -printf: no per-file stat subprocesses.
    # maxdepth 6 catches nested backup layouts commonly used in /var/lib/vz/dump.
    mapfile -t rows < <(
        find "$base_dir" -maxdepth 6 -type f \
            \( -name '*.tar.zst' -o -name '*.tar.gz' -o -name '*.tar' \) \
            -printf '%T@|%s|%p\n' 2>/dev/null \
            | sort -t'|' -k1,1nr \
            | head -200
    )
    if [[ ${#rows[@]} -eq 0 ]]; then
        local no_backups_msg
        no_backups_msg="$(hb_translate "No backup archives were found in:") $base_dir"$'\n\n'"$(hb_translate "Select another source path and try again.")"
        dialog --backtitle "ProxMenux" \
            --title "$(hb_translate "No backups found")" \
            --msgbox "$no_backups_msg" \
            10 78 || true
        return 1
    fi
    local i=1 row epoch size path date_str size_str label
    for row in "${rows[@]}"; do
        epoch="${row%%|*}"; row="${row#*|}"
        size="${row%%|*}"; path="${row#*|}"
        epoch="${epoch%%.*}"   # drop sub-second fraction from %T@
        date_str=$(date -d "@$epoch" '+%Y-%m-%d %H:%M' 2>/dev/null || echo "-")
        size_str=$(numfmt --to=iec-i --suffix=B "$size" 2>/dev/null || echo "${size}B")
        # Quote base_dir inside the pattern: otherwise glob metacharacters
        # ([, ?, *) in the directory name corrupt the prefix strip.
        label="${path#"$base_dir"/} $date_str $size_str"
        files+=("$path"); menu+=("$i" "$label"); ((i++))
    done
    local choice
    choice=$(dialog --backtitle "ProxMenux" --title "$title" \
        --menu "\n$(hb_translate "Detected backups — newest first:")" \
        "$HB_UI_MENU_H" "$HB_UI_MENU_W" "$HB_UI_MENU_LIST" "${menu[@]}" 3>&1 1>&2 2>&3) || return 1
    # Menu tags are 1-based indices into the parallel files array.
    echo "${files[$((choice-1))]}"
}
# ==========================================================
# UTILITIES
# ==========================================================
# Format a duration given in seconds as a short human-readable string:
# "Ns" under a minute, "Nm Ns" under an hour, otherwise "Nh Nm".
hb_human_elapsed() {
    local total="$1"
    if (( total >= 3600 )); then
        printf '%dh %dm' "$(( total / 3600 ))" "$(( total % 3600 / 60 ))"
    elif (( total >= 60 )); then
        printf '%dm %ds' "$(( total / 60 ))" "$(( total % 60 ))"
    else
        printf '%ds' "$total"
    fi
}
# Print a human-readable size for a file or directory; "-" when the path
# does not exist. Files use stat+numfmt with a du fallback; directories
# always use du (recursive total).
hb_file_size() {
    local target="$1"
    if [[ -f "$target" ]]; then
        local bytes
        bytes=$(stat -c %s "$target" 2>/dev/null || echo 0)
        numfmt --to=iec-i --suffix=B "$bytes" 2>/dev/null \
            || du -sh "$target" 2>/dev/null | awk '{print $1}'
    elif [[ -d "$target" ]]; then
        du -sh "$target" 2>/dev/null | awk '{print $1}'
    else
        echo "-"
    fi
}
# Display a log file in a dialog textbox. Silently succeeds (no dialog)
# when the log is missing or empty.
hb_show_log() {
    local logfile="$1"
    local title="${2:-$(hb_translate "Operation log")}"
    if [[ ! -f "$logfile" || ! -s "$logfile" ]]; then
        return 0
    fi
    dialog --backtitle "ProxMenux" --exit-label "OK" \
        --title "$title" --textbox "$logfile" 26 110 || true
}
# Ensure a command is available, attempting an apt-get install of the
# matching package ($2, defaults to the command name) when it is not.
# Returns 0 iff the command exists afterwards.
hb_require_cmd() {
    local cmd="$1"
    local pkg="${2:-$1}"
    if command -v "$cmd" >/dev/null 2>&1; then
        return 0
    fi
    if command -v apt-get >/dev/null 2>&1; then
        msg_warn "$(hb_translate "Installing dependency:") $pkg"
        apt-get update -qq >/dev/null 2>&1 && apt-get install -y "$pkg" >/dev/null 2>&1
    fi
    # Re-check: the install may have failed or apt-get may be absent.
    command -v "$cmd" >/dev/null 2>&1
}

View File

@@ -0,0 +1,243 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Run Scheduled Host Backup Job
# ==========================================================
set -u
# Locate utils.sh: prefer a sibling copy (development checkout), then the
# standard install prefix, finally the base directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LOCAL_SCRIPTS_LOCAL="$(cd "$SCRIPT_DIR/.." && pwd)"
LOCAL_SCRIPTS_DEFAULT="/usr/local/share/proxmenux/scripts"
LOCAL_SCRIPTS="$LOCAL_SCRIPTS_DEFAULT"
BASE_DIR="/usr/local/share/proxmenux"
UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
if [[ -f "$LOCAL_SCRIPTS_LOCAL/utils.sh" ]]; then
    LOCAL_SCRIPTS="$LOCAL_SCRIPTS_LOCAL"
    UTILS_FILE="$LOCAL_SCRIPTS/utils.sh"
elif [[ ! -f "$UTILS_FILE" ]]; then
    UTILS_FILE="$BASE_DIR/utils.sh"
fi
if [[ -f "$UTILS_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$UTILS_FILE"
else
    echo "ERROR: utils.sh not found" >&2
    exit 1
fi
# Shared host-backup library providing the hb_* helpers used below
# (staging, borg bootstrap, default path profile).
LIB_FILE="$SCRIPT_DIR/lib_host_backup_common.sh"
[[ ! -f "$LIB_FILE" ]] && LIB_FILE="$LOCAL_SCRIPTS_DEFAULT/backup_restore/lib_host_backup_common.sh"
if [[ -f "$LIB_FILE" ]]; then
    # shellcheck source=/dev/null
    source "$LIB_FILE"
else
    echo "ERROR: lib_host_backup_common.sh not found" >&2
    exit 1
fi
# Runtime directories; overridable via PMX_BACKUP_* so the test matrix
# can run fully sandboxed.
JOBS_DIR="${PMX_BACKUP_JOBS_DIR:-/var/lib/proxmenux/backup-jobs}"
LOG_DIR="${PMX_BACKUP_LOG_DIR:-/var/log/proxmenux/backup-jobs}"
LOCK_DIR="${PMX_BACKUP_LOCK_DIR:-/var/lock}"
mkdir -p "$JOBS_DIR" "$LOG_DIR" >/dev/null 2>&1 || true
# Delete old local archives for a job, keeping the newest $KEEP_LAST.
# $1 = job id, $2 = destination directory, $3 = archive extension
# (tar.zst or tar.gz). KEEP_LAST unset, non-numeric, or 0 keeps all.
_sb_prune_local() {
    local job_id="$1" dest_dir="$2" ext="$3"
    local keep_last="${KEEP_LAST:-0}"
    local -a archives=()
    # Filenames embed a sortable timestamp, so a reverse name sort puts
    # the newest archive first.
    mapfile -t archives < <(find "$dest_dir" -maxdepth 1 -type f -name "${job_id}-*.${ext}" | sort -r)
    (( ${#archives[@]} == 0 )) && return 0
    [[ "$keep_last" =~ ^[0-9]+$ ]] || return 0
    (( keep_last > 0 )) || return 0
    local pos=0 archive
    for archive in "${archives[@]}"; do
        pos=$((pos + 1))
        if (( pos > keep_last )); then
            rm -f "$archive" || true
        fi
    done
    return 0
}
# Create a local tarball of the staged tree, then prune old archives.
# $1 = staging root, $2 = job id, $3 = timestamp, $4 = destination dir.
# Prefers zstd compression when available and requested; otherwise falls
# back to gzip (and renames the target to match).
# Prints LOCAL_ARCHIVE=<path> on success; returns 1 on failure.
_sb_run_local() {
    local stage_root="$1" job_id="$2" ts="$3" dest_dir="$4"
    local ext="${LOCAL_ARCHIVE_EXT:-tar.zst}"
    local archive="${dest_dir}/${job_id}-${ts}.${ext}"
    mkdir -p "$dest_dir" || return 1
    if [[ "$ext" == "tar.zst" ]] && command -v zstd >/dev/null 2>&1; then
        tar --zstd -cf "$archive" -C "$stage_root" . >/dev/null 2>&1 || return 1
    else
        # gzip fallback: rebuild the target name so the extension is honest.
        ext="tar.gz"
        archive="${dest_dir}/${job_id}-${ts}.${ext}"
        tar -czf "$archive" -C "$stage_root" . >/dev/null 2>&1 || return 1
    fi
    _sb_prune_local "$job_id" "$dest_dir" "$ext"
    echo "LOCAL_ARCHIVE=$archive"
    return 0
}
# Back up the staged tree into a Borg repository.
# $1 = staging root, $2 = archive name. Requires BORG_REPO and
# BORG_PASSPHRASE in the (sourced) job environment. Retention is applied
# with `borg prune` using the KEEP_* variables, best effort.
# Prints BORG_ARCHIVE=<name> on success; returns 1 on failure.
_sb_run_borg() {
    local stage_root="$1"
    local archive_name="$2"
    local borg_bin repo passphrase
    borg_bin=$(hb_ensure_borg) || return 1
    repo="${BORG_REPO:-}"
    passphrase="${BORG_PASSPHRASE:-}"
    [[ -z "$repo" || -z "$passphrase" ]] && return 1
    export BORG_PASSPHRASE="$passphrase"
    # Initialize the repo on first use; a no-op when it already exists.
    if ! hb_borg_init_if_needed "$borg_bin" "$repo" "${BORG_ENCRYPT_MODE:-none}" >/dev/null 2>&1; then
        return 1
    fi
    # Archive from inside the staging root so members get the stable
    # relative paths "rootfs/..." and "metadata/...".
    (cd "$stage_root" && "$borg_bin" create --stats \
        "${repo}::${archive_name}" rootfs metadata) >/dev/null 2>&1 || return 1
    # ${VAR:+...} drops each retention flag when the variable is unset or
    # empty, so only configured KEEP_* limits are passed to prune.
    "$borg_bin" prune -v --list "$repo" \
        ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \
        ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \
        ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \
        ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \
        ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \
        ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \
        >/dev/null 2>&1 || true
    echo "BORG_ARCHIVE=${archive_name}"
    return 0
}
# Push the staged rootfs to a Proxmox Backup Server.
# $1 = staging root, $2 = backup id, $3 = epoch used as --backup-time.
# Requires PBS_REPOSITORY and PBS_PASSWORD in the job environment;
# PBS_KEYFILE/PBS_ENCRYPTION_PASSWORD are optional.
# Prints PBS_SNAPSHOT=host/<id>/<epoch> on success; returns 1 on failure.
_sb_run_pbs() {
    local stage_root="$1"
    local backup_id="$2"
    local epoch="$3"
    # Validate configuration BEFORE referencing the variables: this script
    # runs under `set -u`, so the previous order (building the cmd array
    # with an unguarded "$PBS_REPOSITORY" first) aborted the whole run on
    # a missing variable instead of returning 1.
    [[ -z "${PBS_REPOSITORY:-}" || -z "${PBS_PASSWORD:-}" ]] && return 1
    local -a cmd=(
        proxmox-backup-client backup
        "hostcfg.pxar:${stage_root}/rootfs"
        --repository "$PBS_REPOSITORY"
        --backup-type host
        --backup-id "$backup_id"
        --backup-time "$epoch"
    )
    if [[ -n "${PBS_KEYFILE:-}" ]]; then
        cmd+=(--keyfile "$PBS_KEYFILE")
    fi
    # Credentials are passed via environment, as the client expects.
    env PBS_PASSWORD="$PBS_PASSWORD" PBS_ENCRYPTION_PASSWORD="${PBS_ENCRYPTION_PASSWORD:-}" \
        "${cmd[@]}" >/dev/null 2>&1 || return 1
    # Best effort prune for PBS group.
    proxmox-backup-client prune "host/${backup_id}" --repository "$PBS_REPOSITORY" \
        ${KEEP_LAST:+--keep-last "$KEEP_LAST"} \
        ${KEEP_HOURLY:+--keep-hourly "$KEEP_HOURLY"} \
        ${KEEP_DAILY:+--keep-daily "$KEEP_DAILY"} \
        ${KEEP_WEEKLY:+--keep-weekly "$KEEP_WEEKLY"} \
        ${KEEP_MONTHLY:+--keep-monthly "$KEEP_MONTHLY"} \
        ${KEEP_YEARLY:+--keep-yearly "$KEEP_YEARLY"} \
        >/dev/null 2>&1 || true
    echo "PBS_SNAPSHOT=host/${backup_id}/${epoch}"
    return 0
}
# Entry point: run one scheduled backup job.
# $1 = job id; configuration is sourced from ${JOBS_DIR}/<id>.env and an
# optional path list from <id>.paths. Writes a per-run log plus a
# machine-readable <id>-last.status summary. Exits 0 on success.
main() {
    local job_id="${1:-}"
    [[ -z "$job_id" ]] && { echo "Usage: $0 <job_id>" >&2; exit 1; }
    local job_file="${JOBS_DIR}/${job_id}.env"
    [[ -f "$job_file" ]] || { echo "Job not found: $job_id" >&2; exit 1; }
    # shellcheck source=/dev/null
    source "$job_file"
    # Per-job lock: prevent overlapping runs of the same job. fd 9 stays
    # open (and the lock held) for the lifetime of this process.
    local lock_file="${LOCK_DIR}/proxmenux-backup-${job_id}.lock"
    if command -v flock >/dev/null 2>&1; then
        exec 9>"$lock_file" || exit 1
        if ! flock -n 9; then
            echo "Another run is active for job ${job_id}" >&2
            exit 1
        fi
    fi
    local ts log_file stage_root summary_file
    ts="$(date +%Y%m%d_%H%M%S)"
    log_file="${LOG_DIR}/${job_id}-${ts}.log"
    summary_file="${LOG_DIR}/${job_id}-last.status"
    stage_root="$(mktemp -d /tmp/proxmenux-sched-stage.XXXXXX)"
    # Seed the status summary; RESULT/LOG_FILE are appended on completion.
    {
        echo "JOB_ID=${job_id}"
        echo "RUN_AT=$(date -Iseconds)"
        echo "BACKEND=${BACKEND:-}"
        echo "PROFILE_MODE=${PROFILE_MODE:-default}"
    } >"$summary_file"
    {
        echo "=== Scheduled backup job ${job_id} started at $(date -Iseconds) ==="
        echo "Backend: ${BACKEND:-}"
    } >"$log_file"
    # Resolve the path set: explicit per-job list in custom mode,
    # otherwise the library's default host profile.
    local -a paths=()
    if [[ "${PROFILE_MODE:-default}" == "custom" && -f "${JOBS_DIR}/${job_id}.paths" ]]; then
        mapfile -t paths < "${JOBS_DIR}/${job_id}.paths"
    else
        mapfile -t paths < <(hb_default_profile_paths)
    fi
    if [[ ${#paths[@]} -eq 0 ]]; then
        echo "No paths configured for job" >>"$log_file"
        echo "RESULT=failed" >>"$summary_file"
        rm -rf "$stage_root"
        exit 1
    fi
    hb_prepare_staging "$stage_root" "${paths[@]}" >>"$log_file" 2>&1
    # Dispatch to the backend-specific runner; rc drives the final status.
    local rc=1
    case "${BACKEND:-}" in
        local)
            _sb_run_local "$stage_root" "$job_id" "$ts" "${LOCAL_DEST_DIR:-/var/lib/vz/dump}" >>"$log_file" 2>&1
            rc=$?
            ;;
        borg)
            _sb_run_borg "$stage_root" "${job_id}-${ts}" >>"$log_file" 2>&1
            rc=$?
            ;;
        pbs)
            _sb_run_pbs "$stage_root" "${PBS_BACKUP_ID:-hostcfg-$(hostname)}" "$(date +%s)" >>"$log_file" 2>&1
            rc=$?
            ;;
        *)
            echo "Unknown backend: ${BACKEND:-}" >>"$log_file"
            rc=1
            ;;
    esac
    rm -rf "$stage_root"
    if [[ $rc -eq 0 ]]; then
        echo "RESULT=ok" >>"$summary_file"
        echo "LOG_FILE=${log_file}" >>"$summary_file"
        echo "=== Job finished OK at $(date -Iseconds) ===" >>"$log_file"
        exit 0
    else
        echo "RESULT=failed" >>"$summary_file"
        echo "LOG_FILE=${log_file}" >>"$summary_file"
        echo "=== Job finished with errors at $(date -Iseconds) ===" >>"$log_file"
        exit 1
    fi
}
main "$@"

View File

@@ -0,0 +1,284 @@
#!/bin/bash
# ==========================================================
# ProxMenux - Backup/Restore Test Matrix (non-destructive)
# ==========================================================
set -u
# Scripts under test — all siblings of this file.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
RUNNER="${SCRIPT_DIR}/run_scheduled_backup.sh"
APPLY_ONBOOT="${SCRIPT_DIR}/apply_pending_restore.sh"
HOST_SCRIPT="${SCRIPT_DIR}/backup_host.sh"
LIB_SCRIPT="${SCRIPT_DIR}/lib_host_backup_common.sh"
SCHED_SCRIPT="${SCRIPT_DIR}/backup_scheduler.sh"
# --keep-tmp preserves the sandbox for post-mortem inspection.
KEEP_TMP=0
if [[ "${1:-}" == "--keep-tmp" ]]; then
    KEEP_TMP=1
fi
# Sandbox root and report file for this run; the PASS/FAIL/SKIP counters
# are maintained by the pass()/fail()/skip() helpers below.
TMP_ROOT="$(mktemp -d /tmp/proxmenux-brtest.XXXXXX)"
REPORT_FILE="/tmp/proxmenux-backup-restore-test-$(date +%Y%m%d_%H%M%S).log"
PASS=0
FAIL=0
SKIP=0
# Echo a message to the console and append it to the report file.
log() {
    printf '%s\n' "$*" | tee -a "$REPORT_FILE"
}
# Record a passing check.
pass() {
    (( PASS += 1 ))
    log "[PASS] $*"
}
# Record a failing check.
fail() {
    (( FAIL += 1 ))
    log "[FAIL] $*"
}
# Record a skipped check.
skip() {
    (( SKIP += 1 ))
    log "[SKIP] $*"
}
# EXIT handler: remove the sandbox, unless --keep-tmp was given, in which
# case report its location so artifacts can be inspected.
cleanup() {
    if (( KEEP_TMP != 0 )); then
        log "[INFO] Temp root preserved: $TMP_ROOT"
        return
    fi
    rm -rf "$TMP_ROOT"
}
trap cleanup EXIT
# Succeed (return 0) when $1 is an existing file whose contents match the
# grep pattern $2; fail otherwise.
assert_file_contains() {
    local file="$1" needle="$2"
    [[ -f "$file" ]] && grep -q "$needle" "$file"
}
# Run a command (output captured in the report), logging PASS when it
# succeeds and FAIL when it does not.
# $1 = description; remaining args = command and its arguments.
run_cmd_expect_ok() {
    local desc="$1"
    shift
    if ! "$@" >>"$REPORT_FILE" 2>&1; then
        fail "$desc"
        return 1
    fi
    pass "$desc"
    return 0
}
# Run a command that is EXPECTED to fail: PASS when it exits nonzero,
# FAIL when it unexpectedly succeeds.
# $1 = description; remaining args = command and its arguments.
run_cmd_expect_fail() {
    local desc="$1"
    shift
    if ! "$@" >>"$REPORT_FILE" 2>&1; then
        pass "$desc"
        return 0
    fi
    fail "$desc"
    return 1
}
# Bash syntax check (bash -n parses without executing) for every script
# in the backup/restore suite.
syntax_tests() {
    log "\n=== Syntax checks ==="
    run_cmd_expect_ok "bash -n backup_host.sh" bash -n "$HOST_SCRIPT"
    run_cmd_expect_ok "bash -n lib_host_backup_common.sh" bash -n "$LIB_SCRIPT"
    run_cmd_expect_ok "bash -n backup_scheduler.sh" bash -n "$SCHED_SCRIPT"
    run_cmd_expect_ok "bash -n run_scheduled_backup.sh" bash -n "$RUNNER"
    run_cmd_expect_ok "bash -n apply_pending_restore.sh" bash -n "$APPLY_ONBOOT"
}
# End-to-end scheduler checks in a sandbox: retention, status files, and
# failure handling for invalid/empty job configs. Non-destructive — the
# runner's paths are redirected via the PMX_BACKUP_* overrides.
scheduler_e2e_tests() {
    log "\n=== Scheduler E2E (sandbox) ==="
    # The runner relies on mapfile; skip cleanly on bash < 4.
    if ! help mapfile >/dev/null 2>&1; then
        skip "Scheduler E2E skipped: current bash does not provide mapfile (requires bash >= 4)."
        return
    fi
    local jobs_dir="$TMP_ROOT/backup-jobs"
    local logs_dir="$TMP_ROOT/backup-jobs-logs"
    local lock_dir="$TMP_ROOT/locks"
    local archives_dir="$TMP_ROOT/archives"
    mkdir -p "$jobs_dir" "$logs_dir" "$lock_dir" "$archives_dir"
    # Job t1: valid local backend with a custom path list and KEEP_LAST=2.
    cat > "$jobs_dir/t1.env" <<EOJ
JOB_ID=t1
BACKEND=local
PROFILE_MODE=custom
LOCAL_DEST_DIR=${archives_dir}
LOCAL_ARCHIVE_EXT=tar.gz
KEEP_LAST=2
KEEP_HOURLY=0
KEEP_DAILY=0
KEEP_WEEKLY=0
KEEP_MONTHLY=0
KEEP_YEARLY=0
EOJ
    cat > "$jobs_dir/t1.paths" <<EOP
/etc/hosts
/etc/resolv.conf
EOP
    # Three runs one second apart produce distinct timestamped archives;
    # retention must then leave exactly two.
    local i
    for i in 1 2 3; do
        if PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
            bash "$RUNNER" t1 >>"$REPORT_FILE" 2>&1; then
            :
        else
            fail "Runner execution #$i for t1"
            return
        fi
        sleep 1
    done
    local archive_count
    archive_count="$(find "$archives_dir" -maxdepth 1 -type f -name 't1-*.tar.gz' | wc -l | tr -d ' ')"
    if [[ "$archive_count" == "2" ]]; then
        pass "Retention KEEP_LAST=2 keeps exactly 2 archives"
    else
        fail "Retention expected 2 archives, got $archive_count"
    fi
    if assert_file_contains "$logs_dir/t1-last.status" "RESULT=ok"; then
        pass "t1-last.status reports RESULT=ok"
    else
        fail "t1-last.status does not report RESULT=ok"
    fi
    # Job tbad: an unknown backend must fail and record RESULT=failed.
    cat > "$jobs_dir/tbad.env" <<EOJ
JOB_ID=tbad
BACKEND=invalid
PROFILE_MODE=custom
KEEP_LAST=1
EOJ
    echo "/etc/hosts" > "$jobs_dir/tbad.paths"
    run_cmd_expect_fail "Invalid backend fails" \
        env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
        bash "$RUNNER" tbad
    if assert_file_contains "$logs_dir/tbad-last.status" "RESULT=failed"; then
        pass "tbad-last.status reports RESULT=failed"
    else
        fail "tbad-last.status does not report RESULT=failed"
    fi
    # Job tempty: an empty path list must fail and record RESULT=failed.
    cat > "$jobs_dir/tempty.env" <<EOJ
JOB_ID=tempty
BACKEND=local
PROFILE_MODE=custom
LOCAL_DEST_DIR=${archives_dir}
LOCAL_ARCHIVE_EXT=tar.gz
KEEP_LAST=1
EOJ
    : > "$jobs_dir/tempty.paths"
    run_cmd_expect_fail "Empty paths fails" \
        env PMX_BACKUP_JOBS_DIR="$jobs_dir" PMX_BACKUP_LOG_DIR="$logs_dir" PMX_BACKUP_LOCK_DIR="$lock_dir" \
        bash "$RUNNER" tempty
    if assert_file_contains "$logs_dir/tempty-last.status" "RESULT=failed"; then
        pass "tempty-last.status reports RESULT=failed"
    else
        fail "tempty-last.status does not report RESULT=failed"
    fi
}
# End-to-end pending-restore checks in a sandbox: regular files must be
# restored into the target prefix, cluster (/etc/pve) files must be
# diverted to the recovery area, and the state bookkeeping must complete.
pending_restore_tests() {
    log "\n=== Pending restore E2E (sandbox) ==="
    local pending_base="$TMP_ROOT/restore-pending"
    local logs_dir="$TMP_ROOT/restore-logs"
    local target_root="$TMP_ROOT/target"
    local pre_backup_base="$TMP_ROOT/pre-restore"
    local recovery_base="$TMP_ROOT/recovery"
    # Build a fake pending restore "r1" containing a normal config file,
    # a pmxcfs (cluster) file, and a ZFS cache file.
    mkdir -p "$pending_base/r1/rootfs/etc/pve" "$pending_base/r1/rootfs/etc/zfs" "$pending_base/r1/rootfs/etc" "$target_root/etc"
    echo "new-value" > "$pending_base/r1/rootfs/etc/test.conf"
    echo "cluster-data" > "$pending_base/r1/rootfs/etc/pve/cluster.cfg"
    echo "zfs-data" > "$pending_base/r1/rootfs/etc/zfs/zpool.cache"
    echo "old-value" > "$target_root/etc/test.conf"
    cat > "$pending_base/r1/apply-on-boot.list" <<EOL
etc/test.conf
etc/pve/cluster.cfg
etc/zfs/zpool.cache
EOL
    cat > "$pending_base/r1/plan.env" <<EOP
HB_RESTORE_INCLUDE_ZFS=0
EOP
    # The "current" symlink marks which restore the boot hook applies.
    ln -sfn "$pending_base/r1" "$pending_base/current"
    if PMX_RESTORE_PENDING_BASE="$pending_base" PMX_RESTORE_LOG_DIR="$logs_dir" \
        PMX_RESTORE_DEST_PREFIX="$target_root" PMX_RESTORE_PRE_BACKUP_BASE="$pre_backup_base" \
        PMX_RESTORE_RECOVERY_BASE="$recovery_base" \
        bash "$APPLY_ONBOOT" >>"$REPORT_FILE" 2>&1; then
        pass "apply_pending_restore completes"
    else
        fail "apply_pending_restore completes"
        return
    fi
    if assert_file_contains "$target_root/etc/test.conf" "new-value"; then
        pass "Regular file restored into target prefix"
    else
        fail "Regular file was not restored"
    fi
    # Cluster config must never be written live...
    if [[ -e "$target_root/etc/pve/cluster.cfg" ]]; then
        fail "Cluster file should not be restored live"
    else
        pass "Cluster file skipped from live restore"
    fi
    # ...but must be extracted into the recovery area for manual handling.
    if find "$recovery_base" -type f -name cluster.cfg 2>/dev/null | grep -q .; then
        pass "Cluster file extracted to recovery directory"
    else
        fail "Cluster file not found in recovery directory"
    fi
    if assert_file_contains "$pending_base/completed/r1/state" "completed"; then
        pass "Pending restore state marked completed"
    else
        fail "Pending restore state not marked completed"
    fi
    if [[ -e "$pending_base/current" ]]; then
        fail "current symlink should be removed"
    else
        pass "current symlink removed"
    fi
}
# Run every suite, then emit a PASS/FAIL/SKIP summary.
# Exits 0 only when no check failed.
main() {
    log "ProxMenux backup/restore test matrix"
    log "Report: $REPORT_FILE"
    log "Temp root: $TMP_ROOT"
    syntax_tests
    scheduler_e2e_tests
    pending_restore_tests
    log "\n=== Summary ==="
    log "PASS=$PASS"
    log "FAIL=$FAIL"
    log "SKIP=$SKIP"
    if [[ "$FAIL" -eq 0 ]]; then
        log "RESULT=OK"
        exit 0
    else
        log "RESULT=FAILED"
        exit 1
    fi
}
main "$@"

View File

@@ -70,7 +70,42 @@ _smart_disk_label() {
_smart_json_path() { _smart_json_path() {
local disk="$1" local disk="$1"
echo "${SMART_DIR}/$(basename "$disk").json" local test_type="${2:-short}"
local disk_name
disk_name=$(basename "$disk")
local disk_dir="${SMART_DIR}/${disk_name}"
local timestamp
timestamp=$(date +%Y-%m-%dT%H-%M-%S)
# Create disk directory if it doesn't exist
mkdir -p "$disk_dir"
echo "${disk_dir}/${timestamp}_${test_type}.json"
}
_smart_get_latest_json() {
local disk="$1"
local disk_name
disk_name=$(basename "$disk")
local disk_dir="${SMART_DIR}/${disk_name}"
if [[ -d "$disk_dir" ]]; then
# Get most recent JSON file (sorted by name = sorted by timestamp)
ls -1 "${disk_dir}"/*.json 2>/dev/null | sort -r | head -1
fi
}
_smart_cleanup_old_jsons() {
local disk="$1"
local retention="${2:-10}" # Default: keep last 10
local disk_name
disk_name=$(basename "$disk")
local disk_dir="${SMART_DIR}/${disk_name}"
if [[ -d "$disk_dir" && "$retention" -gt 0 ]]; then
# List all JSON files sorted by name (oldest last), skip first $retention, delete rest
ls -1 "${disk_dir}"/*.json 2>/dev/null | sort -r | tail -n +$((retention + 1)) | xargs -r rm -f
fi
} }
_smart_ensure_packages() { _smart_ensure_packages() {
@@ -146,7 +181,7 @@ while true; do
DISK_SIZE=$(lsblk -dn -o SIZE "$SELECTED_DISK" 2>/dev/null | xargs) DISK_SIZE=$(lsblk -dn -o SIZE "$SELECTED_DISK" 2>/dev/null | xargs)
if ! dialog --backtitle "$BACKTITLE" \ if ! dialog --backtitle "$BACKTITLE" \
--title "$(translate 'Long Test — Background')" \ --title "$(translate 'Long Test — Background')" \
--yesno "\n$(translate 'The long test runs directly on the disk hardware.')\n\n$(translate 'Disk:') $SELECTED_DISK ($DISK_SIZE)\n\n$(translate 'The test will continue even if you close this terminal.')\n$(translate 'Results will be saved automatically to:')\n$(_smart_json_path "$SELECTED_DISK")\n\n$(translate 'Start long test now?')" \ --yesno "\n$(translate 'The long test runs directly on the disk hardware.')\n\n$(translate 'Disk:') $SELECTED_DISK ($DISK_SIZE)\n\n$(translate 'The test will continue even if you close this terminal.')\n$(translate 'Results will be saved automatically to:')\n$(_smart_json_path "$SELECTED_DISK" "long")\n\n$(translate 'Start long test now?')" \
16 $UI_RESULT_W; then 16 $UI_RESULT_W; then
continue continue
fi fi
@@ -253,9 +288,10 @@ while true; do
fi fi
;; ;;
# ── Long test (background) ────────────────────────────── # ── Long test (background) ──────────────────────────────
long) long)
JSON_PATH=$(_smart_json_path "$SELECTED_DISK") JSON_PATH=$(_smart_json_path "$SELECTED_DISK" "long")
_smart_cleanup_old_jsons "$SELECTED_DISK"
DISK_SAFE=$(printf '%q' "$SELECTED_DISK") DISK_SAFE=$(printf '%q' "$SELECTED_DISK")
JSON_SAFE=$(printf '%q' "$JSON_PATH") JSON_SAFE=$(printf '%q' "$JSON_PATH")
@@ -309,7 +345,7 @@ while true; do
while smartctl -c ${DISK_SAFE} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do while smartctl -c ${DISK_SAFE} 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do
sleep 60 sleep 60
done done
smartctl --json=c ${DISK_SAFE} > ${JSON_SAFE} 2>/dev/null smartctl -a --json=c ${DISK_SAFE} > ${JSON_SAFE} 2>/dev/null
# Send notification when test completes # Send notification when test completes
if [[ -f \"${NOTIFY_SCRIPT}\" ]]; then if [[ -f \"${NOTIFY_SCRIPT}\" ]]; then
@@ -380,11 +416,17 @@ while true; do
# ── Auto-export JSON (except long — handled by background monitor) # ── Auto-export JSON (except long — handled by background monitor)
if [[ "$ACTION" != "long" && "$ACTION" != "report" ]]; then if [[ "$ACTION" != "long" && "$ACTION" != "report" ]]; then
JSON_PATH=$(_smart_json_path "$SELECTED_DISK") # Determine test type from ACTION (short test or status check)
local json_test_type="short"
[[ "$ACTION" == "status" ]] && json_test_type="status"
JSON_PATH=$(_smart_json_path "$SELECTED_DISK" "$json_test_type")
_smart_cleanup_old_jsons "$SELECTED_DISK"
if _smart_is_nvme "$SELECTED_DISK"; then if _smart_is_nvme "$SELECTED_DISK"; then
nvme smart-log -o json "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null nvme smart-log -o json "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null
else else
smartctl --json=c "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null smartctl -a --json=c "$SELECTED_DISK" > "$JSON_PATH" 2>/dev/null
fi fi
[[ -s "$JSON_PATH" ]] || rm -f "$JSON_PATH" [[ -s "$JSON_PATH" ]] || rm -f "$JSON_PATH"
fi fi

View File

@@ -0,0 +1,195 @@
#!/bin/bash
# ==========================================================
# ProxMenux - SMART Scheduled Test Runner
# ==========================================================
# Author : MacRimi
# Copyright : (c) 2024 MacRimi
# License : GPL-3.0
# Version : 1.0
# Last Updated: 13/04/2026
# ==========================================================
# Description:
# Runs scheduled SMART tests based on configuration.
# Called by cron jobs created by ProxMenux Monitor.
# ==========================================================
# Configuration
SMART_DIR="/usr/local/share/proxmenux/smart"
LOG_DIR="/var/log/proxmenux"
SCRIPT_NAME="smart-scheduled-test"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Logging function: one timestamped, tagged line on stdout (captured by
# cron / the caller's redirect).
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] [$SCRIPT_NAME] $1"
}
# Parse arguments:
#   --schedule-id ID   identifier of the schedule that triggered this run
#   --test-type TYPE   "short" or "long" (default: short)
#   --retention N      JSON result files kept per disk (default: 10)
#   --disks LIST       comma-separated device paths; empty or "all"
#                      tests every physical disk
SCHEDULE_ID=""
TEST_TYPE="short"
RETENTION=10
DISKS=""
while [[ $# -gt 0 ]]; do
    case $1 in
        --schedule-id)
            SCHEDULE_ID="$2"
            shift 2
            ;;
        --test-type)
            TEST_TYPE="$2"
            shift 2
            ;;
        --retention)
            RETENTION="$2"
            shift 2
            ;;
        --disks)
            DISKS="$2"
            shift 2
            ;;
        *)
            # Unknown arguments are ignored.
            shift
            ;;
    esac
done
log "Starting scheduled SMART test: schedule=$SCHEDULE_ID, type=$TEST_TYPE, retention=$RETENTION"
# Helper functions
# True when the device path looks like an NVMe device (name contains "nvme").
_is_nvme() {
    case "$1" in
        *nvme*) return 0 ;;
        *) return 1 ;;
    esac
}
# Build a timestamped JSON result path for a disk and test type, creating
# the per-disk directory under SMART_DIR as a side effect.
# $1 = device path, $2 = test type (used as the filename suffix).
# Prints "<SMART_DIR>/<disk>/<timestamp>_<type>.json" on stdout.
_get_json_path() {
    local device="$1" kind="$2"
    local dir="${SMART_DIR}/$(basename "$device")"
    mkdir -p "$dir"
    printf '%s/%s_%s.json\n' "$dir" "$(date +%Y-%m-%dT%H-%M-%S)" "$kind"
}
# Keep only the newest $2 JSON result files for a disk (newest = highest
# timestamp in the filename, via reverse name sort) and delete the rest.
# A retention of 0 or a missing directory disables cleanup.
_cleanup_old_jsons() {
    local device="$1" retention="$2"
    local dir="${SMART_DIR}/$(basename "$device")"
    [[ -d "$dir" ]] || return 0
    (( retention > 0 )) || return 0
    ls -1 "$dir"/*.json 2>/dev/null | sort -r | tail -n +$((retention + 1)) | xargs -r rm -f
}
# Run a SMART self-test on one disk and save the results as JSON.
# $1 = device path, $2 = "short" or "long", $3 = output JSON path.
# Blocks (polling) until the self-test completes.
# Returns 1 when the test cannot be started.
_run_test() {
    local disk="$1"
    local test_type="$2"
    local json_path="$3"
    log "Running $test_type test on $disk"
    if _is_nvme "$disk"; then
        # NVMe: self-test-code 1 = short, 2 = extended.
        local code=1
        [[ "$test_type" == "long" ]] && code=2
        nvme device-self-test "$disk" --self-test-code=$code 2>/dev/null
        if [[ $? -ne 0 ]]; then
            log "ERROR: Failed to start NVMe test on $disk"
            return 1
        fi
        # Wait for test to complete: poll the self-test log until the
        # controller reports no operation in progress (field value 0).
        # NOTE(review): assumes the JSON key "Current Device Self-Test
        # Operation" — verify against the installed nvme-cli version.
        local sleep_interval=10
        [[ "$test_type" == "long" ]] && sleep_interval=60
        sleep 5
        while true; do
            local op
            op=$(nvme self-test-log "$disk" -o json 2>/dev/null | grep -o '"Current Device Self-Test Operation":[0-9]*' | grep -o '[0-9]*$')
            [[ -z "$op" || "$op" -eq 0 ]] && break
            sleep $sleep_interval
        done
        # Save results
        nvme smart-log -o json "$disk" > "$json_path" 2>/dev/null
    else
        # SATA/SAS: $test_flag is intentionally unquoted so it splits
        # into "-t short" / "-t long".
        local test_flag="-t short"
        [[ "$test_type" == "long" ]] && test_flag="-t long"
        smartctl $test_flag "$disk" 2>/dev/null
        # smartctl uses bit-flag exit codes; 4 is tolerated here —
        # presumably a non-fatal SMART command failure; confirm against
        # the smartctl man page.
        if [[ $? -ne 0 && $? -ne 4 ]]; then
            log "ERROR: Failed to start SMART test on $disk"
            return 1
        fi
        # Wait for test to complete: poll until smartctl no longer
        # reports a self-test routine in progress.
        local sleep_interval=10
        [[ "$test_type" == "long" ]] && sleep_interval=60
        sleep 5
        while smartctl -c "$disk" 2>/dev/null | grep -qiE 'Self-test routine in progress|[1-9][0-9]?% of test remaining'; do
            sleep $sleep_interval
        done
        # Save results (full attribute dump as JSON).
        smartctl -a --json=c "$disk" > "$json_path" 2>/dev/null
    fi
    log "Test completed on $disk, results saved to $json_path"
    return 0
}
# Get list of disks to test
get_disk_list() {
if [[ -n "$DISKS" && "$DISKS" != "all" ]]; then
# Use specified disks
echo "$DISKS" | tr ',' '\n'
else
# Get all physical disks
lsblk -dpno NAME,TYPE 2>/dev/null | awk '$2=="disk"{print $1}'
fi
}
# Main execution: resolve targets, run the configured test on each disk,
# and prune old JSON results according to the retention setting.
DISK_LIST=$(get_disk_list)
# NOTE(review): `wc -l` still reports 1 when DISK_LIST is empty (echo of
# an empty string is one line), so this total is informational only.
TOTAL_DISKS=$(echo "$DISK_LIST" | wc -l)
SUCCESS_COUNT=0
FAIL_COUNT=0
log "Found $TOTAL_DISKS disk(s) to test"
for disk in $DISK_LIST; do
    # Skip if disk doesn't exist
    if [[ ! -b "$disk" ]]; then
        log "WARNING: Disk $disk not found, skipping"
        continue
    fi
    # Get JSON path and cleanup old files
    JSON_PATH=$(_get_json_path "$disk" "$TEST_TYPE")
    _cleanup_old_jsons "$disk" "$RETENTION"
    # Run the test
    if _run_test "$disk" "$TEST_TYPE" "$JSON_PATH"; then
        ((SUCCESS_COUNT++))
    else
        ((FAIL_COUNT++))
    fi
done
log "Scheduled test complete: $SUCCESS_COUNT succeeded, $FAIL_COUNT failed"
# TODO: Send notification if configured
# This would integrate with the notification system
exit 0