update gpu-switch-mode-indicator.tsx

This commit is contained in:
MacRimi
2026-04-19 12:26:52 +02:00
parent bcca760403
commit 834795d6d9
15 changed files with 1235 additions and 119 deletions

View File

@@ -138,6 +138,12 @@ if [[ -f "$vm_conf" ]]; then
slot_has_gpu=false
for dev in /sys/bus/pci/devices/0000:${slot}.*; do
[[ -e "$dev" ]] || continue
# SR-IOV: skip Virtual Functions when iterating a whole slot.
# VFs share the slot with their PF but carry their own driver
# state; their vfio-pci rebind is handled by Proxmox at VM
# start. Pre-flighting them would falsely block SR-IOV setups
# where the PF legitimately stays on the native driver.
[[ -L "${dev}/physfn" ]] && continue
class_hex="$(cat "$dev/class" 2>/dev/null | sed 's/^0x//')"
[[ "${class_hex:0:2}" != "03" ]] && continue
slot_has_gpu=true
@@ -159,6 +165,14 @@ if [[ -f "$vm_conf" ]]; then
details+=$'\n'"- ${id}: PCI device not found"
continue
fi
# SR-IOV VF: do not pre-flight the driver. Proxmox rebinds the VF
# to vfio-pci as part of VM start; at pre-start time the VF may
# still be on its native driver (i915, etc.) — that is normal,
# not an error. Blocking here would prevent every SR-IOV VF
# passthrough from starting.
if [[ -L "${dev_path}/physfn" ]]; then
continue
fi
class_hex="$(cat "$dev_path/class" 2>/dev/null | sed 's/^0x//')"
# Enforce vfio only for display/3D devices (PCI class 03xx).
[[ "${class_hex:0:2}" == "03" ]] || continue

View File

@@ -50,3 +50,109 @@ function _pci_function_assigned_to_vm() {
qm config "$vmid" 2>/dev/null | grep -qE "$pattern"
}
# ==========================================================
# SR-IOV detection helpers
# ==========================================================
# A PCI device participates in SR-IOV when either:
# - It is a Physical Function (PF) with one or more active VFs
# → /sys/bus/pci/devices/<BDF>/sriov_numvfs > 0
# - It is a Virtual Function (VF) spawned by a PF
# → /sys/bus/pci/devices/<BDF>/physfn is a symlink to the PF
#
# These helpers accept a BDF in either "0000:00:02.0" or "00:02.0" form.
# Return 0 on match, non-zero otherwise (shell convention).
# Normalize a PCI BDF to its fully-qualified, domain-prefixed form.
# Accepts either "DDDD:BB:DD.F" (already qualified, any 4-hex-digit
# domain) or the bare "BB:DD.F" form, which is assumed to live in the
# default domain 0000.
# Outputs: the normalized BDF on stdout.
# Returns: 1 on empty input (nothing printed), 0 otherwise.
function _pci_normalize_bdf() {
    local id="$1"
    [[ -z "$id" ]] && return 1
    # Recognize ANY 4-hex-digit domain prefix (e.g. "0001:"), not just
    # "0000:", so BDFs from multi-domain systems are not double-prefixed.
    [[ "$id" =~ ^[0-9a-fA-F]{4}: ]] || id="0000:${id}"
    printf '%s\n' "$id"
}
# True (exit 0) when the given BDF names an SR-IOV Virtual Function.
# The kernel exposes a "physfn" symlink on every VF that points back at
# its parent Physical Function; its presence is the VF marker.
function _pci_is_vf() {
    local bdf
    bdf=$(_pci_normalize_bdf "$1") || return 1
    local physfn_link="/sys/bus/pci/devices/${bdf}/physfn"
    [[ -L "$physfn_link" ]]
}
# Print the BDF of the parent Physical Function of an SR-IOV VF.
# Resolves the VF's "physfn" symlink and emits the final path component.
# Returns: 1 when the input is not normalizable or is not a VF.
function _pci_get_pf_of_vf() {
    local bdf
    bdf=$(_pci_normalize_bdf "$1") || return 1
    local physfn_link="/sys/bus/pci/devices/${bdf}/physfn"
    if [[ ! -L "$physfn_link" ]]; then
        return 1
    fi
    basename "$(readlink -f "$physfn_link")"
}
# True when the device advertises SR-IOV capability, i.e. its
# sysfs sriov_totalvfs attribute exists and is greater than zero —
# regardless of whether any VFs are currently instantiated.
function _pci_is_sriov_capable() {
    local bdf max_vfs
    bdf=$(_pci_normalize_bdf "$1") || return 1
    max_vfs=$(cat "/sys/bus/pci/devices/${bdf}/sriov_totalvfs" 2>/dev/null)
    [[ -n "$max_vfs" && "$max_vfs" -gt 0 ]]
}
# Print the number of currently-instantiated VFs for the device.
# Always prints a number ("0" when the sysfs attribute is absent or the
# BDF is invalid) so callers doing arithmetic never see an empty string.
# Returns: 1 only when the BDF could not be normalized.
function _pci_active_vf_count() {
    local bdf count
    if ! bdf=$(_pci_normalize_bdf "$1"); then
        echo 0
        return 1
    fi
    count=$(cat "/sys/bus/pci/devices/${bdf}/sriov_numvfs" 2>/dev/null)
    echo "${count:-0}"
}
# True when the device currently has at least one instantiated VF.
function _pci_has_active_vfs() {
    local active
    active=$(_pci_active_vf_count "$1")
    [[ "$active" -gt 0 ]]
}
# Filter an array (by name) of PCI BDFs in place, removing entries that
# are SR-IOV Virtual Functions or Physical Functions with active VFs —
# i.e. the configurations ProxMenux refuses to operate on today.
#
# Usage:   _pci_sriov_filter_array <array_name_by_ref>
# Output:  one line per removed entry on stdout, formatted "BDF|role"
#          where role is whatever _pci_sriov_role prints (e.g.
#          "vf 0000:00:02.0" or "pf-active 7").
# Side effect: the removed entries are also stored in the global array
#          _PCI_SRIOV_REMOVED (one "BDF|role" string per element), so
#          direct callers can read them without capturing stdout.
# Returns: 0 always; the array mutation happens either way.
#
# WARNING: the in-place mutation relies on a bash nameref. If this
# function is invoked inside a command substitution —
#     removed=$(_pci_sriov_filter_array MY_ARR)
# — it runs in a subshell and BOTH the array mutation and the
# _PCI_SRIOV_REMOVED global are lost in the parent shell, i.e. the
# filtering silently does not happen for the caller. Call the function
# directly (redirect stdout if unwanted) and read _PCI_SRIOV_REMOVED.
function _pci_sriov_filter_array() {
    local -n _arr_ref="$1"
    local -a _kept=()
    local bdf role first
    _PCI_SRIOV_REMOVED=()
    for bdf in "${_arr_ref[@]}"; do
        role=$(_pci_sriov_role "$bdf" 2>/dev/null)
        first="${role%% *}"
        if [[ "$first" == "vf" || "$first" == "pf-active" ]]; then
            printf '%s|%s\n' "$bdf" "$role"
            _PCI_SRIOV_REMOVED+=("${bdf}|${role}")
        else
            _kept+=("$bdf")
        fi
    done
    _arr_ref=("${_kept[@]}")
    return 0
}
# Emits a one-line SR-IOV role description for diagnostics/messages.
# Prints exactly one of:
#   "vf <PF-BDF>"   — the device is a VF (names its parent PF)
#   "pf-active <N>" — PF with N>0 instantiated VFs
#   "pf-idle"       — SR-IOV capable PF with 0 VFs (benign)
#   "none"          — device not involved in SR-IOV at all
# Always returns 0; unparseable input is reported as "none".
function _pci_sriov_role() {
    local bdf
    if ! bdf=$(_pci_normalize_bdf "$1"); then
        echo "none"
        return 0
    fi
    if _pci_is_vf "$bdf"; then
        echo "vf $(_pci_get_pf_of_vf "$bdf")"
    elif _pci_is_sriov_capable "$bdf"; then
        local active
        active=$(_pci_active_vf_count "$bdf")
        if [[ "$active" -gt 0 ]]; then
            echo "pf-active ${active}"
        else
            echo "pf-idle"
        fi
    else
        echo "none"
    fi
    return 0
}

View File

@@ -28,6 +28,11 @@ NVIDIA_VID_DID=""
if [[ -f "$UTILS_FILE" ]]; then
source "$UTILS_FILE"
fi
if [[ -f "$LOCAL_SCRIPTS/global/pci_passthrough_helpers.sh" ]]; then
source "$LOCAL_SCRIPTS/global/pci_passthrough_helpers.sh"
elif [[ -f "$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)/global/pci_passthrough_helpers.sh" ]]; then
source "$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)/global/pci_passthrough_helpers.sh"
fi
if [[ -f "$LOCAL_SCRIPTS/global/gpu_hook_guard_helpers.sh" ]]; then
source "$LOCAL_SCRIPTS/global/gpu_hook_guard_helpers.sh"
elif [[ -f "$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)/global/gpu_hook_guard_helpers.sh" ]]; then
@@ -259,6 +264,67 @@ select_container() {
# ============================================================
# GPU checklist selection
# ============================================================
# ============================================================
# SR-IOV guard — refuse to pass an SR-IOV GPU to an LXC via ProxMenux.
# The LXC flow does not rewrite vfio.conf/blacklist (so it is not
# destructive like add_gpu_vm.sh), but it blindly globs /dev/dri/card*
# and /dev/dri/renderD* without mapping each node back to its BDF; with
# several VFs present the container could end up holding any/all of
# them, which is not what a user asking for "one VF to this LXC"
# expects. Until a VF-aware LXC flow exists, stop and point the user to
# manual configuration — the same policy as switch_gpu_mode.sh and
# add_gpu_vm.sh.
# ============================================================
check_sriov_and_block_if_needed() {
    # No-op when the SR-IOV helper library was not sourced.
    declare -F _pci_sriov_role >/dev/null 2>&1 || return 0
    local gpu_kind addr sriov_info sriov_tag
    local -a blocked=()
    # Map each selected GPU vendor back to its detected PCI address and
    # collect the ones ProxMenux refuses to touch (VF, or PF with VFs).
    for gpu_kind in "${SELECTED_GPUS[@]}"; do
        case "$gpu_kind" in
            intel)  addr="$INTEL_PCI" ;;
            amd)    addr="$AMD_PCI" ;;
            nvidia) addr="$NVIDIA_PCI" ;;
            *)      continue ;;
        esac
        [[ -n "$addr" ]] || continue
        sriov_info=$(_pci_sriov_role "$addr")
        sriov_tag="${sriov_info%% *}"
        if [[ "$sriov_tag" == "vf" ]]; then
            blocked+=("${addr}|vf|${sriov_info#vf }")
        elif [[ "$sriov_tag" == "pf-active" ]]; then
            blocked+=("${addr}|pf-active|${sriov_info#pf-active }")
        fi
    done
    [[ ${#blocked[@]} -eq 0 ]] && return 0
    # Build the dialog body: one bullet per blocked device.
    local msg entry bdf kind info
    msg="\n\Zb\Z6$(translate 'SR-IOV Configuration Detected')\Zn\n\n"
    for entry in "${blocked[@]}"; do
        bdf="${entry%%|*}"
        kind="${entry#*|}"; kind="${kind%%|*}"
        info="${entry##*|}"
        if [[ "$kind" == "vf" ]]; then
            msg+=" • \Zb${bdf}\Zn — $(translate 'Virtual Function (parent PF:') ${info})\n"
        else
            msg+=" • \Zb${bdf}\Zn — $(translate 'Physical Function with') ${info} $(translate 'active VFs')\n"
        fi
    done
    msg+="\n$(translate 'To pass SR-IOV Virtual Functions to a container, edit the LXC configuration manually via the Proxmox web interface. The Physical Function will remain bound to the native driver.')"
    dialog --backtitle "ProxMenux" --colors \
        --title "$(translate 'SR-IOV Configuration Detected')" \
        --msgbox "$msg" 16 82
    exit 0
}
select_gpus() {
local gpu_items=()
$HAS_INTEL && gpu_items+=("intel" "${INTEL_NAME:-Intel iGPU}" "off")
@@ -927,6 +993,7 @@ main() {
detect_host_gpus
select_container
select_gpus
check_sriov_and_block_if_needed
check_vfio_switch_mode
precheck_existing_lxc_gpu_config

View File

@@ -718,6 +718,48 @@ select_gpu() {
}
# ==========================================================
# SR-IOV guard — block assignment of a Virtual Function, or of a
# Physical Function that currently exposes active VFs. Same policy as
# switch_gpu_mode.sh: writing this GPU's vendor:device pair into
# /etc/modprobe.d/vfio.conf would hand the PF to vfio-pci at the next
# boot and tear down the entire VF tree. ProxMenux does not yet manage
# the SR-IOV lifecycle, so we bail out before vfio.conf /
# blacklist.conf are touched.
# ==========================================================
check_sriov_and_block_if_needed() {
    # No-op when the SR-IOV helper library was not sourced,
    # or when no GPU has been selected yet.
    declare -F _pci_sriov_role >/dev/null 2>&1 || return 0
    [[ -n "$SELECTED_GPU_PCI" ]] || return 0
    local sriov_info sriov_tag explanation=""
    sriov_info=$(_pci_sriov_role "$SELECTED_GPU_PCI")
    sriov_tag="${sriov_info%% *}"
    if [[ "$sriov_tag" == "vf" ]]; then
        local pf_addr="${sriov_info#vf }"
        explanation="$(translate 'The selected device') \Zb${SELECTED_GPU_PCI}\Zn $(translate 'is an SR-IOV Virtual Function (VF). Its parent Physical Function is') \Zb${pf_addr}\Zn."
    elif [[ "$sriov_tag" == "pf-active" ]]; then
        local vf_count="${sriov_info#pf-active }"
        explanation="$(translate 'The selected device') \Zb${SELECTED_GPU_PCI}\Zn $(translate 'is a Physical Function with') \Zb${vf_count}\Zn $(translate 'active Virtual Functions. Changing its driver binding would destroy every VF.')"
    else
        # "pf-idle" and "none" are safe to operate on.
        return 0
    fi
    local msg
    msg="\n\Zb\Z6$(translate 'SR-IOV Configuration Detected')\Zn\n\n"
    msg+="${explanation}\n\n"
    msg+="$(translate 'To assign VFs to VMs or LXCs, edit the configuration manually via the Proxmox web interface. The Physical Function will remain bound to the native driver.')"
    _pmx_msgbox "$(translate 'SR-IOV Configuration Detected')" "$msg" 16 82
    [[ "$WIZARD_CALL" == "true" ]] && _set_wizard_result "cancelled"
    exit 0
}
# ==========================================================
# Phase 1 — Step 4: Single-GPU warning
# ==========================================================
@@ -1922,6 +1964,7 @@ main() {
detect_host_gpus
check_iommu_enabled
select_gpu
check_sriov_and_block_if_needed
warn_single_gpu
select_vm
ensure_selected_gpu_not_already_in_target_vm

View File

@@ -624,6 +624,75 @@ select_gpus() {
read -ra SELECTED_GPU_IDX <<< "$sel"
}
# ==========================================================
# SR-IOV guard — abort mode switch when SR-IOV is active
# ==========================================================
# Intel i915-sriov-dkms and AMD MxGPU split a Physical Function (PF) into
# multiple Virtual Functions (VFs). Switching the PF's driver destroys
# every VF; switching a VF's driver affects only that VF. ProxMenux does
# not yet manage the SR-IOV lifecycle (create/destroy VFs, track per-VF
# ownership), so operating on a PF with active VFs — or on a VF itself —
# would leave the user's virtualization stack in an inconsistent state.
# We detect the situation early and hand the user back to the Proxmox
# web UI, which understands VFs as first-class PCI devices.
check_sriov_and_block_if_needed() {
    # No-op when the SR-IOV helper library was not sourced.
    declare -F _pci_sriov_role >/dev/null 2>&1 || return 0
    local idx pci role first_word pf_bdf active_count
    local -a vf_list=()
    local -a pf_list=()
    # Classify each selected GPU; collect the ones we refuse to touch.
    for idx in "${SELECTED_GPU_IDX[@]}"; do
        pci="${ALL_GPU_PCIS[$idx]}"
        role=$(_pci_sriov_role "$pci")
        first_word="${role%% *}"
        case "$first_word" in
            vf)
                pf_bdf="${role#vf }"
                vf_list+=("${pci}|${pf_bdf}")
                ;;
            pf-active)
                active_count="${role#pf-active }"
                pf_list+=("${pci}|${active_count}")
                ;;
        esac
    done
    [[ ${#vf_list[@]} -eq 0 && ${#pf_list[@]} -eq 0 ]] && return 0
    local title msg entry bdf parent cnt
    title="$(translate 'SR-IOV Configuration Detected')"
    msg="\n"
    if [[ ${#vf_list[@]} -gt 0 ]]; then
        msg+="$(translate 'The following selected device(s) are SR-IOV Virtual Functions (VFs):')\n\n"
        for entry in "${vf_list[@]}"; do
            bdf="${entry%%|*}"
            parent="${entry#*|}"
            msg+="${bdf} $(translate '(parent PF:') ${parent})\n"
        done
        msg+="\n"
    fi
    if [[ ${#pf_list[@]} -gt 0 ]]; then
        msg+="$(translate 'The following selected device(s) are Physical Functions with active Virtual Functions:')\n\n"
        for entry in "${pf_list[@]}"; do
            bdf="${entry%%|*}"
            cnt="${entry#*|}"
            # Bug fix: BDF and VF count were concatenated with no
            # separator (rendered "0000:00:02.04 active VF(s)").
            msg+="${bdf} — ${cnt} $(translate 'active VF(s)')\n"
        done
        msg+="\n"
    fi
    msg+="$(translate 'To assign VFs to VMs or LXCs, edit the configuration manually via the Proxmox web interface. The Physical Function will remain bound to the native driver.')"
    dialog --backtitle "ProxMenux" \
        --title "$title" \
        --msgbox "$msg" 20 80
    exit 0
}
collect_selected_iommu_ids() {
SELECTED_IOMMU_IDS=()
SELECTED_PCI_SLOTS=()
@@ -1164,6 +1233,7 @@ main() {
detect_host_gpus
while true; do
select_gpus
check_sriov_and_block_if_needed
select_target_mode
[[ $? -eq 2 ]] && continue
validate_vm_mode_blocked_ids

View File

@@ -507,6 +507,67 @@ find_gpu_by_slot() {
return 1
}
# ==========================================================
# SR-IOV guard — abort mode switch when SR-IOV is active
# ==========================================================
# Mirrors the interactive switch_gpu_mode.sh policy: never operate on a
# Virtual Function, nor on a Physical Function that currently exposes
# active VFs — flipping drivers in that state collapses the VF tree and
# breaks every guest that was consuming one of those VFs.
check_sriov_and_block_if_needed() {
    # No-op when the SR-IOV helper library was not sourced.
    declare -F _pci_sriov_role >/dev/null 2>&1 || return 0
    local i addr info tag
    local -a found_vfs=()
    local -a found_pfs=()
    # Classify each selected GPU; collect the ones we refuse to touch.
    for i in "${SELECTED_GPU_IDX[@]}"; do
        addr="${ALL_GPU_PCIS[$i]}"
        info=$(_pci_sriov_role "$addr")
        tag="${info%% *}"
        if [[ "$tag" == "vf" ]]; then
            found_vfs+=("${addr}|${info#vf }")
        elif [[ "$tag" == "pf-active" ]]; then
            found_pfs+=("${addr}|${info#pf-active }")
        fi
    done
    [[ ${#found_vfs[@]} -eq 0 && ${#found_pfs[@]} -eq 0 ]] && return 0
    # Build the HTML body shown by the hybrid (web) message box.
    local msg entry bdf parent cnt
    msg="<div style='color:#f0ad4e;font-weight:bold;margin-bottom:10px;'>$(translate 'SR-IOV Configuration Detected')</div>"
    if [[ ${#found_vfs[@]} -gt 0 ]]; then
        msg+="<p>$(translate 'The following selected device(s) are SR-IOV Virtual Functions (VFs):')</p><ul>"
        for entry in "${found_vfs[@]}"; do
            bdf="${entry%%|*}"
            parent="${entry#*|}"
            msg+="<li><code>${bdf}</code> &mdash; $(translate 'parent PF:') <code>${parent}</code></li>"
        done
        msg+="</ul>"
    fi
    if [[ ${#found_pfs[@]} -gt 0 ]]; then
        msg+="<p>$(translate 'The following selected device(s) are Physical Functions with active Virtual Functions:')</p><ul>"
        for entry in "${found_pfs[@]}"; do
            bdf="${entry%%|*}"
            cnt="${entry#*|}"
            msg+="<li><code>${bdf}</code> &mdash; ${cnt} $(translate 'active VF(s)')</li>"
        done
        msg+="</ul>"
    fi
    msg+="<p>$(translate 'To assign VFs to VMs or LXCs, edit the configuration manually via the Proxmox web interface. The Physical Function will remain bound to the native driver.')</p>"
    hybrid_msgbox "$(translate 'SR-IOV Configuration Detected')" "$msg"
    return 1
}
validate_vm_mode_blocked_ids() {
[[ "$TARGET_MODE" != "vm" ]] && return 0
@@ -1147,6 +1208,12 @@ main() {
exit 1
fi
# SR-IOV guard: refuse to toggle the driver on a VF or on a PF with
# active VFs. Manual handling via Proxmox web UI is required.
if ! check_sriov_and_block_if_needed; then
exit 1
fi
# Validate if GPU is blocked for VM mode (certain Intel GPUs)
if ! validate_vm_mode_blocked_ids; then
exit 1

View File

@@ -173,13 +173,28 @@ run_script_by_slug() {
credentials=$(format_credentials "$first")
# Build info message
local msg="\Zb\Z4$(translate "Description"):\Zn\n$desc"
[[ -n "$notes_dialog" ]] && msg+="\n\n\Zb\Z4$(translate "Notes"):\Zn\n$notes_dialog"
local msg="\Zb\Z4$(translate "Description"):\Zn\n$desc"
if [[ -n "$notes" ]]; then
local notes_short=""
local char_count=0
local max_chars=400
while IFS= read -r line; do
[[ -z "$line" ]] && continue
char_count=$(( char_count + ${#line} ))
if [[ $char_count -lt $max_chars ]]; then
notes_short+="$line\n"
else
notes_short+="...\n"
break
fi
done <<< "$notes"
msg+="\n\n\Zb\Z4$(translate "Notes"):\Zn\n$notes_short"
fi
[[ -n "$credentials" ]] && msg+="\n\n\Zb\Z4$(translate "Default Credentials"):\Zn\n$credentials"
[[ "$port" -gt 0 ]] && msg+="\n\n\Zb\Z4$(translate "Default Port"):\Zn $port"
[[ -n "$website" ]] && msg+="\n\Zb\Z4$(translate "Website"):\Zn $website"
msg+="\n\n$(translate "Choose how to run the script:"):"
msg+="\n\n$(translate "Choose how to run the script:")"
# Build menu: one or two entries per script_info (GH + optional Mirror)
declare -a MENU_OPTS=()
@@ -383,7 +398,7 @@ while true; do
SELECTED_IDX=$(dialog --backtitle "ProxMenux" \
--title "Proxmox VE Helper-Scripts" \
--menu "$(translate "Select a category or search for scripts:"):" \
20 70 14 "${MENU_ITEMS[@]}" 3>&1 1>&2 2>&3) || {
22 75 15 "${MENU_ITEMS[@]}" 3>&1 1>&2 2>&3) || {
dialog --clear --title "ProxMenux" \
--msgbox "\n\n$(translate "Visit the website to discover more scripts, stay updated with the latest updates, and support the project:")\n\nhttps://community-scripts.github.io/ProxmoxVE" 15 70
exec bash "$LOCAL_SCRIPTS/menus/main_menu.sh"
@@ -425,7 +440,7 @@ while true; do
SCRIPT_INDEX=$(dialog --colors --backtitle "ProxMenux" \
--title "$(translate "Scripts in") ${CATEGORY_NAMES[$SELECTED]}" \
--menu "$(translate "Choose a script to execute:"):" \
20 70 14 "${SCRIPTS[@]}" 3>&1 1>&2 2>&3) || break
22 75 15 "${SCRIPTS[@]}" 3>&1 1>&2 2>&3) || break
SCRIPT_SELECTED="${INDEX_TO_SLUG[$SCRIPT_INDEX]}"
run_script_by_slug "$SCRIPT_SELECTED"

View File

@@ -364,6 +364,41 @@ select_controller_nvme() {
return 1
fi
# SR-IOV guard: drop VFs / active PFs and inform the user. Same policy
# as add_gpu_vm.sh and the VM creators — refuse to rewrite host VFIO
# config for an SR-IOV device since it would collapse the VF tree.
if declare -F _pci_sriov_filter_array >/dev/null 2>&1; then
local sriov_removed=""
sriov_removed=$(_pci_sriov_filter_array SELECTED_CONTROLLER_PCIS)
if [[ -n "$sriov_removed" ]]; then
local sriov_msg=""
sriov_msg="\n$(translate "The following devices were excluded because they are part of an SR-IOV configuration:")\n"
local entry bdf role first
while IFS= read -r entry; do
[[ -z "$entry" ]] && continue
bdf="${entry%%|*}"
role="${entry#*|}"
first="${role%% *}"
if [[ "$first" == "vf" ]]; then
sriov_msg+="\n • ${bdf}$(translate "Virtual Function")"
else
sriov_msg+="\n • ${bdf}$(translate "Physical Function with") ${role#pf-active } $(translate "active VFs")"
fi
done <<< "$sriov_removed"
sriov_msg+="\n\n$(translate "To pass SR-IOV Virtual Functions to a VM, edit the VM configuration manually via the Proxmox web interface.")"
dialog --backtitle "ProxMenux" --colors \
--title "$(translate "SR-IOV Configuration Detected")" \
--msgbox "$sriov_msg" 18 82
fi
if [[ ${#SELECTED_CONTROLLER_PCIS[@]} -eq 0 ]]; then
dialog --backtitle "ProxMenux" \
--title "$(translate "Controller + NVMe")" \
--msgbox "\n$(translate "No eligible controllers remain after SR-IOV filtering.")" 8 70
return 1
fi
fi
return 0
}

View File

@@ -1255,6 +1255,48 @@ if [[ ${#EFFECTIVE_IMPORT_DISKS[@]} -gt 0 ]]; then
done
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
# SR-IOV guard: exclude VFs / active PFs before staging. Mid-flow
# phase-2 output; a whiptail msgbox stops the scrolling so the user
# actually sees which devices were dropped. After the ack, each
# skipped BDF is logged via msg_warn so the action is visible in the
# captured log as well.
if declare -F _pci_sriov_filter_array >/dev/null 2>&1; then
SRIOV_REMOVED=$(_pci_sriov_filter_array CONTROLLER_NVME_PCIS)
if [[ -n "$SRIOV_REMOVED" ]]; then
SRIOV_MSG=""
SRIOV_BDFS=()
SRIOV_NL=$'\n'
SRIOV_MSG="$(translate "The following devices were excluded from Controller/NVMe passthrough because they are part of an SR-IOV configuration:")"
while IFS= read -r SRIOV_ENTRY; do
[[ -z "$SRIOV_ENTRY" ]] && continue
SRIOV_BDF="${SRIOV_ENTRY%%|*}"
SRIOV_ROLE="${SRIOV_ENTRY#*|}"
SRIOV_FIRST="${SRIOV_ROLE%% *}"
SRIOV_BDFS+=("$SRIOV_BDF")
if [[ "$SRIOV_FIRST" == "vf" ]]; then
SRIOV_MSG+="${SRIOV_NL}${SRIOV_BDF}$(translate "Virtual Function")"
else
SRIOV_MSG+="${SRIOV_NL}${SRIOV_BDF}$(translate "Physical Function with") ${SRIOV_ROLE#pf-active } $(translate "active VFs")"
fi
done <<< "$SRIOV_REMOVED"
SRIOV_MSG+="${SRIOV_NL}${SRIOV_NL}$(translate "To pass SR-IOV Virtual Functions to a VM, edit the VM configuration manually via the Proxmox web interface.")"
whiptail --backtitle "ProxMenux" \
--title "$(translate "SR-IOV Configuration Detected")" \
--msgbox "$SRIOV_MSG" 18 82
for SRIOV_SKIPPED in "${SRIOV_BDFS[@]}"; do
msg_warn "$(translate "Skipping SR-IOV device"): ${SRIOV_SKIPPED}"
done
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -eq 0 ]]; then
msg_warn "$(translate "No eligible Controller/NVMe devices remain after SR-IOV filtering. Skipping.")"
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
local CONTROLLER_CAN_STAGE=true
if declare -F _pci_is_iommu_active >/dev/null 2>&1 && ! _pci_is_iommu_active; then

View File

@@ -468,6 +468,55 @@ fi
done
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
# SR-IOV guard: drop Virtual Functions / active-PFs before staging.
# Proxmox's VFIO rebind via qm hostpci would trigger the same VF-tree
# collapse described in the GPU flows, so we exclude them and tell
# the user to manage those passthroughs manually.
#
# UI choice: this runs mid-flow (phase 2 of the wizard, interleaved
# with msg_info/msg_ok output), so a whiptail msgbox is used to force
# the user to acknowledge the exclusion instead of letting the notice
# scroll by with the rest of the processing output. After the user
# clicks OK, a per-device msg_warn is emitted so the skipped BDFs
# remain visible in the captured log.
if declare -F _pci_sriov_filter_array >/dev/null 2>&1; then
local _sriov_removed=""
_sriov_removed=$(_pci_sriov_filter_array CONTROLLER_NVME_PCIS)
if [[ -n "$_sriov_removed" ]]; then
local _sriov_msg="" _entry _bdf _role _first _sb
local -a _sriov_bdfs=()
local _nl=$'\n'
_sriov_msg="$(translate "The following devices were excluded from Controller/NVMe passthrough because they are part of an SR-IOV configuration:")"
while IFS= read -r _entry; do
[[ -z "$_entry" ]] && continue
_bdf="${_entry%%|*}"
_role="${_entry#*|}"
_first="${_role%% *}"
_sriov_bdfs+=("$_bdf")
if [[ "$_first" == "vf" ]]; then
_sriov_msg+="${_nl}${_bdf}$(translate "Virtual Function")"
else
_sriov_msg+="${_nl}${_bdf}$(translate "Physical Function with") ${_role#pf-active } $(translate "active VFs")"
fi
done <<< "$_sriov_removed"
_sriov_msg+="${_nl}${_nl}$(translate "To pass SR-IOV Virtual Functions to a VM, edit the VM configuration manually via the Proxmox web interface.")"
whiptail --backtitle "ProxMenux" \
--title "$(translate "SR-IOV Configuration Detected")" \
--msgbox "$_sriov_msg" 18 82
for _sb in "${_sriov_bdfs[@]}"; do
msg_warn "$(translate "Skipping SR-IOV device"): ${_sb}"
done
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -eq 0 ]]; then
msg_warn "$(translate "No eligible Controller/NVMe devices remain after SR-IOV filtering. Skipping.")"
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
local CONTROLLER_CAN_STAGE=true
if declare -F _pci_is_iommu_active >/dev/null 2>&1 && ! _pci_is_iommu_active; then

View File

@@ -1270,6 +1270,48 @@ function create_vm() {
done
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
# SR-IOV guard: mirror of the synology.sh/vm_creator.sh block —
# drop VFs and active-PF devices before staging so Proxmox does
# not collapse the VF tree at VM start. Mid-flow, so the notice
# goes through whiptail (blocking acknowledgment) and each
# skipped BDF is then echoed via msg_warn for the log trail.
if declare -F _pci_sriov_filter_array >/dev/null 2>&1; then
SRIOV_REMOVED=$(_pci_sriov_filter_array CONTROLLER_NVME_PCIS)
if [[ -n "$SRIOV_REMOVED" ]]; then
SRIOV_MSG=""
SRIOV_BDFS=()
SRIOV_NL=$'\n'
SRIOV_MSG="$(translate "The following devices were excluded from Controller/NVMe passthrough because they are part of an SR-IOV configuration:")"
while IFS= read -r SRIOV_ENTRY; do
[[ -z "$SRIOV_ENTRY" ]] && continue
SRIOV_BDF="${SRIOV_ENTRY%%|*}"
SRIOV_ROLE="${SRIOV_ENTRY#*|}"
SRIOV_FIRST="${SRIOV_ROLE%% *}"
SRIOV_BDFS+=("$SRIOV_BDF")
if [[ "$SRIOV_FIRST" == "vf" ]]; then
SRIOV_MSG+="${SRIOV_NL}${SRIOV_BDF}$(translate "Virtual Function")"
else
SRIOV_MSG+="${SRIOV_NL}${SRIOV_BDF}$(translate "Physical Function with") ${SRIOV_ROLE#pf-active } $(translate "active VFs")"
fi
done <<< "$SRIOV_REMOVED"
SRIOV_MSG+="${SRIOV_NL}${SRIOV_NL}$(translate "To pass SR-IOV Virtual Functions to a VM, edit the VM configuration manually via the Proxmox web interface.")"
whiptail --backtitle "ProxMenux" \
--title "$(translate "SR-IOV Configuration Detected")" \
--msgbox "$SRIOV_MSG" 18 82
for SRIOV_SKIPPED in "${SRIOV_BDFS[@]}"; do
msg_warn "$(translate "Skipping SR-IOV device"): ${SRIOV_SKIPPED}"
done
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -eq 0 ]]; then
msg_warn "$(translate "No eligible Controller/NVMe devices remain after SR-IOV filtering. Skipping.")"
fi
fi
if [[ ${#CONTROLLER_NVME_PCIS[@]} -gt 0 ]]; then
local CONTROLLER_CAN_STAGE=true
if declare -F _pci_is_iommu_active >/dev/null 2>&1 && ! _pci_is_iommu_active; then