import {
  Bell, BellOff, Send, CheckCircle2, XCircle, Loader2,
  AlertTriangle, Info, Settings2, Zap, Eye, EyeOff, Trash2,
  ChevronDown, ChevronUp, ChevronRight, TestTube2, Mail, Webhook,
  Copy, Server, Shield, ExternalLink, RefreshCw
} from "lucide-react"

// --- inside NotificationSettings(), alongside the other AI-provider state ---

// Model names reported by the configured Ollama server.
// Annotated as string[]: a bare useState([]) infers never[] under strict
// mode, which rejects setOllamaModels(data.models).
const [ollamaModels, setOllamaModels] = useState<string[]>([])
const [loadingOllamaModels, setLoadingOllamaModels] = useState(false)

/**
 * Query the backend for the models available on an Ollama server.
 *
 * On success the model list is stored and, if the currently selected
 * model is not in the list, the first available model is auto-selected.
 * Any failure (network error, success:false response) clears the list
 * silently — the UI shows an empty selector rather than an error.
 *
 * @param url Base URL of the Ollama server (e.g. http://localhost:11434).
 */
const fetchOllamaModels = useCallback(async (url: string) => {
  if (!url) return
  setLoadingOllamaModels(true)
  try {
    const data = await fetchApi<{ success: boolean; models: string[]; message: string }>(
      "/api/notifications/ollama-models",
      {
        method: "POST",
        body: JSON.stringify({ ollama_url: url }),
      },
    )
    if (data.success) {
      setOllamaModels(data.models)
      // Auto-select the first model when the configured one is unavailable.
      // Read ai_model inside the functional update instead of closing over
      // config.ai_model: the original closure (with config.ai_model in the
      // useCallback deps) changed this callback's identity on every model
      // change, which re-fired the effect that calls it and re-fetched the
      // list needlessly.
      if (data.models.length > 0) {
        updateConfig(p =>
          data.models.includes(p.ai_model) ? p : { ...p, ai_model: data.models[0] },
        )
      }
    } else {
      setOllamaModels([])
    }
  } catch {
    setOllamaModels([])
  } finally {
    setLoadingOllamaModels(false)
  }
  // NOTE(review): deps empty — assumes updateConfig is a stable setter-style
  // function and fetchApi is module-scoped; confirm against the component.
}, [])
// Re-query the Ollama server for its model list whenever the selected
// provider or the configured server URL changes. The fetch itself (and all
// error handling) lives in fetchOllamaModels; this effect only decides
// when to trigger it.
useEffect(() => {
  const { ai_provider, ai_ollama_url } = config
  if (ai_provider === 'ollama' && ai_ollama_url) {
    fetchOllamaModels(ai_ollama_url)
  }
}, [config.ai_provider, config.ai_ollama_url, fetchOllamaModels])
-
- {AI_PROVIDERS.find(p => p.value === config.ai_provider)?.model || "default"} -
{/* Model - selector for Ollama, read-only for others */}
{config.ai_provider === "ollama" ? (
  // NOTE(review): the original markup for this branch was lost in the
  // paste (tags stripped). Reconstructed as a <select> fed by ollamaModels
  // plus a manual refresh button — RefreshCw was added to the lucide-react
  // imports in this change for exactly that purpose. Verify the class
  // names / components against the rest of the form.
  <div className="flex items-center gap-2">
    <select
      value={config.ai_model}
      onChange={(e) => updateConfig((p) => ({ ...p, ai_model: e.target.value }))}
      disabled={loadingOllamaModels}
      className="flex-1"
    >
      {ollamaModels.length === 0 ? (
        // Keep the configured value visible even when discovery failed,
        // so saving the form does not silently blank the model.
        <option value={config.ai_model}>{config.ai_model || "no models found"}</option>
      ) : (
        ollamaModels.map((m) => (
          <option key={m} value={m}>
            {m}
          </option>
        ))
      )}
    </select>
    <button
      type="button"
      onClick={() => fetchOllamaModels(config.ai_ollama_url)}
      disabled={loadingOllamaModels}
      title="Refresh model list"
    >
      {loadingOllamaModels ? (
        <Loader2 className="h-4 w-4 animate-spin" />
      ) : (
        <RefreshCw className="h-4 w-4" />
      )}
    </button>
  </div>
) : (
  <div>
    {AI_PROVIDERS.find((p) => p.value === config.ai_provider)?.model || "default"}
  </div>
)}
@notification_bp.route('/api/notifications/ollama-models', methods=['POST'])
def get_ollama_models():
    """Fetch available model names from an Ollama server.

    Request body:
        { "ollama_url": "http://localhost:11434" }

    Returns JSON (always HTTP 200 so the frontend can show the message
    inline via the ``success`` flag):
        { "success": bool, "models": [str, ...], "message": str }
    """
    try:
        import urllib.request
        import urllib.error
        from urllib.parse import urlparse

        data = request.get_json() or {}
        ollama_url = data.get('ollama_url', 'http://localhost:11434')

        # SECURITY(review): the URL is client-supplied, so this endpoint can
        # be pointed at arbitrary hosts/schemes (SSRF). Restrict to http(s);
        # consider a host allow-list if this API is reachable beyond the
        # local admin UI.
        if urlparse(ollama_url).scheme not in ('http', 'https'):
            return jsonify({
                'success': False,
                'models': [],
                'message': 'Invalid Ollama URL: only http/https are supported'
            })

        url = f"{ollama_url.rstrip('/')}/api/tags"
        req = urllib.request.Request(url, method='GET')
        req.add_header('User-Agent', 'ProxMenux-Monitor/1.1')

        with urllib.request.urlopen(req, timeout=10) as resp:
            result = json.loads(resp.read().decode('utf-8'))

        # NOTE(review): stripping the ':tag' suffix collapses variants such
        # as "llama3:8b" and "llama3:70b" into a single entry; selecting the
        # bare name relies on Ollama resolving it to ":latest" — confirm
        # this is intended.
        models = [m.get('name', '').split(':')[0] for m in result.get('models', [])]
        models = sorted(set(models))  # dedupe and sort for a stable dropdown
        return jsonify({
            'success': True,
            'models': models,
            'message': f'Found {len(models)} models'
        })
    except urllib.error.URLError as e:
        # Connection-level failure (refused, DNS, timeout wrapped in URLError).
        return jsonify({
            'success': False,
            'models': [],
            'message': f'Cannot connect to Ollama: {str(e.reason)}'
        })
    except Exception as e:
        # Catch-all keeps the endpoint from 500-ing; the message is surfaced
        # verbatim in the UI.
        return jsonify({
            'success': False,
            'models': [],
            'message': f'Error: {str(e)}'
        })