Update notification service

This commit is contained in:
MacRimi
2026-03-17 14:07:47 +01:00
parent 9a51a9e635
commit 0cb8900374
14 changed files with 1390 additions and 112 deletions

View File

@@ -0,0 +1,111 @@
"""AI Providers for ProxMenux notification enhancement.
This module provides a pluggable architecture for different AI providers
to enhance and translate notification messages.
Supported providers:
- Groq: Fast inference, generous free tier (30 req/min)
- OpenAI: Industry standard, widely used
- Anthropic: Excellent for text generation, Claude Haiku is fast and affordable
- Gemini: Google's model, free tier available, good quality/price ratio
- Ollama: 100% local execution, no costs, complete privacy
- OpenRouter: Aggregator with access to 100+ models using a single API key
"""
from typing import Optional

from .base import AIProvider, AIProviderError
from .groq_provider import GroqProvider
from .openai_provider import OpenAIProvider
from .anthropic_provider import AnthropicProvider
from .gemini_provider import GeminiProvider
from .ollama_provider import OllamaProvider
from .openrouter_provider import OpenRouterProvider
# Registry mapping provider identifiers to their implementation classes.
# Looked up by get_provider() to instantiate a provider from its name.
PROVIDERS = {
    'groq': GroqProvider,
    'openai': OpenAIProvider,
    'anthropic': AnthropicProvider,
    'gemini': GeminiProvider,
    'ollama': OllamaProvider,
    'openrouter': OpenRouterProvider,
}
# Provider metadata for UI display.
# Keys mirror PROVIDERS; each entry carries a human-readable name, the
# default model (matches the class's DEFAULT_MODEL), a short description,
# and whether an API key is required (False only for local Ollama).
PROVIDER_INFO = {
    'groq': {
        'name': 'Groq',
        'default_model': 'llama-3.3-70b-versatile',
        'description': 'Fast inference, generous free tier (30 req/min). Ideal to get started.',
        'requires_api_key': True,
    },
    'openai': {
        'name': 'OpenAI',
        'default_model': 'gpt-4o-mini',
        'description': 'Industry standard. Very accurate and widely used.',
        'requires_api_key': True,
    },
    'anthropic': {
        'name': 'Anthropic (Claude)',
        'default_model': 'claude-3-haiku-20240307',
        'description': 'Excellent for writing and translation. Fast and affordable.',
        'requires_api_key': True,
    },
    'gemini': {
        'name': 'Google Gemini',
        'default_model': 'gemini-1.5-flash',
        'description': 'Free tier available, very good quality/price ratio.',
        'requires_api_key': True,
    },
    'ollama': {
        'name': 'Ollama (Local)',
        'default_model': 'llama3.2',
        'description': '100% local execution. No costs, complete privacy, no internet required.',
        'requires_api_key': False,
    },
    'openrouter': {
        'name': 'OpenRouter',
        'default_model': 'meta-llama/llama-3.3-70b-instruct',
        'description': 'Aggregator with access to 100+ models using a single API key. Maximum flexibility.',
        'requires_api_key': True,
    },
}
def get_provider(name: str, **kwargs) -> AIProvider:
    """Factory function to get a provider instance.

    Args:
        name: Provider name (groq, openai, anthropic, gemini, ollama, openrouter)
        **kwargs: Provider-specific arguments (api_key, model, base_url)

    Returns:
        AIProvider instance

    Raises:
        AIProviderError: If provider name is unknown
    """
    provider_cls = PROVIDERS.get(name)
    if provider_cls is None:
        available = list(PROVIDERS.keys())
        raise AIProviderError(f"Unknown provider: {name}. Available: {available}")
    return provider_cls(**kwargs)
def get_provider_info(name: Optional[str] = None) -> dict:
    """Get provider metadata for UI display.

    Args:
        name: Optional provider name. If None (or empty), returns all
            providers' info.

    Returns:
        Info dict for a single provider ({} when the name is unknown),
        or the full PROVIDER_INFO mapping when no name is given.
    """
    # Falsy check on purpose: both None and "" mean "give me everything".
    if name:
        return PROVIDER_INFO.get(name, {})
    return PROVIDER_INFO
# Public API of the package: the base types, the registries, and the
# two module-level helper functions defined above.
__all__ = [
    'AIProvider',
    'AIProviderError',
    'PROVIDERS',
    'PROVIDER_INFO',
    'get_provider',
    'get_provider_info',
]

View File

@@ -0,0 +1,65 @@
"""Anthropic (Claude) provider implementation.
Anthropic's Claude models are excellent for text generation and translation.
Claude Haiku is particularly fast and affordable for notification enhancement.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class AnthropicProvider(AIProvider):
    """Anthropic provider using their Messages API."""

    NAME = "anthropic"
    DEFAULT_MODEL = "claude-3-haiku-20240307"
    REQUIRES_API_KEY = True
    API_URL = "https://api.anthropic.com/v1/messages"
    # Required versioning header for the Messages API.
    API_VERSION = "2023-06-01"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Anthropic's API.

        Note: Anthropic uses a different API format than OpenAI.
        The system prompt goes in a separate top-level field, not in messages.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing, the request fails,
                or the response has an unexpected shape
        """
        if not self.api_key:
            raise AIProviderError("API key required for Anthropic")

        # Anthropic puts the system prompt in a top-level 'system' field.
        payload = {
            'model': self.model,
            'system': system_prompt,
            'messages': [
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
        }
        headers = {
            'Content-Type': 'application/json',
            'x-api-key': self.api_key,
            'anthropic-version': self.API_VERSION,
        }

        result = self._make_request(self.API_URL, payload, headers)
        try:
            # Anthropic returns content as an array of content blocks;
            # the generated text lives in the first block's 'text' field.
            content = result['content']
            if isinstance(content, list) and len(content) > 0:
                return content[0].get('text', '').strip()
            return str(content).strip()
        except (KeyError, IndexError) as e:
            # Chain the original error so debugging keeps the root cause.
            raise AIProviderError(f"Unexpected response format: {e}") from e

View File

@@ -0,0 +1,141 @@
"""Base class for AI providers."""
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
class AIProviderError(Exception):
    """Exception for AI provider errors.

    Raised by provider implementations for missing API keys, HTTP or
    connection failures, and unexpected response formats.
    """
    pass
class AIProvider(ABC):
    """Abstract base class for AI providers.

    All provider implementations must inherit from this class and implement
    the generate() method.
    """

    # Provider metadata (override in subclasses)
    NAME = "base"
    DEFAULT_MODEL = ""
    REQUIRES_API_KEY = True

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize the AI provider.

        Args:
            api_key: API key for authentication (not required for local providers)
            model: Model name to use (defaults to DEFAULT_MODEL if empty)
            base_url: Base URL for API calls (used by Ollama and custom endpoints)
        """
        self.api_key = api_key
        self.model = model or self.DEFAULT_MODEL
        self.base_url = base_url

    @abstractmethod
    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response from the AI model.

        Args:
            system_prompt: System instructions for the model
            user_message: User message/query to process
            max_tokens: Maximum tokens in the response

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If there's an error communicating with the provider
        """
        pass

    def test_connection(self) -> Dict[str, Any]:
        """Test the connection to the AI provider.

        Sends a simple test message to verify the provider is accessible
        and the API key is valid.

        Returns:
            Dictionary with:
                - success: bool indicating if connection succeeded
                - message: Human-readable status message
                - model: Model name being used
        """
        try:
            response = self.generate(
                system_prompt="You are a test assistant. Respond with exactly: CONNECTION_OK",
                user_message="Test connection",
                max_tokens=20
            )
            if not response:
                return {
                    'success': False,
                    'message': 'No response received from provider',
                    'model': self.model
                }
            # "CONNECTION_OK" contains "CONNECTION", so a single substring
            # test covers both the exact and the partial echo.
            if "CONNECTION" in response.upper():
                return {
                    'success': True,
                    'message': 'Connection successful',
                    'model': self.model
                }
            # Different reply text, but the round trip itself worked.
            return {
                'success': True,
                'message': 'Connected (response received)',
                'model': self.model
            }
        except AIProviderError as e:
            return {
                'success': False,
                'message': str(e),
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f'Unexpected error: {str(e)}',
                'model': self.model
            }

    def _make_request(self, url: str, payload: dict, headers: dict,
                      timeout: int = 15) -> dict:
        """Make HTTP request to AI provider API.

        Args:
            url: API endpoint URL
            payload: JSON payload to send
            headers: HTTP headers
            timeout: Request timeout in seconds

        Returns:
            Parsed JSON response

        Raises:
            AIProviderError: If request fails
        """
        # Imported lazily so providers that never issue a request (e.g. in
        # unit tests) don't pay for it at module import time.
        import json
        import urllib.request
        import urllib.error

        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(url, data=data, headers=headers, method='POST')
        try:
            with urllib.request.urlopen(req, timeout=timeout) as resp:
                return json.loads(resp.read().decode('utf-8'))
        except urllib.error.HTTPError as e:
            error_body = ""
            try:
                error_body = e.read().decode('utf-8')
            except Exception:
                pass  # best effort: keep at least the status code/reason
            raise AIProviderError(f"HTTP {e.code}: {error_body or e.reason}") from e
        except urllib.error.URLError as e:
            raise AIProviderError(f"Connection error: {e.reason}") from e
        except json.JSONDecodeError as e:
            raise AIProviderError(f"Invalid JSON response: {e}") from e
        except Exception as e:
            raise AIProviderError(f"Request failed: {str(e)}") from e

View File

@@ -0,0 +1,74 @@
"""Google Gemini provider implementation.
Google's Gemini models offer a free tier and excellent quality/price ratio.
Gemini 1.5 Flash is particularly fast and cost-effective.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class GeminiProvider(AIProvider):
    """Google Gemini provider using the Generative Language API."""

    NAME = "gemini"
    DEFAULT_MODEL = "gemini-1.5-flash"
    REQUIRES_API_KEY = True
    API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Google's Gemini API.

        Note: Gemini uses a different API format. System instructions
        go in a separate systemInstruction field.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for Gemini")

        url = f"{self.API_BASE}/{self.model}:generateContent"
        # Gemini uses a specific format with a contents array and a
        # separate systemInstruction field.
        payload = {
            'systemInstruction': {
                'parts': [{'text': system_prompt}]
            },
            'contents': [
                {
                    'role': 'user',
                    'parts': [{'text': user_message}]
                }
            ],
            'generationConfig': {
                'maxOutputTokens': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
            # Send the API key as a header instead of a ?key= query parameter
            # so it never appears in URLs (server logs, proxies, tracebacks).
            'x-goog-api-key': self.api_key,
        }

        result = self._make_request(url, payload, headers)
        try:
            # Gemini returns a candidates array; the text lives in the first
            # candidate's content.parts[0].text.
            candidates = result.get('candidates', [])
            if candidates:
                content = candidates[0].get('content', {})
                parts = content.get('parts', [])
                if parts:
                    return parts[0].get('text', '').strip()
            raise AIProviderError("No content in response")
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}") from e

View File

@@ -0,0 +1,56 @@
"""Groq AI provider implementation.
Groq provides fast inference with a generous free tier (30 requests/minute).
Uses the OpenAI-compatible API format.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class GroqProvider(AIProvider):
    """Groq AI provider using their OpenAI-compatible API."""

    NAME = "groq"
    DEFAULT_MODEL = "llama-3.3-70b-versatile"
    REQUIRES_API_KEY = True
    API_URL = "https://api.groq.com/openai/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using Groq's API.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        # Groq always requires authentication; fail fast with a clear error.
        if not self.api_key:
            raise AIProviderError("API key required for Groq")

        chat_messages = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': user_message},
        ]
        request_body = {
            'model': self.model,
            'messages': chat_messages,
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        auth_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }

        response = self._make_request(self.API_URL, request_body, auth_headers)
        try:
            # OpenAI-compatible shape: choices[0].message.content holds the text.
            first_choice = response['choices'][0]
            return first_choice['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")

View File

@@ -0,0 +1,118 @@
"""Ollama provider implementation.
Ollama enables 100% local AI execution with no costs and complete privacy.
No internet connection required - perfect for sensitive enterprise environments.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OllamaProvider(AIProvider):
    """Ollama provider for local AI execution."""

    NAME = "ollama"
    DEFAULT_MODEL = "llama3.2"
    REQUIRES_API_KEY = False
    DEFAULT_URL = "http://localhost:11434"

    def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
        """Initialize Ollama provider.

        Args:
            api_key: Not used for Ollama (local execution)
            model: Model name (default: llama3.2)
            base_url: Ollama server URL (default: http://localhost:11434)
        """
        super().__init__(api_key, model, base_url)
        # Fall back to the standard local Ollama endpoint when no URL is given.
        if not self.base_url:
            self.base_url = self.DEFAULT_URL

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using the local Ollama server.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length (maps to Ollama's num_predict)

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If the Ollama server is unreachable
        """
        url = f"{self.base_url.rstrip('/')}/api/chat"
        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'stream': False,  # single JSON response instead of a token stream
            'options': {
                'num_predict': max_tokens,
                'temperature': 0.3,
            }
        }
        headers = {
            'Content-Type': 'application/json',
        }
        try:
            # Local models can take a while to load; allow a longer timeout.
            result = self._make_request(url, payload, headers, timeout=30)
        except AIProviderError as e:
            # Translate generic connection failures into an actionable hint.
            if "Connection" in str(e) or "refused" in str(e).lower():
                raise AIProviderError(
                    f"Cannot connect to Ollama at {self.base_url}. "
                    "Make sure Ollama is running (ollama serve)"
                ) from e
            raise
        try:
            message = result.get('message', {})
            return message.get('content', '').strip()
        except (KeyError, AttributeError) as e:
            raise AIProviderError(f"Unexpected response format: {e}") from e

    def test_connection(self):
        """Test connection to the Ollama server.

        Also checks that the configured model is installed locally before
        running the base-class round-trip generation test.
        """
        import json
        import urllib.request
        import urllib.error
        # First check if the server is running and list installed models.
        try:
            url = f"{self.base_url.rstrip('/')}/api/tags"
            req = urllib.request.Request(url, method='GET')
            with urllib.request.urlopen(req, timeout=5) as resp:
                data = json.loads(resp.read().decode('utf-8'))
            full_names = [m.get('name', '') for m in data.get('models', [])]
            base_names = [name.split(':')[0] for name in full_names]
            # The configured model may carry an explicit tag ("llama3.2:3b")
            # or be bare ("llama3.2"). A tagged name must match an installed
            # tag exactly (Ollama treats a missing tag as ":latest"); a bare
            # name matches any installed tag of that model.
            if ':' in self.model:
                available = self.model in full_names
            else:
                available = (self.model in base_names
                             or f"{self.model}:latest" in full_names)
            if not available:
                return {
                    'success': False,
                    'message': f"Model '{self.model}' not found. Available: {', '.join(base_names[:5])}...",
                    'model': self.model
                }
        except urllib.error.URLError:
            return {
                'success': False,
                'message': f"Cannot connect to Ollama at {self.base_url}. Make sure Ollama is running.",
                'model': self.model
            }
        except Exception as e:
            return {
                'success': False,
                'message': f"Error checking Ollama: {str(e)}",
                'model': self.model
            }
        # Server is up and the model exists: run the actual generation test.
        return super().test_connection()

View File

@@ -0,0 +1,56 @@
"""OpenAI provider implementation.
OpenAI is the industry standard for AI APIs. gpt-4o-mini provides
excellent quality at a reasonable price point.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OpenAIProvider(AIProvider):
    """OpenAI provider using their Chat Completions API."""

    NAME = "openai"
    DEFAULT_MODEL = "gpt-4o-mini"
    REQUIRES_API_KEY = True
    API_URL = "https://api.openai.com/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenAI's API.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        # Guard clause: never build a request without credentials.
        if not self.api_key:
            raise AIProviderError("API key required for OpenAI")

        request_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        request_body = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message},
            ],
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }

        response = self._make_request(self.API_URL, request_body, request_headers)
        try:
            # Chat Completions shape: choices[0].message.content is the text.
            choice = response['choices'][0]
            return choice['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")

View File

@@ -0,0 +1,62 @@
"""OpenRouter provider implementation.
OpenRouter is an aggregator that provides access to 100+ AI models
using a single API key. Maximum flexibility for choosing models.
Uses OpenAI-compatible API format.
"""
from typing import Optional
from .base import AIProvider, AIProviderError
class OpenRouterProvider(AIProvider):
    """OpenRouter provider for multi-model access."""

    NAME = "openrouter"
    DEFAULT_MODEL = "meta-llama/llama-3.3-70b-instruct"
    REQUIRES_API_KEY = True
    API_URL = "https://openrouter.ai/api/v1/chat/completions"

    def generate(self, system_prompt: str, user_message: str,
                 max_tokens: int = 200) -> Optional[str]:
        """Generate a response using OpenRouter's API.

        OpenRouter uses an OpenAI-compatible format with additional
        headers for app identification.

        Args:
            system_prompt: System instructions
            user_message: User message to process
            max_tokens: Maximum response length

        Returns:
            Generated text or None if failed

        Raises:
            AIProviderError: If API key is missing or request fails
        """
        if not self.api_key:
            raise AIProviderError("API key required for OpenRouter")

        conversation = [
            {'role': 'system', 'content': system_prompt},
            {'role': 'user', 'content': user_message},
        ]
        body = {
            'model': self.model,
            'messages': conversation,
            'max_tokens': max_tokens,
            'temperature': 0.3,
        }
        # HTTP-Referer / X-Title identify the calling app to OpenRouter.
        request_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
            'HTTP-Referer': 'https://github.com/MacRimi/ProxMenux',
            'X-Title': 'ProxMenux Monitor',
        }

        reply = self._make_request(self.API_URL, body, request_headers)
        try:
            # OpenAI-compatible shape: choices[0].message.content holds the text.
            return reply['choices'][0]['message']['content'].strip()
        except (KeyError, IndexError) as e:
            raise AIProviderError(f"Unexpected response format: {e}")