Update notification service

This commit is contained in:
MacRimi
2026-03-20 21:45:19 +01:00
parent 22cd2e4bb3
commit c24c10a13a
13 changed files with 542 additions and 150 deletions

View File

@@ -29,40 +29,35 @@ PROVIDERS = {
}
# Provider metadata for UI display
# Note: No hardcoded models - users load models dynamically from each provider
PROVIDER_INFO = {
'groq': {
'name': 'Groq',
'default_model': 'llama-3.3-70b-versatile',
'description': 'Fast inference, generous free tier (30 req/min). Ideal to get started.',
'requires_api_key': True,
},
'openai': {
'name': 'OpenAI',
'default_model': 'gpt-4o-mini',
'description': 'Industry standard. Very accurate and widely used.',
'requires_api_key': True,
},
'anthropic': {
'name': 'Anthropic (Claude)',
'default_model': 'claude-3-5-haiku-latest',
'description': 'Excellent for writing and translation. Fast and affordable.',
'requires_api_key': True,
},
'gemini': {
'name': 'Google Gemini',
'default_model': 'gemini-2.0-flash',
'description': 'Free tier available, very good quality/price ratio.',
'requires_api_key': True,
},
'ollama': {
'name': 'Ollama (Local)',
'default_model': 'llama3.2',
'description': '100% local execution. No costs, complete privacy, no internet required.',
'requires_api_key': False,
},
'openrouter': {
'name': 'OpenRouter',
'default_model': 'meta-llama/llama-3.3-70b-instruct',
'description': 'Aggregator with access to 100+ models using a single API key. Maximum flexibility.',
'requires_api_key': True,
},

View File

@@ -1,9 +1,9 @@
"""Anthropic (Claude) provider implementation.
Anthropic's Claude models are excellent for text generation and translation.
Claude 3.5 Haiku is fast and affordable for notification enhancement.
Models use "-latest" aliases that auto-update to newest versions.
"""
from typing import Optional
from typing import Optional, List
from .base import AIProvider, AIProviderError
@@ -11,11 +11,26 @@ class AnthropicProvider(AIProvider):
"""Anthropic provider using their Messages API."""
NAME = "anthropic"
DEFAULT_MODEL = "claude-3-5-haiku-latest"
REQUIRES_API_KEY = True
API_URL = "https://api.anthropic.com/v1/messages"
API_VERSION = "2023-06-01"
# Known stable model aliases (Anthropic doesn't have a public models list API)
# These use "-latest" which auto-updates to the newest version
KNOWN_MODELS = [
"claude-3-5-haiku-latest",
"claude-3-5-sonnet-latest",
"claude-3-opus-latest",
]
def list_models(self) -> List[str]:
    """Return the known Anthropic model aliases.

    Anthropic has no public model-listing API, so this relies on the
    stable "-latest" aliases, which automatically track the newest
    release of each model family.

    Returns:
        A new list of alias IDs. A copy is returned so that callers
        mutating the result cannot corrupt the shared KNOWN_MODELS
        class attribute for every instance.
    """
    return list(self.KNOWN_MODELS)
def generate(self, system_prompt: str, user_message: str,
max_tokens: int = 200) -> Optional[str]:
"""Generate a response using Anthropic's API.

View File

@@ -1,6 +1,6 @@
"""Base class for AI providers."""
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any
from typing import Optional, Dict, Any, List
class AIProviderError(Exception):
@@ -17,7 +17,6 @@ class AIProvider(ABC):
# Provider metadata (override in subclasses)
NAME = "base"
DEFAULT_MODEL = ""
REQUIRES_API_KEY = True
def __init__(self, api_key: str = "", model: str = "", base_url: str = ""):
@@ -25,11 +24,11 @@ class AIProvider(ABC):
Args:
api_key: API key for authentication (not required for local providers)
model: Model name to use (defaults to DEFAULT_MODEL if empty)
model: Model name to use (required - user selects from loaded models)
base_url: Base URL for API calls (used by Ollama and custom endpoints)
"""
self.api_key = api_key
self.model = model or self.DEFAULT_MODEL
self.model = model # Model must be provided by user after loading from provider
self.base_url = base_url
@abstractmethod
@@ -100,6 +99,39 @@ class AIProvider(ABC):
'model': self.model
}
def list_models(self) -> List[str]:
    """List available models from the provider.

    Subclasses override this with a provider-specific implementation;
    this base version simply reports that listing is unsupported.

    Returns:
        Model IDs offered by the provider; empty when the provider
        does not support listing.
    """
    return []
def get_recommended_model(self) -> str:
    """Pick a model to recommend for this provider.

    Keeps the currently configured model when the provider reports it as
    available; otherwise falls back to the first model the provider
    lists. Fully dynamic — no hardcoded fallback models.

    Returns:
        A model ID, or whatever ``self.model`` holds (possibly empty)
        when the provider cannot list any models.
    """
    choices = self.list_models()
    if not choices:
        # No list to validate against — keep the configured model as-is.
        return self.model
    # Prefer the configured model when it is still offered; otherwise the
    # first listed model is usually a reasonable default.
    return self.model if (self.model and self.model in choices) else choices[0]
def _make_request(self, url: str, payload: dict, headers: dict,
timeout: int = 15) -> dict:
"""Make HTTP request to AI provider API.

View File

@@ -1,9 +1,12 @@
"""Google Gemini provider implementation.
Google's Gemini models offer a free tier and excellent quality/price ratio.
Gemini 2.0 Flash is fast and cost-effective with improved capabilities.
Models are loaded dynamically from the API - no hardcoded model names.
"""
from typing import Optional
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
@@ -11,10 +14,44 @@ class GeminiProvider(AIProvider):
"""Google Gemini provider using the Generative Language API."""
NAME = "gemini"
DEFAULT_MODEL = "gemini-2.0-flash"
REQUIRES_API_KEY = True
API_BASE = "https://generativelanguage.googleapis.com/v1beta/models"
def list_models(self) -> List[str]:
    """List Gemini models usable for text generation.

    Queries the Generative Language API model catalog and keeps only the
    entries whose supported generation methods include ``generateContent``.

    Returns:
        Model IDs (with the ``models/`` prefix stripped), or an empty
        list when no API key is configured or the request fails.
    """
    if not self.api_key:
        return []
    try:
        request = urllib.request.Request(
            f"{self.API_BASE}?key={self.api_key}", method='GET'
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.loads(response.read().decode('utf-8'))
        usable = []
        for entry in payload.get('models', []):
            # Skip models that cannot serve generateContent (embeddings, etc.)
            if 'generateContent' not in entry.get('supportedGenerationMethods', []):
                continue
            raw_name = entry.get('name', '')
            # The API reports names like "models/gemini-pro"; strip the prefix.
            usable.append(raw_name[7:] if raw_name.startswith('models/') else raw_name)
        return usable
    except Exception as exc:
        # Best-effort: listing failures degrade to "no models", never a crash.
        print(f"[GeminiProvider] Failed to list models: {exc}")
        return []
def generate(self, system_prompt: str, user_message: str,
max_tokens: int = 200) -> Optional[str]:
"""Generate a response using Google's Gemini API.

View File

@@ -3,7 +3,10 @@
Groq provides fast inference with a generous free tier (30 requests/minute).
Uses the OpenAI-compatible API format.
"""
from typing import Optional
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
@@ -11,9 +14,39 @@ class GroqProvider(AIProvider):
"""Groq AI provider using their OpenAI-compatible API."""
NAME = "groq"
DEFAULT_MODEL = "llama-3.3-70b-versatile"
REQUIRES_API_KEY = True
API_URL = "https://api.groq.com/openai/v1/chat/completions"
MODELS_URL = "https://api.groq.com/openai/v1/models"
def list_models(self) -> List[str]:
    """Fetch the IDs of models currently offered by Groq.

    Returns:
        Model IDs usable with the chat completions endpoint, or an empty
        list when no API key is configured or the request fails.
    """
    if not self.api_key:
        return []
    try:
        request = urllib.request.Request(
            self.MODELS_URL,
            headers={'Authorization': f'Bearer {self.api_key}'},
            method='GET',
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.loads(response.read().decode('utf-8'))
        # The endpoint mirrors OpenAI's /models shape: {"data": [{"id": ...}]}
        return [entry['id'] for entry in payload.get('data', []) if entry.get('id')]
    except Exception as exc:
        # Best-effort: a failed listing degrades to an empty result.
        print(f"[GroqProvider] Failed to list models: {exc}")
        return []
def generate(self, system_prompt: str, user_message: str,
max_tokens: int = 200) -> Optional[str]:

View File

@@ -11,7 +11,6 @@ class OllamaProvider(AIProvider):
"""Ollama provider for local AI execution."""
NAME = "ollama"
DEFAULT_MODEL = "llama3.2"
REQUIRES_API_KEY = False
DEFAULT_URL = "http://localhost:11434"
@@ -20,7 +19,7 @@ class OllamaProvider(AIProvider):
Args:
api_key: Not used for Ollama (local execution)
model: Model name (default: llama3.2)
model: Model name (user must select from loaded models)
base_url: Ollama server URL (default: http://localhost:11434)
"""
super().__init__(api_key, model, base_url)

View File

@@ -1,9 +1,12 @@
"""OpenAI provider implementation.
OpenAI is the industry standard for AI APIs. gpt-4o-mini provides
excellent quality at a reasonable price point.
OpenAI is the industry standard for AI APIs.
Models are loaded dynamically from the API.
"""
from typing import Optional
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
@@ -20,9 +23,49 @@ class OpenAIProvider(AIProvider):
"""
NAME = "openai"
DEFAULT_MODEL = "gpt-4o-mini"
REQUIRES_API_KEY = True
DEFAULT_API_URL = "https://api.openai.com/v1/chat/completions"
DEFAULT_MODELS_URL = "https://api.openai.com/v1/models"
def list_models(self) -> List[str]:
    """Fetch chat-capable model IDs from OpenAI (or a compatible endpoint).

    Honors a custom ``base_url`` when one is set, normalizing it so the
    request goes to ``<base>/v1/models``. Results are filtered with a
    name heuristic so embedding and other non-chat models are left out.

    Returns:
        Matching model IDs, or an empty list when no API key is
        configured or the request fails.
    """
    if not self.api_key:
        return []
    try:
        models_url = self.DEFAULT_MODELS_URL
        if self.base_url:
            # Normalize "<base>" or "<base>/" so the path ends in /v1/models.
            root = self.base_url.rstrip('/')
            if not root.endswith('/v1'):
                root = f"{root}/v1"
            models_url = f"{root}/models"
        request = urllib.request.Request(
            models_url,
            headers={'Authorization': f'Bearer {self.api_key}'},
            method='GET',
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.loads(response.read().decode('utf-8'))
        # Heuristic chat filter. NOTE(review): this excludes the newer
        # "o1"/"o3" reasoning models — confirm whether they should be listed.
        return [
            entry['id']
            for entry in payload.get('data', [])
            if entry.get('id')
            and ('gpt' in entry['id'].lower() or 'turbo' in entry['id'].lower())
        ]
    except Exception as exc:
        print(f"[OpenAIProvider] Failed to list models: {exc}")
        return []
def _get_api_url(self) -> str:
"""Get the API URL, using custom base_url if provided."""

View File

@@ -4,7 +4,10 @@ OpenRouter is an aggregator that provides access to 100+ AI models
using a single API key. Maximum flexibility for choosing models.
Uses OpenAI-compatible API format.
"""
from typing import Optional
from typing import Optional, List
import json
import urllib.request
import urllib.error
from .base import AIProvider, AIProviderError
@@ -12,9 +15,40 @@ class OpenRouterProvider(AIProvider):
"""OpenRouter provider for multi-model access."""
NAME = "openrouter"
DEFAULT_MODEL = "meta-llama/llama-3.3-70b-instruct"
REQUIRES_API_KEY = True
API_URL = "https://openrouter.ai/api/v1/chat/completions"
MODELS_URL = "https://openrouter.ai/api/v1/models"
def list_models(self) -> List[str]:
    """Fetch every model ID exposed by OpenRouter's catalog.

    OpenRouter aggregates 100+ models behind one API key; this returns
    all of them, unfiltered, in the order the /models endpoint reports.

    Returns:
        All model IDs, or an empty list when no API key is configured
        or the request fails.
    """
    if not self.api_key:
        return []
    try:
        request = urllib.request.Request(
            self.MODELS_URL,
            headers={'Authorization': f'Bearer {self.api_key}'},
            method='GET',
        )
        with urllib.request.urlopen(request, timeout=10) as response:
            payload = json.loads(response.read().decode('utf-8'))
        # Same response shape as OpenAI's /models: {"data": [{"id": ...}]}
        return [entry['id'] for entry in payload.get('data', []) if entry.get('id')]
    except Exception as exc:
        # Best-effort: a failed listing degrades to an empty result.
        print(f"[OpenRouterProvider] Failed to list models: {exc}")
        return []
def generate(self, system_prompt: str, user_message: str,
max_tokens: int = 200) -> Optional[str]: