Improve caching. (#431)

* Move the caching module to core.

Add an in-memory cache for caching function and method results
during an energy management run (optimization run). Two decorators
are provided, one for methods and one for functions.
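
A minimal usage sketch (the decorators are defined in akkudoktoreos.core.cache;
the class and function here are illustrative):

    from akkudoktoreos.core.cache import cache_until_update, cachemethod_until_update

    class Provider:
        @cachemethod_until_update
        def expensive_method(self, param: str) -> str:
            # Cached until the next energy management run clears the store.
            return f"Computed {param}"

    @cache_until_update
    def expensive_function(param: str) -> str:
        return f"Computed {param}"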

* Improve the file cache store with load and save functions.

Make EOS load the cache file store on startup and save it on shutdown.
Add a cyclic task that removes outdated cache files from the cache file store.
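
Sketch of the load/save/cleanup calls (method names as defined on
CacheFileStore in akkudoktoreos.core.cache):

    from akkudoktoreos.core.cache import CacheFileStore
    from akkudoktoreos.utils.datetimeutil import to_datetime

    CacheFileStore().load_store()                          # on EOS startup
    CacheFileStore().clear(before_datetime=to_datetime())  # cyclic cleanup task
    CacheFileStore().save_store()                          # on EOS shutdown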

* Improve startup of EOSdash by EOS

Make EOS start EOSdash in accordance with the path configuration given in EOS.
The whole environment of EOS is now passed on to EOSdash.
This should also prevent test errors due to unwanted or wrong config file creation.

Both servers now provide a health endpoint that can be used to detect whether
the server is running. This is now also used for testing.
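
For example (assuming a local instance on the default ports, EOS on 8503 and
EOSdash on 8504):

    import requests

    assert requests.get("http://127.0.0.1:8503/v1/health").json()["status"] == "alive"
    assert requests.get("http://127.0.0.1:8504/eosdash/health").json()["status"] == "alive"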

* Improve startup of EOS

EOS now has an energy management task that runs shortly after startup.
It tries to execute energy management runs with predictions newly fetched,
or initialized from cached data on the first run.

* Improve shutdown of EOS

EOS now has a shutdown task that shuts EOS down gracefully, with some
time delay to allow REST API requests for shutdown or restart to be fully serviced.

* Improve EMS

Add an energy management task for repeated energy management, controlled by the
startup delay and interval configuration parameters (see the sketch below).
Rename EnergieManagementSystem to the English EnergyManagement.
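
A settings sketch (fields as defined in akkudoktoreos.core.emsettings):

    from akkudoktoreos.core.emsettings import EnergyManagementCommonSettings

    # First run 5 seconds after startup, then a run every 300 seconds.
    ems_settings = EnergyManagementCommonSettings(startup_delay=5, interval=300)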

* Add administration endpoints

  - Endpoints to control caching from the REST API.
  - Endpoints to control server restart (will not work on Windows) and shutdown
    from the REST API. See the example below.
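
For example (assuming a local EOS instance on the default port):

    import requests

    base = "http://127.0.0.1:8503"
    requests.get(f"{base}/v1/admin/cache")            # current cache management data
    requests.post(f"{base}/v1/admin/cache/clear")     # delete expired cache files
    requests.post(f"{base}/v1/admin/server/restart")  # not supported on Windows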

* Improve doc generation

Use "\n" linenend convention also on Windows when generating doc files.
Replace Windows specific 127.0.0.1 address by standard 0.0.0.0.

* Improve test support (to be able to test caching)

  - Add a system test option to pytest for running tests with "real" resources
  - Add new test fixtures to start a server for a test class or test function
  - Make the kill signal adapt to Windows/Linux
  - Consistently use "\n" for line endings when writing text files in doc tests
  - Fix test_logging under Windows
  - Fix the conftest config_default_dirs test fixture under Windows

From @Lasall

* Improve Windows support

 - Use 127.0.0.1 as the default config host (model defaults) and
   additionally redirect 0.0.0.0 to localhost on Windows (because the default
   config file still has 0.0.0.0).
 - Update the install/startup instructions, as package installation is
   currently required.

Signed-off-by: Bobby Noelte <b0661n0e17e@gmail.com>


@@ -22,12 +22,13 @@ from pydantic_settings import (
PydanticBaseSettingsSource,
SettingsConfigDict,
)
from pydantic_settings.sources import ConfigFileSourceMixin
# settings
from akkudoktoreos.config.configabc import SettingsBaseModel
from akkudoktoreos.core.cachesettings import CacheCommonSettings
from akkudoktoreos.core.coreabc import SingletonMixin
from akkudoktoreos.core.decorators import classproperty
from akkudoktoreos.core.emsettings import EnergyManagementCommonSettings
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.logsettings import LoggingCommonSettings
from akkudoktoreos.core.pydantic import access_nested_value, merge_models
@@ -96,10 +97,6 @@ class GeneralSettings(SettingsBaseModel):
default="output", description="Sub-path for the EOS output data directory."
)
data_cache_subpath: Optional[Path] = Field(
default="cache", description="Sub-path for the EOS cache data directory."
)
latitude: Optional[float] = Field(
default=52.52,
ge=-90.0,
@@ -128,12 +125,6 @@ class GeneralSettings(SettingsBaseModel):
"""Compute data_output_path based on data_folder_path."""
return get_absolute_path(self.data_folder_path, self.data_output_subpath)
@computed_field # type: ignore[prop-decorator]
@property
def data_cache_path(self) -> Optional[Path]:
"""Compute data_cache_path based on data_folder_path."""
return get_absolute_path(self.data_folder_path, self.data_cache_subpath)
@computed_field # type: ignore[prop-decorator]
@property
def config_folder_path(self) -> Optional[Path]:
@@ -153,18 +144,62 @@ class SettingsEOS(BaseSettings):
Used by updating the configuration with specific settings only.
"""
general: Optional[GeneralSettings] = None
logging: Optional[LoggingCommonSettings] = None
devices: Optional[DevicesCommonSettings] = None
measurement: Optional[MeasurementCommonSettings] = None
optimization: Optional[OptimizationCommonSettings] = None
prediction: Optional[PredictionCommonSettings] = None
elecprice: Optional[ElecPriceCommonSettings] = None
load: Optional[LoadCommonSettings] = None
pvforecast: Optional[PVForecastCommonSettings] = None
weather: Optional[WeatherCommonSettings] = None
server: Optional[ServerCommonSettings] = None
utils: Optional[UtilsCommonSettings] = None
general: Optional[GeneralSettings] = Field(
default=None,
description="General Settings",
)
cache: Optional[CacheCommonSettings] = Field(
default=None,
description="Cache Settings",
)
ems: Optional[EnergyManagementCommonSettings] = Field(
default=None,
description="Energy Management Settings",
)
logging: Optional[LoggingCommonSettings] = Field(
default=None,
description="Logging Settings",
)
devices: Optional[DevicesCommonSettings] = Field(
default=None,
description="Devices Settings",
)
measurement: Optional[MeasurementCommonSettings] = Field(
default=None,
description="Measurement Settings",
)
optimization: Optional[OptimizationCommonSettings] = Field(
default=None,
description="Optimization Settings",
)
prediction: Optional[PredictionCommonSettings] = Field(
default=None,
description="Prediction Settings",
)
elecprice: Optional[ElecPriceCommonSettings] = Field(
default=None,
description="Electricity Price Settings",
)
load: Optional[LoadCommonSettings] = Field(
default=None,
description="Load Settings",
)
pvforecast: Optional[PVForecastCommonSettings] = Field(
default=None,
description="PV Forecast Settings",
)
weather: Optional[WeatherCommonSettings] = Field(
default=None,
description="Weather Settings",
)
server: Optional[ServerCommonSettings] = Field(
default=None,
description="Server Settings",
)
utils: Optional[UtilsCommonSettings] = Field(
default=None,
description="Utilities Settings",
)
model_config = SettingsConfigDict(
env_nested_delimiter="__",
@@ -181,6 +216,8 @@ class SettingsEOSDefaults(SettingsEOS):
"""
general: GeneralSettings = GeneralSettings()
cache: CacheCommonSettings = CacheCommonSettings()
ems: EnergyManagementCommonSettings = EnergyManagementCommonSettings()
logging: LoggingCommonSettings = LoggingCommonSettings()
devices: DevicesCommonSettings = DevicesCommonSettings()
measurement: MeasurementCommonSettings = MeasurementCommonSettings()
@@ -290,7 +327,7 @@ class ConfigEOS(SingletonMixin, SettingsEOSDefaults):
dotenv_settings,
]
file_settings: Optional[ConfigFileSourceMixin] = None
file_settings: Optional[JsonConfigSettingsSource] = None
config_file, exists = cls._get_config_file_path()
config_dir = config_file.parent
if not exists:
@@ -335,13 +372,15 @@ class ConfigEOS(SingletonMixin, SettingsEOSDefaults):
"""
if hasattr(self, "_initialized"):
return
super().__init__(*args, **kwargs)
self._create_initial_config_file()
self._update_data_folder_path()
self._setup(self, *args, **kwargs)
def _setup(self, *args: Any, **kwargs: Any) -> None:
"""Re-initialize global settings."""
# Assure settings base knows EOS configuration
SettingsBaseModel.config = self
# (Re-)load settings
SettingsEOSDefaults.__init__(self, *args, **kwargs)
# Init config file and data folder pathes
self._create_initial_config_file()
self._update_data_folder_path()
@@ -417,7 +456,7 @@ class ConfigEOS(SingletonMixin, SettingsEOSDefaults):
if self.general.config_file_path and not self.general.config_file_path.exists():
self.general.config_file_path.parent.mkdir(parents=True, exist_ok=True)
try:
with open(self.general.config_file_path, "w") as f:
with self.general.config_file_path.open("w", encoding="utf-8", newline="\n") as f:
f.write(self.model_dump_json(indent=4))
except Exception as e:
logger.error(
@@ -489,7 +528,7 @@ class ConfigEOS(SingletonMixin, SettingsEOSDefaults):
"""
if not self.general.config_file_path:
raise ValueError("Configuration file path unknown.")
with self.general.config_file_path.open("w", encoding=self.ENCODING) as f_out:
with self.general.config_file_path.open("w", encoding="utf-8", newline="\n") as f_out:
json_str = super().model_dump_json()
f_out.write(json_str)


@@ -1,9 +1,12 @@
"""Abstract and base classes for configuration."""
from typing import Any, ClassVar
from akkudoktoreos.core.pydantic import PydanticBaseModel
class SettingsBaseModel(PydanticBaseModel):
"""Base model class for all settings configurations."""
pass
# EOS configuration - set by ConfigEOS
config: ClassVar[Any] = None


@@ -1,32 +1,14 @@
"""Class for in-memory managing of cache files.
"""In-memory and file caching.
The `CacheFileStore` class is a singleton-based, thread-safe key-value store for managing
temporary file objects, allowing the creation, retrieval, and management of cache files.
Classes:
--------
- CacheFileStore: A thread-safe, singleton class for in-memory managing of file-like cache objects.
- CacheFileStoreMeta: Metaclass for enforcing the singleton behavior in `CacheFileStore`.
Example usage:
--------------
# CacheFileStore usage
>>> cache_store = CacheFileStore()
>>> cache_store.create('example_key')
>>> cache_file = cache_store.get('example_key')
>>> cache_file.write('Some data')
>>> cache_file.seek(0)
>>> print(cache_file.read()) # Output: 'Some data'
Notes:
------
- Cache files are automatically associated with the current date unless specified.
This module provides decorators and classes for caching the results of
computations, both in memory (using an LRU cache) and in temporary files.
It also includes mechanisms for managing cache file expiration and retrieval.
"""
from __future__ import annotations
import functools
import hashlib
import inspect
import json
import os
import pickle
import tempfile
@@ -35,8 +17,8 @@ from typing import (
IO,
Any,
Callable,
ClassVar,
Dict,
Generic,
List,
Literal,
Optional,
@@ -44,29 +26,226 @@ from typing import (
TypeVar,
)
import cachebox
from pendulum import DateTime, Duration
from pydantic import BaseModel, ConfigDict, Field
from pydantic import Field
from akkudoktoreos.core.coreabc import ConfigMixin
from akkudoktoreos.core.coreabc import ConfigMixin, SingletonMixin
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import PydanticBaseModel
from akkudoktoreos.utils.datetimeutil import compare_datetimes, to_datetime, to_duration
logger = get_logger(__name__)
T = TypeVar("T")
# ---------------------------------
# In-Memory Caching Functionality
# ---------------------------------
# Define a type variable for methods and functions
TCallable = TypeVar("TCallable", bound=Callable[..., Any])
def cache_until_update_store_callback(event: int, key: Any, value: Any) -> None:
"""Calback function for CacheUntilUpdateStore."""
CacheUntilUpdateStore.last_event = event
CacheUntilUpdateStore.last_key = key
CacheUntilUpdateStore.last_value = value
if event == cachebox.EVENT_MISS:
CacheUntilUpdateStore.miss_count += 1
elif event == cachebox.EVENT_HIT:
CacheUntilUpdateStore.hit_count += 1
else:
# unreachable code
raise NotImplementedError
class CacheUntilUpdateStore(SingletonMixin):
"""Singleton-based in-memory LRU (Least Recently Used) cache.
This cache is shared across the application to store results of decorated
methods or functions until the next EMS (Energy Management System) update.
The cache uses an LRU eviction strategy, storing up to 100 items, with the oldest
items being evicted once the cache reaches its capacity.
"""
cache: ClassVar[cachebox.LRUCache] = cachebox.LRUCache(maxsize=100, iterable=None, capacity=100)
last_event: ClassVar[Optional[int]] = None
last_key: ClassVar[Any] = None
last_value: ClassVar[Any] = None
hit_count: ClassVar[int] = 0
miss_count: ClassVar[int] = 0
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initializes the `CacheUntilUpdateStore` instance with default parameters.
The cache uses an LRU eviction strategy with a maximum size of 100 items.
This cache is a singleton, meaning only one instance will exist throughout
the application lifecycle.
Example:
>>> cache = CacheUntilUpdateStore()
"""
if hasattr(self, "_initialized"):
return
super().__init__(*args, **kwargs)
def __getattr__(self, name: str) -> Any:
"""Propagates method calls to the cache object.
This method allows you to call methods on the underlying cache object,
and it will delegate the call to the cache's corresponding method.
Args:
name (str): The name of the method being called.
Returns:
Callable: A method bound to the cache object.
Raises:
AttributeError: If the cache object does not have the requested method.
Example:
>>> result = cache.get("key")
"""
# This will return an attribute of the target cache, or raise an AttributeError
return getattr(self.cache, name)
def __getitem__(self, key: Any) -> Any:
"""Retrieves an item from the cache by its key.
Args:
key (Any): The key used for subscripting to retrieve an item.
Returns:
Any: The value corresponding to the key in the cache.
Raises:
KeyError: If the key does not exist in the cache.
Example:
>>> value = cache["user_data"]
"""
return CacheUntilUpdateStore.cache[key]
def __setitem__(self, key: Any, value: Any) -> None:
"""Stores an item in the cache.
Args:
key (Any): The key used to store the item in the cache.
value (Any): The value to store.
Example:
>>> cache["user_data"] = {"name": "Alice", "age": 30}
"""
CacheUntilUpdateStore.cache[key] = value
def __len__(self) -> int:
"""Returns the number of items in the cache."""
return len(CacheUntilUpdateStore.cache)
def __repr__(self) -> str:
"""Provides a string representation of the CacheUntilUpdateStore object."""
return repr(CacheUntilUpdateStore.cache)
def clear(self) -> None:
"""Clears the cache, removing all stored items.
This method propagates the `clear` method call to the underlying cache object,
ensuring that the cache is emptied when necessary (e.g., at the end of the energy
management system run).
Example:
>>> cache.clear()
"""
if hasattr(self.cache, "clear") and callable(getattr(self.cache, "clear")):
CacheUntilUpdateStore.cache.clear()
CacheUntilUpdateStore.last_event = None
CacheUntilUpdateStore.last_key = None
CacheUntilUpdateStore.last_value = None
CacheUntilUpdateStore.miss_count = 0
CacheUntilUpdateStore.hit_count = 0
else:
raise AttributeError(f"'{self.cache.__class__.__name__}' object has no method 'clear'")
def cachemethod_until_update(method: TCallable) -> TCallable:
"""Decorator for in memory caching the result of an instance method.
This decorator caches the method's result in `CacheUntilUpdateStore`, ensuring
that subsequent calls with the same arguments return the cached result until the
next EMS update cycle.
Args:
method (Callable): The instance method to be decorated.
Returns:
Callable: The wrapped method with caching functionality.
Example:
>>> class MyClass:
>>> @cachemethod_until_update
>>> def expensive_method(self, param: str) -> str:
>>> # Perform expensive computation
>>> return f"Computed {param}"
"""
@cachebox.cachedmethod(
cache=CacheUntilUpdateStore().cache, callback=cache_until_update_store_callback
)
@functools.wraps(method)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
result = method(self, *args, **kwargs)
return result
return wrapper
def cache_until_update(func: TCallable) -> TCallable:
"""Decorator for in memory caching the result of a standalone function.
This decorator caches the function's result in `CacheUntilUpdateStore`, ensuring
that subsequent calls with the same arguments return the cached result until the
next EMS update cycle.
Args:
func (Callable): The function to be decorated.
Returns:
Callable: The wrapped function with caching functionality.
Example:
>>> @cache_until_update
>>> def expensive_function(param: str) -> str:
>>> # Perform expensive computation
>>> return f"Computed {param}"
"""
@cachebox.cached(
cache=CacheUntilUpdateStore().cache, callback=cache_until_update_store_callback
)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
result = func(*args, **kwargs)
return result
return wrapper
# ---------------------------------
# Cache File Management
# ---------------------------------
Param = ParamSpec("Param")
RetType = TypeVar("RetType")
class CacheFileRecord(BaseModel):
# Enable custom serialization globally in config
model_config = ConfigDict(
arbitrary_types_allowed=True,
use_enum_values=True,
validate_assignment=True,
)
class CacheFileRecord(PydanticBaseModel):
cache_file: Any = Field(..., description="File descriptor of the cache file.")
until_datetime: DateTime = Field(..., description="Datetime until the cache file is valid.")
ttl_duration: Optional[Duration] = Field(
@@ -74,24 +253,7 @@ class CacheFileRecord(BaseModel):
)
class CacheFileStoreMeta(type, Generic[T]):
"""A thread-safe implementation of CacheFileStore."""
_instances: dict[CacheFileStoreMeta[T], T] = {}
_lock: threading.Lock = threading.Lock()
"""Lock object to synchronize threads on first access to CacheFileStore."""
def __call__(cls) -> T:
"""Return CacheFileStore instance."""
with cls._lock:
if cls not in cls._instances:
instance = super().__call__()
cls._instances[cls] = instance
return cls._instances[cls]
class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
class CacheFileStore(ConfigMixin, SingletonMixin):
"""A key-value store that manages file-like tempfile objects to be used as cache files.
Cache files are associated with a date. If no date is specified, the cache files are
@@ -105,7 +267,7 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
store (dict): A dictionary that holds the in-memory cache file objects
with their associated keys and dates.
Example usage:
Example:
>>> cache_store = CacheFileStore()
>>> cache_store.create('example_file')
>>> cache_file = cache_store.get('example_file')
@@ -114,14 +276,18 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
>>> print(cache_file.read()) # Output: 'Some data'
"""
def __init__(self) -> None:
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initializes the CacheFileStore instance.
This constructor sets up an empty key-value store (a dictionary) where each key
corresponds to a cache file that is associated with a given key and an optional date.
"""
if hasattr(self, "_initialized"):
return
self._store: Dict[str, CacheFileRecord] = {}
self._store_lock = threading.Lock()
self._store_lock = threading.RLock()
self._store_file = self.config.cache.path().joinpath("cachefilestore.json")
super().__init__(*args, **kwargs)
def _until_datetime_by_options(
self,
@@ -329,9 +495,9 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
# File already available
cache_file_obj = cache_item.cache_file
else:
self.config.general.data_cache_path.mkdir(parents=True, exist_ok=True)
self.config.cache.path().mkdir(parents=True, exist_ok=True)
cache_file_obj = tempfile.NamedTemporaryFile(
mode=mode, delete=delete, suffix=suffix, dir=self.config.general.data_cache_path
mode=mode, delete=delete, suffix=suffix, dir=self.config.cache.path()
)
self._store[cache_file_key] = CacheFileRecord(
cache_file=cache_file_obj,
@@ -502,7 +668,7 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
def clear(
self,
clear_all: bool = False,
clear_all: Optional[bool] = None,
before_datetime: Optional[Any] = None,
) -> None:
"""Deletes all cache files or those expiring before `before_datetime`.
@@ -516,8 +682,6 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
Raises:
OSError: If there's an error during file deletion.
"""
delete_keys = [] # List of keys to delete, prevent deleting when traversing the store
# Some weird logic to prevent calling to_datetime on clear_all.
# Clear_all may be set on __del__. At this time some info for to_datetime will
# not be available anymore.
@@ -528,6 +692,8 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
before_datetime = to_datetime(before_datetime)
with self._store_lock: # Synchronize access to _store
delete_keys = [] # List of keys to delete, prevent deleting when traversing the store
for cache_file_key, cache_item in self._store.items():
# Some weird logic to prevent calling to_datetime on clear_all.
# Clear_all may be set on __del__. At this time some info for to_datetime will
@@ -566,6 +732,89 @@ class CacheFileStore(ConfigMixin, metaclass=CacheFileStoreMeta):
for delete_key in delete_keys:
del self._store[delete_key]
def current_store(self) -> dict:
"""Current state of the store.
Returns:
data (dict): current cache management data.
"""
with self._store_lock:
store_current = {}
for key, record in self._store.items():
ttl_duration = record.ttl_duration
if ttl_duration:
ttl_duration = ttl_duration.total_seconds()
store_current[key] = {
# Convert file-like objects to file paths for serialization
"cache_file": self._get_file_path(record.cache_file),
"mode": record.cache_file.mode,
"until_datetime": to_datetime(record.until_datetime, as_string=True),
"ttl_duration": ttl_duration,
}
return store_current
def save_store(self) -> dict:
"""Saves the current state of the store to a file.
Returns:
data (dict): cache management data that was saved.
"""
with self._store_lock:
self._store_file.parent.mkdir(parents=True, exist_ok=True)
store_to_save = self.current_store()
with self._store_file.open("w", encoding="utf-8", newline="\n") as f:
try:
json.dump(store_to_save, f, indent=4)
except Exception as e:
logger.error(f"Error saving cache file store: {e}")
return store_to_save
def load_store(self) -> dict:
"""Loads the state of the store from a file.
Returns:
data (dict): cache management data that was loaded.
"""
with self._store_lock:
store_loaded = {}
if self._store_file.exists():
with self._store_file.open("r", encoding="utf-8", newline=None) as f:
try:
store_to_load = json.load(f)
except Exception as e:
logger.error(
f"Error loading cache file store: {e}\n"
+ f"Deleting the store file {self._store_file}."
)
self._store_file.unlink()
return {}
for key, record in store_to_load.items():
if record is None:
continue
if key in self._store.keys():
# Already available - do not overwrite by record from file
continue
try:
cache_file_obj = open(
record["cache_file"], "rb+" if "b" in record["mode"] else "r+"
)
except Exception as e:
cache_file_record = record["cache_file"]
logger.warning(f"Can not open cache file '{cache_file_record}': {e}")
continue
ttl_duration = record["ttl_duration"]
if ttl_duration:
ttl_duration = to_duration(float(record["ttl_duration"]))
self._store[key] = CacheFileRecord(
cache_file=cache_file_obj,
until_datetime=record["until_datetime"],
ttl_duration=ttl_duration,
)
cache_file_obj.seek(0)
# Remember newly loaded
store_loaded[key] = record
return store_loaded
def cache_in_file(
ignore_params: List[str] = [],


@@ -0,0 +1,32 @@
"""Settings for caching.
Kept in an extra module to avoid cyclic dependencies on package import.
"""
from pathlib import Path
from typing import Optional
from pydantic import Field
from akkudoktoreos.config.configabc import SettingsBaseModel
class CacheCommonSettings(SettingsBaseModel):
"""Cache Configuration."""
subpath: Optional[Path] = Field(
default="cache", description="Sub-path for the EOS cache data directory."
)
cleanup_interval: float = Field(
default=5 * 60, description="Interval in seconds for EOS file cache cleanup."
)
# Do not make this a pydantic computed field. The pydantic model must be fully initialized
# to have access to config.general, which may not be the case if it is a computed field.
def path(self) -> Optional[Path]:
"""Compute cache path based on general.data_folder_path."""
data_cache_path = self.config.general.data_folder_path
if data_cache_path is None or self.subpath is None:
return None
return data_cache_path.joinpath(self.subpath)


@@ -265,10 +265,12 @@ class SingletonMixin:
class MySingletonModel(SingletonMixin, PydanticBaseModel):
name: str
# implement __init__ to avoid re-initialization of parent class PydanticBaseModel:
# implement __init__ to avoid re-initialization of parent classes:
def __init__(self, *args: Any, **kwargs: Any) -> None:
if hasattr(self, "_initialized"):
return
# Your initialisation here
...
super().__init__(*args, **kwargs)
instance1 = MySingletonModel(name="Instance 1")


@@ -953,6 +953,44 @@ class DataSequence(DataBase, MutableSequence):
array = resampled.values
return array
def to_dataframe(
self,
start_datetime: Optional[DateTime] = None,
end_datetime: Optional[DateTime] = None,
) -> pd.DataFrame:
"""Converts the sequence of DataRecord instances into a Pandas DataFrame.
Args:
start_datetime (Optional[datetime]): The lower bound for filtering (inclusive).
Defaults to the earliest possible datetime if None.
end_datetime (Optional[datetime]): The upper bound for filtering (exclusive).
Defaults to the latest possible datetime if None.
Returns:
pd.DataFrame: A DataFrame containing the filtered data from all records.
"""
if not self.records:
return pd.DataFrame() # Return empty DataFrame if no records exist
# Use filter_by_datetime to get filtered records
filtered_records = self.filter_by_datetime(start_datetime, end_datetime)
# Convert filtered records to a dictionary list
data = [record.model_dump() for record in filtered_records]
# Convert to DataFrame
df = pd.DataFrame(data)
if df.empty:
return df
# Ensure `date_time` column exists and use it for the index
if not "date_time" in df.columns:
error_msg = f"Cannot create dataframe: no `date_time` column in `{df}`."
logger.error(error_msg)
raise TypeError(error_msg)
df.index = pd.DatetimeIndex(df["date_time"])
return df
def sort_by_datetime(self, reverse: bool = False) -> None:
"""Sort the DataRecords in the sequence by their date_time attribute.
@@ -1465,7 +1503,7 @@ class DataImportMixin:
error_msg += f"Field: {field}\nError: {message}\nType: {error_type}\n"
logger.debug(f"PydanticDateTimeDataFrame import: {error_msg}")
# Try dictionary with special keys start_datetime and intervall
# Try dictionary with special keys start_datetime and interval
try:
import_data = PydanticDateTimeData.model_validate_json(json_str)
self.import_from_dict(import_data.to_dict())
@@ -1525,7 +1563,7 @@ class DataImportMixin:
and `key_prefix = "load"`, only the "load_mean" key will be processed even though
both keys are in the record.
"""
with import_file_path.open("r") as import_file:
with import_file_path.open("r", encoding="utf-8", newline=None) as import_file:
import_str = import_file.read()
self.import_from_json(
import_str, key_prefix=key_prefix, start_datetime=start_datetime, interval=interval


@@ -6,19 +6,20 @@ from pendulum import DateTime
import math  # needed for the NaN check in EnergyManagement.manage_energy
from pydantic import ConfigDict, Field, computed_field, field_validator, model_validator
from typing_extensions import Self
from akkudoktoreos.core.cache import CacheUntilUpdateStore
from akkudoktoreos.core.coreabc import ConfigMixin, PredictionMixin, SingletonMixin
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import ParametersBaseModel, PydanticBaseModel
from akkudoktoreos.devices.battery import Battery
from akkudoktoreos.devices.generic import HomeAppliance
from akkudoktoreos.devices.inverter import Inverter
from akkudoktoreos.utils.datetimeutil import to_datetime
from akkudoktoreos.utils.datetimeutil import compare_datetimes, to_datetime
from akkudoktoreos.utils.utils import NumpyEncoder
logger = get_logger(__name__)
class EnergieManagementSystemParameters(ParametersBaseModel):
class EnergyManagementParameters(ParametersBaseModel):
pv_prognose_wh: list[float] = Field(
description="An array of floats representing the forecasted photovoltaic output in watts for different time intervals."
)
@@ -107,7 +108,7 @@ class SimulationResult(ParametersBaseModel):
return NumpyEncoder.convert_numpy(field)[0]
class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, PydanticBaseModel):
class EnergyManagement(SingletonMixin, ConfigMixin, PredictionMixin, PydanticBaseModel):
# Disable validation on assignment to speed up simulation runs.
model_config = ConfigDict(
validate_assignment=False,
@@ -116,16 +117,33 @@ class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, Pyda
# Start datetime.
_start_datetime: ClassVar[Optional[DateTime]] = None
# last run datetime. Used by energy management task
_last_datetime: ClassVar[Optional[DateTime]] = None
@computed_field # type: ignore[prop-decorator]
@property
def start_datetime(self) -> DateTime:
"""The starting datetime of the current or latest energy management."""
if EnergieManagementSystem._start_datetime is None:
EnergieManagementSystem.set_start_datetime()
return EnergieManagementSystem._start_datetime
if EnergyManagement._start_datetime is None:
EnergyManagement.set_start_datetime()
return EnergyManagement._start_datetime
@classmethod
def set_start_datetime(cls, start_datetime: Optional[DateTime] = None) -> DateTime:
"""Set the start datetime for the next energy management cycle.
If no datetime is provided, the current datetime is used.
The start datetime is always rounded down to the nearest hour
(i.e., setting minutes, seconds, and microseconds to zero).
Args:
start_datetime (Optional[DateTime]): The datetime to set as the start.
If None, the current datetime is used.
Returns:
DateTime: The adjusted start datetime.
"""
if start_datetime is None:
start_datetime = to_datetime()
cls._start_datetime = start_datetime.set(minute=0, second=0, microsecond=0)
@@ -176,7 +194,7 @@ class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, Pyda
def set_parameters(
self,
parameters: EnergieManagementSystemParameters,
parameters: EnergyManagementParameters,
ev: Optional[Battery] = None,
home_appliance: Optional[HomeAppliance] = None,
inverter: Optional[Inverter] = None,
@@ -243,6 +261,8 @@ class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, Pyda
is mostly relevant to prediction providers.
force_update (bool, optional): If True, forces to update the data even if still cached.
"""
# Throw away any cached results of the last run.
CacheUntilUpdateStore().clear()
self.set_start_hour(start_hour=start_hour)
# Check for run definitions
@@ -254,14 +274,70 @@ class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, Pyda
error_msg = "Prediction hours unknown."
logger.error(error_msg)
raise ValueError(error_msg)
if self.config.prediction.optimisation_hours is None:
error_msg = "Optimisation hours unknown."
if self.config.optimization.hours is None:
error_msg = "Optimization hours unknown."
logger.error(error_msg)
raise ValueError(error_msg)
self.prediction.update_data(force_enable=force_enable, force_update=force_update)
# TODO: Create optimisation problem that calls into devices.update_data() for simulations.
def manage_energy(self) -> None:
"""Repeating task for managing energy.
This task should be executed by the server regularly (e.g., every 10 seconds)
to ensure proper energy management. Configuration changes to the energy management interval
will only take effect if this task is executed.
- Initializes and runs the energy management for the first time if it has never been run
before.
- If the energy management interval is not configured or invalid (NaN), the task will not
trigger any repeated energy management runs.
- Compares the current time with the last run time and runs the energy management if the
interval has elapsed.
- Logs any exceptions that occur during the initialization or execution of the energy
management.
Note: The task maintains the interval even if some intervals are missed.
"""
current_datetime = to_datetime()
if EnergyManagement._last_datetime is None:
# Never run before
try:
# Try to run a first energy management. May fail due to config incomplete.
self.run()
# Remember energy run datetime.
EnergyManagement._last_datetime = current_datetime
except Exception as e:
message = f"EOS init: {e}"
logger.error(message)
return
if self.config.ems.interval is None or math.isnan(self.config.ems.interval):
# No Repetition
return
if (
compare_datetimes(current_datetime, self._last_datetime).time_diff
< self.config.ems.interval
):
# Wait for next run
return
try:
self.run()
except Exception as e:
message = f"EOS run: {e}"
logger.error(message)
# Remember the energy management run - keep on interval even if we missed some intervals
while (
compare_datetimes(current_datetime, EnergyManagement._last_datetime).time_diff
>= self.config.ems.interval
):
# Pendulum datetimes are immutable - add() returns a new instance.
EnergyManagement._last_datetime = EnergyManagement._last_datetime.add(seconds=self.config.ems.interval)
def set_start_hour(self, start_hour: Optional[int] = None) -> None:
"""Sets start datetime to given hour.
@@ -439,9 +515,9 @@ class EnergieManagementSystem(SingletonMixin, ConfigMixin, PredictionMixin, Pyda
# Initialize the Energy Management System, it is a singleton.
ems = EnergieManagementSystem()
ems = EnergyManagement()
def get_ems() -> EnergieManagementSystem:
def get_ems() -> EnergyManagement:
"""Gets the EOS Energy Management System."""
return ems


@@ -0,0 +1,26 @@
"""Settings for energy management.
Kept in an extra module to avoid cyclic dependencies on package import.
"""
from typing import Optional
from pydantic import Field
from akkudoktoreos.config.configabc import SettingsBaseModel
class EnergyManagementCommonSettings(SettingsBaseModel):
"""Energy Management Configuration."""
startup_delay: float = Field(
default=5,
ge=1,
description="Startup delay in seconds for EOS energy management runs.",
)
interval: Optional[float] = Field(
default=None,
description="Intervall in seconds between EOS energy management runs.",
examples=["300"],
)


@@ -12,7 +12,7 @@ from akkudoktoreos.core.coreabc import (
DevicesMixin,
EnergyManagementSystemMixin,
)
from akkudoktoreos.core.ems import EnergieManagementSystemParameters, SimulationResult
from akkudoktoreos.core.ems import EnergyManagementParameters, SimulationResult
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import ParametersBaseModel
from akkudoktoreos.devices.battery import (
@@ -29,7 +29,7 @@ logger = get_logger(__name__)
class OptimizationParameters(ParametersBaseModel):
ems: EnergieManagementSystemParameters
ems: EnergyManagementParameters
pv_akku: Optional[SolarPanelBatteryParameters]
inverter: Optional[InverterParameters]
eauto: Optional[ElectricVehicleParameters]


@@ -14,10 +14,10 @@ import requests
from pydantic import ValidationError
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from akkudoktoreos.core.cache import cache_in_file
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import PydanticBaseModel
from akkudoktoreos.prediction.elecpriceabc import ElecPriceProvider
from akkudoktoreos.utils.cacheutil import cache_in_file
from akkudoktoreos.utils.datetimeutil import to_datetime, to_duration
logger = get_logger(__name__)


@@ -80,13 +80,13 @@ from typing import Any, List, Optional, Union
import requests
from pydantic import Field, ValidationError, computed_field
from akkudoktoreos.core.cache import cache_in_file
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import PydanticBaseModel
from akkudoktoreos.prediction.pvforecastabc import (
PVForecastDataRecord,
PVForecastProvider,
)
from akkudoktoreos.utils.cacheutil import cache_in_file
from akkudoktoreos.utils.datetimeutil import compare_datetimes, to_datetime
logger = get_logger(__name__)
@@ -267,7 +267,7 @@ class PVForecastAkkudoktor(PVForecastProvider):
logger.debug(f"Response from {self._url()}: {response}")
akkudoktor_data = self._validate_data(response.content)
# We are working on fresh data (no cache), report update time
self.update_datetime = to_datetime(in_timezone=self.config.general.timezone)
return akkudoktor_data
def _update_data(self, force_update: Optional[bool] = False) -> None:


@@ -13,9 +13,9 @@ import pandas as pd
import pvlib
import requests
from akkudoktoreos.core.cache import cache_in_file
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.prediction.weatherabc import WeatherDataRecord, WeatherProvider
from akkudoktoreos.utils.cacheutil import cache_in_file
from akkudoktoreos.utils.datetimeutil import to_datetime
logger = get_logger(__name__)


@@ -19,9 +19,9 @@ import pandas as pd
import requests
from bs4 import BeautifulSoup
from akkudoktoreos.core.cache import cache_in_file
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.prediction.weatherabc import WeatherDataRecord, WeatherProvider
from akkudoktoreos.utils.cacheutil import cache_in_file
from akkudoktoreos.utils.datetimeutil import to_datetime, to_duration, to_timezone
logger = get_logger(__name__)


@@ -1,22 +1,34 @@
#!/usr/bin/env python3
import argparse
import asyncio
import json
import os
import signal
import subprocess
import sys
import time
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Annotated, Any, AsyncGenerator, Dict, List, Optional, Union
import httpx
import psutil
import uvicorn
from fastapi import Body, FastAPI
from fastapi import Path as FastapiPath
from fastapi import Query, Request
from fastapi.exceptions import HTTPException
from fastapi.responses import FileResponse, HTMLResponse, RedirectResponse, Response
from fastapi.responses import (
FileResponse,
HTMLResponse,
JSONResponse,
RedirectResponse,
Response,
)
from akkudoktoreos.config.config import ConfigEOS, SettingsEOS, get_config
from akkudoktoreos.core.cache import CacheFileStore
from akkudoktoreos.core.ems import get_ems
from akkudoktoreos.core.logging import get_logger
from akkudoktoreos.core.pydantic import (
@@ -36,6 +48,8 @@ from akkudoktoreos.prediction.load import LoadCommonSettings
from akkudoktoreos.prediction.loadakkudoktor import LoadAkkudoktorCommonSettings
from akkudoktoreos.prediction.prediction import PredictionCommonSettings, get_prediction
from akkudoktoreos.prediction.pvforecast import PVForecastCommonSettings
from akkudoktoreos.server.rest.tasks import repeat_every
from akkudoktoreos.server.server import get_default_host
from akkudoktoreos.utils.datetimeutil import to_datetime, to_duration
logger = get_logger(__name__)
@@ -145,35 +159,58 @@ def create_error_page(
# ----------------------
def start_eosdash() -> subprocess.Popen:
def start_eosdash(
host: str,
port: int,
eos_host: str,
eos_port: int,
log_level: str,
access_log: bool,
reload: bool,
eos_dir: str,
eos_config_dir: str,
) -> subprocess.Popen:
"""Start the EOSdash server as a subprocess.
This function starts the EOSdash server by launching it as a subprocess. It checks if the server
is already running on the specified port and either returns the existing process or starts a new one.
Args:
host (str): The hostname for the EOSdash server.
port (int): The port for the EOSdash server.
eos_host (str): The hostname for the EOS server.
eos_port (int): The port for the EOS server.
log_level (str): The logging level for the EOSdash server.
access_log (bool): Flag to enable or disable access logging.
reload (bool): Flag to enable or disable auto-reloading.
eos_dir (str): Path to the EOS data directory.
eos_config_dir (str): Path to the EOS configuration directory.
Returns:
server_process: The process of the EOSdash server
subprocess.Popen: The process of the EOSdash server.
Raises:
RuntimeError: If the EOSdash server fails to start.
"""
eosdash_path = Path(__file__).parent.resolve().joinpath("eosdash.py")
if args is None:
# No command line arguments
host = config_eos.server.eosdash_host
port = config_eos.server.eosdash_port
eos_host = config_eos.server.host
eos_port = config_eos.server.port
log_level = "info"
access_log = False
reload = False
else:
host = args.host
port = config_eos.server.eosdash_port if config_eos.server.eosdash_port else (args.port + 1)
eos_host = args.host
eos_port = args.port
log_level = args.log_level
access_log = args.access_log
reload = args.reload
# Check if the EOSdash process is still/already running, e.g. in case of a server restart
process_info = None
for conn in psutil.net_connections(kind="inet"):
if conn.laddr.port == port:
process = psutil.Process(conn.pid)
# Get the fresh process info
process_info = process.as_dict(attrs=["pid", "cmdline"])
break
if process_info:
# Just warn
logger.warning(f"EOSdash port `{port}` still/ already in use.")
logger.warning(f"PID: `{process_info['pid']}`, CMD: `{process_info['cmdline']}`")
cmd = [
sys.executable,
str(eosdash_path),
"-m",
"akkudoktoreos.server.eosdash",
"--host",
str(host),
"--port",
@@ -189,11 +226,23 @@ def start_eosdash() -> subprocess.Popen:
"--reload",
str(reload),
]
server_process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# Set environment before any subprocess run, to keep custom config dir
env = os.environ.copy()
env["EOS_DIR"] = eos_dir
env["EOS_CONFIG_DIR"] = eos_config_dir
try:
server_process = subprocess.Popen(
cmd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
)
except subprocess.CalledProcessError as ex:
error_msg = f"Could not start EOSdash: {ex}"
logger.error(error_msg)
raise RuntimeError(error_msg)
return server_process
@@ -203,20 +252,130 @@ def start_eosdash() -> subprocess.Popen:
# ----------------------
def cache_clear(clear_all: Optional[bool] = None) -> None:
"""Cleanup expired cache files."""
if clear_all:
CacheFileStore().clear(clear_all=True)
else:
CacheFileStore().clear(before_datetime=to_datetime())
def cache_load() -> dict:
"""Load cache from cachefilestore.json."""
return CacheFileStore().load_store()
def cache_save() -> dict:
"""Save cache to cachefilestore.json."""
return CacheFileStore().save_store()
@repeat_every(seconds=float(config_eos.cache.cleanup_interval))
def cache_cleanup_task() -> None:
"""Repeating task to clear cache from expired cache files."""
cache_clear()
@repeat_every(
seconds=10,
wait_first=config_eos.ems.startup_delay,
)
def energy_management_task() -> None:
"""Repeating task for energy management."""
ems_eos.manage_energy()
async def server_shutdown_task() -> None:
"""One-shot task for shutting down the EOS server.
This coroutine performs the following actions:
1. Ensures the cache is saved by calling the cache_save function.
2. Waits for 5 seconds to allow the EOS server to complete any ongoing tasks.
3. Gracefully shuts down the current process by sending the appropriate signal.
If running on Windows, the CTRL_C_EVENT signal is sent to terminate the process.
On other operating systems, the SIGTERM signal is used.
Finally, logs a message indicating that the EOS server has been terminated.
"""
# Assure cache is saved
cache_save()
# Give EOS time to finish some work
await asyncio.sleep(5)
# Gracefully shut down this process.
pid = psutil.Process().pid
if os.name == "nt":
os.kill(pid, signal.CTRL_C_EVENT) # type: ignore[attr-defined]
else:
os.kill(pid, signal.SIGTERM)
logger.info(f"🚀 EOS terminated, PID {pid}")
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
"""Lifespan manager for the app."""
# On startup
if config_eos.server.startup_eosdash:
try:
eosdash_process = start_eosdash()
if args is None:
# No command line arguments
host = config_eos.server.eosdash_host
port = config_eos.server.eosdash_port
eos_host = config_eos.server.host
eos_port = config_eos.server.port
log_level = "info"
access_log = False
reload = False
else:
host = args.host
port = (
config_eos.server.eosdash_port
if config_eos.server.eosdash_port
else (args.port + 1)
)
eos_host = args.host
eos_port = args.port
log_level = args.log_level
access_log = args.access_log
reload = args.reload
host = host if host else get_default_host()
port = port if port else 8504
eos_host = eos_host if eos_host else get_default_host()
eos_port = eos_port if eos_port else 8503
eos_dir = str(config_eos.general.data_folder_path)
eos_config_dir = str(config_eos.general.config_folder_path)
eosdash_process = start_eosdash(
host=host,
port=port,
eos_host=eos_host,
eos_port=eos_port,
log_level=log_level,
access_log=access_log,
reload=reload,
eos_dir=eos_dir,
eos_config_dir=eos_config_dir,
)
except Exception as e:
logger.error(f"Failed to start EOSdash server. Error: {e}")
sys.exit(1)
cache_load()
if config_eos.cache.cleanup_interval is None:
logger.warning("Cache file cleanup disabled. Set cache.cleanup_interval.")
else:
await cache_cleanup_task()
await energy_management_task()
# Handover to application
yield
# On shutdown
# nothing to do
cache_save()
app = FastAPI(
@@ -229,9 +388,9 @@ app = FastAPI(
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
lifespan=lifespan,
root_path=str(Path(__file__).parent),
)
server_dir = Path(__file__).parent.resolve()
@@ -239,9 +398,132 @@ class PdfResponse(FileResponse):
media_type = "application/pdf"
@app.put("/v1/config/reset", tags=["config"])
def fastapi_config_update_post() -> ConfigEOS:
"""Reset the configuration to the EOS configuration file.
@app.post("/v1/admin/cache/clear", tags=["admin"])
def fastapi_admin_cache_clear_post(clear_all: Optional[bool] = None) -> dict:
"""Clear the cache from expired data.
Deletes expired cache files.
Args:
clear_all (Optional[bool]): Delete all cached files. Default is False.
Returns:
data (dict): The management data after cleanup.
"""
try:
cache_clear(clear_all=clear_all)
data = CacheFileStore().current_store()
except Exception as e:
raise HTTPException(status_code=400, detail=f"Error on cache clear: {e}")
return data
@app.post("/v1/admin/cache/save", tags=["admin"])
def fastapi_admin_cache_save_post() -> dict:
"""Save the current cache management data.
Returns:
data (dict): The management data that was saved.
"""
try:
data = cache_save()
except Exception as e:
raise HTTPException(status_code=400, detail=f"Error on cache save: {e}")
return data
@app.post("/v1/admin/cache/load", tags=["admin"])
def fastapi_admin_cache_load_post() -> dict:
"""Load cache management data.
Returns:
data (dict): The management data that was loaded.
"""
try:
data = cache_load()
except Exception as e:
raise HTTPException(status_code=400, detail=f"Error on cache load: {e}")
return data
@app.get("/v1/admin/cache", tags=["admin"])
def fastapi_admin_cache_get() -> dict:
"""Current cache management data.
Returns:
data (dict): The management data.
"""
try:
data = CacheFileStore().current_store()
except Exception as e:
raise HTTPException(status_code=400, detail=f"Error on cache data retrieval: {e}")
return data
@app.post("/v1/admin/server/restart", tags=["admin"])
async def fastapi_admin_server_restart_post() -> dict:
"""Restart the server.
Restart EOS properly by starting a new instance before exiting the old one.
"""
logger.info("🔄 Restarting EOS...")
# Start a new EOS (Uvicorn) process
# Force a new process group to make the new process easily distinguishable from the current one
# Set environment before any subprocess run, to keep custom config dir
env = os.environ.copy()
env["EOS_DIR"] = str(config_eos.general.data_folder_path)
env["EOS_CONFIG_DIR"] = str(config_eos.general.config_folder_path)
new_process = subprocess.Popen(
[
sys.executable,
]
+ sys.argv,
env=env,
start_new_session=True,
)
logger.info(f"🚀 EOS restarted, PID {new_process.pid}")
# Gracefully shut down this process.
asyncio.create_task(server_shutdown_task())
# Will be executed because shutdown is delegated to async coroutine
return {
"message": "Restarting EOS...",
"pid": new_process.pid,
}
@app.post("/v1/admin/server/shutdown", tags=["admin"])
async def fastapi_admin_server_shutdown_post() -> dict:
"""Shutdown the server."""
logger.info("🔄 Stopping EOS...")
# Gracefully shut down this process.
asyncio.create_task(server_shutdown_task())
# Will be executed because shutdown is delegated to async coroutine
return {
"message": "Stopping EOS...",
"pid": psutil.Process().pid,
}
@app.get("/v1/health")
def fastapi_health_get(): # type: ignore
"""Health check endpoint to verify that the EOS server is alive."""
return JSONResponse(
{
"status": "alive",
"pid": psutil.Process().pid,
}
)
@app.post("/v1/config/reset", tags=["config"])
def fastapi_config_reset_post() -> ConfigEOS:
"""Reset the configuration.
Returns:
configuration (ConfigEOS): The current configuration after update.
@@ -251,7 +533,7 @@ def fastapi_config_update_post() -> ConfigEOS:
except Exception as e:
raise HTTPException(
status_code=404,
detail=f"Cannot update configuration from file '{config_eos.config_file_path}': {e}",
detail=f"Cannot reset configuration: {e}",
)
return config_eos
@@ -543,7 +825,7 @@ def fastapi_prediction_list_get(
] = None,
interval: Annotated[
Optional[str],
Query(description="Time duration for each interval."),
Query(description="Time duration for each interval. Defaults to 1 hour."),
] = None,
) -> List[Any]:
"""Get prediction for given key within given date range as value list.
@@ -580,8 +862,40 @@ def fastapi_prediction_list_get(
return prediction_list
@app.put("/v1/prediction/import/{provider_id}", tags=["prediction"])
def fastapi_prediction_import_provider(
provider_id: str = FastapiPath(..., description="Provider ID."),
data: Optional[Union[PydanticDateTimeDataFrame, PydanticDateTimeData, dict]] = None,
force_enable: Optional[bool] = None,
) -> Response:
"""Import prediction for given provider ID.
Args:
provider_id: ID of provider to update.
data: Prediction data.
force_enable: Update data even if provider is disabled.
Defaults to False.
"""
try:
provider = prediction_eos.provider_by_id(provider_id)
except ValueError:
raise HTTPException(status_code=404, detail=f"Provider '{provider_id}' not found.")
if not provider.enabled() and not force_enable:
raise HTTPException(status_code=404, detail=f"Provider '{provider_id}' not enabled.")
try:
provider.import_from_json(json_str=json.dumps(data))
provider.update_datetime = to_datetime(in_timezone=config_eos.general.timezone)
except Exception as e:
raise HTTPException(
status_code=400, detail=f"Error on import for provider '{provider_id}': {e}"
)
return Response()
@app.post("/v1/prediction/update", tags=["prediction"])
def fastapi_prediction_update(force_update: bool = False, force_enable: bool = False) -> Response:
def fastapi_prediction_update(
force_update: Optional[bool] = False, force_enable: Optional[bool] = False
) -> Response:
"""Update predictions for all providers.
Args:
@@ -593,8 +907,7 @@ def fastapi_prediction_update(force_update: bool = False, force_enable: bool = F
try:
prediction_eos.update_data(force_update=force_update, force_enable=force_enable)
except Exception as e:
raise e
# raise HTTPException(status_code=400, detail=f"Error on update of provider: {e}")
raise HTTPException(status_code=400, detail=f"Error on prediction update: {e}")
return Response()
@@ -912,34 +1225,35 @@ def site_map() -> RedirectResponse:
# Keep the proxy last to handle all requests that are not taken by the Rest API.
if config_eos.server.startup_eosdash:
@app.delete("/{path:path}", include_in_schema=False)
async def proxy_delete(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.delete("/{path:path}", include_in_schema=False)
async def proxy_delete(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.get("/{path:path}", include_in_schema=False)
async def proxy_get(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.post("/{path:path}", include_in_schema=False)
async def proxy_post(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.get("/{path:path}", include_in_schema=False)
async def proxy_get(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.put("/{path:path}", include_in_schema=False)
async def proxy_put(request: Request, path: str) -> Response:
return await proxy(request, path)
else:
@app.get("/", include_in_schema=False)
def root() -> RedirectResponse:
return RedirectResponse(url="/docs")
@app.post("/{path:path}", include_in_schema=False)
async def proxy_post(request: Request, path: str) -> Response:
return await proxy(request, path)
@app.put("/{path:path}", include_in_schema=False)
async def proxy_put(request: Request, path: str) -> Response:
return await proxy(request, path)
async def proxy(request: Request, path: str) -> Union[Response | RedirectResponse | HTMLResponse]:
if config_eos.server.eosdash_host and config_eos.server.eosdash_port:
# Make hostname Windows friendly
host = str(config_eos.server.eosdash_host)
if host == "0.0.0.0" and os.name == "nt":
host = "localhost"
if host and config_eos.server.eosdash_port:
# Proxy to EOSdash server
url = f"http://{config_eos.server.eosdash_host}:{config_eos.server.eosdash_port}/{path}"
url = f"http://{host}:{config_eos.server.eosdash_port}/{path}"
headers = dict(request.headers)
data = await request.body()
@@ -1004,6 +1318,29 @@ def run_eos(host: str, port: int, log_level: str, access_log: bool, reload: bool
# Make hostname Windows friendly
if host == "0.0.0.0" and os.name == "nt":
host = "localhost"
# Wait for EOS port to be free - e.g. in case of restart
timeout = 120 # Maximum 120 seconds to wait
process_info: list[dict] = []
for retries in range(int(timeout / 10)):
process_info = []
pids: list[int] = []
for conn in psutil.net_connections(kind="inet"):
if conn.laddr.port == port:
if conn.pid not in pids:
# Get fresh process info
process = psutil.Process(conn.pid)
pids.append(conn.pid)
process_info.append(process.as_dict(attrs=["pid", "cmdline"]))
if len(process_info) == 0:
break
logger.info(f"EOS waiting for port `{port}` ...")
time.sleep(10)
if len(process_info) > 0:
logger.warning(f"EOS port `{port}` in use.")
for info in process_info:
logger.warning(f"PID: `{info["pid"]}`, CMD: `{info["cmdline"]}`")
try:
uvicorn.run(
"akkudoktoreos.server.eos:app",
@@ -1071,8 +1408,11 @@ def main() -> None:
args = parser.parse_args()
host = args.host if args.host else get_default_host()
port = args.port if args.port else 8503
try:
run_eos(args.host, args.port, args.log_level, args.access_log, args.reload)
run_eos(host, port, args.log_level, args.access_log, args.reload)
except:
sys.exit(1)


@@ -1,11 +1,14 @@
import argparse
import os
import sys
import time
from functools import reduce
from typing import Any, Union
import psutil
import uvicorn
from fasthtml.common import H1, Table, Td, Th, Thead, Titled, Tr, fast_app
from fasthtml.starlette import JSONResponse
from pydantic.fields import ComputedFieldInfo, FieldInfo
from pydantic_core import PydanticUndefined
@@ -121,6 +124,17 @@ def get(): # type: ignore
return Titled("EOS Dashboard", H1("Configuration"), config_table())
@app.get("/eosdash/health")
def get_eosdash_health(): # type: ignore
"""Health check endpoint to verify that the EOSdash server is alive."""
return JSONResponse(
{
"status": "alive",
"pid": psutil.Process().pid,
}
)
def run_eosdash(host: str, port: int, log_level: str, access_log: bool, reload: bool) -> None:
"""Run the EOSdash server with the specified configurations.
@@ -131,20 +145,54 @@ def run_eosdash(host: str, port: int, log_level: str, access_log: bool, reload:
server to the specified host and port, an error message is logged and the
application exits.
Parameters:
host (str): The hostname to bind the server to.
port (int): The port number to bind the server to.
log_level (str): The log level for the server. Options include "critical", "error",
"warning", "info", "debug", and "trace".
access_log (bool): Whether to enable or disable the access log. Set to True to enable.
reload (bool): Whether to enable or disable auto-reload. Set to True for development.
Args:
host (str): The hostname to bind the server to.
port (int): The port number to bind the server to.
log_level (str): The log level for the server. Options include "critical", "error",
"warning", "info", "debug", and "trace".
access_log (bool): Whether to enable or disable the access log. Set to True to enable.
reload (bool): Whether to enable or disable auto-reload. Set to True for development.
Returns:
None
None
"""
# Make hostname Windows friendly
if host == "0.0.0.0" and os.name == "nt":
host = "localhost"
# Wait for EOSdash port to be free - e.g. in case of restart
timeout = 120 # Maximum 120 seconds to wait
process_info: list[dict] = []
for retries in range(int(timeout / 3)):
process_info = []
pids: list[int] = []
for conn in psutil.net_connections(kind="inet"):
if conn.laddr.port == port:
if conn.pid not in pids:
# Get fresh process info
process = psutil.Process(conn.pid)
pids.append(conn.pid)
process_info.append(process.as_dict(attrs=["pid", "cmdline"]))
if len(process_info) == 0:
break
logger.info(f"EOSdash waiting for port `{port}` ...")
time.sleep(3)
if len(process_info) > 0:
logger.warning(f"EOSdash port `{port}` in use.")
for info in process_info:
logger.warning(f"PID: `{info["pid"]}`, CMD: `{info["cmdline"]}`")
# Setup config from args
if args:
if args.eos_host:
config_eos.server.host = args.eos_host
if args.eos_port:
config_eos.server.port = args.eos_port
if args.host:
config_eos.server.eosdash_host = args.host
if args.port:
config_eos.server.eosdash_port = args.port
try:
uvicorn.run(
"akkudoktoreos.server.eosdash:app",
@@ -197,13 +245,13 @@ def main() -> None:
"--eos-host",
type=str,
default=str(config_eos.server.host),
help="Host for the EOS server (default: value from config)",
help="Host of the EOS server (default: value from config)",
)
parser.add_argument(
"--eos-port",
type=int,
default=config_eos.server.port,
help="Port for the EOS server (default: value from config)",
help="Port of the EOS server (default: value from config)",
)
# Optional arguments for log_level, access_log, and reload
@@ -230,7 +278,9 @@ def main() -> None:
try:
run_eosdash(args.host, args.port, args.log_level, args.access_log, args.reload)
except:
except Exception as ex:
error_msg = f"Failed to run EOSdash: {ex}"
logger.error(error_msg)
sys.exit(1)


@@ -0,0 +1,92 @@
"""Task handling taken from fastapi-utils/fastapi_utils/tasks.py."""
from __future__ import annotations
import asyncio
import logging
from functools import wraps
from typing import Any, Callable, Coroutine, Union
from starlette.concurrency import run_in_threadpool
NoArgsNoReturnFuncT = Callable[[], None]
NoArgsNoReturnAsyncFuncT = Callable[[], Coroutine[Any, Any, None]]
ExcArgNoReturnFuncT = Callable[[Exception], None]
ExcArgNoReturnAsyncFuncT = Callable[[Exception], Coroutine[Any, Any, None]]
NoArgsNoReturnAnyFuncT = Union[NoArgsNoReturnFuncT, NoArgsNoReturnAsyncFuncT]
ExcArgNoReturnAnyFuncT = Union[ExcArgNoReturnFuncT, ExcArgNoReturnAsyncFuncT]
NoArgsNoReturnDecorator = Callable[[NoArgsNoReturnAnyFuncT], NoArgsNoReturnAsyncFuncT]
async def _handle_func(func: NoArgsNoReturnAnyFuncT) -> None:
if asyncio.iscoroutinefunction(func):
await func()
else:
await run_in_threadpool(func)
async def _handle_exc(exc: Exception, on_exception: ExcArgNoReturnAnyFuncT | None) -> None:
if on_exception:
if asyncio.iscoroutinefunction(on_exception):
await on_exception(exc)
else:
await run_in_threadpool(on_exception, exc)
def repeat_every(
*,
seconds: float,
wait_first: float | None = None,
logger: logging.Logger | None = None,
raise_exceptions: bool = False,
max_repetitions: int | None = None,
on_complete: NoArgsNoReturnAnyFuncT | None = None,
on_exception: ExcArgNoReturnAnyFuncT | None = None,
) -> NoArgsNoReturnDecorator:
"""A decorator that modifies a function so it is periodically re-executed after its first call.
The function it decorates should accept no arguments and return nothing. If necessary, this can be accomplished
by using `functools.partial` or otherwise wrapping the target function prior to decoration.
Parameters
----------
seconds: float
The number of seconds to wait between repeated calls
wait_first: float (default None)
If not None, the function will wait for the given duration before the first call
max_repetitions: Optional[int] (default None)
The maximum number of times to call the repeated function. If `None`, the function is repeated forever.
on_complete: Optional[Callable[[], None]] (default None)
A function to call after the final repetition of the decorated function.
on_exception: Optional[Callable[[Exception], None]] (default None)
A function to call when an exception is raised by the decorated function.
"""
def decorator(func: NoArgsNoReturnAnyFuncT) -> NoArgsNoReturnAsyncFuncT:
"""Converts the decorated function into a repeated, periodically-called version."""
@wraps(func)
async def wrapped() -> None:
async def loop() -> None:
if wait_first is not None:
await asyncio.sleep(wait_first)
repetitions = 0
while max_repetitions is None or repetitions < max_repetitions:
try:
await _handle_func(func)
except Exception as exc:
await _handle_exc(exc, on_exception)
repetitions += 1
await asyncio.sleep(seconds)
if on_complete:
await _handle_func(on_complete)
asyncio.ensure_future(loop())
return wrapped
return decorator


@@ -1,5 +1,6 @@
"""Server Module."""
import os
from typing import Optional
from pydantic import Field, IPvAnyAddress, field_validator
@@ -10,6 +11,12 @@ from akkudoktoreos.core.logging import get_logger
logger = get_logger(__name__)
def get_default_host() -> str:
if os.name == "nt":
return "127.0.0.1"
return "0.0.0.0"
class ServerCommonSettings(SettingsBaseModel):
"""Server Configuration.
@@ -17,14 +24,16 @@ class ServerCommonSettings(SettingsBaseModel):
To be added
"""
host: Optional[IPvAnyAddress] = Field(default="0.0.0.0", description="EOS server IP address.")
host: Optional[IPvAnyAddress] = Field(
default=get_default_host(), description="EOS server IP address."
)
port: Optional[int] = Field(default=8503, description="EOS server IP port number.")
verbose: Optional[bool] = Field(default=False, description="Enable debug output")
startup_eosdash: Optional[bool] = Field(
default=True, description="EOS server to start EOSdash server."
)
eosdash_host: Optional[IPvAnyAddress] = Field(
default="0.0.0.0", description="EOSdash server IP address."
default=get_default_host(), description="EOSdash server IP address."
)
eosdash_port: Optional[int] = Field(default=8504, description="EOSdash server IP port number.")


@@ -46,7 +46,7 @@ class VisualizationReport(ConfigMixin):
"""Add a chart function to the current group and save it as a PNG and SVG."""
self.current_group.append(chart_func)
if self.create_img and title:
server_output_dir = self.config.general.data_cache_path
server_output_dir = self.config.cache.path()
server_output_dir.mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots()
chart_func()