Mirror of https://github.com/Akkudoktor-EOS/EOS.git
synced 2025-10-11 11:56:17 +00:00
fix: logging, prediction update, multiple bugs (#584)
Some checks failed
docker-build / platform-excludes (push) Has been cancelled
pre-commit / pre-commit (push) Has been cancelled
Run Pytest on Pull Request / test (push) Has been cancelled
docker-build / build (push) Has been cancelled
docker-build / merge (push) Has been cancelled
Close stale pull requests/issues / Find Stale issues and PRs (push) Has been cancelled
* Fix logging configuration issues that caused logging to stop working. Switch from Python logging to Loguru. Enable console and file logging with different log levels. Add logging documentation.
* Fix logging configuration and EOS configuration getting out of sync. Add tracking support for nested value updates of Pydantic models, which is used to update the logging configuration whenever the EOS logging configuration changes. This keeps the logging config and the EOS config in sync as long as all changes to the EOS logging configuration go through set_nested_value(), which is the case for the REST API (sketched below).
* Fix the energy management task looping endlessly after the second update when trying to update the last_update datetime.
* Fix get_nested_value() to correctly take values from the dicts in a Pydantic model instance.
* Fix usage of model classes instead of model instances in nested value access when evaluating the value type associated with each key.
* Fix illegal JSON format in the prediction documentation for the PVForecastAkkudoktor provider.
* Fix documentation quirks and add EOS Connect to the integrations.
* Support deprecated configuration fields in documentation generation and EOSdash.
* Enhance the EOSdash demo to show BrightSky humidity data (which is often missing).
* Update the documentation reference to the German EOS installation videos.

Signed-off-by: Bobby Noelte <b0661n0e17e@gmail.com>
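The sync mechanism described in the second bullet can be pictured as follows. This is a minimal, self-contained sketch of the idea with illustrative names only (the real implementation lives in the EOS Pydantic config classes and differs in detail): a nested setter walks a path into the model, writes the new value, and fires any callback registered for that subtree, so that track_logging_config() in the diff below runs whenever the REST API writes to the logging section.

    from typing import Any, Callable

    # Illustrative sketch, not EOS code: callbacks registered per config-path prefix.
    _trackers: list[tuple[str, Callable[[Any, str, Any, Any], None]]] = []

    def track_nested_value(prefix: str, callback: Callable[[Any, str, Any, Any], None]) -> None:
        """Register a callback to run after any update below the given path prefix."""
        _trackers.append((prefix, callback))

    def set_nested_value(model: Any, path: str, value: Any) -> None:
        """Set a 'logging/console_level' style path on a nested model and notify trackers."""
        keys = path.strip("/").split("/")
        target = model
        for key in keys[:-1]:
            target = getattr(target, key)
        old_value = getattr(target, keys[-1], None)
        setattr(target, keys[-1], value)
        for prefix, callback in _trackers:
            if path.strip("/").startswith(prefix):
                callback(model, path, old_value, value)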
@@ -1,95 +1,241 @@
-"""Utility functions for handling logging tasks.
-
-Functions:
-----------
-- get_logger: Creates and configures a logger with console and optional rotating file logging.
-
-Example usage:
---------------
-    # Logger setup
-    >>> logger = get_logger(__name__, log_file="app.log", logging_level="DEBUG")
-    >>> logger.info("Logging initialized.")
-
-Notes:
-------
-- The logger supports rotating log files to prevent excessive log file size.
-"""
+"""Utility for configuring Loguru loggers."""

+import json
 import logging as pylogging
 import os
-from logging.handlers import RotatingFileHandler
-from typing import Optional
+import re
+import sys
+from pathlib import Path
+from types import FrameType
+from typing import Any, List, Optional

-from akkudoktoreos.core.logabc import logging_str_to_level
+import pendulum
+from loguru import logger
+
+from akkudoktoreos.core.logabc import LOGGING_LEVELS
-def get_logger(
-    name: str,
-    log_file: Optional[str] = None,
-    logging_level: Optional[str] = None,
-    max_bytes: int = 5000000,
-    backup_count: int = 5,
-) -> pylogging.Logger:
-    """Creates and configures a logger with a given name.
+class InterceptHandler(pylogging.Handler):
+    """A logging handler that redirects standard Python logging messages to Loguru.

-    The logger supports logging to both the console and an optional log file. File logging is
-    handled by a rotating file handler to prevent excessive log file size.
+    This handler ensures consistency between the `logging` module and Loguru by intercepting
+    logs sent to the standard logging system and re-emitting them through Loguru with proper
+    formatting and context (including exception info and call depth).

+    Attributes:
+        loglevel_mapping (dict): Mapping from standard logging levels to Loguru level names.
+    """

+    loglevel_mapping: dict[int, str] = {
+        50: "CRITICAL",
+        40: "ERROR",
+        30: "WARNING",
+        20: "INFO",
+        10: "DEBUG",
+        5: "TRACE",
+        0: "NOTSET",
+    }

+    def emit(self, record: pylogging.LogRecord) -> None:
+        """Emits a logging record by forwarding it to Loguru with preserved metadata.

+        Args:
+            record (logging.LogRecord): A record object containing log message and metadata.
+        """
+        try:
+            level = logger.level(record.levelname).name
+        except AttributeError:
+            level = self.loglevel_mapping.get(record.levelno, "INFO")

+        frame: Optional[FrameType] = pylogging.currentframe()
+        depth: int = 2
+        while frame and frame.f_code.co_filename == pylogging.__file__:
+            frame = frame.f_back
+            depth += 1

+        log = logger.bind(request_id="app")
+        log.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
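The interception can be exercised on its own. A minimal sketch, not part of the commit, assuming the class above is importable; it routes a standard-library logging call through InterceptHandler so Loguru emits it:

    import logging

    from akkudoktoreos.core.logging import InterceptHandler  # assumed import path

    # Install the handler on the root logger; force=True replaces any prior handlers.
    logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
    logging.getLogger("some.library").warning("sent via stdlib logging, emitted by Loguru")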
+console_handler_id = None
+file_handler_id = None


+def track_logging_config(config_eos: Any, path: str, old_value: Any, value: Any) -> None:
+    """Track logging config changes."""
+    global console_handler_id, file_handler_id

+    if not path.startswith("logging"):
+        raise ValueError(f"Logging shall not track '{path}'")

+    if not config_eos.logging.console_level:
+        # No value given - check environment value - may also be None
+        config_eos.logging.console_level = os.getenv("EOS_LOGGING__LEVEL")
+    if not config_eos.logging.file_level:
+        # No value given - check environment value - may also be None
+        config_eos.logging.file_level = os.getenv("EOS_LOGGING__LEVEL")

+    # Remove handlers
+    if console_handler_id:
+        try:
+            logger.remove(console_handler_id)
+        except Exception as e:
+            logger.debug("Exception on logger.remove: {}", e, exc_info=True)
+        console_handler_id = None
+    if file_handler_id:
+        try:
+            logger.remove(file_handler_id)
+        except Exception as e:
+            logger.debug("Exception on logger.remove: {}", e, exc_info=True)
+        file_handler_id = None

+    # Create handlers with new configuration
+    # Always add console handler
+    if config_eos.logging.console_level not in LOGGING_LEVELS:
+        logger.error(
+            f"Invalid console log level '{config_eos.logging.console_level}' - forced to INFO."
+        )
+        config_eos.logging.console_level = "INFO"

+    console_handler_id = logger.add(
+        sys.stderr,
+        enqueue=True,
+        backtrace=True,
+        level=config_eos.logging.console_level,
+        # format=_console_format
+    )

+    # Add file handler
+    if config_eos.logging.file_level and config_eos.logging.file_path:
+        if config_eos.logging.file_level not in LOGGING_LEVELS:
+            logger.error(
+                f"Invalid file log level '{config_eos.logging.file_level}' - forced to INFO."
+            )
+            config_eos.logging.file_level = "INFO"

+        file_handler_id = logger.add(
+            sink=config_eos.logging.file_path,
+            rotation="100 MB",
+            retention="3 days",
+            enqueue=True,
+            backtrace=True,
+            level=config_eos.logging.file_level,
+            serialize=True,  # JSON dict formatting
+            # format=_file_format
+        )

+    # Redirect standard logging to Loguru
+    pylogging.basicConfig(handlers=[InterceptHandler()], level=0)
+    # Redirect uvicorn and fastapi logging to Loguru
+    pylogging.getLogger("uvicorn.access").handlers = [InterceptHandler()]
+    for pylogger_name in ["uvicorn", "uvicorn.error", "fastapi"]:
+        pylogger = pylogging.getLogger(pylogger_name)
+        pylogger.handlers = [InterceptHandler()]
+        pylogger.propagate = False

+    logger.info(
+        f"Logger reconfigured - console: {config_eos.logging.console_level}, file: {config_eos.logging.file_level}."
+    )
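A minimal, self-contained demonstration of the callback, using a duck-typed stand-in for the EOS configuration rather than the real config object (the import path is assumed, not confirmed by this diff):

    from types import SimpleNamespace

    from akkudoktoreos.core.logging import track_logging_config  # assumed import path

    # Stand-in config exposing only the fields the callback touches.
    fake_config = SimpleNamespace(
        logging=SimpleNamespace(console_level="DEBUG", file_level=None, file_path=None)
    )
    # Simulate an update to logging/console_level; the console handler is rebuilt at DEBUG.
    track_logging_config(fake_config, "logging/console_level", None, "DEBUG")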
+def read_file_log(
+    log_path: Path,
+    limit: int = 100,
+    level: Optional[str] = None,
+    contains: Optional[str] = None,
+    regex: Optional[str] = None,
+    from_time: Optional[str] = None,
+    to_time: Optional[str] = None,
+    tail: bool = False,
+) -> List[dict]:
+    """Read and filter structured log entries from a JSON-formatted log file.

     Args:
-        name (str): The name of the logger, typically `__name__` from the calling module.
-        log_file (Optional[str]): Path to the log file for file logging. If None, no file logging is done.
-        logging_level (Optional[str]): Logging level (e.g., "INFO", "DEBUG"). Defaults to "INFO".
-        max_bytes (int): Maximum size in bytes for log file before rotation. Defaults to 5 MB.
-        backup_count (int): Number of backup log files to keep. Defaults to 5.
+        log_path (Path): Path to the JSON-formatted log file.
+        limit (int, optional): Maximum number of log entries to return. Defaults to 100.
+        level (Optional[str], optional): Filter logs by log level (e.g., "INFO", "ERROR"). Defaults to None.
+        contains (Optional[str], optional): Filter logs that contain this substring in their message. Case-insensitive. Defaults to None.
+        regex (Optional[str], optional): Filter logs whose message matches this regular expression. Defaults to None.
+        from_time (Optional[str], optional): ISO 8601 datetime string to filter logs not earlier than this time. Defaults to None.
+        to_time (Optional[str], optional): ISO 8601 datetime string to filter logs not later than this time. Defaults to None.
+        tail (bool, optional): If True, read the last lines of the file (like `tail -n`). Defaults to False.

     Returns:
-        logging.Logger: Configured logger instance.
+        List[dict]: A list of filtered log entries as dictionaries.

-    Example:
-        logger = get_logger(__name__, log_file="app.log", logging_level="DEBUG")
-        logger.info("Application started")
+    Raises:
+        FileNotFoundError: If the log file does not exist.
+        ValueError: If the datetime strings are invalid or improperly formatted.
+        Exception: For other unforeseen I/O or parsing errors.
     """
-    # Create a logger with the specified name
-    logger = pylogging.getLogger(name)
-    logger.propagate = True
-    # This is already supported by pydantic-settings in LoggingCommonSettings, however in case
-    # loading the config itself fails and to set the level before we load the config, we set it here manually.
-    if logging_level is None and (env_level := os.getenv("EOS_LOGGING__LEVEL")) is not None:
-        logging_level = env_level
-    if logging_level is not None:
-        level = logging_str_to_level(logging_level)
-        logger.setLevel(level)
+    if not log_path.exists():
+        raise FileNotFoundError("Log file not found")

-    # The log message format
-    formatter = pylogging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    try:
+        from_dt = pendulum.parse(from_time) if from_time else None
+        to_dt = pendulum.parse(to_time) if to_time else None
+    except Exception as e:
+        raise ValueError(f"Invalid date/time format: {e}")

-    # Prevent loggers from being added multiple times
-    # There may already be a logger from pytest
-    if not logger.handlers:
-        # Create a console handler with a standard output stream
-        console_handler = pylogging.StreamHandler()
-        if logging_level is not None:
-            console_handler.setLevel(level)
-        console_handler.setFormatter(formatter)
+    regex_pattern = re.compile(regex) if regex else None

-        # Add the console handler to the logger
-        logger.addHandler(console_handler)
+    def matches_filters(log: dict) -> bool:
+        if level and log.get("level", {}).get("name") != level.upper():
+            return False
+        if contains and contains.lower() not in log.get("message", "").lower():
+            return False
+        if regex_pattern and not regex_pattern.search(log.get("message", "")):
+            return False
+        if from_dt or to_dt:
+            try:
+                log_time = pendulum.parse(log["time"])
+            except Exception:
+                return False
+            if from_dt and log_time < from_dt:
+                return False
+            if to_dt and log_time > to_dt:
+                return False
+        return True

-    if log_file and len(logger.handlers) < 2:  # We assume a console logger to be the first logger
-        # If a log file path is specified, create a rotating file handler
+    matched_logs = []
+    lines: list[str] = []

-        # Ensure the log directory exists
-        log_dir = os.path.dirname(log_file)
-        if log_dir and not os.path.exists(log_dir):
-            os.makedirs(log_dir)
+    if tail:
+        with log_path.open("rb") as f:
+            f.seek(0, 2)
+            end = f.tell()
+            buffer = bytearray()
+            pointer = end

-        # Create a rotating file handler
-        file_handler = RotatingFileHandler(log_file, maxBytes=max_bytes, backupCount=backup_count)
-        if logging_level is not None:
-            file_handler.setLevel(level)
-        file_handler.setFormatter(formatter)
+            while pointer > 0 and len(lines) < limit * 5:
+                pointer -= 1
+                f.seek(pointer)
+                byte = f.read(1)
+                if byte == b"\n":
+                    if buffer:
+                        line = buffer[::-1].decode("utf-8", errors="ignore")
+                        lines.append(line)
+                        buffer.clear()
+                else:
+                    buffer.append(byte[0])
+            if buffer:
+                line = buffer[::-1].decode("utf-8", errors="ignore")
+                lines.append(line)
+        lines = lines[::-1]
+    else:
+        with log_path.open("r", encoding="utf-8", newline=None) as f_txt:
+            lines = f_txt.readlines()

-        # Add the file handler to the logger
-        logger.addHandler(file_handler)
+    for line in lines:
+        if not line.strip():
+            continue
+        try:
+            log = json.loads(line)
+        except json.JSONDecodeError:
+            continue
+        if matches_filters(log):
+            matched_logs.append(log)
+            if len(matched_logs) >= limit:
+                break

-    return logger
+    return matched_logs
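Usage example, not from the commit: pull the most recent ERROR entries mentioning a given provider from the serialized log file. The path and the import path are illustrative; EOS derives the real file location from logging.file_path, and the exact layout of each entry follows Loguru's JSON serialization.

    from pathlib import Path

    from akkudoktoreos.core.logging import read_file_log  # assumed import path

    for entry in read_file_log(
        Path("/tmp/eos.log"),    # illustrative path
        limit=20,
        level="ERROR",
        contains="PVForecast",
        tail=True,               # scan backwards from the end of the file
    ):
        print(entry)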