Files
EOS/tests/test_loadvrm.py
Christopher Nadler 04420e66ab
Some checks are pending
Bump Version / Bump Version Workflow (push) Waiting to run
docker-build / platform-excludes (push) Waiting to run
docker-build / build (push) Blocked by required conditions
docker-build / merge (push) Blocked by required conditions
pre-commit / pre-commit (push) Waiting to run
Run Pytest on Pull Request / test (push) Waiting to run
fix: Improve provider update error handling and add VRM provider settings validation (#887)
* fix: improve error handling for provider updates

Distinguishes failures of active providers from inactive ones.
Propagates errors only for enabled providers, allowing execution
to continue if a non-active provider fails, which avoids unnecessary
interruptions and improves robustness.

* fix: add provider settings validation for forecast requests

Prevents potential runtime errors by checking if provider settings are configured
before accessing forecast credentials.

Raises a clear error when settings are missing to help with debugging misconfigurations.

* refactor(load): move provider settings to top-level fields

Transitions load provider settings from a nested "provider_settings" object with provider-specific keys to dedicated top-level fields. Removes the legacy "provider_settings" mapping and updates migration logic to ensure backward compatibility with existing configurations.

* docs: update version numbers and documentation

---------

Co-authored-by: Normann <github@koldrack.com>
2026-02-26 18:31:47 +01:00

117 lines
3.5 KiB
Python

import json
from unittest.mock import call, patch
import pendulum
import pytest
import requests
from akkudoktoreos.prediction.loadvrm import (
LoadVrm,
VrmForecastRecords,
VrmForecastResponse,
)
@pytest.fixture
def load_vrm_instance(config_eos):
    """Provide a LoadVrm provider configured with dummy VRM credentials.

    The instance is pinned to a fixed start datetime so that forecast
    timestamps in the tests are deterministic.
    """
    # Register LoadVrm as the active load provider with dummy credentials.
    config_eos.merge_settings_from_dict(
        {
            "load": {
                "provider": "LoadVrm",
                "loadvrm": {
                    "load_vrm_token": "dummy-token",
                    "load_vrm_idsite": 12345,
                },
            }
        }
    )
    # Fixed, timezone-aware start of the forecast window.
    fixed_start = pendulum.datetime(2025, 1, 1, tz='Europe/Berlin')
    return LoadVrm(config=config_eos.load, start_datetime=fixed_start)
def mock_forecast_response():
    """Return a fake VrmForecastResponse with sample data."""
    midnight = pendulum.datetime(2025, 1, 1, 0, 0, tz='Europe/Berlin')
    one_am = pendulum.datetime(2025, 1, 1, 1, 0, tz='Europe/Berlin')
    # The VRM API reports timestamps in milliseconds since the epoch.
    consumption_samples = [
        (midnight.int_timestamp * 1000, 100.5),
        (one_am.int_timestamp * 1000, 101.2),
    ]
    records = VrmForecastRecords(
        vrm_consumption_fc=consumption_samples,
        solar_yield_forecast=[],
    )
    return VrmForecastResponse(success=True, records=records, totals={})
def test_update_data_calls_update_value(load_vrm_instance):
    """_update_data must forward every forecast sample to update_value, in order."""
    fake_response = mock_forecast_response()
    with patch.object(load_vrm_instance, "_request_forecast", return_value=fake_response):
        with patch.object(LoadVrm, "update_value") as mock_update:
            load_vrm_instance._update_data()

    assert mock_update.call_count == 2

    # One expected call per sample in the mocked response, hour by hour.
    expected_calls = [
        call(
            pendulum.datetime(2025, 1, 1, hour, 0, 0, tz='Europe/Berlin'),
            {"loadforecast_power_w": power},
        )
        for hour, power in ((0, 100.5), (1, 101.2))
    ]
    mock_update.assert_has_calls(expected_calls, any_order=False)
def test_validate_data_accepts_valid_json():
    """Test that _validate_data doesn't raise with valid input."""
    payload = mock_forecast_response().model_dump_json()
    result = LoadVrm._validate_data(payload)
    # A schema-conforming payload round-trips into a successful response
    # carrying both consumption samples.
    assert result.success
    assert len(result.records.vrm_consumption_fc) == 2
def test_validate_data_raises_on_invalid_json():
    """_validate_data should raise ValueError on schema mismatch."""
    # Deliberately omit the required 'records' field.
    payload_missing_records = json.dumps({"success": True})
    with pytest.raises(ValueError) as exc_info:
        LoadVrm._validate_data(payload_missing_records)
    message = str(exc_info.value)
    # The error message should name the offending field.
    assert "Field:" in message
    assert "records" in message
def test_request_forecast_raises_on_http_error(load_vrm_instance):
    """A network timeout must surface as a RuntimeError with a clear message."""
    timeout = requests.Timeout("Request timed out")
    with patch("requests.get", side_effect=timeout) as mock_get:
        with pytest.raises(RuntimeError) as exc_info:
            load_vrm_instance._request_forecast(0, 1)
        assert "Failed to fetch load forecast" in str(exc_info.value)
        # Exactly one HTTP attempt — no silent retries.
        mock_get.assert_called_once()
def test_update_data_does_nothing_on_empty_forecast(load_vrm_instance):
    """An empty forecast must not trigger any update_value calls."""
    no_records = VrmForecastRecords(vrm_consumption_fc=[], solar_yield_forecast=[])
    empty_response = VrmForecastResponse(success=True, records=no_records, totals={})
    with patch.object(load_vrm_instance, "_request_forecast", return_value=empty_response):
        with patch.object(LoadVrm, "update_value") as mock_update:
            load_vrm_instance._update_data()
    mock_update.assert_not_called()