EOS/single_test_prediction.py

#!/usr/bin/env python3
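"""Run a single EOS prediction provider from the command line.

Small test/demo script: it configures one prediction provider (PV forecast,
weather, electricity price, or load), triggers a data update and prints the
provider's data as JSON, optionally with cProfile statistics.

Example invocations (a sketch, assuming the akkudoktoreos package is importable):

    python single_test_prediction.py --provider-id PVForecastAkkudoktor --verbose
    python single_test_prediction.py --provider-id BrightSky --profile
"""
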
import argparse
import cProfile
import pstats
import sys
import time

from akkudoktoreos.config.config import get_config
from akkudoktoreos.prediction.prediction import get_prediction

config_eos = get_config()
prediction_eos = get_prediction()


def config_pvforecast() -> dict:
    """Configure settings for PV forecast."""
    settings = {
        "prediction_hours": 48,
        "prediction_historic_hours": 24,
        "latitude": 52.52,
        "longitude": 13.405,
        "pvforecast_provider": "PVForecastAkkudoktor",
        "pvforecast0_peakpower": 5.0,
        "pvforecast0_surface_azimuth": -10,
        "pvforecast0_surface_tilt": 7,
        "pvforecast0_userhorizon": [20, 27, 22, 20],
        "pvforecast0_inverter_paco": 10000,
        "pvforecast1_peakpower": 4.8,
        "pvforecast1_surface_azimuth": -90,
        "pvforecast1_surface_tilt": 7,
        "pvforecast1_userhorizon": [30, 30, 30, 50],
        "pvforecast1_inverter_paco": 10000,
        "pvforecast2_peakpower": 1.4,
        "pvforecast2_surface_azimuth": -40,
        "pvforecast2_surface_tilt": 60,
        "pvforecast2_userhorizon": [60, 30, 0, 30],
        "pvforecast2_inverter_paco": 2000,
        "pvforecast3_peakpower": 1.6,
        "pvforecast3_surface_azimuth": 5,
        "pvforecast3_surface_tilt": 45,
        "pvforecast3_userhorizon": [45, 25, 30, 60],
        "pvforecast3_inverter_paco": 1400,
        "pvforecast4_peakpower": None,
    }
    return settings


def config_weather() -> dict:
    """Configure settings for weather forecast."""
    settings = {
        "prediction_hours": 48,
        "prediction_historic_hours": 24,
        "latitude": 52.52,
        "longitude": 13.405,
    }
    return settings


def config_elecprice() -> dict:
    """Configure settings for electricity price forecast."""
    settings = {
        "prediction_hours": 48,
        "prediction_historic_hours": 24,
        "latitude": 52.52,
        "longitude": 13.405,
    }
    return settings


def config_load() -> dict:
    """Configure settings for load forecast."""
    settings = {
        "prediction_hours": 48,
        "prediction_historic_hours": 24,
        "latitude": 52.52,
        "longitude": 13.405,
    }
    return settings


def run_prediction(provider_id: str, verbose: bool = False) -> str:
    """Run the prediction.

    Args:
        provider_id (str): ID of prediction provider.
        verbose (bool, optional): Whether to print verbose output. Defaults to False.

    Returns:
        str: Prediction result as a JSON string.
    """
    # Initialize the prediction
    config_eos = get_config()
    prediction_eos = get_prediction()
    if verbose:
        print(f"\nProvider ID: {provider_id}")
    if provider_id in ("PVForecastAkkudoktor",):
        settings = config_pvforecast()
        settings["pvforecast_provider"] = provider_id
    elif provider_id in ("BrightSky", "ClearOutside"):
        settings = config_weather()
        settings["weather_provider"] = provider_id
    elif provider_id in ("ElecPriceAkkudoktor",):
        settings = config_elecprice()
        settings["elecprice_provider"] = provider_id
    elif provider_id in ("LoadAkkudoktor",):
        settings = config_load()
        settings["loadakkudoktor_year_energy"] = 1000
        settings["load_provider"] = provider_id
    else:
        raise ValueError(f"Unknown provider '{provider_id}'.")
    config_eos.merge_settings_from_dict(settings)

    prediction_eos.update_data()

    # Return result of prediction
    provider = prediction_eos.provider_by_id(provider_id)
    if verbose:
        for key in provider.record_keys:
            print(f"\n{key}\n----------")
            print(f"Array: {provider.key_to_array(key)}")
    return provider.model_dump_json(indent=4)
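

# Programmatic use (a sketch): run_prediction() can also be called directly with
# any of the provider IDs handled above, which is the same call main() makes below:
#
#     result_json = run_prediction("PVForecastAkkudoktor", verbose=True)
#     print(result_json)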


def main():
    """Main function to run the prediction script with optional profiling."""
    parser = argparse.ArgumentParser(description="Run Prediction")
    parser.add_argument("--profile", action="store_true", help="Enable performance profiling")
    parser.add_argument(
"--verbose", action="store_true", help="Enable verbose output during prediction"
)
parser.add_argument("--provider-id", type=str, default=0, help="Provider ID of prediction")
    args = parser.parse_args()

    if args.profile:
        # Run with profiling
        profiler = cProfile.Profile()
        try:
            result = profiler.runcall(
                run_prediction, provider_id=args.provider_id, verbose=args.verbose
            )
            # Print profiling statistics
            stats = pstats.Stats(profiler)
            stats.strip_dirs().sort_stats("cumulative").print_stats(200)
            # Print result
            print("\nPrediction Result:")
            print(result)
        except Exception as e:
            print(f"Error during prediction: {e}", file=sys.stderr)
            sys.exit(1)
    else:
        # Run without profiling
        try:
            start_time = time.time()
            result = run_prediction(provider_id=args.provider_id, verbose=args.verbose)
            end_time = time.time()
            elapsed_time = end_time - start_time
            print(f"\nElapsed time: {elapsed_time:.4f} seconds.")
            print("\nPrediction Result:")
            print(result)
        except Exception as e:
            print(f"Error during prediction: {e}", file=sys.stderr)
            sys.exit(1)


if __name__ == "__main__":
    main()