mirror of
https://github.com/Akkudoktor-EOS/EOS.git
synced 2026-01-01 08:16:18 +00:00
feat: add Home Assistant and NodeRED adapters (#764)
Adapters for Home Assistant and NodeRED integration are added. Akkudoktor-EOS can now be run as Home Assistant add-on and standalone. As Home Assistant add-on EOS uses ingress to fully integrate the EOSdash dashboard in Home Assistant. The fix includes several bug fixes that are not directly related to the adapter implementation but are necessary to keep EOS running properly and to test and document the changes. * fix: development version scheme The development versioning scheme is adapted to fit docker and home assistant expectations. The new scheme is x.y.z and x.y.z.dev<hash>. Hash is only digits as expected by home assistant. Development version is appended by .dev as expected by docker. * fix: use mean value in interval on resampling for array When downsampling data use the mean value of all values within the new sampling interval. * fix: default battery ev soc and appliance wh Make the genetic simulation return default values for the battery SoC, electric vehicle SoC and appliance load if these assets are not used. * fix: import json string Strip outer quotes from JSON strings on import to be compliant with json.loads() expectation. * fix: default interval definition for import data Default interval must be defined in lowercase human definition to be accepted by pendulum. * fix: clearoutside schema change * feat: add adapters for integrations Adapters for Home Assistant and NodeRED integration are added. Akkudoktor-EOS can now be run as Home Assistant add-on and standalone. As Home Assistant add-on EOS uses ingress to fully integrate the EOSdash dashboard in Home Assistant. * feat: allow eos to be started with root permissions and drop privileges Home assistant starts all add-ons with root permissions. Eos now drops root permissions if an applicable user is defined by parameter --run_as_user. The docker image defines the user eos to be used.
* feat: make eos supervise and monitor EOSdash Eos now not only starts EOSdash but also monitors EOSdash during runtime and restarts EOSdash on fault. EOSdash logging is captured by EOS and forwarded to the EOS log to provide better visibility. * feat: add duration to string conversion Make to_duration also return the duration as string on request. * chore: Use info logging to report missing optimization parameters In parameter preparation for automatic optimization an error was logged for missing parameters. Logging is now done using the info level. * chore: make EOSdash use the EOS data directory for file import/export EOSdash uses the EOS data directory for file import/export by default. This allows using the configuration import/export function also within docker images. * chore: improve EOSdash config tab display Improve display of JSON code and add more forms for config value update. * chore: make docker image file system layout similar to home assistant Only use /data directory for persistent data. This is handled as a docker volume. The /data volume is mapped to ~/.local/share/net.akkudoktor.eos if using docker compose. * chore: add home assistant add-on development environment Add VSCode devcontainer and task definition for home assistant add-on development. * chore: improve documentation
This commit is contained in:
343
tests/test_homeassistant.py
Normal file
343
tests/test_homeassistant.py
Normal file
@@ -0,0 +1,343 @@
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from pydantic import ValidationError
|
||||
|
||||
|
||||
class TestHomeAssistantAddon:
    """Tests to ensure the repository root is a valid Home Assistant add-on.

    Simulates the Home Assistant Supervisor's expectations.
    """

    @property
    def root(self) -> Path:
        """Repository root (repo == addon): one level above the tests directory."""
        return Path(__file__).resolve().parents[1]
|
||||
|
||||
def test_config_yaml_exists(self):
|
||||
"""Ensure config.yaml exists in the repo root."""
|
||||
cfg_path = self.root / "config.yaml"
|
||||
assert cfg_path.is_file(), "config.yaml must exist in repository root."
|
||||
|
||||
def test_config_yaml_loadable(self):
    """Verify that config.yaml parses and contains required fields."""
    with open(self.root / "config.yaml") as handle:
        config = yaml.safe_load(handle)

    # Fields the Supervisor requires in every add-on configuration.
    for key in ("name", "version", "slug", "description", "arch"):
        assert key in config, f"Missing required field '{key}' in config.yaml."

    # Additional validation
    assert isinstance(config["arch"], list), "arch must be a list"
    assert len(config["arch"]) > 0, "arch list cannot be empty"

    print("✓ config.yaml valid:")
    print(f" Name: {config['name']}")
    print(f" Version: {config['version']}")
    print(f" Slug: {config['slug']}")
    print(f" Architectures: {', '.join(config['arch'])}")
|
||||
|
||||
def test_readme_exists(self):
|
||||
"""Ensure README.md exists and is not empty."""
|
||||
readme_path = self.root / "README.md"
|
||||
assert readme_path.is_file(), "README.md must exist in the repository root."
|
||||
|
||||
content = readme_path.read_text()
|
||||
assert len(content.strip()) > 0, "README.md is empty"
|
||||
|
||||
print(f"✓ README.md exists ({len(content)} bytes)")
|
||||
|
||||
def test_docs_md_exists(self):
|
||||
"""Ensure DOCS.md exists in the repo root (for Home Assistant add-on documentation)."""
|
||||
docs_path = self.root / "DOCS.md"
|
||||
assert docs_path.is_file(), "DOCS.md must exist in the repository root for add-on documentation."
|
||||
|
||||
content = docs_path.read_text()
|
||||
assert len(content.strip()) > 0, "DOCS.md is empty"
|
||||
|
||||
print(f"✓ DOCS.md exists ({len(content)} bytes)")
|
||||
|
||||
@pytest.mark.docker
def test_dockerfile_exists(self):
    """Ensure Dockerfile exists in the repo root and has basic structure."""
    dockerfile = self.root / "Dockerfile"
    assert dockerfile.is_file(), "Dockerfile must exist in repository root."

    text = dockerfile.read_text()

    # Check for FROM statement
    assert "FROM" in text, "Dockerfile must contain FROM statement"

    # Check for common add-on patterns
    if "ARG BUILD_FROM" in text:
        print("✓ Dockerfile uses Home Assistant build args")

    print("✓ Dockerfile exists and has valid structure")
|
||||
|
||||
@pytest.mark.docker
def test_docker_build_context_valid(self):
    """Runs a Docker build using the root of the repo as Home Assistant supervisor would.

    Fails if the build context is invalid or Dockerfile has syntax errors.
    """
    # Skip outright when no working docker CLI is present.
    try:
        subprocess.run(["docker", "--version"], capture_output=True, check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        pytest.skip("Docker not found or not running")

    build_cmd = [
        "docker", "build",
        "-t", "ha-addon-test:latest",
        str(self.root),
    ]

    print(f"\nBuilding Docker image from: {self.root}")

    try:
        completed = subprocess.run(
            build_cmd,
            check=True,
            capture_output=True,
            text=True,
            cwd=str(self.root),
        )
    except subprocess.CalledProcessError as err:
        # Dump both streams so the build error is visible in the test log.
        print("\n✗ Docker build failed")
        print("\nSTDOUT:")
        print(err.stdout)
        print("\nSTDERR:")
        print(err.stderr)
        pytest.fail(
            f"Docker build failed with exit code {err.returncode}. "
            "This simulates a Supervisor build failure."
        )
    else:
        print("✓ Docker build successful")
        if completed.stdout:
            print("\nBuild output (last 20 lines):")
            print('\n'.join(completed.stdout.splitlines()[-20:]))
|
||||
|
||||
@pytest.mark.docker
def test_addon_builder_validation(self, is_finalize: bool):
    """Validate add-on can be built using Home Assistant's builder tool.

    This is the closest to what Supervisor does when installing an add-on.

    Args:
        is_finalize: Presumably a pytest fixture flag; the slow builder run
            only executes on full/finalize runs — TODO confirm fixture source.
    """
    import platform  # hoisted from mid-function; kept function-scoped

    if not is_finalize:
        pytest.skip("Skipping add-on builder validation test — not full run")

    # Check if Docker is available
    try:
        subprocess.run(
            ["docker", "--version"],
            capture_output=True,
            check=True
        )
    except (FileNotFoundError, subprocess.CalledProcessError):
        pytest.skip("Docker not found or not running")

    print(f"\nValidating add-on with builder: {self.root}")

    # Read config to get architecture info
    cfg_path = self.root / "config.yaml"
    with open(cfg_path) as f:
        cfg = yaml.safe_load(f)

    # Map Python's platform names to Home Assistant architectures
    arch_map = {
        "x86_64": "amd64",
        "amd64": "amd64",
        "aarch64": "aarch64",
        "arm64": "aarch64",
        "armv7l": "armv7",
        "armv7": "armv7",
    }
    host_arch = arch_map.get(platform.machine().lower(), "amd64")

    # Check if config supports this architecture
    if host_arch not in cfg["arch"]:
        pytest.skip(
            f"Add-on doesn't support host architecture {host_arch}. "
            f"Supported: {', '.join(cfg['arch'])}"
        )

    print(f"Using builder for architecture: {host_arch}")

    # The builder expects specific arguments for building
    builder_image = f"ghcr.io/home-assistant/{host_arch}-builder:latest"
    try:
        result = subprocess.run(
            [
                "docker", "run", "--rm", "--privileged",
                "-v", f"{self.root}:/data",
                "-v", "/var/run/docker.sock:/var/run/docker.sock",
                builder_image,
                "--generic", cfg["version"],
                "--target", "/data",
                f"--{host_arch}",
                "--test"
            ],
            capture_output=True,
            text=True,
            cwd=str(self.root),
            check=False,
            timeout=600
        )
    except subprocess.TimeoutExpired:
        # Bugfix: timeout=600 was passed but TimeoutExpired was never caught,
        # so a hung builder surfaced as an error instead of a test failure.
        pytest.fail("Add-on builder validation timed out after 600 seconds")

    # Print output for debugging
    if result.stdout:
        print("\nBuilder stdout:")
        print(result.stdout)
    if result.stderr:
        print("\nBuilder stderr:")
        print(result.stderr)

    # Check result
    if result.returncode != 0:
        # Bugfix: an unavailable/incompatible builder image is an environment
        # problem, not an add-on defect. The original comment said to check
        # "if it's just because the builder tool is unavailable" but then
        # failed anyway — skip instead, as the comment intended.
        if "exec format error" in result.stderr or "not found" in result.stderr:
            pytest.skip("Builder tool not compatible with this system.")

        pytest.fail(
            f"Add-on builder validation failed with exit code {result.returncode}"
        )

    print("✓ Add-on builder validation passed")
|
||||
|
||||
def test_build_yaml_if_exists(self):
    """If build.yaml exists, validate its structure."""
    build_file = self.root / "build.yaml"

    # build.yaml is optional for add-ons; only validate when present.
    if not build_file.exists():
        pytest.skip("build.yaml not present (optional)")

    with open(build_file) as handle:
        build_cfg = yaml.safe_load(handle)

    assert "build_from" in build_cfg, "build.yaml must contain 'build_from'"
    assert isinstance(build_cfg["build_from"], dict), "'build_from' must be a dictionary"

    print("✓ build.yaml structure valid")
    print(f" Architectures defined: {', '.join(build_cfg['build_from'].keys())}")
|
||||
|
||||
def test_addon_configuration_complete(self):
    """Comprehensive validation of add-on configuration.

    Checks all required fields and common configuration issues.
    """
    cfg_path = self.root / "config.yaml"
    with open(cfg_path) as f:
        cfg = yaml.safe_load(f)

    # Required top-level fields
    required_fields = ["name", "version", "slug", "description", "arch"]
    for field in required_fields:
        assert field in cfg, f"Missing required field: {field}"

    # Validate specific fields
    assert isinstance(cfg["arch"], list), "arch must be a list"
    assert len(cfg["arch"]) > 0, "arch list cannot be empty"

    valid_archs = ["aarch64", "amd64", "armhf", "armv7", "i386"]
    for arch in cfg["arch"]:
        assert arch in valid_archs, f"Invalid architecture: {arch}"

    # Validate version format (should be semantic versioning)
    version = cfg["version"]
    assert isinstance(version, str), "version must be a string"

    # Validate slug (lowercase, no special chars except dash)
    slug = cfg["slug"]
    # Bugfix: the previous check (`slug.islower() or "-" in slug`) accepted
    # uppercase slugs like "MY-SLUG" because they contain a dash.
    assert slug == slug.lower(), "slug should be lowercase"
    assert slug.replace("-", "").replace("_", "").isalnum(), \
        "slug should only contain alphanumeric characters, dash, or underscore"

    # Optional but common fields
    if "startup" in cfg:
        valid_startup = ["initialize", "system", "services", "application", "once"]
        assert cfg["startup"] in valid_startup, \
            f"Invalid startup value: {cfg['startup']}"

    if "boot" in cfg:
        valid_boot = ["auto", "manual"]
        assert cfg["boot"] in valid_boot, f"Invalid boot value: {cfg['boot']}"

    # Validate ingress configuration
    if cfg.get("ingress"):
        assert "ingress_port" in cfg, "ingress_port required when ingress is enabled"

        ingress_port = cfg["ingress_port"]
        assert isinstance(ingress_port, int), "ingress_port must be an integer"
        assert 1 <= ingress_port <= 65535, "ingress_port must be a valid port number"

        # Ingress port should NOT be in ports section
        ports = cfg.get("ports", {})
        port_key = f"{ingress_port}/tcp"
        assert port_key not in ports, \
            f"Port {ingress_port} is used for ingress and should not be in 'ports' section"

    # Validate URL if present
    if "url" in cfg:
        url = cfg["url"]
        assert url.startswith("http://") or url.startswith("https://"), \
            "URL must start with http:// or https://"

    # Validate map directories if present
    if "map" in cfg:
        assert isinstance(cfg["map"], list), "map must be a list"
        valid_mappings = ["config", "ssl", "addons", "backup", "share", "media"]
        for mapping in cfg["map"]:
            # Handle both "config:rw" and "config" formats
            base_mapping = mapping.split(":")[0]
            assert base_mapping in valid_mappings, \
                f"Invalid map directory: {base_mapping}"

    print("✓ Add-on configuration validation passed")
    print(f" Name: {cfg['name']}")
    print(f" Version: {cfg['version']}")
    print(f" Slug: {cfg['slug']}")
    print(f" Architectures: {', '.join(cfg['arch'])}")
    if "startup" in cfg:
        print(f" Startup: {cfg['startup']}")
    if cfg.get("ingress"):
        print(f" Ingress: enabled on port {cfg['ingress_port']}")
|
||||
|
||||
def test_ingress_configuration_consistent(self):
    """If ingress is enabled, ensure port configuration is correct."""
    with open(self.root / "config.yaml") as handle:
        config = yaml.safe_load(handle)

    if not config.get("ingress"):
        pytest.skip("Ingress not enabled")

    # If ingress is enabled, check configuration
    assert "ingress_port" in config, "ingress_port must be specified when ingress is enabled"

    port = config["ingress_port"]

    # The ingress port should NOT be in the ports section
    exposed = config.get("ports", {})
    if f"{port}/tcp" in exposed:
        pytest.fail(
            f"Port {port} is used for ingress but also listed in 'ports' section. "
            f"Remove it from 'ports' to avoid conflicts."
        )

    print(f"✓ Ingress configuration valid (port {port})")
|
||||
Reference in New Issue
Block a user