Add configuration files and compose setups for Grafana Alloy, Backrest, Loki, and Prometheus

dominikhoebert 2025-07-09 15:48:05 +02:00
parent f43daa15b1
commit c2d28fcc12
8 changed files with 344 additions and 1 deletion

5 .gitignore vendored

@@ -75,4 +75,7 @@ watchyourlan/data/
puter/config/
puter/data/
postgis/shared/
postgis/data/
backrest/backrest/
backrest/backup/
backrest/repos/

39 alloy/compose.yml Normal file

@@ -0,0 +1,39 @@
#
#
name: alloy

services:
  alloy:
    image: grafana/alloy:v1.9.2
    container_name: alloy
    command:
      - run
      - --server.http.listen-addr=0.0.0.0:12345
      - --storage.path=/var/lib/alloy/data
      - /etc/alloy/config.alloy
    ports:
      - "12345:12345"
    volumes:
      - ./config.alloy:/etc/alloy/config.alloy
      - alloy_data:/var/lib/alloy/data
      - /:/rootfs:ro
      - /run:/run:ro
      - /var/log:/var/log:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker/:ro
      - /run/udev/data:/run/udev/data:ro
    restart: unless-stopped
    networks:
      - proxy

volumes:
  alloy_data:
    driver: local

networks:
  proxy:
    external: true
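config.alloy (next file) reads container logs and cAdvisor metrics through the Docker Engine API at unix:///var/run/docker.sock, which this compose file does not mount explicitly; on many hosts it is still reachable through the read-only /run bind mount because /var/run is a symlink to /run. If it is not, a minimal sketch of an explicit socket mount that could be added under the service's volumes (an assumption, not part of this commit):

      # hypothetical extra bind mount for discovery.docker / prometheus.exporter.cadvisor
      - /var/run/docker.sock:/var/run/docker.sock:ro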

140 alloy/config.alloy Normal file

@@ -0,0 +1,140 @@
/* Grafana Alloy Configuration Examples
 * ---
 * LINK: For more details, visit https://github.com/grafana/alloy-scenarios
 */

// SECTION: TARGETS
loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
  external_labels = {}
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}
// !SECTION

// SECTION: SYSTEM LOGS & JOURNAL
local.file_match "system" {
  path_targets = [{
    __address__ = "localhost",
    __path__    = "/var/log/{syslog,messages,*.log}",
    instance    = constants.hostname,
    job         = string.format("%s-logs", constants.hostname),
  }]
}

discovery.relabel "journal" {
  targets = []
  rule {
    source_labels = ["__journal__systemd_unit"]
    target_label  = "unit"
  }
  rule {
    source_labels = ["__journal__boot_id"]
    target_label  = "boot_id"
  }
  rule {
    source_labels = ["__journal__transport"]
    target_label  = "transport"
  }
  rule {
    source_labels = ["__journal_priority_keyword"]
    target_label  = "level"
  }
}

loki.source.file "system" {
  targets    = local.file_match.system.targets
  forward_to = [loki.write.default.receiver]
}
// !SECTION

// SECTION: SYSTEM METRICS
discovery.relabel "metrics" {
  targets = prometheus.exporter.unix.metrics.targets
  rule {
    target_label = "instance"
    replacement  = constants.hostname
  }
  rule {
    target_label = "job"
    replacement  = string.format("%s-metrics", constants.hostname)
  }
}

prometheus.exporter.unix "metrics" {
  disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
  enable_collectors  = ["meminfo"]
  filesystem {
    fs_types_exclude     = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
    mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
    mount_timeout        = "5s"
  }
  netclass {
    ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
  netdev {
    device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
}

prometheus.scrape "metrics" {
  scrape_interval = "15s"
  targets         = discovery.relabel.metrics.output
  forward_to      = [prometheus.remote_write.default.receiver]
}
// !SECTION

// SECTION: DOCKER METRICS
prometheus.exporter.cadvisor "dockermetrics" {
  docker_host      = "unix:///var/run/docker.sock"
  storage_duration = "5m"
}

prometheus.scrape "dockermetrics" {
  targets         = prometheus.exporter.cadvisor.dockermetrics.targets
  forward_to      = [prometheus.remote_write.default.receiver]
  scrape_interval = "10s"
}
// !SECTION

// SECTION: DOCKER LOGS
discovery.docker "dockerlogs" {
  host = "unix:///var/run/docker.sock"
}

discovery.relabel "dockerlogs" {
  targets = []
  rule {
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "service_name"
  }
}

loki.source.docker "default" {
  host          = "unix:///var/run/docker.sock"
  targets       = discovery.docker.dockerlogs.targets
  labels        = {"platform" = "docker"}
  relabel_rules = discovery.relabel.dockerlogs.rules
  forward_to    = [loki.write.default.receiver]
}
// !SECTION
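The discovery.relabel "journal" rules above are declared but not consumed by any component in this file. A minimal sketch of how they would typically be wired to a systemd journal reader; the component label "system" and the 12h lookback are assumptions, not part of this commit:

// Hypothetical journal source, not in the committed file: reads systemd journal
// entries, applies the relabel rules defined above, and forwards them to Loki.
loki.source.journal "system" {
  max_age       = "12h"
  relabel_rules = discovery.relabel.journal.rules
  labels        = {component = "loki.source.journal"}
  forward_to    = [loki.write.default.receiver]
}

Reading the journal also requires the host journal directory inside the container; the existing read-only /var/log and /run mounts in alloy/compose.yml should cover the usual /var/log/journal and /run/log/journal locations.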

33 backrest/compose.yml Normal file

@@ -0,0 +1,33 @@
#
# https://github.com/garethgeorge/backrest
name: backrest

services:
  backrest:
    image: garethgeorge/backrest:latest
    container_name: backrest
    volumes:
      - ./backrest/data:/data
      - ./backrest/config:/config
      - ./backrest/cache:/cache
      - ./backrest/tmp:/tmp
      - ./backup/data:/userdata # Mount local paths to backup
      - ./repos:/repos # Mount local repos (optional for remote storage)
    environment:
      - BACKREST_DATA=/data
      - BACKREST_CONFIG=/config/config.json
      - XDG_CACHE_HOME=/cache
      - TMPDIR=/tmp
      - TZ=Europe/Vienna
    ports:
      - "9898:9898"
    restart: unless-stopped
    networks:
      - proxy

networks:
  proxy:
    external: true

47 loki/compose.yml Normal file

@@ -0,0 +1,47 @@
name: loki

services:
  loki:
    container_name: loki
    image: docker.io/grafana/loki
    command: "-config.file=/etc/loki/config.yaml"
    ports:
      # --> (Optional) Remove when using traefik...
      - "3100:3100"
      # <--
    volumes:
      - ./config/config.yaml:/etc/loki/config.yaml:ro
      - data_loki:/loki:rw
    # --> (Optional) When using traefik...
    # labels:
    #   - traefik.enable=true
    #   # -- Traefik Services
    #   - traefik.http.services.loki.loadbalancer.server.port=3100
    #   # -- Traefik Routers
    #   - traefik.http.routers.loki.entrypoints=websecure
    #   - traefik.http.routers.loki.rule=Host(`loki-fqdn`)
    #   - traefik.http.routers.loki.tls=true
    #   - traefik.http.routers.loki.tls.certresolver=cloudflare
    #   - traefik.http.routers.loki.service=loki
    #   # -- (Optional) Authentication
    #   # - traefik.http.routers.loki.middlewares=authentik-middleware@file
    # networks:
    #   - frontend
    # <--
    restart: unless-stopped
    networks:
      - proxy

networks:
  proxy:
    external: true

volumes:
  data_loki:
    driver: local

# --> (Optional) When using traefik...
# networks:
#   frontend:
#     external: true
# <--

30 loki/config/config.yaml Normal file

@@ -0,0 +1,30 @@
---
auth_enabled: false

server:
  http_listen_port: 3100

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093

24 prometheus/compose.yml Normal file

@@ -0,0 +1,24 @@
name: prometheus

services:
  prometheus:
    image: docker.io/prom/prometheus
    container_name: prometheus
    ports:
      - 9090:9090
    command: "--config.file=/etc/prometheus/prometheus.yaml --web.enable-remote-write-receiver"
    volumes:
      - ./config/prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
      - prometheus-data:/prometheus
    restart: unless-stopped
    networks:
      - proxy

networks:
  proxy:
    external: true

volumes:
  prometheus-data:
    driver: local
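The --web.enable-remote-write-receiver flag is what lets Alloy's prometheus.remote_write push into this instance. A healthcheck would also make the restart policy more useful; a sketch against Prometheus' /-/healthy endpoint, to be placed under the prometheus service (assumes wget from the image's busybox base; interval, timeout and retries values are arbitrary):

    # hypothetical healthcheck, not part of the committed file
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 5s
      retries: 3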

27 prometheus/config/prometheus.yaml Normal file

@@ -0,0 +1,27 @@
---
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  # external_labels:
  #   monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ["localhost:9090"]

  # Example job for node_exporter
  # - job_name: 'node_exporter'
  #   static_configs:
  #     - targets: ['node_exporter:9100']

  # Example job for cadvisor
  # - job_name: 'cadvisor'
  #   static_configs:
  #     - targets: ['cadvisor:8080']
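Alloy also exposes its own internal metrics on the HTTP listen address set in alloy/compose.yml (port 12345), so a further commented-out job in the same style as the examples above could scrape them; the alloy:12345 target assumes both containers are attached to the shared proxy network:

  # Example job for Alloy's internal metrics (hypothetical, not in this commit)
  # - job_name: 'alloy'
  #   static_configs:
  #     - targets: ['alloy:12345']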