Add complete configuration files for Grafana Alloy, Loki, and Prometheus, along with their setup
Parent: c2d28fcc12
Commit: ab4c646cc7

alloy-stack/README.md (new file, 22 lines)

# Grafana-Alloy System Monitoring Stack

- Alloy: metrics and logs collection; pushes to Prometheus and Loki
- Loki: logs DB
- Prometheus: metrics DB
- Grafana: data visualisation

## Setup

1. Start the stack: `docker compose up -d`
2. Open the [Grafana dashboard](http://localhost:3000)
3. Add a Prometheus connection: Connections -> Add new data source -> search for "prometheus" -> set the URL to http://prometheus:9090 -> Save & test
4. Repeat for Loki with the URL http://loki:3100 (or provision both data sources automatically; see the sketch below)
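
The compose file mounts `./grafana` into `/etc/grafana/provisioning`, so the two data sources can also be provisioned automatically instead of being added by hand. A minimal sketch, assuming a (hypothetical) file at `./grafana/datasources/datasources.yaml`; Grafana reads any YAML file in that folder at startup:

```yaml
# ./grafana/datasources/datasources.yaml  (hypothetical path under the mounted
# provisioning directory; the filename itself does not matter)
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true

  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
```

With a file like this in place, steps 3 and 4 become optional.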

Use Grafana Drilldown to explore your data.

## Links

- https://grafana.com/docs/alloy/latest/
- https://www.youtube.com/watch?v=E654LPrkCjo
- https://github.com/ChristianLempa/boilerplates/tree/main/docker-compose/alloy
- https://github.com/grafana/alloy-scenarios

alloy-stack/compose.yml (new file, 65 lines)

name: alloy-stack

services:
  alloy:
    image: grafana/alloy:v1.9.2
    container_name: alloy
    command:
      - run
      - --server.http.listen-addr=0.0.0.0:12345
      - --storage.path=/var/lib/alloy/data
      - /etc/alloy/config.alloy
    ports:
      - 12345:12345
    volumes:
      - ./config.alloy:/etc/alloy/config.alloy
      - alloy_data:/var/lib/alloy/data
      - /:/rootfs:ro
      - /run:/run:ro
      - /var/log:/var/log:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker/:ro
      - /run/udev/data:/run/udev/data:ro
    restart: unless-stopped

  grafana:
    container_name: grafana
    image: grafana/grafana
    environment:
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_BASIC_ENABLED=false
    ports:
      - 3000:3000/tcp
    volumes:
      - ./grafana:/etc/grafana/provisioning
    extra_hosts:
      - "host.docker.internal:host-gateway"
    restart: unless-stopped

  loki:
    container_name: loki
    image: docker.io/grafana/loki
    command: "-config.file=/etc/loki/config.yaml"
    ports:
      - 3100:3100
    volumes:
      - ./loki/config.yaml:/etc/loki/config.yaml:ro
      - data_loki:/loki:rw
    restart: unless-stopped

  prometheus:
    image: docker.io/prom/prometheus
    container_name: prometheus
    ports:
      - 9090:9090
    command: "--config.file=/etc/prometheus/prometheus.yaml --web.enable-remote-write-receiver"
    volumes:
      - ./prometheus/prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
      - prometheus-data:/prometheus
    restart: unless-stopped

volumes:
  alloy_data:
    driver: local
  data_loki:
    driver: local
  prometheus-data:
    driver: local
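
The Grafana, Loki, and Prometheus images above are pulled without tags, so a routine `docker compose pull` can silently move the stack to a new major version. One way to pin them without touching the main file is a compose override; this is only a sketch, and the tags shown are examples, so substitute the versions you have actually tested:

```yaml
# compose.override.yml (hypothetical file; docker compose merges it with
# compose.yml automatically when both sit in the same directory)
# Example tags only; replace with the releases you want to run.
services:
  grafana:
    image: grafana/grafana:11.1.0
  loki:
    image: docker.io/grafana/loki:3.1.0
  prometheus:
    image: docker.io/prom/prometheus:v2.53.0
```

Running `docker compose config` shows the merged result before you restart the stack.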

alloy-stack/config.alloy (new file, 228 lines)

/* Grafana Alloy Configuration Examples
 * ---
 * LINK: For more details, visit https://github.com/grafana/alloy-scenarios
 */

// SECTION: TARGETS

loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
  external_labels = {}
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}

// !SECTION

// SECTION: SYSTEM LOGS & JOURNAL

local.file_match "system" {
  path_targets = [{
    __address__ = "localhost",
    __path__    = "/var/log/{syslog,messages,*.log}",
    instance    = constants.hostname,
    job         = string.format("%s-logs", constants.hostname),
  }]
}

// NOTE: These relabel rules are written for journal logs, but no
// loki.source.journal component is configured here, so they are currently unused.
discovery.relabel "journal" {
  targets = []
  rule {
    source_labels = ["__journal__systemd_unit"]
    target_label  = "unit"
  }
  rule {
    source_labels = ["__journal__boot_id"]
    target_label  = "boot_id"
  }
  rule {
    source_labels = ["__journal__transport"]
    target_label  = "transport"
  }
  rule {
    source_labels = ["__journal_priority_keyword"]
    target_label  = "level"
  }
}

loki.source.file "system" {
  targets    = local.file_match.system.targets
  forward_to = [loki.write.default.receiver]
}

// !SECTION

// SECTION: SYSTEM METRICS

discovery.relabel "metrics" {
  targets = prometheus.exporter.unix.metrics.targets
  rule {
    target_label = "instance"
    replacement  = constants.hostname
  }
  rule {
    target_label = "job"
    replacement  = string.format("%s-metrics", constants.hostname)
  }
}

prometheus.exporter.unix "metrics" {
  disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
  enable_collectors  = ["meminfo"]
  filesystem {
    fs_types_exclude     = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
    mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
    mount_timeout        = "5s"
  }
  netclass {
    ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
  netdev {
    device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
}

prometheus.scrape "metrics" {
  scrape_interval = "15s"
  targets         = discovery.relabel.metrics.output
  forward_to      = [prometheus.remote_write.default.receiver]
}

// !SECTION

// SECTION: DOCKER METRICS

prometheus.exporter.cadvisor "dockermetrics" {
  docker_host      = "unix:///var/run/docker.sock"
  storage_duration = "5m"
}

prometheus.scrape "dockermetrics" {
  targets         = prometheus.exporter.cadvisor.dockermetrics.targets
  forward_to      = [prometheus.remote_write.default.receiver]
  scrape_interval = "10s"
}

// !SECTION

// SECTION: DOCKER LOGS

discovery.docker "dockerlogs" {
  host = "unix:///var/run/docker.sock"
}

discovery.relabel "dockerlogs" {
  targets = []

  // Strip the leading "/" from the container name and expose it as service_name.
  rule {
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "service_name"
  }
}

loki.source.docker "default" {
  host          = "unix:///var/run/docker.sock"
  targets       = discovery.docker.dockerlogs.targets
  labels        = {"platform" = "docker"}
  relabel_rules = discovery.relabel.dockerlogs.rules
  forward_to    = [loki.write.default.receiver]
}

// !SECTION

// ####################################
// Windows Server Metrics Configuration
// ####################################
// NOTE: The components below only apply when Alloy runs directly on a Windows
// host; they are kept here as reference examples and are not used by the
// Linux container deployment above.

prometheus.exporter.windows "default" {
  enabled_collectors = ["cpu", "cs", "logical_disk", "net", "os", "service", "system", "memory", "scheduled_task", "tcp"]
}

// Configure a prometheus.scrape component to collect windows metrics.
prometheus.scrape "example" {
  targets    = prometheus.exporter.windows.default.targets
  forward_to = [prometheus.remote_write.demo.receiver]
}

prometheus.remote_write "demo" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}

// ####################################
// Windows Server Logs Configuration
// ####################################

loki.source.windowsevent "application" {
  eventlog_name          = "Application"
  use_incoming_timestamp = true
  forward_to             = [loki.process.endpoint.receiver]
}

loki.source.windowsevent "System" {
  eventlog_name          = "System"
  use_incoming_timestamp = true
  forward_to             = [loki.process.endpoint.receiver]
}

loki.process "endpoint" {
  forward_to = [loki.write.endpoint.receiver]

  stage.json {
    expressions = {
      message = "",
      Overwritten = "",
      source = "",
      computer = "",
      eventRecordID = "",
      channel = "",
      component_id = "",
      execution_processId = "",
      execution_processName = "",
    }
  }

  stage.structured_metadata {
    values = {
      "eventRecordID" = "",
      "channel" = "",
      "component_id" = "",
      "execution_processId" = "",
      "execution_processName" = "",
    }
  }

  stage.eventlogmessage {
    source             = "message"
    overwrite_existing = true
  }

  stage.labels {
    values = {
      "service_name" = "source",
    }
  }

  stage.output {
    source = "message"
  }
}

loki.write "endpoint" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
}

// Enable Alloy's live debugging feature in the UI.
livedebugging {}

alloy-stack/loki/config.yaml (new file, 30 lines)

---
auth_enabled: false

server:
  http_listen_port: 3100

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093
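
As written, this Loki config keeps chunks forever on the `data_loki` volume. If disk usage becomes a concern, retention can be enabled; a minimal sketch, assuming a recent Loki 3.x image (field names have moved between major versions, so check the docs for the release you run):

```yaml
# Hypothetical additions to loki/config.yaml: drop data older than ~31 days.
limits_config:
  retention_period: 744h

compactor:
  working_directory: /loki/compactor
  retention_enabled: true
  delete_request_store: filesystem
```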

alloy-stack/prometheus/prometheus.yaml (new file, 27 lines)

---
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  # external_labels:
  #   monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ["localhost:9090"]

  # Example job for node_exporter
  # - job_name: 'node_exporter'
  #   static_configs:
  #     - targets: ['node_exporter:9100']

  # Example job for cadvisor
  # - job_name: 'cadvisor'
  #   static_configs:
  #     - targets: ['cadvisor:8080']
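
Alloy pushes all metrics via remote write, so Prometheus does not need to scrape it for the stack to work. Alloy does expose its own internal metrics on the HTTP server configured in the compose file, though, so a job for monitoring the collector itself could be added under `scrape_configs`; the job name below is just an example:

```yaml
  # Hypothetical extra entry under scrape_configs: scrape Alloy's own
  # /metrics endpoint on the HTTP port it listens on (12345).
  - job_name: 'alloy'
    static_configs:
      - targets: ['alloy:12345']
```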