chore: backup infrastructure configurations [skip ci]

github-actions[bot]
2025-11-22 17:50:56 +00:00
parent a2d70e1d5d
commit 2b7e5ef040
6 changed files with 414 additions and 2 deletions


@@ -0,0 +1,8 @@
{
"backup_timestamp": "2025-11-22T17:50:56.601241",
"total_keys": 0,
"successful_backups": 0,
"failed_backups": 0,
"consul_address": "http://192.168.4.250:4646",
"status": "empty_kv_store"
}
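
One thing worth flagging in this metadata: port 4646 is Nomad's default HTTP port, while Consul's HTTP API normally listens on 8500, which may be why this run reports an empty KV store. For reference, a minimal sketch of the kind of script that could emit a file like this, assuming python-requests and Consul's standard KV endpoints (the address and file names are illustrative, not taken from this workflow):

# Hypothetical sketch of a Consul KV backup step; not the actual
# workflow script. Address and output path are assumptions.
import json
from datetime import datetime, timezone

import requests

CONSUL_ADDR = "http://192.168.4.250:8500"  # Consul's default HTTP port

def backup_kv(outfile: str = "consul_kv_backup.json") -> None:
    # List every key in the KV store (a 404 means the store is empty).
    resp = requests.get(f"{CONSUL_ADDR}/v1/kv/",
                        params={"keys": "true", "recurse": "true"})
    keys = resp.json() if resp.status_code == 200 else []
    dump, ok, failed = {}, 0, 0
    for key in keys:
        r = requests.get(f"{CONSUL_ADDR}/v1/kv/{key}", params={"raw": "true"})
        if r.ok:
            dump[key] = r.text
            ok += 1
        else:
            failed += 1
    meta = {
        "backup_timestamp": datetime.now(timezone.utc).isoformat(),
        "total_keys": len(keys),
        "successful_backups": ok,
        "failed_backups": failed,
        "consul_address": CONSUL_ADDR,
        "status": "empty_kv_store" if not keys else "ok",
    }
    with open(outfile, "w") as f:
        json.dump({"metadata": meta, "kv": dump}, f, indent=2)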

nomad_backup/grafana.hcl Normal file

@@ -0,0 +1,128 @@
job "grafana" {
# region = "global"
datacenters = ["dc1"]
type = "service"
# priority = 50
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
update {
# Stagger updates every 60 seconds
stagger = "60s"
max_parallel = 1
}
group "grafana" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "10s"
mode = "delay"
}
task "grafana" {
driver = "docker"
config {
// image = "fg2it/grafana-armhf:v5.1.4"
image = "grafana/grafana:latest"
ports = ["http"]
logging {
type = "json-file"
}
memory_hard_limit = 2048
}
env {
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
GF_PATHS_PROVISIONING = "/local/"
GF_AUTH_ANONYMOUS_ENABLED = true
GF_AUTH_ANONYMOUS_ORG_NAME = "Main Org."
GF_AUTH_ANONYMOUS_ORG_ROLE = "Admin"
}
template {
data = <<EOH
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus.service.dc1.consul:9090
isDefault: true
- name: Loki
type: loki
access: proxy
url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}
isDefault: false
editable: true
jsonData:
maxLines: 1000
EOH
destination = "local/datasources/prometheus.yaml"
}
template {
data = <<EOH
apiVersion: 1
providers:
- name: dashboards
type: file
updateIntervalSeconds: 30
options:
path: /local/dashboard_definitions
foldersFromFilesStructure: true
EOH
destination = "local/dashboards/dashboards.yaml"
}
template {
data = "{{ key \"grafana_dashboards/nomad\" }}"
destination = "local/dashboard_definitions/nomad.json"
}
template {
data = "{{ key \"grafana_dashboards/thermals\" }}"
destination = "local/dashboard_definitions/thermals.json"
}
template {
data = "{{ key \"grafana_dashboards/NomadMem\" }}"
destination = "local/dashboard_definitions/NomadMem.json"
}
service {
name = "${TASKGROUP}"
tags = ["global", "backend"]
port = "http"
check {
name = "alive"
type = "http"
interval = "60s"
timeout = "120s"
path = "/login"
port = "http"
check_restart {
limit = 3
grace = "120s"
ignore_warnings = false
}
}
}
resources {
cpu = 128 # 128 MHz
memory = 64 # 64 MB soft limit; hard limit set in the docker config
}
# Specify configuration related to log rotation
logs {
max_files = 10
max_file_size = 15
}
kill_timeout = "10s"
}
network {
port "http" {
static = 3100
to = 3000
}
}
}
}
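
The three dashboard templates above render Consul KV values into local/dashboard_definitions/, and consul-template holds the task in pending until every referenced key exists, so the job only starts cleanly once those keys are seeded. A minimal sketch of seeding them, assuming python-requests and a Consul agent on its default port (the address and local file paths are assumptions):

# Hypothetical seeding helper for the KV keys the grafana job templates
# on; the Consul address and dashboard file paths are assumptions.
import requests

CONSUL_ADDR = "http://192.168.4.250:8500"  # assumed Consul HTTP address

def put_dashboard(name: str, path: str) -> None:
    # Store the dashboard JSON under the key the job reads with
    # {{ key "grafana_dashboards/<name>" }}.
    with open(path, "rb") as f:
        resp = requests.put(
            f"{CONSUL_ADDR}/v1/kv/grafana_dashboards/{name}", data=f.read()
        )
    resp.raise_for_status()

for name in ("nomad", "thermals", "NomadMem"):
    put_dashboard(name, f"dashboards/{name}.json")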

nomad_backup/loki.hcl Normal file

@@ -0,0 +1,97 @@
job "loki" {
datacenters = ["dc1"]
type = "service"
group "loki" {
count = 1
network {
port "http" {
static = 3100
}
port "grpc" {
static = 9096
}
}
task "loki" {
driver = "docker"
config {
image = "grafana/loki:2.9.3"
ports = ["http", "grpc"]
args = [
"-config.file=/local/loki-config.yml",
]
}
template {
data = <<EOH
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
common:
path_prefix: /alloc/data/loki
storage:
filesystem:
chunks_directory: /alloc/data/loki/chunks
rules_directory: /alloc/data/loki/rules
replication_factor: 1
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
limits_config:
retention_period: 168h # 7 days retention
ingestion_rate_mb: 10
ingestion_burst_size_mb: 20
compactor:
working_directory: /alloc/data/loki/compactor
shared_store: filesystem
ruler:
alertmanager_url: http://localhost:9093
EOH
destination = "local/loki-config.yml"
}
resources {
cpu = 500
memory = 512
}
service {
name = "loki"
port = "http"
tags = [
"logging",
]
check {
type = "http"
path = "/ready"
interval = "10s"
timeout = "2s"
}
}
}
}
}
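
With Loki registered in Consul and exposing /ready, a quick end-to-end check is to push a test line and read it back through the standard HTTP API. A sketch, assuming the node address below; port 3100 is the job's static port:

# Hypothetical smoke test against Loki's push/query API. The host is
# an assumption; port 3100 comes from the job above.
import time

import requests

LOKI = "http://192.168.4.250:3100"  # assumed node address

# Push one line; Loki expects nanosecond-precision string timestamps.
now_ns = time.time_ns()
payload = {
    "streams": [
        {"stream": {"job": "smoke-test"},
         "values": [[str(now_ns), "hello from smoke test"]]}
    ]
}
requests.post(f"{LOKI}/loki/api/v1/push", json=payload).raise_for_status()

# Query the last five minutes back out (end defaults to now).
resp = requests.get(
    f"{LOKI}/loki/api/v1/query_range",
    params={"query": '{job="smoke-test"}',
            "start": str(now_ns - 300 * 10**9)},
)
print(resp.json()["data"]["result"])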

nomad_backup/promtail.hcl Normal file

@@ -0,0 +1,149 @@
job "promtail" {
datacenters = ["dc1"]
type = "system" # Runs on every node
group "promtail" {
network {
port "http" {
static = 9080
}
}
task "promtail" {
driver = "docker"
config {
image = "grafana/promtail:2.9.3"
ports = ["http"]
args = [
"-config.file=/local/promtail-config.yml",
]
# Mount the Docker socket, journal, and machine-id for systemd logs
volumes = [
"/var/lib/docker/containers:/var/lib/docker/containers:ro",
"/var/log/journal:/var/log/journal:ro",
"/run/log/journal:/run/log/journal:ro",
"/etc/machine-id:/etc/machine-id:ro",
"/var/run/docker.sock:/var/run/docker.sock:ro",
]
# Privileged so the container can read the Docker socket and host journal
privileged = true
}
env {
HOSTNAME = "${node.unique.name}"
}
template {
data = <<EOH
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /alloc/data/positions.yaml
clients:
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
scrape_configs:
# Scrape Nomad server logs from journald
- job_name: nomad-server
journal:
json: false
max_age: 12h
path: /var/log/journal
labels:
job: nomad-server
component: server
host: {{ env "HOSTNAME" }}
relabel_configs:
- source_labels: ['__journal__systemd_unit']
regex: 'nomad.service'
action: keep
- source_labels: ['__journal__hostname']
target_label: 'journal_host'
- source_labels: ['__journal__systemd_unit']
target_label: 'unit'
# Scrape Nomad allocation logs
- job_name: nomad-alloc-logs
static_configs:
- targets:
- localhost
labels:
job: nomad-logs
host: {{ env "HOSTNAME" }}
__path__: /alloc/logs/*
pipeline_stages:
- match:
selector: '{job="nomad-logs"}'
stages:
- drop:
expression: '.*\.promtail\.(stdout|stderr)\.fifo.*'
# Scrape Docker container logs
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
refresh_interval: 5s
relabel_configs:
# Add hostname label
- replacement: {{ env "HOSTNAME" }}
target_label: 'host'
# Extract container name
- source_labels: ['__meta_docker_container_name']
regex: '/(.*)'
target_label: 'container'
# Extract Nomad job name if available
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_job_name']
target_label: 'nomad_job'
# Extract Nomad task name if available
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_task_name']
target_label: 'nomad_task'
# Extract Nomad namespace if available
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_namespace']
target_label: 'nomad_namespace'
# Set the log path
- source_labels: ['__meta_docker_container_id']
target_label: '__path__'
replacement: '/var/lib/docker/containers/$1/*.log'
EOH
destination = "local/promtail-config.yml"
change_mode = "restart"
}
resources {
cpu = 200
memory = 128
}
service {
name = "promtail"
port = "http"
tags = [
"logging",
]
check {
type = "http"
path = "/ready"
interval = "10s"
timeout = "2s"
}
}
}
}
}
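
The relabel rules above turn Docker and Nomad metadata into queryable Loki labels (host, container, nomad_job, nomad_task, nomad_namespace). A hedged example of using them from the query side, with the same assumed Loki address as in the smoke test above:

# Hypothetical query using labels produced by the relabel_configs above.
# The Loki address is an assumption; the label names come from the config.
import requests

LOKI = "http://192.168.4.250:3100"  # assumed

# With no start/end, query_range defaults to the last hour.
resp = requests.get(
    f"{LOKI}/loki/api/v1/query_range",
    params={"query": '{nomad_job="grafana"} |= "error"', "limit": "50"},
)
for stream in resp.json()["data"]["result"]:
    for _ts, line in stream["values"]:
        print(line)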


@@ -35,7 +35,7 @@ task "flaresolver" {
   config {
     image = "ghcr.io/flaresolverr/flaresolverr:latest"
     ports = ["http_flare"]
-    dns_servers = ["192.168.1.1", "1.1.1.1"]
+    dns_servers = ["192.168.4.250", "1.1.1.1"]
     memory_hard_limit = "2048"
     force_pull = false
   }
@@ -51,7 +51,7 @@ task "flaresolver" {
   config {
     image = "ghcr.io/linuxserver/prowlarr:develop"
     ports = ["http"]
-    dns_servers = ["192.168.1.1", "1.1.1.1"]
+    dns_servers = ["192.168.4.250", "1.1.1.1"]
     memory_hard_limit = "2048"
     volumes = [
       "/mnt/Public/configs/prowlarr_pg:/config",


@@ -0,0 +1,30 @@
job "qbitcheck" {
datacenters = ["dc1"]
type = "service"
group "qbitcheck" {
count = 1
task "qbitcheck" {
driver = "docker"
config {
image = "gitea.service.dc1.fbleagh.duckdns.org/sstent/qbitcheck:latest"
force_pull = true
}
resources {
cpu = 100 # 100 MHz
memory = 128 # 128 MB
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
}
}
}