chore: backup infrastructure configurations [skip ci]

github-actions[bot]
2026-02-08 02:08:34 +00:00
parent 8814f0bdc9
commit 828fa9d62d
36 changed files with 356 additions and 96 deletions

View File

@@ -0,0 +1,22 @@
job "check-firewall" {
datacenters = ["dc1"]
type = "batch"
group "check" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "odroid7"
}
task "check" {
driver = "docker"
config {
image = "busybox"
network_mode = "host"
command = "sh"
args = ["-c", "nc -zv 127.0.0.1 20202 && echo 'Listening'"]
}
}
}
}
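
A one-shot check like this is usually submitted and read back from the CLI (20202 is the LiteFS replication port used by the navidrome-litefs job below); a minimal sketch, assuming the spec is saved as check-firewall.nomad:

  # Submit the batch job, then locate its allocation
  nomad job run check-firewall.nomad
  nomad job status check-firewall

  # Read the nc output from the "check" task
  nomad alloc logs <alloc-id> check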

View File

@@ -0,0 +1,25 @@
job "cleanup-litefs-all" {
datacenters = ["dc1"]
type = "batch"
group "cleanup" {
count = 4
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid6|odroid7|odroid8|opti1"
}
task "clean" {
driver = "docker"
config {
image = "busybox"
volumes = [
"/mnt/configs/navidrome_litefs:/mnt/data"
]
command = "sh"
args = ["-c", "rm -rf /mnt/data/* && echo \"Cleaned $(hostname)\""]
}
}
}
}
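
Note that count = 4 with a hostname regexp asks for four allocations somewhere across the matching nodes; Nomad's batch scheduler does not guarantee one per host. If the intent is exactly one cleanup run per node, a sysbatch job expresses that directly; a minimal sketch, assuming the cluster runs Nomad 1.2+:

  job "cleanup-litefs-all" {
    datacenters = ["dc1"]
    type        = "sysbatch" # one allocation per eligible client

    constraint {
      attribute = "${attr.unique.hostname}"
      operator  = "regexp"
      value     = "odroid6|odroid7|odroid8|opti1"
    }

    group "cleanup" {
      task "clean" {
        driver = "docker"
        config {
          image   = "busybox"
          volumes = ["/mnt/configs/navidrome_litefs:/mnt/data"]
          command = "sh"
          args    = ["-c", "rm -rf /mnt/data/* && echo \"Cleaned $(hostname)\""]
        }
      }
    }
  }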

View File

@@ -0,0 +1,26 @@
job "host-check" {
datacenters = ["dc1"]
type = "batch"
constraint {
attribute = "${attr.unique.hostname}"
value = "odroid7"
}
group "check" {
task "ss" {
driver = "raw_exec"
config {
command = "ss"
args = ["-tln"]
}
}
task "ufw" {
driver = "raw_exec"
config {
command = "ufw"
args = ["status"]
}
}
}
}
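
Both tasks run in the same allocation, so each command's output is fetched per task (allocation ID from nomad job status host-check). raw_exec runs as the Nomad client's own user, typically root, which ufw status requires:

  nomad alloc logs <alloc-id> ss    # listening TCP sockets
  nomad alloc logs <alloc-id> ufw   # firewall rule status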

View File

@@ -31,7 +31,8 @@ job "jfs-controller" {
       }
       env {
         POD_NAME      = "csi-controller"
+        POD_NAMESPACE = "default"
       }
     }
   }
 }

View File

@@ -3,25 +3,24 @@ job "jfs-node" {
   type = "system"
   group "nodes" {
     network {
       port "metrics" {
         static = 9567
         to     = 8080
       }
     }
-    # Add this inside your "juicefs-plugin" task in jfs-node.nomad
-    service {
-      name = "juicefs-metrics"
-      port = "metrics" # References the static port 9567 defined in your network block
-      tags = ["prometheus"]
-      check {
-        type     = "http"
-        path     = "/metrics"
-        interval = "10s"
-        timeout  = "2s"
-      }
-    }
+    service {
+      name = "juicefs-metrics"
+      port = "metrics"
+      tags = ["prometheus"]
+      check {
+        type     = "http"
+        path     = "/metrics"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
     task "juicefs-plugin" {
       driver = "docker"
@@ -29,12 +28,12 @@ service {
       config {
         image             = "juicedata/juicefs-csi-driver:v0.31.1"
         memory_hard_limit = 2048
         ports             = ["metrics"]
         args = [
           "--endpoint=unix://csi/csi.sock",
           "--logtostderr",
           "--v=5",
-          "--nodeid=${node.unique.name}", # Better than hardcoded "test"
+          "--nodeid=${node.unique.name}",
           "--by-process=true",
         ]
@@ -53,7 +52,7 @@ ports = ["metrics"]
       }
       env {
         POD_NAME      = "csi-node"
-        AWS_REGION    = "garage"
+        POD_NAMESPACE = "default"
         # Aggregates metrics from children onto the 8080 port
         JFS_METRICS = "0.0.0.0:8080"
         # Ensures mounts run as background processes managed by the driver
@@ -61,4 +60,4 @@ ports = ["metrics"]
      }
    }
  }
}
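
With the static port published on every node (type = "system"), the exporter can be spot-checked directly, and Consul confirms the registration; a quick sketch, node address assumed:

  # Raw scrape of one node's JuiceFS metrics (host 9567 maps to container 8080)
  curl -s http://<node-ip>:9567/metrics | head

  # Verify the service registered in Consul via its DNS interface
  dig @127.0.0.1 -p 8600 juicefs-metrics.service.consul SRV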

View File

@@ -0,0 +1,108 @@
job "navidrome-litefs" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
group "navidrome" {
count = 2
update {
max_parallel = 1
min_healthy_time = "30s"
healthy_deadline = "5m"
auto_revert = false
}
constraint {
distinct_hosts = true
}
network {
# Request static ports on the host
port "http" {
static = 4533
to = 4533 # Direct to Navidrome
}
port "litefs" {
static = 20202
to = 20202 # Maps host 20202 to container 20202 (LiteFS Replication)
}
port "health" {
static = 8082
to = 8082 # Handled by nc loop in litefs.yml exec
}
}
task "navidrome" {
driver = "docker"
config {
image = "gitea.service.dc1.fbleagh.duckdns.org/sstent/navidrome-litefs:latest"
privileged = true # Still needed for FUSE
ports = ["http", "litefs", "health"]
volumes = [
"/mnt/configs/navidrome_litefs:/var/lib/litefs",
"/mnt/Public/configs/navidrome:/shared_data",
"/mnt/Public/Downloads/Clean_Music:/music/CleanMusic:ro",
"/mnt/Public/Downloads/news/slskd/downloads:/music/slskd:ro",
"/mnt/Public/Downloads/incoming_music:/music/incomingmusic:ro"
]
}
env {
# LiteFS Config
CONSUL_URL = "http://${attr.unique.network.ip-address}:8500"
ADVERTISE_IP = "${attr.unique.network.ip-address}"
PORT = "8080" # Internal proxy port (unused by direct routing but kept for image compat)
# Navidrome Config
ND_DATAFOLDER = "/local/data"
ND_CACHEFOLDER = "/shared_data/cache"
ND_CONFIGFILE = "/local/data/navidrome.toml"
# Database is on the LiteFS FUSE mount
ND_DBPATH = "/data/navidrome.db?_busy_timeout=30000&_journal_mode=WAL&_foreign_keys=on&synchronous=NORMAL"
ND_SCANSCHEDULE = "0"
ND_SCANNER_FSWATCHER_ENABLED = "false"
ND_LOGLEVEL = "info"
ND_REVERSEPROXYWHITELIST = "0.0.0.0/0"
ND_REVERSEPROXYUSERHEADER = "X-Forwarded-User"
}
service {
name = "navidrome"
tags = [
"navidrome",
"web",
"traefik.enable=true",
"urlprefix-/navidrome",
"tools",
"traefik.http.routers.navidromelan.rule=Host(`navidrome.service.dc1.consul`)",
"traefik.http.routers.navidromewan.rule=Host(`m.fbleagh.duckdns.org`)",
"traefik.http.routers.navidromewan.middlewares=dex@consulcatalog",
"traefik.http.routers.navidromewan.tls=true",
]
port = "http"
check {
type = "http"
port = "health"
path = "/"
interval = "10s"
timeout = "2s"
}
}
resources {
cpu = 500
memory = 512
}
}
}
}
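
Because both replicas claim the same static host ports, the distinct_hosts constraint is what prevents a placement collision. A post-deploy smoke test might look like this (hostnames are assumptions; /ping is Navidrome's built-in health endpoint, if the image exposes it):

  # Each replica answers directly on its host
  curl -s http://odroid7:4533/ping

  # LiteFS replication port reachable (the same probe the check-firewall job runs)
  nc -zv odroid7 20202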

View File

@@ -0,0 +1,20 @@
job "nomad-config-check" {
datacenters = ["dc1"]
type = "batch"
group "check" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "odroid7"
}
task "config" {
driver = "raw_exec"
config {
command = "grep"
args = ["-r", "disable_script_checks", "/etc/nomad.d/"]
}
}
}
}
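
One quirk: grep exits 1 when the pattern is absent, and Nomad treats a non-zero exit as a failed batch allocation, so "nothing found" shows up as an error. Wrapping the command makes the result unambiguous; a sketch of the task body:

  task "config" {
    driver = "raw_exec"
    config {
      command = "sh"
      args    = ["-c", "grep -r disable_script_checks /etc/nomad.d/ || echo 'not present'"]
    }
  }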

View File

@@ -0,0 +1,34 @@
job "port-discovery" {
datacenters = ["dc1"]
type = "batch"
group "scan" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "odroid6"
}
task "scan" {
driver = "docker"
config {
image = "busybox"
network_mode = "host"
command = "sh"
args = ["local/scan.sh"]
}
template {
data = <<EOF
#!/bin/sh
TARGET="192.168.4.227"
for p in 8085 8086 8087; do
echo "Testing $p..."
nc -zv -w 3 $TARGET $p 2>&1 | grep -q "refused" && echo "MATCH: $p is AVAILABLE (Refused)"
nc -zv -w 3 $TARGET $p 2>&1 | grep -q "succeeded" && echo "BUSY: $p is IN USE"
done
EOF
destination = "local/scan.sh"
}
}
}
}
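
Matching nc's wording is fragile: BusyBox nc (the binary in this image) phrases its results differently from the OpenBSD and GNU variants, so the "succeeded" branch may never fire here. Testing the exit status is variant-proof; a sketch of the same loop:

  #!/bin/sh
  TARGET="192.168.4.227"
  for p in 8085 8086 8087; do
    # nc -z exits 0 when the port accepts a connection, non-zero when refused or filtered
    if nc -z -w 3 "$TARGET" "$p" 2>/dev/null; then
      echo "BUSY: $p is IN USE"
    else
      echo "MATCH: $p looks AVAILABLE"
    fi
  done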