From 890c867763245ae5b88a610d75a1c32ad19c71a4 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Fri, 21 Nov 2025 21:10:40 +0000
Subject: [PATCH] chore: backup Nomad jobs [skip ci]
---
nomad_backup/acme.hcl | 99 ++++++++
nomad_backup/auth.hcl | 89 ++++++++
nomad_backup/calendar-proxy.hcl | 228 +++++++++++++++++++
nomad_backup/foodplanner.hcl | 99 ++++++++
nomad_backup/garage.hcl | 169 ++++++++++++++
nomad_backup/gitea-act-runners.hcl | 314 ++++++++++++++++++++++++++
nomad_backup/gitea.hcl | 84 +++++++
nomad_backup/gonic.hcl | 150 ++++++++++++
nomad_backup/hass.hcl | 285 +++++++++++++++++++++++
nomad_backup/immich.hcl | 151 +++++++++++++
nomad_backup/miniflux.hcl | 74 ++++++
nomad_backup/minihass.hcl | 48 ++++
nomad_backup/navidrome.hcl | 117 ++++++++++
nomad_backup/nginx.hcl | 98 ++++++++
nomad_backup/nzbget.hcl | 305 +++++++++++++++++++++++++
nomad_backup/postgres-15.hcl | 116 ++++++++++
nomad_backup/prometheus.hcl | 195 ++++++++++++++++
nomad_backup/prowlarr.hcl | 138 ++++++++++++
nomad_backup/qautomate.hcl | 59 +++++
nomad_backup/qbittorrent.hcl | 351 +++++++++++++++++++++++++++++
nomad_backup/radarr.hcl | 156 +++++++++++++
nomad_backup/seaweedfsmaster.hcl | 177 +++++++++++++++
nomad_backup/slskd.hcl | 149 ++++++++++++
nomad_backup/sonarr.hcl | 158 +++++++++++++
nomad_backup/traefik.hcl | 226 +++++++++++++++++++
nomad_backup/trilium.hcl | 107 +++++++++
nomad_backup/vaultwarden.hcl | 230 +++++++++++++++++++
nomad_backup/wallabag.hcl | 82 +++++++
nomad_backup/wireguard.hcl | 65 ++++++
29 files changed, 4519 insertions(+)
create mode 100644 nomad_backup/acme.hcl
create mode 100644 nomad_backup/auth.hcl
create mode 100644 nomad_backup/calendar-proxy.hcl
create mode 100644 nomad_backup/foodplanner.hcl
create mode 100644 nomad_backup/garage.hcl
create mode 100644 nomad_backup/gitea-act-runners.hcl
create mode 100644 nomad_backup/gitea.hcl
create mode 100644 nomad_backup/gonic.hcl
create mode 100644 nomad_backup/hass.hcl
create mode 100644 nomad_backup/immich.hcl
create mode 100644 nomad_backup/miniflux.hcl
create mode 100644 nomad_backup/minihass.hcl
create mode 100644 nomad_backup/navidrome.hcl
create mode 100644 nomad_backup/nginx.hcl
create mode 100644 nomad_backup/nzbget.hcl
create mode 100644 nomad_backup/postgres-15.hcl
create mode 100644 nomad_backup/prometheus.hcl
create mode 100644 nomad_backup/prowlarr.hcl
create mode 100644 nomad_backup/qautomate.hcl
create mode 100644 nomad_backup/qbittorrent.hcl
create mode 100644 nomad_backup/radarr.hcl
create mode 100644 nomad_backup/seaweedfsmaster.hcl
create mode 100644 nomad_backup/slskd.hcl
create mode 100644 nomad_backup/sonarr.hcl
create mode 100644 nomad_backup/traefik.hcl
create mode 100644 nomad_backup/trilium.hcl
create mode 100644 nomad_backup/vaultwarden.hcl
create mode 100644 nomad_backup/wallabag.hcl
create mode 100644 nomad_backup/wireguard.hcl
diff --git a/nomad_backup/acme.hcl b/nomad_backup/acme.hcl
new file mode 100644
index 0000000..3429d95
--- /dev/null
+++ b/nomad_backup/acme.hcl
@@ -0,0 +1,99 @@
+job "acme" {
+ datacenters = ["dc1"]
+ type = "batch"
+
+ periodic {
+ cron = "@weekly"
+ prohibit_overlap = true
+ }
+
+ group "acme" {
+ restart {
+ attempts = 1
+ delay = "30s"
+ }
+ task "acme" {
+ driver = "docker"
+
+ config {
+ image = "neilpang/acme.sh"
+ entrypoint = ["/local/acme_wrapper.sh"]
+ volumes = [
+ "/mnt/configs/${NOMAD_GROUP_NAME}:/acme.sh",
+ ]
+ }
+ env {
+ DEDYN_TOKEN="JPwQpUiWGkKHSkmGEC4YAeGiGFki"
+ DuckDNS_Token="e4b5ca33-1f4d-494b-b06d-6dd4600df662"
+ CONSUL_URL="${attr.unique.network.ip-address}"
+
+ }
+
+ template {
+ change_mode = "noop"
+ data = "{{ key \"scripts/acme.sh\" }}"
+ destination = "local/acme_wrapper.sh"
+ perms = 0777
+ }
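+
+ # The wrapper above is rendered from the Consul KV key "scripts/acme.sh".
+ # A minimal seeding sketch (assumption: the key holds the complete shell
+ # script; the local filename is illustrative):
+ #   consul kv put scripts/acme.sh @acme_wrapper.sh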
+
+
+
+ resources {
+ cpu = 256
+ memory = 64
+ }
+ }
+
+ task "init" {
+ driver = "docker"
+
+ lifecycle {
+ hook = "prestart"
+ sidecar = false
+ }
+
+ config {
+ image = "ghcr.io/sstent/rsync"
+ memory_hard_limit = 2048
+
+ volumes = [
+ "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
+ "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
+ "/mnt/Public/config/locks:/locks",
+ ]
+
+ args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz","--delete-excluded","--exclude=renewal","--exclude='live'","--exclude='archive'","--exclude='keys'","--exclude='csr'", "/configbackup/", "/config/", "--delete-before"]
+ }
+ resources {
+ cpu = 20 # 20 MHz
+ memory = 20 # 20 MB
+ }
+ } #end init task
+
+ task "finalsync" {
+ driver = "docker"
+
+ lifecycle {
+ hook = "poststop"
+ }
+
+ config {
+ // image = "pebalk/rsync"
+ image = "ghcr.io/sstent/rsync"
+ memory_hard_limit = 2048
+
+ volumes = [
+ "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
+ "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
+ "/mnt/Public/config/locks:/locks",
+ ]
+
+ args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av","--delete-excluded","--exclude=renewal","--exclude='live'","--exclude='archive'","--exclude='keys'","--exclude='csr'", "/config/", "/configbackup/","--remove-source-files"]
+ }
+ resources {
+ cpu = 20 # 20 MHz
+ memory = 20 # 20 MB
+ }
+ } #end finalsync task
+ }
+}
\ No newline at end of file
diff --git a/nomad_backup/auth.hcl b/nomad_backup/auth.hcl
new file mode 100644
index 0000000..cb24977
--- /dev/null
+++ b/nomad_backup/auth.hcl
@@ -0,0 +1,89 @@
+job "auth" {
+ datacenters = ["dc1"]
+ type = "service"
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "regexp"
+ value = "odroid.*"
+ }
+ group "auth" {
+ count = 1
+
+ task "fwdauth" {
+ driver = "docker"
+
+ config {
+ // image = "npawelek/traefik-forward-auth"
+ image = "thomseddon/traefik-forward-auth:2-arm"
+
+ port_map {
+ auth = 4181
+ }
+
+ volumes = [
+ "/etc/localtime:/etc/localtime:ro",
+ ]
+ }
+
+ env {
+ PROVIDERS_GOOGLE_CLIENT_ID = "807888907647-uog95jmiolsuh6ql1t8jm53l1jvuajck.apps.googleusercontent.com"
+ PROVIDERS_GOOGLE_CLIENT_SECRET = "B8bDri5mFvGv-Ghzbt8fLj4W"
+ SECRET = "ladskfdjmqwermnnbasfnmldas"
+ CONFIG = "/local/config.ini"
+ LIFETIME = "31536000"
+ WHITELIST = "stuart.stent@gmail.com,stephen.bunt@gmail.com"
+
+ // AUTH_HOST = "fwdauth.fbleagh.duckdns.org"
+ COOKIE_DOMAIN = "fbleagh.duckdns.org"
+ }
+
+ template {
+ data = "{{ key \"Dex\" }}"
+ destination = "local/config.ini"
+ change_mode = "restart"
+ }
+
+ resources {
+ cpu = 100 # 100 MHz
+ memory = 64 # 64 MB
+
+ network {
+ port "auth" {
+ static = 4181
+ }
+ }
+ }
+
+ service {
+ name = "dex"
+
+ tags = [
+ "fwdauth",
+ "web",
+ "traefik.http.routers.dex.rule=Host(`fwdauth.fbleagh.duckdns.org`,`fwdauth.fbleagh.dedyn.io`)",
+ "traefik.http.routers.dex.entrypoints=websecure",
+ "traefik.http.routers.dex.tls=true",
+ // "traefik.http.routers.dex.tls.certresolver=myresolver",
+ "traefik.http.middlewares.dex.forwardauth.address=http://dex.service.dc1.consul:4181",
+ "traefik.http.middlewares.dex.forwardauth.trustForwardHeader=true",
+ "traefik.http.middlewares.dex.forwardauth.authResponseHeaders=X-Forwarded-User",
+ "traefik.http.routers.auth.middlewares=dex",
+ "traefik.http.routers.traefik-forward-auth.middlewares=dex",
+ ]
+
+ port = "auth"
+
+ check {
+ type = "tcp"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+ } #end Dex
+ }
+}
diff --git a/nomad_backup/calendar-proxy.hcl b/nomad_backup/calendar-proxy.hcl
new file mode 100644
index 0000000..e35ff3d
--- /dev/null
+++ b/nomad_backup/calendar-proxy.hcl
@@ -0,0 +1,228 @@
+job "calendar-proxy" {
+ datacenters = ["dc1"]
+ type = "service"
+
+ group "web" {
+ count = 1
+
+ network {
+ port "http" {
+ to = 80
+ }
+ }
+
+ service {
+ name = "calendar-proxy"
+ port = "http"
+
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.calendar-proxy.rule=Host(`mail.fbleagh.duckdns.org`)",
+ "traefik.http.routers.calendar-proxy.entrypoints=web",
+ ]
+
+ check {
+ type = "http"
+ path = "/health"
+ interval = "10s"
+ timeout = "3s"
+ }
+ }
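+
+ # Rough usage sketch (assumption: ics.php reads the upstream calendar address
+ # from a query parameter; the parameter name "url" is illustrative only):
+ #   curl "http://mail.fbleagh.duckdns.org/ics.php?url=https://example.com/calendar.ics"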
+
+ # Nginx container
+ task "nginx" {
+ driver = "docker"
+ lifecycle {
+ hook = "prestart"
+ sidecar = true
+ }
+
+ config {
+ image = "nginx:alpine"
+ ports = ["http"]
+
+ volumes = [
+ "local/nginx.conf:/etc/nginx/nginx.conf",
+ "local/app:/var/www/html",
+ ]
+ }
+
+ template {
+ data = <<EOF
+<?php
+$context = stream_context_create([
+ "http" => [
+ "header" => "User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
+ ]
+]);
+if (($data = @file_get_contents($url, false, $context)) === false) {
+ $error = error_get_last();
+ http_response_code(500);
+ print "HTTP request failed: " . $error['message'];
+ exit;
+}
+header('Content-Type: text/calendar; charset=utf-8');
+print $data;
+?>
+EOF
+ destination = "local/app/ics.php"
+ }
+
+
+ resources {
+ cpu = 100
+ memory = 128
+ }
+ }
+
+ # PHP-FPM container
+ task "php-fpm" {
+ driver = "docker"
+
+ config {
+ image = "php:8.2-fpm-alpine"
+ network_mode = "container:nginx-${NOMAD_ALLOC_ID}"
+
+ volumes = [
+ "local/app:/var/www/html",
+ "local/php-fpm.conf:/usr/local/etc/php-fpm.d/www.conf",
+ ]
+ }
+
+ template {
+ data = <<EOF
+<?php
+$context = stream_context_create([
+ "http" => [
+ "header" => "User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
+ ]
+]);
+if (($data = @file_get_contents($url, false, $context)) === false) {
+ $error = error_get_last();
+ http_response_code(500);
+ print "HTTP request failed: " . $error['message'];
+ exit;
+}
+header('Content-Type: text/calendar; charset=utf-8');
+print $data;
+?>
+EOF
+ destination = "local/app/ics.php"
+ }
+
+ resources {
+ cpu = 200
+ memory = 256
+ }
+ }
+
+ # Restart policy
+ restart {
+ attempts = 3
+ interval = "5m"
+ delay = "25s"
+ mode = "fail"
+ }
+
+ # Update strategy
+ update {
+ max_parallel = 1
+ min_healthy_time = "10s"
+ healthy_deadline = "3m"
+ auto_revert = true
+ }
+ }
+}
\ No newline at end of file
diff --git a/nomad_backup/foodplanner.hcl b/nomad_backup/foodplanner.hcl
new file mode 100644
index 0000000..8f15b81
--- /dev/null
+++ b/nomad_backup/foodplanner.hcl
@@ -0,0 +1,99 @@
+job "foodplanner" {
+ datacenters = ["dc1"]
+
+ type = "service"
+
+ group "app" {
+ count = 1
+
+ network {
+ port "http" {
+ to = 8999
+ }
+ }
+
+ service {
+ name = "foodplanner"
+ port = "http"
+
+ check {
+ type = "http"
+ path = "/"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+
+ # Prestart restore task
+ task "restore" {
+ driver = "docker"
+ lifecycle {
+ hook = "prestart"
+ sidecar = false
+ }
+ config {
+ # image = "litestream/litestream:latest"
+ image = "litestream/litestream:0.3"
+ args = [
+ "restore",
+ # "-if-replica-exists",
+ #"-if-db-not-exists",
+ "-o", "/alloc/tmp/meal_planner.db",
+ "sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db"
+ ]
+ volumes = [
+ "/opt/nomad/data:/data"
+ ]
+ }
+ }
+
+ task "app" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/sstent/foodplanner:main"
+ ports = ["http"]
+
+ # Mount the SQLite database file to persist data
+ # Adjust the source path as needed for your environment
+ volumes = [
+ "/mnt/Public/configs/FoodPlanner_backups:/app/backups/",
+ ]
+ }
+ env {
+ DATABASE_PATH = "/alloc/tmp"
+ DATABASE_URL = "sqlite:////alloc/tmp/meal_planner.db"
+ }
+ resources {
+ cpu = 500
+ memory = 1024
+ }
+
+ # Restart policy
+ restart {
+ attempts = 3
+ interval = "10m"
+ delay = "15s"
+ mode = "fail"
+ }
+ }
+
+ # Litestream sidecar for continuous replication
+ task "litestream" {
+ driver = "docker"
+ lifecycle {
+ hook = "poststart" # runs after main task starts
+ sidecar = true
+ }
+ config {
+ # image = "litestream/litestream:0.5.0-test.10"
+ image = "litestream/litestream:0.3"
+ args = [
+ "replicate",
+ "/alloc/tmp/meal_planner.db",
+ "sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db"
+ ]
+ }
+ }
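+
+ # Verification sketch (assumption: the SFTP replica above is reachable from
+ # wherever the litestream CLI is run):
+ #   litestream snapshots sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db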
+ }
+}
\ No newline at end of file
diff --git a/nomad_backup/garage.hcl b/nomad_backup/garage.hcl
new file mode 100644
index 0000000..5ce9999
--- /dev/null
+++ b/nomad_backup/garage.hcl
@@ -0,0 +1,169 @@
+job "garage" {
+ datacenters = ["dc1"]
+ type = "system"
+
+ group "garage" {
+ # Network configuration for Garage
+ network {
+ port "s3_api" {
+ static = 3900
+ }
+ port "k2v_api" {
+ static = 3902
+ }
+ port "web_api" {
+ static = 3903
+ }
+ port "admin" {
+ static = 3904
+ }
+ port "rpc" {
+ static = 3901
+ }
+ }
+
+ task "garage" {
+ driver = "docker"
+
+ # Multi-architecture image selection
+ config {
+ image = "dxflrs/garage:v2.1.0"
+ ports = ["s3_api", "k2v_api", "web_api", "admin", "rpc"]
+
+ volumes = [
+ "/mnt/configs/garage_data:/var/lib/garage/data",
+ "/mnt/configs/garage_meta:/var/lib/garage/meta",
+ "local/garage.toml:/etc/garage.toml"
+ ]
+ }
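+
+ # Operational sketch (assumption: the container exposes the garage CLI at
+ # /garage, as the upstream image does):
+ #   nomad alloc exec -task garage <alloc-id> /garage status
+ #   nomad alloc exec -task garage <alloc-id> /garage layout show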
+
+ # Configuration template
+ template {
+ data = <<EOH
+Local Services - HTTP/HTTPS
+
+
+ Node Environment Information:
+ node_id: {{ env "node.unique.id" }}
+ datacenter: {{ env "NOMAD_DC" }}
+
+ EOH
+
+ destination = "local/data/index.html"
+ change_mode = "noop"
+ }
+
+ resources {
+ cpu = 100 # 100 MHz
+ memory = 64 # 64 MB
+ }
+ service {
+ name = "nginx"
+ tags = ["nginx", "web", "urlprefix-/nginx", "backend",
+ "traefik.http.routers.nginxlan.rule=Host(`nginx.service.dc1.consul`)",
+ "traefik.http.routers.nginxwan.rule=Host(`nginx.service.dc1.fbleagh.duckdns.org`)",
+ "traefik.http.routers.nginxwan.tls=true",
+ ]
+ port = "http"
+
+ check {
+ type = "tcp"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+ }
+
+ network {
+ port "http" {
+ to = 8080
+ }
+
+ // port "https" {
+ // to = 443
+ // }
+ }
+ }
+}
diff --git a/nomad_backup/nzbget.hcl b/nomad_backup/nzbget.hcl
new file mode 100644
index 0000000..0ac7b66
--- /dev/null
+++ b/nomad_backup/nzbget.hcl
@@ -0,0 +1,305 @@
+job "nzbget" {
+ # region = "global"
+ datacenters = ["dc1"]
+ type = "service"
+
+ # priority = 50
+
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+ constraint {
+ attribute = "${attr.unique.hostname}"
+ operator = "regexp"
+ value = "odroid.*"
+ }
+ update {
+ # Stagger updates every 60 seconds
+ stagger = "90s"
+ max_parallel = 1
+ healthy_deadline = "5m"
+ }
+ group "nzbget" {
+ count = 1
+
+ restart {
+ attempts = 8
+ interval = "20m"
+ delay = "10s"
+ mode = "delay"
+ }
+
+ // task "init-trigger" {
+ // driver = "docker"
+
+ // lifecycle {
+ // hook = "prestart"
+ // }
+
+ // config {
+ // image = "curlimages/curl"
+ // args = ["--request", "PUT", "--data", "${NOMAD_ALLOC_ID}", "http://${attr.unique.network.ip-address}:8500/v1/kv/${NOMAD_GROUP_NAME}"]
+ // }
+ // resources {
+ // cpu = 20 # 500 MHz
+ // memory = 20 # 128MB
+ // }
+ // }
+
+
+ task "ovpn-client" {
+ driver = "docker"
+ lifecycle {
+ hook = "prestart"
+ sidecar = true
+ }
+
+ config {
+ // image = "registry.service.dc1.consul:5000/openpyn:latest"
+ image = "qmcgaw/gluetun"
+ memory_hard_limit = "1024"
+
+ ports = [
+ "shadowsocks",
+ "nzbget",
+ "http_proxy",
+ "http_admin",
+ "sabnzb_admin",
+ "socks",
+ ]
+
+ cap_add = [
+ "NET_ADMIN",
+ "NET_BIND_SERVICE",
+ ]
+
+ #network_mode = "host"
+ #network_mode = "vpn"
+
+ volumes = [
+ "/etc/localtime:/etc/localtime",
+ ]
+ devices = [
+ {
+ host_path = "/dev/net/tun"
+ container_path = "/dev/net/tun"
+ },
+ ]
+ }
+
+ env {
+ // VPNFLAGS = "us --max-load 70 --top-servers 10 --pings 5"
+ // VPNFLAGS = "nl --max-load 70 --top-servers 10 --pings 5"
+ // VPNSP = "nordvpn"
+ // OPENVPN_USER = "yvPLaZ3xkXtnJKsyXDNQf9Ft"
+ // OPENVPN_PASSWORD = "SW8XvhGkSVuQitjuFrbH9WPA"
+ // REGION = "Netherlands"
+ ##Mullvad
+ VPNSP = "mullvad"
+ VPN_TYPE = "wireguard"
+ COUNTRY = "Canada"
+ CITY = "Toronto"
+ FIREWALL_VPN_INPUT_PORTS = "56987"
+ WIREGUARD_PRIVATE_KEY = "2FHwQX1jxk+qeMmXUtSGRc2kKF1WHeSCyIgHNW+7akA=" #ActiveLynx
+ WIREGUARD_ADDRESS = "10.66.246.4/32"
+
+
+
+ HTTPPROXY = "on"
+ UPDATER_PERIOD= "24h"
+ SHADOWSOCKS_PASSWORD = "farscape5"
+ SHADOWSOCKS = "off"
+ DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
+ DOT = "off"
+ }
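+
+ # Usage sketch (assumption: LAN clients reach the VPN egress through the
+ # proxy ports published in the network block below):
+ #   curl -x http://<node-ip>:8888 https://ifconfig.me
+ #   curl --socks5 <node-ip>:1080 https://ifconfig.me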
+
+ service {
+ name = "${TASKGROUP}-admin"
+ tags = ["global", "ovpn-openpyn"]
+ port = "http_admin"
+ }
+
+ service {
+ name = "${TASKGROUP}"
+ tags = ["global", "ovpn-openpyn"]
+ port = "shadowsocks"
+ }
+
+ service {
+ name = "nzbget"
+ tags = ["global", "ovpn-openpyn"]
+ port = "nzbget"
+ }
+ service {
+ name = "sabnzb"
+ tags = ["global", "ovpn-openpyn", "enable_gocast",
+ "gocast_vip=192.168.1.247/32",
+ "gocast_nat=tcp:8080:8080",
+ "gocast_nat=udp:8080:8080",]
+ port = "sabnzb_admin"
+ }
+service {
+ name = "socks-nord"
+ tags = ["global", "ovpn-openpyn", "enable_gocast",
+ "gocast_vip=192.168.1.243/32",
+ "gocast_nat=tcp:1080:1080",
+ "gocast_nat=udp:1080:1080",]
+ port = "socks"
+
+ }
+
+
+ resources {
+ cpu = 100 # 100 MHz
+ memory = 100 # 100 MB
+ }
+ } #task ovpn
+
+
+ task "dante" {
+ driver = "docker"
+
+ config {
+ image = "serjs/go-socks5-proxy"
+ // image = "ghcr.io/sstent/dante:latest"
+ network_mode = "container:ovpn-client-${NOMAD_ALLOC_ID}"
+ memory_hard_limit = 256
+
+ devices = [
+ {
+ host_path = "/dev/net/tun"
+ container_path = "/dev/net/tun"
+ },
+ ]
+ }
+ env {
+ REQUIRE_AUTH = "false"
+ }
+
+
+ resources {
+ cpu = 64 # 64 MHz
+ memory = 128 # 128MB
+ }
+ }
+ ######################################################################
+ ######################################################################
+ ######################################################################
+
+ task "init" {
+ driver = "docker"
+
+ lifecycle {
+ hook = "prestart"
+ sidecar = false
+ }
+
+ config {
+ image = "ghcr.io/sstent/rsync:v0.3.5"
+ memory_hard_limit = "2048"
+
+ volumes = [
+ "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
+ "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
+ "/mnt/Public/config/locks:/locks",
+ ]
+
+ args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz", "--exclude=Backups", "/configbackup/", "/config/", "--delete-before"]
+ }
+
+ resources {
+ cpu = 20 # 20 MHz
+ memory = 20 # 20 MB
+ }
+ } #end init task
+ task "finalsync" {
+ driver = "docker"
+
+ lifecycle {
+ hook = "poststop"
+ }
+
+ config {
+ // image = "pebalk/rsync"
+ image = "ghcr.io/sstent/rsync:v0.3.5"
+ memory_hard_limit = "2048"
+
+ volumes = [
+ "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
+ "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
+ "/mnt/Public/config/locks:/locks",
+ ]
+
+ args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz", "/config/", "/configbackup/"]
+ }
+
+ resources {
+ cpu = 20 # 20 MHz
+ memory = 20 # 20 MB
+ }
+ } #end finalsync task
+ // task "sync" {
+ // driver = "docker"
+
+ // lifecycle {
+ // hook = "poststart"
+ // sidecar = true
+ // }
+
+ // config {
+ // image = "ghcr.io/sstent/rsync:v0.3.5"
+ // memory_hard_limit = "2048"
+
+ // volumes = [
+ // "/mnt/configs/:/configs",
+ // "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
+ // ]
+
+ // args = ["client"]
+ // }
+ // resources {
+ // cpu = 20 # 500 MHz
+ // memory = 20 # 128MB
+ // }
+ // env {
+ // CRON_TASK_1 = "*/20 8-20 * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;"
+ // }
+ // } #end sync task
+
+ ######################################################################
+ ######################################################################
+ ######################################################################
+
+ network {
+ port "shadowsocks" {
+ static = "8338"
+ to = "8388"
+ }
+
+ port "http_proxy" {
+ static = "8888"
+ to = "8888"
+ }
+ port "http_admin" {
+ static = "8000"
+ to = "8000"
+ }
+
+ port "sabnzb_admin" {
+ static = "8080"
+ to = "8080"
+ }
+
+
+ port "socks" {
+ static = "1080"
+ to = "1080"
+ }
+ port "nzbget" {
+ static = "6789"
+ to = "6789"
+ }
+ }
+ }
+}
diff --git a/nomad_backup/postgres-15.hcl b/nomad_backup/postgres-15.hcl
new file mode 100644
index 0000000..86ee7d7
--- /dev/null
+++ b/nomad_backup/postgres-15.hcl
@@ -0,0 +1,116 @@
+job "postgres-15" {
+ type = "system"
+ datacenters = ["dc1"]
+
+ group "group" {
+ count = 1
+
+ network {
+
+ port api {
+ static = 8008
+ to = 8008
+ }
+ port pg {
+ static = 5432
+ to = 5432
+ }
+ }
+
+ task "db" {
+ driver = "docker"
+
+ template {
+ data = <<EOH
+API="$(grep -oP '<ApiKey>\K.*?(?=<.*?>)' /config/config.xml)"
+curl -f "http://localhost:9696/api/v1/system/status?apikey=$API"
+ EOH
+
+ destination = "local/healthcheck.sh"
+ perms = "0755"
+ }
+
+ env {
+ // SHARE = "Public;/mount/Public;yes;no;yes;all;none;;Public"
+ # GLOBAL = "socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536"
+ # PERMISSIONS = "true"
+ # WORKGROUP = "WORKGROUP"
+ TZ = "EST5EDT"
+
+ PUID = 1000
+ PGID = 1000
+ }
+
+ # We must specify the resources required for
+ # this task to ensure it runs on a machine with
+ # enough capacity.
+ resources {
+ cpu = 500 # 500 MHz
+ memory = 256 # 256 MB
+ }
+
+ # Specify configuration related to log rotation
+ logs {
+ max_files = 10
+ max_file_size = 15
+ }
+
+ # Controls the timeout between signalling a task it will be killed
+ # and killing the task. If not set a default is used.
+ kill_timeout = "10s"
+ } #End main task
+
+ network {
+ // mbits = 100
+
+ port "http" {
+ static = 9696
+ to = 9696
+ }
+ port "http_flare" {
+ static = 8191
+ to = 8191
+ }
+ }
+ }
+}
diff --git a/nomad_backup/qautomate.hcl b/nomad_backup/qautomate.hcl
new file mode 100644
index 0000000..b7ea82e
--- /dev/null
+++ b/nomad_backup/qautomate.hcl
@@ -0,0 +1,59 @@
+job "qautomate" {
+ periodic {
+ cron = "*/30 * * * *"
+ prohibit_overlap = true
+ }
+ datacenters = ["dc1"]
+
+ type = "batch"
+
+ group "app" {
+ count = 1
+
+ task "setup" {
+ driver = "docker"
+
+ lifecycle {
+ hook = "prestart"
+ }
+
+ config {
+ image = "ghcr.io/sstent/ubuntu-python3"
+ // command = "/bin/bash"
+ args = ["-v","-c", "/local/prestart.sh"]
+
+ }
+
+ template {
+ data = <<EOH
+Info
+7878
+
+*
+9898
+False
+237c27f22504440385e5ee295fd65eb5
+Forms
+master
+
+True
+
+Docker
+False
+Radarr
+DisabledForLocalAddresses
+postgres
+postgres
+5432
+master.postgres.service.dc1.consul
+
+ EOH
+
+ destination = "local/config.xml"
+ perms = "0755"
+ }
+
+ service {
+ name = "${TASKGROUP}"
+ tags = ["radarr", "tools"]
+ port = "http"
+
+ check {
+ type = "script"
+ name = "check_up"
+ command = "/local/healthcheck.sh"
+ interval = "5m"
+ timeout = "25s"
+
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ template {
+ data = <<EOH
+curl -f "http://localhost:7878/api/v3/system/status?apikey=$(grep -oP '<ApiKey>(.*)</ApiKey>' /config/config.xml | sed -e 's/<[^>]*>//g')"
+ EOH
+
+ destination = "local/healthcheck.sh"
+ perms = "0755"
+ }
+
+ env {
+ TZ = "EST5EDT"
+ PUID = 1000
+ PGID = 1000
+ }
+
+ resources {
+ cpu = 500 # 500 MHz
+ memory = 256 # 256 MB
+
+ // network {
+ // port "radarr" {
+ // static = "7878"
+ // }
+ // }
+ }
+
+ logs {
+ max_files = 10
+ max_file_size = 15
+ }
+
+ kill_timeout = "10s"
+ } #Task
+
+ network {
+ // mode = "bridge"
+ port "http" {
+ static = 7878
+ to = 7878
+ }
+ }
+ } #Group
+}
diff --git a/nomad_backup/seaweedfsmaster.hcl b/nomad_backup/seaweedfsmaster.hcl
new file mode 100644
index 0000000..00b5878
--- /dev/null
+++ b/nomad_backup/seaweedfsmaster.hcl
@@ -0,0 +1,177 @@
+job "seaweedfsmaster" {
+ datacenters = ["dc1"]
+ type = "service"
+
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+
+ constraint {
+ operator = "distinct_hosts"
+ value = "true"
+ }
+
+ // constraint {
+ // attribute = "${attr.unique.hostname}"
+ // operator = "regexp"
+ // # We need static IPs for master servers
+ // # dc1-n1 - 172.21.100.51
+ // # dc1-n2 - 172.21.100.52
+ // # dc1-n3 - 172.21.100.53
+ // value = "^odroid5.node.dc1.consul|odroid6.node.dc1.consul|odroid7.node.dc1.consul|odroid8.node.dc1.consul$"
+ // }
+ update {
+ stagger = "10s"
+ max_parallel = 1
+ healthy_deadline = "5m"
+ }
+
+ group "seaweedfsmaster" {
+ count = 3
+
+ restart {
+ attempts = 6
+ interval = "1m"
+ delay = "10s"
+ mode = "delay"
+ }
+
+ task "seaweedfsadmin" {
+ driver = "docker"
+
+ config {
+ image = "chrislusf/seaweedfs"
+ memory_hard_limit = "2048"
+ entrypoint = ["/usr/bin/weed"]
+
+ args = [
+ "admin",
+ "-masters=${NOMAD_GROUP_NAME}0.service.dc1.consul:9333,${NOMAD_GROUP_NAME}1.service.dc1.consul:9333,${NOMAD_GROUP_NAME}2.service.dc1.consul:9333",
+ "-dataDir=/data",
+ ]
+
+ volumes = [
+ "/mnt/Public/configs/seaweedfadmin:/data/",
+ ]
+
+ ports = ["seaweedfs_admin"]
+ }
+
+
+
+}
+
+
+ task "seaweedfsmaster" {
+ driver = "docker"
+
+ config {
+ image = "chrislusf/seaweedfs"
+ memory_hard_limit = "2048"
+ entrypoint = ["/usr/bin/weed"]
+
+ args = [
+ "server",
+ "-ip=${NOMAD_GROUP_NAME}${NOMAD_ALLOC_INDEX}.service.dc1.consul",
+ "-ip.bind=0.0.0.0",
+ "-master.port=9333",
+ "-master.defaultReplication=002",
+ "-master.peers=${NOMAD_GROUP_NAME}0.service.dc1.consul:9333,${NOMAD_GROUP_NAME}1.service.dc1.consul:9333,${NOMAD_GROUP_NAME}2.service.dc1.consul:9333",
+ "-dir=/data",
+ "-filer",
+ "-filer.port=8877",
+ "-filer.port.grpc=18877",
+ "-s3",
+ "-webdav",
+ "-volume=true",
+ "-volume.port=9444",
+ "-volume.port.grpc=19444",
+ "-volume.max=100"
+ ]
+
+ volumes = [
+ "/mnt/configs/seaweedfs:/data/",
+ ]
+
+ ports = ["seaweedfs", "seaweedfs_high", "seaweedfs_filer", "seaweed_s3", "seaweed_webdav", "seaweedfs_filer_high","seaweedfs_vol","seaweedfs_vol_high"]
+ }
+
+ env {
+ seaweedfs_ACCESS_KEY = "seaweedfs"
+ seaweedfs_SECRET_KEY = "seaweedfs123"
+ }
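+
+ # Access sketch (assumption: the -s3 gateway answers on the static port 8333
+ # defined in the network block below):
+ #   aws --endpoint-url http://seaweedfsmaster0.service.dc1.consul:8333 s3 ls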
+
+ service {
+ name = "${NOMAD_GROUP_NAME}${NOMAD_ALLOC_INDEX}"
+ tags = ["global", "seaweedfs"]
+ port = "seaweedfs"
+ }
+
+ service {
+ name = "seaweedfiler"
+ tags = ["global", "seaweedfs"]
+ port = "seaweedfs_filer"
+ }
+
+ resources {
+ cpu = 64 # 64 MHz
+ memory = 64 # 64 MB
+ } #end resources
+ } #end task
+
+ network {
+ port "seaweedfs" {
+ static = 9333
+ to = 9333
+ }
+
+ port "seaweedfs_high" {
+ static = 19333
+ to = 19333
+ }
+
+ port "seaweedfs_filer" {
+ static = 8877
+ to = 8877
+ }
+
+ port "seaweedfs_filer_high" {
+ static = 18877
+ to = 18877
+ }
+
+ port "seaweed_s3" {
+ static = 8333
+ to = 8333
+ }
+
+ port "seaweed_webdav" {
+ static = 7333
+ to = 7333
+ }
+ port "seaweedfs_vol" {
+ static = "9444"
+ }
+
+ port "seaweedfs_vol_high" {
+ static = "19444"
+ }
+ port "seaweedfs_admin" {
+ static = "23646"
+ }
+ // port "s8080" {
+ // static = 8080
+ // to = 8080
+ // }
+ // port "s18080" {
+ // static = 18080
+ // to = 18080
+ // }
+ // port "seaweed_volume" {
+ // static = 9444
+ // to = 9444
+ // }
+ }
+ } # end group
+} # end job
diff --git a/nomad_backup/slskd.hcl b/nomad_backup/slskd.hcl
new file mode 100644
index 0000000..0794503
--- /dev/null
+++ b/nomad_backup/slskd.hcl
@@ -0,0 +1,149 @@
+# There can only be a single job definition per file.
+# Create a job with ID and Name 'example'
+job "slskd" {
+ datacenters = ["dc1"]
+ type = "service"
+
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+constraint {
+ attribute = "${attr.cpu.arch}"
+ operator = "regexp"
+ value = "amd64"
+ }
+
+ update {
+ stagger = "10s"
+ max_parallel = 1
+ }
+
+ group "slskd" {
+ count = 1
+
+ restart {
+ attempts = 2
+ interval = "1m"
+ delay = "10s"
+ mode = "fail"
+ }
+
+task "slskd-vpn" {
+ driver = "docker"
+ lifecycle {
+ hook = "prestart"
+ sidecar = true
+ }
+
+ config {
+ // image = "registry.service.dc1.consul:5000/openpyn:latest"
+ // image = "qmcgaw/gluetun"
+ image = "qmcgaw/gluetun"
+
+ memory_hard_limit = "1024"
+ ports = ["http","https","guac"]
+
+ cap_add = [
+ "NET_ADMIN",
+ "NET_BIND_SERVICE",
+ ]
+
+ #network_mode = "host"
+ #network_mode = "vpn"
+
+ volumes = [
+ "/etc/localtime:/etc/localtime",
+ "/mnt/syncthing/mullvad:/vpn",
+
+ ]
+ devices = [
+ {
+ host_path = "/dev/net/tun"
+ container_path = "/dev/net/tun"
+ },
+ ]
+ }
+
+ env {
+ VPN_SERVICE_PROVIDER="airvpn"
+ VPN_TYPE="wireguard"
+ WIREGUARD_PRIVATE_KEY="SPpgnVCsOnz+zzorzTiOmvDF8GElgp27dSIeYAq43Vk="
+ WIREGUARD_PRESHARED_KEY="TBb2gU8pSTSG6lT4QYr7pzNWN7wZEn0yeG4VDTtuy50="
+ WIREGUARD_ADDRESSES="10.186.222.247/32"
+ SERVER_COUNTRIES="Canada"
+ SERVER_CITIES="Montreal"
+ FIREWALL_VPN_INPUT_PORTS = "53304"
+ HEALTH_TARGET_ADDRESS="1.1.1.1"
+ HEALTH_SUCCESS_WAIT_DURATION="30s"
+ HEALTH_VPN_DURATION_INITIAL="3600s"
+ HEALTH_VPN_DURATION_ADDITION="600s"
+ // HEALTH_TARGET_ADDRESS="cloudflare.com:443"
+
+
+ ##Mullvad
+ #VPNSP = "mullvad"
+ #VPN_TYPE = "wireguard"
+ #COUNTRY = "Canada"
+ #CITY = "Toronto"
+ #FIREWALL_VPN_INPUT_PORTS = "56987"
+ #WIREGUARD_PRIVATE_KEY = "iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8="
+ #WIREGUARD_ADDRESS = "10.64.141.217/32"
+ HTTPPROXY = "on"
+ SHADOWSOCKS_PASSWORD = "farscape5"
+ SHADOWSOCKS = "on"
+ DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
+ DOT = "off"
+ WEBUI_PORT=8080
+ }
+
+
+ resources {
+ cpu = 100 # 100 MHz
+ memory = 250 # 250 MB
+ }
+ } #task ovpn
+
+ task "nicotine" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/fletchto99/nicotine-plus-docker:latest"
+ memory_hard_limit = "1024"
+
+ network_mode = "container:slskd-vpn-${NOMAD_ALLOC_ID}"
+
+ volumes = [
+ "/mnt/Public/Downloads/news/slskd/config:/config",
+ "/mnt/Public/Downloads/news/slskd/shared:/shared",
+ "/mnt/Public/Downloads/news/slskd/downloads/:/downloads",
+ "/mnt/Public/Downloads/news/slskd/incomplete/:/incomplete",
+ ]
+ }
+ env {
+ PUID=1000
+ PGID=1000
+ TZ="Etc/UTC"
+ LISTENING_PORT="53304"
+ }
+ }
+
+
+ network {
+ // mbits = 100
+
+ port "http" {
+ static = 5000
+ to = 5000
+ }
+ port "https" {
+ static = 5001
+ to = 5001
+ }
+ port "guac" {
+ static = 6080
+ to = 6080
+ }
+ }
+ }
+}
diff --git a/nomad_backup/sonarr.hcl b/nomad_backup/sonarr.hcl
new file mode 100644
index 0000000..b2fa4a3
--- /dev/null
+++ b/nomad_backup/sonarr.hcl
@@ -0,0 +1,158 @@
+# There can only be a single job definition per file.
+# Create a job with ID and Name 'example'
+job "sonarr" {
+ datacenters = ["dc1"]
+ type = "service"
+
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+ constraint {
+ attribute = "${attr.cpu.arch}"
+ operator = "regexp"
+ value = "amd64"
+ }
+
+ update {
+ stagger = "10s"
+ max_parallel = 1
+ }
+ group "sonarr" {
+ count = 1
+
+ restart {
+ attempts = 2
+ interval = "1m"
+ delay = "10s"
+ mode = "fail"
+ }
+
+ task "sonarr" {
+ // driver = "raw_exec"
+ // config {
+ //   command = "docker"
+ //   args = ["run", "--rm", "--name=sonarr",
+ //     "-e", "PUID=1000", "-e", "PGID=1000", "-e", "TZ=EST5EDT",
+ //     "-p", "8989:8989",
+ //     "-v", "/mnt/syncthing/sonarrv3:/config",
+ //     "-v", "/mnt/Public/Downloads/tv:/tv",
+ //     "-v", "/mnt/Public/Downloads/news:/downloads",
+ //     "--cpuset-cpus", "4-7",
+ //     "linuxserver/sonarr:preview"]
+ // }
+
+ driver = "docker"
+
+ config {
+ image = "linuxserver/sonarr:develop"
+
+ ports = ["http"]
+
+ // dns_servers = ["192.168.1.1", "1.1.1.1"]
+ memory_hard_limit = "2048"
+ // cpuset_cpus = "4-7"
+
+ volumes = [
+ "/mnt/Public/Downloads/news:/downloads",
+ "/mnt/Public/Downloads/tv:/tv",
+ // "/mnt/configs/sonarr:/config",
+ "/mnt/Public/configs/sonarr_pg:/config",
+ // "local/config.xml:/config/config.xml"
+ ]
+
+ // "/mnt/gv0/sonarr:/config",
+ force_pull = false
+ }
+
+ service {
+ name = "${TASKGROUP}"
+ tags = ["sonarr", "tools"]
+ port = "http"
+
+ check {
+ type = "script"
+ name = "check_up"
+ command = "/local/healthcheck.sh"
+ interval = "60s"
+ timeout = "5s"
+
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ template {
+ data = <<EOH
+curl -f "http://localhost:8989/sonarr/api/v3/system/status?apikey=$(grep -oP '<ApiKey>(.*)</ApiKey>' /config/config.xml | sed -e 's/<[^>]*>//g')"
+ EOH
+
+ destination = "local/healthcheck.sh"
+ perms = "0755"
+ }
+
+ template {
+ data = "---\nkey: {{ key \"ovpn-client\" }}"
+ destination = "local/file.yml"
+ change_mode = "restart"
+ }
+ template {
+ data = <<EOH
+Info
+8989
+/sonarr
+*
+9898
+False
+1632787062fb47a9a6eb4c88e32b3ff3
+None
+develop
+True
+
+Docker
+False
+/config/restart.sh
+Sonarr
+postgres
+postgres
+5432
+master.postgres.service.dc1.consul
+
+ EOH
+
+ destination = "local/config.xml"
+ perms = "0755"
+ }
+ env {
+ // SHARE = "Public;/mount/Public;yes;no;yes;all;none;;Public"
+ # GLOBAL = "socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536"
+ # PERMISSIONS = "true"
+ # WORKGROUP = "WORKGROUP"
+ TZ = "EST5EDT"
+
+ PUID = 1000
+ PGID = 1000
+ }
+
+ # We must specify the resources required for
+ # this task to ensure it runs on a machine with
+ # enough capacity.
+ resources {
+ cpu = 500 # 500 MHz
+ memory = 256 # 256 MB
+ }
+
+ # Specify configuration related to log rotation
+ logs {
+ max_files = 12
+ max_file_size = 15
+ }
+
+ # Controls the timeout between signalling a task it will be killed
+ # and killing the task. If not set a default is used.
+ kill_timeout = "10s"
+ } #End main task
+
+ network {
+ // mbits = 100
+
+ port "http" {
+ static = 8989
+ to = 8989
+ }
+ }
+ }
+}
diff --git a/nomad_backup/traefik.hcl b/nomad_backup/traefik.hcl
new file mode 100644
index 0000000..4c07e8e
--- /dev/null
+++ b/nomad_backup/traefik.hcl
@@ -0,0 +1,226 @@
+job "traefik" {
+ datacenters = ["dc1"]
+ type = "system"
+
+ constraint {
+ attribute = "${attr.kernel.name}"
+ value = "linux"
+ }
+
+ update {
+ stagger = "10s"
+ max_parallel = 1
+ healthy_deadline = "5m"
+ }
+
+ group "traefik" {
+ count = 1
+
+ restart {
+ attempts = 6
+ interval = "1m"
+ delay = "10s"
+ mode = "delay"
+ }
+
+ task "traefik" {
+ driver = "docker"
+
+ config {
+ image = "traefik:2.9"
+ // network_mode = "host"
+
+ args = [
+ // "--api.dashboard",
+ // "--providers.consulcatalog.defaultRule=Host(`{{ .Name }}.service.dc1.consul`)",
+ // "--providers.consulcatalog.endpoint.address=${attr.unique.network.ip-address}:8500",
+ // "--providers.consulcatalog.exposedbydefault=true",
+ // "--metrics=true",
+ // "--metrics.prometheus=true",
+ // "--metrics.prometheus.entryPoint=web",
+ // "--entryPoints.web.address=:80",
+ // "--entryPoints.websecure.address=:443",
+ // "--entryPoints.openvpn.address=:1194/udp",
+ "--configFile=/local/file.yml",
+ // "--certificatesresolvers.myresolver.acme.email=stuart.stent@gmail.com",
+ // "--certificatesresolvers.myresolver.acme.storage=/acmecert/acme.json",
+ // "--certificatesresolvers.myresolver.acme.tlschallenge=true",
+ // "--certificatesresolvers.myresolver-int.acme.email=stuart.stent@gmail.com",
+ // "--certificatesresolvers.myresolver-int.acme.storage=/acmecert/acme.json",
+ // "--certificatesresolvers.myresolver-int.acme.tlschallenge=true",
+ // "--certificatesresolvers.myresolver-int.acme.dnschallenge=true",
+ // "--certificatesresolvers.myresolver-int.acme.dnschallenge.provider=duckdns",
+ "--accesslog=true",
+ // "--serversTransport.insecureSkipVerify=true",
+ ]
+ volumes = [
+ "/var/run/docker.sock:/var/run/docker.sock",
+ "/mnt/mnt/configs/letsencrypt:/acmecert/",
+ ]
+
+ // dns_servers = ["192.168.4.1", "192.168.4.250"]
+ ports = ["traefik", "traefikhttps","traefikui"]
+
+ memory_hard_limit = 20480
+ }
+
+ env {
+ TZ = "EST5EDT"
+ PUID = 1000
+ PGID = 1000
+ DUCKDNS_TOKEN="e4b5ca33-1f4d-494b-b06d-6dd4600df662"
+ }
+
+ template {
+ left_delimiter = "[["
+ right_delimiter = "]]"
+
+ data = <