chore: backup Nomad jobs [skip ci]

github-actions[bot]
2025-11-21 21:10:40 +00:00
parent fe8d4976ed
commit 890c867763
29 changed files with 4519 additions and 0 deletions

nomad_backup/acme.hcl

@@ -0,0 +1,99 @@
job "acme" {
datacenters = ["dc1"]
type = "batch"
periodic {
cron = "@weekly"
prohibit_overlap = true
}
group "acme" {
restart {
attempts = 1
delay = "30s"
}
task "acme" {
driver = "docker"
config {
image = "neilpang/acme.sh"
entrypoint = ["/local/acme_wrapper.sh"]
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/acme.sh",
]
}
env {
DEDYN_TOKEN="JPwQpUiWGkKHSkmGEC4YAeGiGFki"
DuckDNS_Token="e4b5ca33-1f4d-494b-b06d-6dd4600df662"
CONSUL_URL="${attr.unique.network.ip-address}"
}
template {
change_mode = "noop"
data = "{{ key \"scripts/acme.sh\" }}"
destination = "local/acme_wrapper.sh"
perms = "0777"
}
resources {
cpu = 256
memory = 64
}
}
task "init" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "ghcr.io/sstent/rsync"
memory_hard_limit = 2048
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz","--delete-excluded","--exclude=renewal","--exclude='live'","--exclude='archive'","--exclude='keys'","--exclude='csr'", "/configbackup/", "/config/", "--delete-before"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
} #end init task
task "finalsync" {
driver = "docker"
lifecycle {
hook = "poststop"
}
config {
// image = "pebalk/rsync"
image = "ghcr.io/sstent/rsync"
memory_hard_limit = 2048
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av","--delete-excluded","--exclude=renewal","--exclude='live'","--exclude='archive'","--exclude='keys'","--exclude='csr'", "/config/", "/configbackup/","--remove-source-files"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
} #end finalsync task
}
}

nomad_backup/auth.hcl

@@ -0,0 +1,89 @@
job "auth" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
group "auth" {
count = 1
task "fwdauth" {
driver = "docker"
config {
// image = "npawelek/traefik-forward-auth"
image = "thomseddon/traefik-forward-auth:2-arm"
port_map {
auth = 4181
}
volumes = [
"/etc/localtime:/etc/localtime:ro",
]
}
env {
PROVIDERS_GOOGLE_CLIENT_ID = "807888907647-uog95jmiolsuh6ql1t8jm53l1jvuajck.apps.googleusercontent.com"
PROVIDERS_GOOGLE_CLIENT_SECRET = "B8bDri5mFvGv-Ghzbt8fLj4W"
SECRET = "ladskfdjmqwermnnbasfnmldas"
CONFIG = "/local/config.ini"
LIFETIME = "31536000"
WHITELIST = "stuart.stent@gmail.com,stephen.bunt@gmail.com"
// AUTH_HOST = "fwdauth.fbleagh.duckdns.org"
COOKIE_DOMAIN = "fbleagh.duckdns.org"
}
template {
data = "{{ key \"Dex\" }}"
destination = "local/config.ini"
change_mode = "restart"
}
resources {
cpu = 100 # 100 MHz
memory = 64 # 64 MB
network {
port "auth" {
static = 4181
}
}
}
service {
name = "dex"
tags = [
"fwdauth",
"web",
"traefik.http.routers.dex.rule=Host(`fwdauth.fbleagh.duckdns.org`,`fwdauth.fbleagh.dedyn.io`)",
"traefik.http.routers.dex.entrypoints=websecure",
"traefik.http.routers.dex.tls=true",
// "traefik.http.routers.dex.tls.certresolver=myresolver",
"traefik.http.middlewares.dex.forwardauth.address=http://dex.service.dc1.consul:4181",
"traefik.http.middlewares.dex.forwardauth.trustForwardHeader=true",
"traefik.http.middlewares.dex.forwardauth.authResponseHeaders=X-Forwarded-User",
"traefik.http.routers.auth.middlewares=dex",
"traefik.http.routers.traefik-forward-auth.middlewares=dex",
]
port = "auth"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
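# Downstream jobs in this backup attach this forward-auth middleware by tagging
# their own Traefik routers with it through the Consul catalog provider, e.g.
# (pattern taken from navidrome.hcl / minihass.hcl):
#   "traefik.http.routers.navidromewan.middlewares=dex@consulcatalog",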
} #end Dex
}
}


@@ -0,0 +1,228 @@
job "calendar-proxy" {
datacenters = ["dc1"]
type = "service"
group "web" {
count = 1
network {
port "http" {
to = 80
}
}
service {
name = "calendar-proxy"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.calendar-proxy.rule=Host(`mail.fbleagh.duckdns.org`)",
"traefik.http.routers.calendar-proxy.entrypoints=web",
]
check {
type = "http"
path = "/health"
interval = "10s"
timeout = "3s"
}
}
# Nginx container
task "nginx" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
image = "nginx:alpine"
ports = ["http"]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"local/app:/var/www/html",
]
}
template {
data = <<EOF
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream php-fpm {
server 127.0.0.1:9000;
}
server {
listen 80;
server_name mail.fbleagh.duckdns.org;
root /var/www/html;
index ics.php;
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
location / {
try_files $uri $uri/ /ics.php?$query_string;
}
location ~ \.php$ {
fastcgi_pass php-fpm;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
# Hide nginx version
server_tokens off;
}
}
EOF
destination = "local/nginx.conf"
}
template {
data = <<EOF
<?php
$url = $_GET['url'];
// Accept ICS calendar links only
$url_arr = explode('/', $url);
if ($url_arr[2] !== 'outlook.office365.com'
|| $url_arr[3] !== 'owa'
|| $url_arr[4] !== 'calendar'
|| end($url_arr) !== 'calendar.ics'
) {
http_response_code(500);
print "Error";
exit;
}
$context = stream_context_create([
"http" => [
"header" => "User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
]
]);
if (($data = @file_get_contents($url, false, $context)) === false) {
$error = error_get_last();
http_response_code(500);
print "HTTP request failed: " . $error['message'];
exit;
}
header('Content-Type: text/calendar; charset=utf-8');
print $data;
?>
EOF
destination = "local/app/ics.php"
}
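# Example request against the proxy above; ics.php only accepts Office 365
# published-calendar URLs of the shape checked in the script (the calendar id
# segment is a placeholder):
#   curl "http://mail.fbleagh.duckdns.org/?url=https://outlook.office365.com/owa/calendar/<calendar-id>/calendar.ics"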
resources {
cpu = 100
memory = 128
}
}
# PHP-FPM container
task "php-fpm" {
driver = "docker"
config {
image = "php:8.2-fpm-alpine"
network_mode = "container:nginx-${NOMAD_ALLOC_ID}"
volumes = [
"local/app:/var/www/html",
"local/php-fpm.conf:/usr/local/etc/php-fpm.d/www.conf",
]
}
template {
data = <<EOF
[www]
user = www-data
group = www-data
listen = 127.0.0.1:9000
listen.owner = www-data
listen.group = www-data
pm = dynamic
pm.max_children = 5
pm.start_servers = 2
pm.min_spare_servers = 1
pm.max_spare_servers = 3
EOF
destination = "local/php-fpm.conf"
}
template {
data = <<EOF
<?php
$url = $_GET['url'];
# // Accept ICS calendar links only
# $url_arr = explode('/', $url);
# if ($url_arr[2] !== 'outlook.office365.com'
# || $url_arr[3] !== 'owa'
# || $url_arr[4] !== 'calendar'
# || end($url_arr) !== 'calendar.ics'
# ) {
# http_response_code(500);
# print "Error";
# exit;
# }
$context = stream_context_create([
"http" => [
"header" => "User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
]
]);
if (($data = @file_get_contents($url, false, $context)) === false) {
$error = error_get_last();
http_response_code(500);
print "HTTP request failed: " . $error['message'];
exit;
}
header('Content-Type: text/calendar; charset=utf-8');
print $data;
?>
EOF
destination = "local/app/ics.php"
}
resources {
cpu = 200
memory = 256
}
}
# Restart policy
restart {
attempts = 3
interval = "5m"
delay = "25s"
mode = "fail"
}
# Update strategy
update {
max_parallel = 1
min_healthy_time = "10s"
healthy_deadline = "3m"
auto_revert = true
}
}
}


@@ -0,0 +1,99 @@
job "foodplanner" {
datacenters = ["dc1"]
type = "service"
group "app" {
count = 1
network {
port "http" {
to = 8999
}
}
service {
name = "foodplanner"
port = "http"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
# Prestart restore task
task "restore" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
# image = "litestream/litestream:latest"
image = "litestream/litestream:0.3"
args = [
"restore",
# "-if-replica-exists",
#"-if-db-not-exists",
"-o", "/alloc/tmp/meal_planner.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db"
]
volumes = [
"/opt/nomad/data:/data"
]
}
}
task "app" {
driver = "docker"
config {
image = "ghcr.io/sstent/foodplanner:main"
ports = ["http"]
# Mount the SQLite database file to persist data
# Adjust the source path as needed for your environment
volumes = [
"/mnt/Public/configs/FoodPlanner_backups:/app/backups/",
]
}
env {
DATABASE_PATH = "/alloc/tmp"
DATABASE_URL = "sqlite:////alloc/tmp/meal_planner.db"
}
resources {
cpu = 500
memory = 1024
}
# Restart policy
restart {
attempts = 3
interval = "10m"
delay = "15s"
mode = "fail"
}
}
# Litestream sidecar for continuous replication
task "litestream" {
driver = "docker"
lifecycle {
hook = "poststart" # runs after main task starts
sidecar = true
}
config {
# image = "litestream/litestream:0.5.0-test.10"
image = "litestream/litestream:0.3"
args = [
"replicate",
"/alloc/tmp/meal_planner.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db"
]
}
}
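# To confirm replication is writing snapshots to the SFTP target, the same
# image can be run ad hoc (a sketch; assumes litestream's "snapshots" command
# accepts a replica URL, as used for restore above):
#   docker run --rm litestream/litestream:0.3 snapshots \
#     sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/foodplanner.db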
}
}

nomad_backup/garage.hcl

@@ -0,0 +1,169 @@
job "garage" {
datacenters = ["dc1"]
type = "system"
group "garage" {
# Network configuration for Garage
network {
port "s3_api" {
static = 3900
}
port "k2v_api" {
static = 3902
}
port "web_api" {
static = 3903
}
port "admin" {
static = 3904
}
port "rpc" {
static = 3901
}
}
task "garage" {
driver = "docker"
# Multi-architecture image selection
config {
image = "dxflrs/garage:v2.1.0"
ports = ["s3_api", "k2v_api", "web_api", "admin", "rpc"]
volumes = [
"/mnt/configs/garage_data:/var/lib/garage/data",
"/mnt/configs/garage_meta:/var/lib/garage/meta",
"local/garage.toml:/etc/garage.toml"
]
}
# Configuration template
template {
data = <<EOF
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
db_engine = "lmdb"
replication_factor = 3
consistency_mode = "consistent"
compression_level = 1
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "{{ env "NOMAD_IP_rpc" }}:{{ env "NOMAD_PORT_rpc" }}"
rpc_secret = "{{ key "garage/rpc_secret" }}"
[consul_discovery]
consul_http_addr = "http://consul.service.dc1.consul:8500"
service_name = "garage-admin"
[s3_api]
s3_region = "garage"
api_bind_addr = "[::]:3900"
root_domain = ".s3.garage.dc1.consul"
[s3_web]
bind_addr = "[::]:3903"
root_domain = ".web.garage.dc1.consul"
[k2v_api]
api_bind_addr = "[::]:3902"
[admin]
api_bind_addr = "[::]:3904"
admin_token = "{{ key "garage/admin_token" }}"
metrics_token = "{{ key "garage/metrics_token" }}"
EOF
destination = "local/garage.toml"
change_mode = "restart"
}
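# The template above pulls its secrets from Consul KV. A minimal sketch for
# seeding those keys (assumes the consul CLI and openssl on a cluster member;
# key names match the lookups in the template):
#   consul kv put garage/rpc_secret "$(openssl rand -hex 32)"
#   consul kv put garage/admin_token "$(openssl rand -hex 32)"
#   consul kv put garage/metrics_token "$(openssl rand -hex 32)"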
# Environment variables
env {
RUST_LOG = "garage=info"
}
# Resources
resources {
cpu = 500
memory = 512
}
# Service registration
service {
name = "garage-s3"
port = "s3_api"
tags = [
"s3",
"storage"
]
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
service {
name = "garage-admin"
port = "admin"
tags = [
"admin",
"metrics"
]
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
service {
name = "garage-web"
port = "web_api"
tags = [
"web",
"http"
]
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
service {
name = "garage-k2v"
port = "k2v_api"
tags = [
"k2v",
"storage"
]
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# Kill timeout
kill_timeout = "30s"
}
}
# Update strategy for system jobs
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "5m"
auto_revert = true
}
}


@@ -0,0 +1,314 @@
job "gitea-act-runners" {
datacenters = ["dc1"]
type = "service"
# ARM (armhf) runner
group "runner-arm" {
count = 0
constraint {
attribute = "${attr.cpu.arch}"
operator = "="
value = "arm"
}
task "act-runner" {
driver = "docker"
config {
image = "gitea/act_runner:latest"
command = "act_runner"
args = ["daemon"]
privileged = true
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/mnt/Public/configs/gitearunner_arm:/data"
]
mount {
type = "bind"
source = "local/config.yaml"
target = "/config.yaml"
}
}
template {
data = <<-EOH
log:
level: info
runner:
file: /data/.runner
capacity: 1
timeout: 3h
insecure: false
fetch_timeout: 5s
fetch_interval: 2s
labels:
- "arm:docker://gitea/runner-images:ubuntu-latest-arm"
cache:
enabled: true
dir: /data/cache
host: ""
port: 0
container:
network: bridge
privileged: false
options: ""
workdir_parent: /data/actions
EOH
destination = "local/config.yaml"
env = false
}
template {
data = <<-EOH
GITEA_INSTANCE_URL={{ key "gitea/instance_url" }}
GITEA_RUNNER_REGISTRATION_TOKEN={{ key "gitea/runner_token" }}
GITEA_RUNNER_NAME=runner-arm-${NOMAD_ALLOC_ID}
GITEA_RUNNER_LABELS=arm:docker://gitea/runner-images:ubuntu-latest-arm
CONFIG_FILE=/config.yaml
EOH
destination = "local/runner.env"
env = true
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "gitea-act-runner"
tags = [
"gitea",
"act-runner",
"arm"
]
check {
type = "script"
command = "/bin/sh"
args = ["-c", "pgrep -f 'act_runner daemon'"]
interval = "30s"
timeout = "5s"
}
}
}
}
# ARM64 runner
group "runner-arm64" {
count = 0
constraint {
attribute = "${attr.cpu.arch}"
operator = "="
value = "arm64"
}
task "act-runner" {
driver = "docker"
config {
image = "gitea/act_runner:latest"
command = "act_runner"
args = ["daemon"]
privileged = true
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/mnt/Public/configs/gitearunner_arm64:/data"
]
mount {
type = "bind"
source = "local/config.yaml"
target = "/config.yaml"
}
}
template {
data = <<-EOH
log:
level: info
runner:
file: /data/.runner
capacity: 1
timeout: 3h
insecure: false
fetch_timeout: 5s
fetch_interval: 2s
labels:
- "arm64:docker://gitea/runner-images:ubuntu-latest"
cache:
enabled: true
dir: /data/cache
host: ""
port: 0
container:
network: bridge
privileged: false
options: ""
workdir_parent: /data/actions
EOH
destination = "local/config.yaml"
env = false
}
template {
data = <<-EOH
GITEA_INSTANCE_URL={{ key "gitea/instance_url" }}
GITEA_RUNNER_REGISTRATION_TOKEN={{ key "gitea/runner_token" }}
GITEA_RUNNER_NAME=runner-arm64-${NOMAD_ALLOC_ID}
GITEA_RUNNER_LABELS=arm64:docker://gitea/runner-images:ubuntu-latest
CONFIG_FILE=/config.yaml
EOH
destination = "local/runner.env"
env = true
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "gitea-act-runner"
tags = [
"gitea",
"act-runner",
"arm64"
]
check {
type = "script"
command = "/bin/sh"
args = ["-c", "pgrep -f 'act_runner daemon'"]
interval = "30s"
timeout = "5s"
}
}
}
}
# AMD64 runner
group "runner-amd64" {
count = 1
constraint {
attribute = "${attr.cpu.arch}"
operator = "="
value = "amd64"
}
task "act-runner" {
driver = "docker"
config {
image = "gitea/act_runner:latest"
command = "act_runner"
args = ["daemon"]
privileged = true
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/mnt/Public/configs/gitearunner_amd64:/data"
]
mount {
type = "bind"
source = "local/config.yaml"
target = "/config.yaml"
}
}
template {
data = <<-EOH
log:
level: info
runner:
file: /data/.runner
capacity: 1
timeout: 3h
insecure: false
fetch_timeout: 5s
fetch_interval: 2s
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "amd64:docker://gitea/runner-images:ubuntu-latest"
cache:
enabled: true
dir: /data/cache
host: ""
port: 0
container:
network: bridge
privileged: false
options: ""
workdir_parent: /data/actions
EOH
destination = "local/config.yaml"
env = false
}
template {
data = <<-EOH
GITEA_INSTANCE_URL={{ key "gitea/instance_url" }}
GITEA_RUNNER_REGISTRATION_TOKEN={{ key "gitea/runner_token" }}
GITEA_RUNNER_NAME=runner-amd64-${NOMAD_ALLOC_ID}
GITEA_RUNNER_LABELS=ubuntu-latest:docker://gitea/runner-images:ubuntu-latest,amd64:docker://gitea/runner-images:ubuntu-latest
CONFIG_FILE=/config.yaml
EOH
destination = "local/runner.env"
env = true
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "gitea-act-runner"
tags = [
"gitea",
"act-runner",
"amd64"
]
check {
type = "script"
command = "/bin/sh"
args = ["-c", "pgrep -f 'act_runner daemon'"]
interval = "30s"
timeout = "5s"
}
}
}
}
}

nomad_backup/gitea.hcl

@@ -0,0 +1,84 @@
job "gitea" {
datacenters = ["dc1"]
type = "service"
group "gitea-server" {
count = 1
network {
# Define ports for Gitea's web UI (HTTP) and SSH
port "http" {
static = 3000
to = 3000
}
port "ssh" {
to = 22
}
}
service {
name = "gitea"
tags = ["git-server", "web", "sslcert",
"traefik.http.routers.gitea.rule=Host(`gitea.service.dc1.consul`,`gitea.service.dc1.fbleagh.duckdns.org`)",
"traefik.http.routers.gitea.tls=true",
"traefik.http.middlewares.gitea-headers.headers.customrequestheaders.X-Forwarded-Proto=https",
"traefik.http.routers.gitea.middlewares=gitea-headers"
]
port = "http"
# Health check to ensure Gitea is running
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
task "gitea" {
driver = "docker"
config {
image = "gitea/gitea:latest"
memory_hard_limit = "20480"
ports = ["http", "ssh"]
volumes = ["/mnt/Public/configs/gitea:/data"]
}
# Gitea uses environment variables for configuration.
env {
# --- Database Configuration (PostgreSQL) ---
# The main data (users, repos metadata, issues, etc.)
GITEA__database__DB_TYPE = "postgres"
GITEA__database__HOST = "master.postgres.service.dc1.consul:5432" # Replace with your endpoint
GITEA__database__NAME = "gitea"
# Best practice is to inject these values from a secret store such as Vault or
# Consul KV rather than hardcoding them here; a commented sketch of the Consul
# KV approach follows this env block.
GITEA__database__USER = "postgres"
GITEA__database__PASSWD = "postgres"
GITEA__database__SSL_MODE = "disable" # Change if using TLS
GITEA__security__REVERSE_PROXY_TRUSTED_PROXIES = "*" # Trust traefik
# --- Gitea Host Configuration ---
# Used for generating URLs in emails, SSH clone links, etc.
GITEA__server__DOMAIN = "gitea.service.dc1.fbleagh.duckdns.org"
GITEA__server__ROOT_URL = "https://gitea.service.dc1.fbleagh.duckdns.org/" # Adjust if using a reverse proxy
GITEA__server__SSH_LISTEN_PORT = "22"
GITEA__server__SSH_PORT = "${NOMAD_HOST_PORT_ssh}" # The port exposed on the host for SSH
GITEA__actions__ENABLED=true
GITEA__packages__ENABLED=true
# Set the user Gitea runs as inside the container (important for permissions)
USER_UID = "1000"
USER_GID = "1000"
}
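# A minimal sketch of injecting the database credentials from Consul KV instead
# of hardcoding them above. The "gitea/db_user" and "gitea/db_password" key
# names are hypothetical; the template-with-env pattern mirrors the one used by
# the gitea-act-runners job in this backup.
# template {
#   data        = <<-EOH
#   GITEA__database__USER={{ key "gitea/db_user" }}
#   GITEA__database__PASSWD={{ key "gitea/db_password" }}
#   EOH
#   destination = "local/db.env"
#   env         = true
# }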
resources {
cpu = 500
memory = 256
}
}
}
}

nomad_backup/gonic.hcl

@@ -0,0 +1,150 @@
job "gonic" {
datacenters = ["dc1"]
type = "service"
group "gonic" {
count = 1
task "gonic" {
driver = "docker"
config {
image = "sentriz/gonic"
port_map {
http = 80
}
volumes = [
"/mnt/configs/gonic/data:/data",
"/mnt/configs/gonic/cache:/cache",
"/mnt/Public/Downloads/Clean_Music:/music/CleanMusic:ro",
"/mnt/Public/Downloads/incoming_music:/music/incomingmusic:ro",
"/mnt/Public/Downloads/podcasts:/podcasts",
"/mnt/Public/Downloads/musicplaylists:/musicplaylists",
]
}
env {
GONIC_SCAN_INTERVAL = "120"
GONIC_MUSIC_PATH = "/music"
GONIC_PODCAST_PATH = "/podcasts"
GONIC_PLAYLISTS_PATH = "/musicplaylists"
}
resources {
cpu = 100 # 100 MHz
memory = 256 # 256 MB
network {
port "http" {
static = "4747"
}
}
}
service {
name = "gonic"
tags = [
"gonic",
"web",
"urlprefix-/gonic",
"backend",
"traefik.http.routers.goniclan.rule=Host(`gonic.service.dc1.consul`)",
"traefik.http.routers.gonicwan.rule=Host(`mg.fbleagh.duckdns.org`)",
"traefik.http.routers.gonicwan.tls=true",
]
// "traefik.http.middlewares.gonic_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm",
port = "http"
meta {
ALLOC = "${NOMAD_ALLOC_ID}"
}
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
} #end gonic
task "init-manual" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks"
]
args = ["flock", "-x","/locks/${NOMAD_GROUP_NAME}_rsync.lock","rsync","-av","--exclude=Backups","/configbackup/","/config/","--delete-before"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
}
task "finalsync-manual" {
driver = "docker"
lifecycle {
hook = "poststop"
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks"
]
args = ["flock", "-x","/locks/${NOMAD_GROUP_NAME}_rsync.lock","rsync","-av","/config/","/configbackup/"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
}
task "db-sync" {
driver = "docker"
lifecycle {
hook = "poststart"
sidecar = true
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
]
// args = ["/usr/local/bin/litestream", "replicate", "-config","/local/litestream.yml"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
template {
data = <<EOH
dbs:
- path: /data/gonic.db
replicas:
- path: /configbackup
EOH
destination = "local/litestream.yml"
}
} #####
}
}

nomad_backup/hass.hcl

@@ -0,0 +1,285 @@
job "hass" {
# region = "global"
datacenters = ["dc1"]
type = "service"
# priority = 50
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
// constraint {
// operator = "distinct_hosts"
// value = "true"
// }
affinity {
attribute = "${attr.unique.hostname}"
value = "odroid2"
weight = 80
}
update {
# Stagger updates every 60 seconds
stagger = "10s"
max_parallel = 1
}
group "hass" {
count = 1
restart {
attempts = 99
interval = "1h"
delay = "10s"
mode = "delay"
}
task "init" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
memory_hard_limit = "2048"
image_pull_timeout = "10m"
force_pull = false
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks"
]
}
env {
DB_NAME = "home-assistant_v2.db"
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
// template {
// data = <<EOH
// dbs:
// - path: /config/radarr.db
// replicas:
// - path: /configbackup
// EOH
// destination = "local/litestream.yml"
// }
}
task "finalsync" {
driver = "docker"
lifecycle {
hook = "poststop"
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks"
]
// args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av","--exclude='8-20 * * *.db8-20 * * *'","--exclude='8-20 * * *.db'","--exclude='8-20 * * *.db-litestream'","--exclude='generations'","/config/","/configbackup/"]
}
resources {
cpu = 20 # 20 MHz
memory = 128 # 128MB
}
}
task "sync" {
driver = "docker"
lifecycle {
hook = "poststart"
sidecar = true
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/:/configs",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
]
args = ["client"]
}
env {
CRON_TASK_1 = "50 8-20 * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;"
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
}
task "hass" {
driver = "docker"
config {
// image = "homeassistant/armhf-homeassistant:latest"
image = "ghcr.io/home-assistant/home-assistant:stable"
ports = ["http","http_8989"]
memory_hard_limit = "2048"
network_mode = "host"
cap_add = ["NET_ADMIN"]
force_pull = false
volumes = [
"/etc/localtime:/etc/localtime",
"local/configuration.yaml:/config/configuration.yaml",
"local/ui-lovelace.yaml:/config/ui-lovelace.yaml",
"/mnt/configs/hass:/config",
]
// "local/auth_provider.homeassistant:/config/.storage/auth_provider.homeassistant"
}
env {
JEMALLOC_DISABLE=true
}
service {
name = "${TASKGROUP}"
tags = ["global", "homeassistant", "tools","logo=home-assistant"]
port = "http"
// check {
// name = "hass-alive"
// type = "http"
// type = "script"
// command = "curl -sS http://localhost:8123"
// interval = "120s"
// timeout = "15s"
// path = "/api/"
// port = "http"
// check_restart {
// limit = 10
// grace = "90s"
// ignore_warnings = false
// }
// }
}
// template {
// data = <<EOH
// homeassistant:
// name: Our_House
// latitude: 40.7654
// longitude: -73.8175
// elevation: 26
// unit_system: metric
// time_zone: America/New_York
// auth_providers:
// - type: trusted_networks
// trusted_networks:
// - 127.0.0.1
// - ::1
// - 192.168.1.0/24
// allow_bypass_login: true
// - type: homeassistant
// frontend:
// lovelace:
// mode: yaml
// config:
// http:
// sun:
// automation:
// - alias: LightsAtSunset
// trigger:
// platform: sun
// event: sunset
// action:
// service: switch.turn_on
// entity_id: switch.lampdrawers
// wemo:
// static:
// - 192.168.99.200 # StuBed
// - 192.168.99.201 # LampDrawers
// - 192.168.99.202 # BigLamp
// - 192.168.99.203 # TallTree
// - 192.168.99.204 # ShortTree
// - 192.168.99.205 # TallTree
// switch:
// - platform: template
// switches:
// living_room_screen_off:
// friendly_name: 'living_room_screen_off'
// turn_on:
// - service: input_boolean.turn_on
// entity_id: input_boolean.living_room_screen_off
// - service: webostv.command
// data:
// {
// "entity_id": "media_player.lg_webos_smart_tv",
// "command": "com.webos.service.tvpower/power/turnOffScreen"
// }
// turn_off:
// - service: input_boolean.turn_off
// entity_id: input_boolean.living_room_screen_off
// - service: webostv.command
// data:
// {
// "entity_id": "media_player.lg_webos_smart_tv",
// "command": "com.webos.service.tvpower/power/turnOnScreen"
// }
// EOH
// destination = "local/configuration.yaml"
// }
template {
change_mode = "restart"
data = "{{ key \"homeassistant/configuration.yml\" }}"
destination = "local/configuration.yaml"
}
template {
change_mode = "restart"
data = "{{ key \"homeassistant/lovelace.yml\" }}"
destination = "local/ui-lovelace.yaml"
}
resources {
cpu = 256 # 256 MHz
memory = 128 # 128MB
}
# Specify configuration related to log rotation
logs {
max_files = 10
max_file_size = 15
}
kill_timeout = "10s"
}
network {
port "http" {
static = 8123
to = 8123
}
port "http_8989" {
static = 8989
to = 8989
}
}
}
}

nomad_backup/immich.hcl

@@ -0,0 +1,151 @@
job "immich" {
datacenters = ["dc1"] # Specify your datacenter
type = "service"
group "immich" {
count = 1
task "immich" {
driver = "docker"
config {
image = "ghcr.io/imagegenius/immich:2.2.3-noml"
memory_hard_limit = "2048"
ports = ["http"]
volumes = [
"/mnt/Public/configs/Immich:/config",
"/mnt/Public/Immich/Photos:/photos",
"/mnt/Public/Immich/libraries:/libraries" # optional
]
}
env {
PUID = "1000"
PGID = "1000"
TZ = "Etc/UTC"
DB_HOSTNAME = "master.postgres.service.dc1.consul"
DB_USERNAME = "postgres"
DB_PASSWORD = "postgres"
DB_DATABASE_NAME = "immich"
DB_PORT = "5432" # optional
REDIS_HOSTNAME = "redis.service.dc1.consul"
REDIS_PORT = "6379" # optional
REDIS_PASSWORD = "" # optional
MACHINE_LEARNING_HOST = "0.0.0.0" # optional
MACHINE_LEARNING_PORT = "3003" # optional
MACHINE_LEARNING_WORKERS = "1" # optional
MACHINE_LEARNING_WORKER_TIMEOUT = "120" # optional
DB_VECTOR_EXTENSION = "pgvector"
}
service {
name = "${TASKGROUP}"
tags = ["immich", "tools",
"traefik.http.routers.immichlan.rule=Host(`immich.service.dc1.consul`)",
"traefik.http.routers.immichwan.rule=Host(`immich.fbleagh.duckdns.org`)",
"traefik.http.routers.immichwan.tls=true",
]
port = "http"
}
resources {
cpu = 500
memory = 512
}
}
network {
port "http" {
static = 8088
to = 8080
}
}
restart {
attempts = 2
interval = "5m"
delay = "30s"
mode = "delay"
}
}
# group "immichpg" {
# count = 1
# task "immichpg" {
# driver = "docker"
# config {
# image = "tensorchord/pgvecto-rs:pg14-v0.2.0"
# ports = ["pg"]
# }
# env {
# POSTGRES_USER="postgres"
# POSTGRES_PASSWORD = "postgres"
# POSTGRES_DB = "immich"
# }
# service {
# name = "${TASKGROUP}"
# tags = ["immich", "tools"]
# port = "pg"
# }
# resources {
# cpu = 500
# memory = 500
# }
# }
# network {
# port "pg" {
# static = 5433
# to = 5432
# }
# }
# restart {
# attempts = 2
# interval = "5m"
# delay = "30s"
# mode = "delay"
# }
# }
group "redis" {
count = 1
task "redis" {
driver = "docker"
config {
image = "redis"
ports = ["redis"]
}
service {
name = "${TASKGROUP}"
tags = ["wallabag", "tools"]
port = "redis"
}
resources {
cpu = 250
memory = 256
}
}
network {
port "redis" {
static = 6379
}
}
restart {
attempts = 2
interval = "5m"
delay = "30s"
mode = "delay"
}
}
}

nomad_backup/miniflux.hcl

@@ -0,0 +1,74 @@
job "miniflux" {
datacenters = ["dc1"]
group "miniflux" {
count = 1
task "miniflux" {
driver = "docker"
config {
image = "miniflux/miniflux:latest"
ports = ["http"]
#health_check {
# test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
# interval = "10s"
# timeout = "5s"
#}
}
service {
name = "${TASKGROUP}"
tags = ["miniflux", "tools",
"traefik.http.routers.minifluxlan.rule=Host(`miniflux.service.dc1.consul`)",
"traefik.http.routers.minifluxwan.rule=Host(`miniflux.fbleagh.duckdns.org`)",
"traefik.http.routers.minifluxwan.tls=true",
]
port = "http"
#check {
# type = "script"
# name = "check_up"
# command = "/local/healthcheck.sh"
# interval = "60s"
# timeout = "5s"
# check_restart {
# limit = 3
# grace = "90s"
# ignore_warnings = false
# }
#}
}
env {
DATABASE_URL = "postgres://postgres:postgres@master.postgres.service.dc1.consul/miniflux?sslmode=disable"
RUN_MIGRATIONS = "1"
CREATE_ADMIN = "1"
ADMIN_USERNAME = "admin"
ADMIN_PASSWORD = "farscape5"
BASE_URL = "https://miniflux.fbleagh.duckdns.org"
OAUTH2_PROVIDER="google"
OAUTH2_CLIENT_ID="807888907647-g6i7t2auvbrjinf151qupc663tgpn6gh.apps.googleusercontent.com"
OAUTH2_CLIENT_SECRET="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"
OAUTH2_REDIRECT_URL="https://miniflux.fbleagh.duckdns.org/oauth2/google/callback"
OAUTH2_USER_CREATION="1"
}
resources {
cpu = 500
memory = 256
}
}
network {
// mbits = 100
port "http" {
to = 8080
}
}
}
}

nomad_backup/minihass.hcl

@@ -0,0 +1,48 @@
job "minihass" {
datacenters = ["dc1"]
group "smart-home" {
network {
mode = "host"
port "http" {
to = 5000
}
}
service {
name = "minihass"
port = "http"
tags = [
"traefik.http.routers.minihasslan.rule=Host(`minihass.service.dc1.consul`)",
"traefik.http.routers.minihasswan.rule=Host(`mh.fbleagh.duckdns.org`)",
"traefik.http.routers.minihasswan.middlewares=dex@consulcatalog",
"traefik.http.routers.minihasswan.tls=true",
]
check {
type = "http"
path = "/health"
interval = "30s"
timeout = "7s"
}
}
task "app" {
driver = "docker"
config {
image = "ghcr.io/sstent/minihass"
ports = ["http"]
}
env {
CONSUL_HOST = "consul.service.dc1.consul"
CONSUL_PORT = "8500"
}
resources {
cpu = 500
memory = 256
}
}
}
}

nomad_backup/navidrome.hcl

@@ -0,0 +1,117 @@
job "navidrome" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
group "navidrome" {
count = 1
# Prestart restore task
task "restore" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "litestream/litestream:latest"
args = [
"restore",
"-if-replica-exists",
"-if-db-not-exists",
"-o", "/alloc/tmp/navidrome.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/navidrome.db"
]
volumes = [
"/mnt/configs/navidrome:/data"
]
}
}
# Main Navidrome task
task "navidrome" {
driver = "docker"
config {
image = "ghcr.io/navidrome/navidrome:latest"
memory_hard_limit = "2048"
ports = ["http"]
volumes = [
"/mnt/Public/Downloads/Clean_Music:/music/CleanMusic:ro",
"/mnt/Public/Downloads/news/slskd/downloads:/music/slskd:ro",
"/mnt/Public/Downloads/incoming_music:/music/incomingmusic:ro",
"/mnt/configs/navidrome:/data"
]
}
env {
ND_DATAFOLDER = "/alloc/tmp/"
ND_CACHEFOLDER = "./data/cache"
ND_SCANSCHEDULE = "32 8-20 * * *"
ND_LOGLEVEL = "debug"
ND_REVERSEPROXYWHITELIST = "0.0.0.0/0"
ND_REVERSEPROXYUSERHEADER = "X-Forwarded-User"
ND_SCANNER_GROUPALBUMRELEASES = "False"
ND_BACKUP_PATH = "/data"
ND_BACKUP_SCHEDULE = "0 0 * * *"
ND_BACKUP_COUNT = "7"
}
resources {
cpu = 100
memory = 128
}
service {
name = "navidrome"
tags = [
"navidrome",
"web",
"urlprefix-/navidrome",
"tools",
"traefik.http.routers.navidromelan.rule=Host(`navidrome.service.dc1.consul`)",
"traefik.http.routers.navidromewan.rule=Host(`m.fbleagh.duckdns.org`)",
"traefik.http.routers.navidromewan.middlewares=dex@consulcatalog",
"traefik.http.routers.navidromewan.tls=true",
]
port = "http"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
# Litestream sidecar for continuous replication
task "litestream" {
driver = "docker"
lifecycle {
hook = "poststart" # runs after main task starts
sidecar = true
}
config {
image = "litestream/litestream:latest"
args = [
"replicate",
"/alloc/tmp/navidrome.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/navidrome.db"
]
volumes = [
"/mnt/configs/navidrome:/data"
]
}
}
network {
port "http" {
static = 4533
to = 4533
}
}
}
}

nomad_backup/nginx.hcl

@@ -0,0 +1,98 @@
job "nginx" {
datacenters = ["dc1"]
type = "service"
group "nginx" {
count = 1
task "nginx" {
driver = "docker"
config {
image = "nginx"
ports = ["http"]
volumes = [
"custom/default.conf:/etc/nginx/conf.d/default.conf",
]
}
template {
data = <<EOH
server {
listen 8080;
server_name nginx.service.dc1.consul;
location / {
root /local/data;
}
}
EOH
destination = "custom/default.conf"
}
template {
data = <<EOH
<p>Local Services - HTTP/HTTPS</p>
<table style="width:100%">
<tr>
<th>Service Name</th>
<th>Traefik</th>
<th>Direct</th>
</tr>
{{range services}}
{{range $i, $s :=service .Name}}
{{ if eq $i 0 }}
<tr>
<td>{{.Name}}</td>
<td><a href="{{ if eq .Name "unifi" }}https{{else}}http{{end}}://{{.Name}}.service.dc1.consul" target="_blank">{{.Name}}.service.dc1.consul</a></td>
<td><a href="{{ if eq .Name "unifi" }}https{{else}}http{{end}}://{{.Name}}.service.dc1.consul:{{.Port}}" target="_blank">{{ if eq .Name "unifi" }}https{{else}}http{{end}}://{{.Name}}.service.dc1.consul:{{.Port}}</a></td>
</tr>
{{end}}
{{end}}
{{end}}
</table>
<p>
Node Environment Information: <br />
node_id: {{ env "node.unique.id" }} <br/>
datacenter: {{ env "NOMAD_DC" }}
</p>
EOH
destination = "local/data/index.html"
change_mode = "noop"
}
resources {
cpu = 100 # 100 MHz
memory = 64 # 64 MB
}
service {
name = "nginx"
tags = ["nginx", "web", "urlprefix-/nginx", "backend",
"traefik.http.routers.nginxlan.rule=Host(`nginx.service.dc1.consul`)",
"traefik.http.routers.nginxwan.rule=Host(`nginx.service.dc1.fbleagh.duckdns.org`)",
"traefik.http.routers.nginxwan.tls=true",
]
port = "http"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
network {
port "http" {
to = 8080
}
// port "https" {
// to = 443
// }
}
}
}

nomad_backup/nzbget.hcl

@@ -0,0 +1,305 @@
job "nzbget" {
# region = "global"
datacenters = ["dc1"]
type = "service"
# priority = 50
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
update {
# Stagger updates every 60 seconds
stagger = "90s"
max_parallel = 1
healthy_deadline = "5m"
}
group "nzbget" {
count = 1
restart {
attempts = 8
interval = "20m"
delay = "10s"
mode = "delay"
}
// task "init-trigger" {
// driver = "docker"
// lifecycle {
// hook = "prestart"
// }
// config {
// image = "curlimages/curl"
// args = ["--request", "PUT", "--data", "${NOMAD_ALLOC_ID}", "http://${attr.unique.network.ip-address}:8500/v1/kv/${NOMAD_GROUP_NAME}"]
// }
// resources {
// cpu = 20 # 500 MHz
// memory = 20 # 128MB
// }
// }
task "ovpn-client" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
// image = "registry.service.dc1.consul:5000/openpyn:latest"
image = "qmcgaw/gluetun"
memory_hard_limit = "1024"
ports = [
"shadowsocks",
"nzbget",
"http_proxy",
"http_admin",
"sabnzb_admin",
"socks",
]
cap_add = [
"NET_ADMIN",
"NET_BIND_SERVICE",
]
#network_mode = "host"
#network_mode = "vpn"
volumes = [
"/etc/localtime:/etc/localtime",
]
devices = [
{
host_path = "/dev/net/tun"
container_path = "/dev/net/tun"
},
]
}
env {
// VPNFLAGS = "us --max-load 70 --top-servers 10 --pings 5"
// VPNFLAGS = "nl --max-load 70 --top-servers 10 --pings 5"
// VPNSP = "nordvpn"
// OPENVPN_USER = "yvPLaZ3xkXtnJKsyXDNQf9Ft"
// OPENVPN_PASSWORD = "SW8XvhGkSVuQitjuFrbH9WPA"
// REGION = "Netherlands"
##Mullvad
VPNSP = "mullvad"
VPN_TYPE = "wireguard"
COUNTRY = "Canada"
CITY = "Toronto"
FIREWALL_VPN_INPUT_PORTS = "56987"
WIREGUARD_PRIVATE_KEY = "2FHwQX1jxk+qeMmXUtSGRc2kKF1WHeSCyIgHNW+7akA=" #ActiveLynx
WIREGUARD_ADDRESS = "10.66.246.4/32"
HTTPPROXY = "on"
UPDATER_PERIOD= "24h"
SHADOWSOCKS_PASSWORD = "farscape5"
SHADOWSOCKS = "off"
DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
DOT = "off"
}
service {
name = "${TASKGROUP}-admin"
tags = ["global", "ovpn-openpyn"]
port = "http_admin"
}
service {
name = "${TASKGROUP}"
tags = ["global", "ovpn-openpyn"]
port = "shadowsocks"
}
service {
name = "nzbget"
tags = ["global", "ovpn-openpyn"]
port = "nzbget"
}
service {
name = "sabnzb"
tags = ["global", "ovpn-openpyn", "enable_gocast",
"gocast_vip=192.168.1.247/32",
"gocast_nat=tcp:8080:8080",
"gocast_nat=udp:8080:8080",]
port = "sabnzb_admin"
}
service {
name = "socks-nord"
tags = ["global", "ovpn-openpyn", "enable_gocast",
"gocast_vip=192.168.1.243/32",
"gocast_nat=tcp:1080:1080",
"gocast_nat=udp:1080:1080",]
port = "socks"
}
resources {
cpu = 100 # 100 MHz
memory = 100 # 100 MB
}
} #task ovpn
task "dante" {
driver = "docker"
config {
image = "serjs/go-socks5-proxy"
// image = "ghcr.io/sstent/dante:latest"
network_mode = "container:ovpn-client-${NOMAD_ALLOC_ID}"
memory_hard_limit = 256
devices = [
{
host_path = "/dev/net/tun"
container_path = "/dev/net/tun"
},
]
}
env {
REQUIRE_AUTH = "false"
}
resources {
cpu = 64 # 64 MHz
memory = 128 # 128MB
}
}
######################################################################
######################################################################
######################################################################
task "init" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "ghcr.io/sstent/rsync:v0.3.5"
memory_hard_limit = "2048"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz", "--exclude=Backups", "/configbackup/", "/config/", "--delete-before"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
} #end init task
task "finalsync" {
driver = "docker"
lifecycle {
hook = "poststop"
}
config {
// image = "pebalk/rsync"
image = "ghcr.io/sstent/rsync:v0.3.5"
memory_hard_limit = "2048"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz", "/config/", "/configbackup/"]
}
resources {
cpu = 20 # 20 MHz
memory = 20 # 20 MB
}
} #end finalsync task
// task "sync" {
// driver = "docker"
// lifecycle {
// hook = "poststart"
// sidecar = true
// }
// config {
// image = "ghcr.io/sstent/rsync:v0.3.5"
// memory_hard_limit = "2048"
// volumes = [
// "/mnt/configs/:/configs",
// "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
// ]
// args = ["client"]
// }
// resources {
// cpu = 20 # 500 MHz
// memory = 20 # 128MB
// }
// env {
// CRON_TASK_1 = "*/20 8-20 * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;"
// }
// } #end sync task
######################################################################
######################################################################
######################################################################
network {
port "shadowsocks" {
static = "8338"
to = "8388"
}
port "http_proxy" {
static = "8888"
to = "8888"
}
port "http_admin" {
static = "8000"
to = "8000"
}
port "sabnzb_admin" {
static = "8080"
to = "8080"
}
port "socks" {
static = "1080"
to = "1080"
}
port "nzbget" {
static = "6789"
to = "6789"
}
}
}
}


@@ -0,0 +1,116 @@
job "postgres-15" {
type = "system"
datacenters = ["dc1"]
group "group" {
count = 1
network {
port api {
static = 8008
to = 8008
}
port pg {
static = 5432
to = 5432
}
}
task "db" {
driver = "docker"
template {
data = <<EOL
scope: postgres
name: pg-{{env "node.unique.name"}}
namespace: /nomad
restapi:
listen: 0.0.0.0:8008
connect_address: {{env "attr.unique.network.ip-address"}}:8008
consul:
scheme: http
host: {{env "attr.unique.network.ip-address"}}:8500
register_service: True
bootstrap:
dcs:
ttl: 30
loop_wait: 10
retry_timeout: 10
maximum_lag_on_failover: 1048576
postgresql:
use_pg_rewind: true
use_slots: true
parameters:
timezone: 'UTC'
max_connections: 100
shared_preload_libraries: pg_stat_statements
shared_buffers: 64MB
work_mem: 16MB
effective_cache_size: 512MB
tcp_keepalives_idle: 300
timescaledb.telemetry_level: off
method: local
local:
command: /usr/local/bin/docker-initdb.sh
keep_existing_recovery_conf: True
initdb:
- encoding: UTF8
pg_hba:
- host all postgres all md5
- host replication repl all md5
- host all all all md5
users:
postgres:
password: postgres
options:
- createrole
- createdb
repl:
password: repl
options:
- replication
postgresql:
listen: 0.0.0.0:5432
connect_address: {{env "attr.unique.network.ip-address"}}:5432
use_unix_socket: true
data_dir: /store/data
authentication:
replication:
username: repl
password: repl
superuser:
username: postgres
password: postgres
EOL
destination = "/secrets/patroni.yml"
}
config {
image = "ghcr.io/sstent/nomad-pgsql-patroni:latest"
volumes = [
"/mnt/configs/postgres:/store",
]
ports = ["api", "pg"]
}
env {
PGDATA="/store/data"
// POSTGRES_USER="root"
// POSTGRES_PASSWORD="rootpassword"
}
resources {
memory = 1024
}
}
}
}
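# The gitea, miniflux, immich and radarr jobs in this backup point at
# master.postgres.service.dc1.consul and expect their databases to exist
# already. A hedged example of creating one against the Patroni leader
# registered in Consul (credentials as configured above):
#   psql -h master.postgres.service.dc1.consul -U postgres -c 'CREATE DATABASE gitea;'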

nomad_backup/prometheus.hcl

@@ -0,0 +1,195 @@
job "prometheus" {
# region = "global"
datacenters = ["dc1"]
type = "service"
# priority = 50
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
# constraint {
# attribute = "${attr.unique.hostname}"
# operator = "regexp"
# value = "pi.*"
# }
update {
# Stagger updates every 60 seconds
stagger = "60s"
max_parallel = 1
}
group "prometheus" {
count = 2
restart {
attempts = 2
interval = "1m"
delay = "10s"
mode = "delay"
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus"
args = ["--web.enable-admin-api","--config.file=/etc/prometheus/prometheus.yml"]
ports = ["http"]
memory_hard_limit = "512"
volumes = [
"local/prometheus.yml:/etc/prometheus/prometheus.yml",
"local/alerts.yml:/etc/prometheus/alerts.yml",
]
// "/mnt/gv0/prom_data:/data",
// "/etc/localtime:/etc/localtime",
logging {
type = "json-file"
}
}
service {
name = "${TASKGROUP}"
tags = ["global", "prometheus"]
port = "http"
check {
name = "alive"
type = "http"
interval = "10s"
timeout = "120s"
path = "/status"
port = "http"
}
}
template {
change_mode = "signal"
change_signal = "SIGHUP"
data = "{{ key \"prometheus_yml\" }}"
destination = "local/prometheus.yml"
}
template {
change_mode = "restart"
destination = "local/alerts.yml"
data = "{{ key \"alerts\" }}"
}
resources {
cpu = 500 # 500 MHz
memory = 48 # 48 MB
}
# Specify configuration related to log rotation
logs {
max_files = 10
max_file_size = 15
}
kill_timeout = "10s"
} ## end prometheus
task "alertmanager" {
driver = "docker"
config {
image = "prom/alertmanager"
ports = ["alerthttp"]
// volumes = [
// "local/alertmanager.yml:/etc/prometheus/prometheus.yml",
// ]
args = ["--config.file=/local/alertmanager.yml"]
// "/mnt/gv0/prom_data:/data",
// "/etc/localtime:/etc/localtime",
logging {
type = "json-file"
}
}
service {
name = "${TASK}"
tags = ["global", "prometheus"]
port = "alerthttp"
check {
name = "alive"
type = "http"
interval = "60s"
timeout = "120s"
path = "/status"
port = "http"
}
}
template {
data = <<EOH
global:
receivers:
- name: default-receiver
- name: gotify-webhook
webhook_configs:
- url: "http://prometheus.service.dc1.consul:9094/gotify_webhook"
route:
group_wait: 10s
group_interval: 5m
receiver: gotify-webhook
repeat_interval: 3h
EOH
destination = "local/alertmanager.yml"
}
resources {
cpu = 128 # 128 MHz
memory = 48 # 48 MB
}
# Specify configuration related to log rotation
logs {
max_files = 10
max_file_size = 15
}
kill_timeout = "10s"
} ## end alertmanager
task "gotifybridge" {
driver = "docker"
config {
image = "ghcr.io/druggeri/alertmanager_gotify_bridge"
ports = ["gotifybridge"]
args = ["--debug"]
}
env {
GOTIFY_TOKEN="AQ7l7NVgtylam86"
GOTIFY_ENDPOINT="http://gotify.service.dc1.consul/message"
}
}
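# Quick end-to-end check of the alerting path (alertmanager -> bridge on :9094
# -> Gotify); assumes Gotify's message API accepts the application token as a
# query parameter:
#   curl -X POST "http://gotify.service.dc1.consul/message?token=AQ7l7NVgtylam86" \
#     -F "title=test" -F "message=alert pipeline check"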
network {
port "http" {
static = 9090
to = 9090
}
port "alerthttp" {
static = 9093
to = 9093
}
port "gotifybridge" {
static = 9094
to = 8080
}
}
}
}

nomad_backup/prowlarr.hcl

@@ -0,0 +1,138 @@
# There can only be a single job definition per file.
# Create a job with ID and Name 'prowlarr'
job "prowlarr" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
update {
stagger = "10s"
max_parallel = 1
}
group "prowlarr" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "10s"
mode = "fail"
}
task "flaresolver" {
driver = "docker"
config {
image = "ghcr.io/flaresolverr/flaresolverr:latest"
ports = ["http_flare"]
dns_servers = ["192.168.1.1", "1.1.1.1"]
memory_hard_limit = "2048"
force_pull = false
}
service {
name = "${TASKGROUP}"
tags = ["prowlarr_pg", "tools"]
port = "http_flare"
}
}
task "prowlarr" {
driver = "docker"
config {
image = "ghcr.io/linuxserver/prowlarr:develop"
ports = ["http"]
dns_servers = ["192.168.1.1", "1.1.1.1"]
memory_hard_limit = "2048"
volumes = [
"/mnt/Public/configs/prowlarr_pg:/config",
]
force_pull = false
}
service {
name = "${TASKGROUP}"
tags = ["prowlarr_pg", "tools"]
port = "http"
// check {
// type = "script"
// name = "check_up"
// command = "/local/healthcheck.sh"
// interval = "60s"
// timeout = "5s"
// check_restart {
// limit = 3
// grace = "90s"
// ignore_warnings = false
// }
// // export API=$(grep -Po '<ApiKey>\K.*?(?=<.*?>)' /config/config.xml)
// // curl -f "http://localhost:9696/api/v1/system/status?apikey=$API"
// }
}
template {
data = <<EOH
#!/bin/bash
/usr/bin/curl -f "http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '<ApiKey>\K.*?(?=<.*?>)' /config/config.xml)"
EOH
destination = "local/healthcheck.sh"
perms = "0755"
}
env {
// SHARE = "Public;/mount/Public;yes;no;yes;all;none;;Public"
# GLOBAL = "socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536"
# PERMISSIONS = "true"
# WORKGROUP = "WORKGROUP"
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
}
# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256 MB
}
# Specify configuration related to log rotation
logs {
max_files = 10
max_file_size = 15
}
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
kill_timeout = "10s"
} #End main task
network {
// mbits = 100
port "http" {
static = 9696
to = 9696
}
port "http_flare" {
static = 8191
to = 8191
}
}
}
}


@@ -0,0 +1,59 @@
job "qautomate" {
periodic {
cron = "*/30 * * * *"
prohibit_overlap = true
}
datacenters = ["dc1"]
type = "batch"
group "app" {
count = 1
task "setup" {
driver = "docker"
lifecycle {
hook = "prestart"
}
config {
image = "ghcr.io/sstent/ubuntu-python3"
// command = "/bin/bash"
args = ["-v","-c", "/local/prestart.sh"]
}
template {
data = <<EOH
#!/bin/bash
rm /usr/lib/python3.11/EXTERNALLY-MANAGED
pip3 install qbittorrent-api --upgrade
python3 /local/torrent_tagger.py
python3 /local/torrent_tidy.py
EOH
perms = "0777"
destination = "local/prestart.sh"
}
template {
change_mode = "restart"
data = "{{ key \"functions/torrent_tagger\" }}"
destination = "local/torrent_tagger.py"
}
template {
change_mode = "restart"
data = "{{ key \"functions/torrent_tidy\" }}"
destination = "local/torrent_tidy.py"
}
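# The tagger/tidy scripts live in Consul KV, not in this repo. A hypothetical
# minimal sketch of what functions/torrent_tagger might look like with the
# qbittorrent-api package installed above (host and credentials are
# assumptions, not values from this backup):
#   import qbittorrentapi
#   qbt = qbittorrentapi.Client(host="qb.service.dc1.consul", port=8080,
#                               username="admin", password="adminadmin")
#   qbt.auth_log_in()
#   for t in qbt.torrents_info():
#       if t.category and t.category not in t.tags:
#           qbt.torrents_add_tags(tags=t.category, torrent_hashes=t.hash)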
}
}
}


@@ -0,0 +1,351 @@
job "qbittorrent" {
# region = "global"
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "amd64"
}
update {
# Stagger updates every 60 seconds
stagger = "90s"
max_parallel = 1
healthy_deadline = "2m"
health_check = "task_states"
}
group "qbittorrent" {
count = 1
restart {
attempts = 8
interval = "20m"
delay = "10s"
mode = "delay"
}
task "qbittorrent-vpn" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
// image = "registry.service.dc1.consul:5000/openpyn:latest"
// image = "qmcgaw/gluetun"
image = "qmcgaw/gluetun"
memory_hard_limit = "1024"
ports = [
"shadowsocks",
"http_proxy",
"http_admin",
"qbittorrent_51413",
"qbittorrent_80",
"socks"
]
cap_add = [
"NET_ADMIN",
"NET_BIND_SERVICE",
]
#network_mode = "host"
#network_mode = "vpn"
volumes = [
"/etc/localtime:/etc/localtime",
"/mnt/syncthing/mullvad:/vpn",
"local/gluetun.toml:/gluetun/config.toml"
]
devices = [
{
host_path = "/dev/net/tun"
container_path = "/dev/net/tun"
},
]
}
template {
data = <<EOH
[[roles]]
name = "qbittorrent"
# Define a list of routes with the syntax "Http-Method /path"
routes = ["GET /v1/openvpn/status", "PUT /v1/openvpn/status", "PUT /v1/openvpn/status", "GET /v1/openvpn/portforwarded", "GET /v1/openvpn/settings", "GET /v1/dns/status", "PUT /v1/dns/status", "PUT /v1/dns/status", "GET /v1/updater/status", "PUT /v1/updater/status", "PUT /v1/updater/status", "GET /v1/publicip/ip", "GET /v1/version", "GET /v1/vpn/status", "PUT /v1/vpn/status", "GET /v1/vpn/settings", "PUT /v1/vpn/settings"]
auth = "none"
EOH
destination = "custom/auth.toml"
}
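# With auth = "none" on those routes, the gluetun control server (published on
# the http_admin port below) can be queried directly, e.g.:
#   curl http://localhost:8000/v1/publicip/ip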
env {
VPN_SERVICE_PROVIDER="airvpn"
VPN_TYPE="wireguard"
WIREGUARD_PRIVATE_KEY="EIos1A9eGCIoCHr02aOsEu8S4C0gqhNLIYF/vMykEV0="
WIREGUARD_PRESHARED_KEY="RzPcraPA24hLFxGiB7z5JMWrtI+JBbEzvmeiEX36XWo="
WIREGUARD_ADDRESSES="10.161.31.240/32"
SERVER_COUNTRIES="Canada"
SERVER_CITIES="Vancouver,Montreal,Toronto"
FIREWALL_VPN_INPUT_PORTS = "61944,53304"
HEALTH_TARGET_ADDRESS="1.1.1.1"
HEALTH_SUCCESS_WAIT_DURATION="30s"
HEALTH_VPN_DURATION_INITIAL="3600s"
HEALTH_VPN_DURATION_ADDITION="600s"
HTTP_CONTROL_SERVER_AUTH_CONFIG_FILEPATH="/custom/auth.toml"
// HEALTH_TARGET_ADDRESS="cloudflare.com:443"
##Mullvad
#VPNSP = "mullvad"
#VPN_TYPE = "wireguard"
#COUNTRY = "Canada"
#CITY = "Toronto"
#FIREWALL_VPN_INPUT_PORTS = "56987"
#WIREGUARD_PRIVATE_KEY = "iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8="
#WIREGUARD_ADDRESS = "10.64.141.217/32"
HTTPPROXY = "on"
SHADOWSOCKS_PASSWORD = "farscape5"
SHADOWSOCKS = "on"
DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
DOT = "off"
WEBUI_PORT=8080
}
service {
name = "qbittorrent"
tags = ["global", "tools"]
port = "qbittorrent_80"
}
service {
name = "${TASKGROUP}-admin"
tags = ["global"]
port = "http_admin"
}
service {
name = "sp"
tags = ["global", "ovpn-openpyn",
"enable_gocast",
"gocast_vip=192.168.1.242/32",
"gocast_nat=tcp:1080:1080",
"gocast_nat=udp:1080:1080",
]
port = "socks"
}
resources {
cpu = 100 # 100 MHz
memory = 250 # 250 MB
}
} # end qbittorrent-vpn task
task "dante" {
driver = "docker"
config {
image = "serjs/go-socks5-proxy"
// image = "ghcr.io/sstent/dante-wg:latest"
network_mode = "container:qbittorrent-vpn-${NOMAD_ALLOC_ID}"
memory_hard_limit = 256
devices = [
{
host_path = "/dev/net/tun"
container_path = "/dev/net/tun"
},
]
}
env {REQUIRE_AUTH="false"}
resources {
cpu = 64 # 64 MHz
memory = 128 # 128MB
}
} #end dante
# task "init" {
# driver = "docker"
# lifecycle {
# hook = "prestart"
# sidecar = false
# }
# config {
# memory_hard_limit = "2048"
# image = "ghcr.io/sstent/rsync"
# volumes = [
# "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
# "/mnt/Public/config/locks:/locks"
# ]
# args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "/configbackup/", "/config/", "--delete-before"]
# }
# resources {
# cpu = 20 # 500 MHz
# memory = 20 # 128MB
# }
# }
# task "finalsync" {
# driver = "docker"
# lifecycle {
# hook = "poststop"
# }
# config {
# memory_hard_limit = "2048"
# image = "ghcr.io/sstent/rsync"
# volumes = [
# "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
# "/mnt/Public/config/locks:/locks"
# ]
# args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "--delete", "/config/", "/configbackup/"]
# }
# resources {
# cpu = 20 # 500 MHz
# memory = 128 # 128MB
# }
# }
# task "sync" {
# driver = "docker"
# lifecycle {
# hook = "poststart"
# sidecar = true
# }
# config {
# memory_hard_limit = "2048"
# image = "ghcr.io/sstent/rsync"
# volumes = [
# "/mnt/configs/:/configs",
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
# ]
# args = ["client"]
# }
# env {
# CRON_TASK_1 = "25 8-20 * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --delete /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;"
# }
# resources {
# cpu = 20 # 500 MHz
# memory = 20 # 128MB
# }
# } #end sync
task "qbittorrent" {
driver = "docker"
// "/mnt/Public/config/qbittorrent:/config",
config {
image = "linuxserver/qbittorrent:5.1.0"
network_mode = "container:qbittorrent-vpn-${NOMAD_ALLOC_ID}"
memory_hard_limit = 2048
#cpuset_cpus = "4-7"
volumes = [
"/mnt/Public/Downloads/news/qbittorrent:/downloads/",
"/mnt/configs/qbittorrent:/config",
"/mnt/Archive/seeding:/archiveseeding",
"/mnt/odroid5:/odroid5",
]
// ulimit {
// nproc = "4242"
// nofile = "2048:4096"
// }
sysctl = {
"net.core.somaxconn" = "4096"
}
}
env {
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
WEBUI_PORT=8080
}
service {
name = "qb"
tags = ["global", "ovpn-openpyn",
"enable_gocast",
"gocast_vip=192.168.1.244/32",
"gocast_nat=tcp:8080:8080",
"gocast_nat=udp:8080:8080"
]
port = "qbittorrent_80"
}
template {
data = <<EOH
#!/bin/bash
curl -sL http://127.0.0.1:8080/api/v2/transfer/info | grep connected || exit;
EOH
destination = "local/qbithealth.sh"
change_mode = "noop"
perms = "0777"
}
resources {
cpu = 128 # 128 MHz
memory = 256 # 256 MB
}
} # end qbittorrent task
network {
// mode = "bridge"
port "qbittorrent_51413" {
static = 6881
to = 6881
}
port "shadowsocks" {
static = "8388"
to = "8388"
}
port "socks" {
static = "1080"
to = "1080"
}
port "http_proxy" {
static = "8888"
to = "8888"
}
port "http_admin" {
static = "8000"
to = "8000"
}
port "qbittorrent_80" {
static = 8080
to = 8080
}
}
} # end group "qbittorrent"
}
// }

nomad_backup/radarr.hcl

@@ -0,0 +1,156 @@
# There can only be a single job definition per file.
# Create a job with ID and Name 'radarr'
job "radarr" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "regexp"
value = "odroid.*"
}
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "arm64"
}
update {
stagger = "10s"
max_parallel = 1
}
group "radarr" {
count = 1
restart {
attempts = 5
interval = "1m"
delay = "10s"
mode = "delay"
}
task "radarr" {
driver = "docker"
config {
// image = "linuxserver/radarr:nightly"
image = "linuxserver/radarr:latest"
dns_servers = ["192.168.4.250", "192.168.4.1"]
ports = ["http"]
memory_hard_limit = "2048"
// cpuset_cpus = "4-7"
volumes = [
"/mnt/Public/Downloads/news:/downloads",
"/mnt/Public/Downloads/movies:/movies",
// "/mnt/configs/radarr:/config",
"/mnt/Archive/Movies:/archive",
"/mnt/Public/configs/radarr_pg:/config",
// "local/config.xml:/config/config.xml"
]
}
template {
data = "---\nkey: {{ key \"ovpn-client\" }}"
destination = "local/file.yml"
change_mode = "restart"
}
template {
data = <<EOH
<Config>
<LogLevel>Info</LogLevel>
<Port>7878</Port>
<UrlBase/>
<BindAddress>*</BindAddress>
<SslPort>9898</SslPort>
<EnableSsl>False</EnableSsl>
<ApiKey>237c27f22504440385e5ee295fd65eb5</ApiKey>
<AuthenticationMethod>Forms</AuthenticationMethod>
<Branch>master</Branch>
<SslCertPath/>
<LaunchBrowser>True</LaunchBrowser>
<SslCertPassword/>
<UpdateMechanism>Docker</UpdateMechanism>
<AnalyticsEnabled>False</AnalyticsEnabled>
<InstanceName>Radarr</InstanceName>
<AuthenticationRequired>DisabledForLocalAddresses</AuthenticationRequired>
<PostgresUser>postgres</PostgresUser>
<PostgresPassword>postgres</PostgresPassword>
<PostgresPort>5432</PostgresPort>
<PostgresHost>master.postgres.service.dc1.consul</PostgresHost>
</Config>
EOH
destination = "local/config.xml"
perms = "0755"
}
service {
name = "${TASKGROUP}"
tags = ["radarr", "tools"]
port = "http"
check {
type = "script"
name = "check_up"
command = "/local/healthcheck.sh"
interval = "5m"
timeout = "25s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
template {
data = <<EOH
#!/bin/bash
/usr/bin/curl -f "http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Eo '<ApiKey>(.*)</ApiKey>' /config/config.xml | sed -e 's/<[^>]*>//g')"
EOH
destination = "local/healthcheck.sh"
perms = "0755"
}
env {
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
}
resources {
cpu = 500 # 500 MHz
        memory = 256 # 256 MB
// network {
// port "radarr" {
// static = "7878"
// }
// }
}
logs {
max_files = 10
max_file_size = 15
}
kill_timeout = "10s"
} #Task
network {
// mode = "bridge"
port "http" {
static = 7878
to = 7878
}
}
} #Group
}

View File

@@ -0,0 +1,177 @@
job "seaweedfsmaster" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
operator = "distinct_hosts"
value = "true"
}
// constraint {
// attribute = "${attr.unique.hostname}"
// operator = "regexp"
// # We need static IPs for master servers
// # dc1-n1 - 172.21.100.51
// # dc1-n2 - 172.21.100.52
// # dc1-n3 - 172.21.100.53
// value = "^odroid5.node.dc1.consul|odroid6.node.dc1.consul|odroid7.node.dc1.consul|odroid8.node.dc1.consul$"
// }
update {
stagger = "10s"
max_parallel = 1
healthy_deadline = "5m"
}
group "seaweedfsmaster" {
count = 3
restart {
attempts = 6
interval = "1m"
delay = "10s"
mode = "delay"
}
task "seaweedfsadmin" {
driver = "docker"
config {
image = "chrislusf/seaweedfs"
memory_hard_limit = "2048"
entrypoint = ["/usr/bin/weed"]
args = [
"admin",
"-masters=${NOMAD_GROUP_NAME}0.service.dc1.consul:9333,${NOMAD_GROUP_NAME}1.service.dc1.consul:9333,${NOMAD_GROUP_NAME}2.service.dc1.consul:9333",
"-dataDir=/data",
]
volumes = [
"/mnt/Public/configs/seaweedfadmin:/data/",
]
ports = ["seaweedfs_admin"]
}
}
task "seaweedfsmaster" {
driver = "docker"
config {
image = "chrislusf/seaweedfs"
memory_hard_limit = "2048"
entrypoint = ["/usr/bin/weed"]
args = [
"server",
"-ip=${NOMAD_GROUP_NAME}${NOMAD_ALLOC_INDEX}.service.dc1.consul",
"-ip.bind=0.0.0.0",
"-master.port=9333",
"-master.defaultReplication=002",
"-master.peers=${NOMAD_GROUP_NAME}0.service.dc1.consul:9333,${NOMAD_GROUP_NAME}1.service.dc1.consul:9333,${NOMAD_GROUP_NAME}2.service.dc1.consul:9333",
"-dir=/data",
"-filer",
"-filer.port=8877",
"-filer.port.grpc=18877",
"-s3",
"-webdav",
"-volume=true",
"-volume.port=9444",
"-volume.port.grpc=19444",
"-volume.max=100"
]
volumes = [
"/mnt/configs/seaweedfs:/data/",
]
ports = ["seaweedfs", "seaweedfs_high", "seaweedfs_filer", "seaweed_s3", "seaweed_webdav", "seaweedfs_filer_high","seaweedfs_vol","seaweedfs_vol_high"]
}
env {
seaweedfs_ACCESS_KEY = "seaweedfs"
seaweedfs_SECRET_KEY = "seaweedfs123"
}
service {
name = "${NOMAD_GROUP_NAME}${NOMAD_ALLOC_INDEX}"
tags = ["global", "seaweedfs"]
port = "seaweedfs"
}
service {
name = "seaweedfiler"
tags = ["global", "seaweedfs"]
port = "seaweedfs_filer"
}
resources {
        cpu = 64 # 64 MHz
        memory = 64 # 64 MB
} #end resources
} #end task
network {
port "seaweedfs" {
static = 9333
to = 9333
}
port "seaweedfs_high" {
static = 19333
to = 19333
}
port "seaweedfs_filer" {
static = 8877
to = 8877
}
port "seaweedfs_filer_high" {
static = 18877
to = 18877
}
port "seaweed_s3" {
static = 8333
to = 8333
}
port "seaweed_webdav" {
static = 7333
to = 7333
}
port "seaweedfs_vol" {
static = "9444"
}
port "seaweedfs_vol_high" {
static = "19444"
}
port "seaweedfs_admin" {
static = "23646"
}
// port "s8080" {
// static = 8080
// to = 8080
// }
// port "s18080" {
// static = 18080
// to = 18080
// }
// port "seaweed_volume" {
// static = 9444
// to = 9444
// }
}
} # end group
} # end job

149
nomad_backup/slskd.hcl Normal file
View File

@@ -0,0 +1,149 @@
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
job "slskd" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "amd64"
}
update {
stagger = "10s"
max_parallel = 1
}
group "slskd" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "10s"
mode = "fail"
}
task "slskd-vpn" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = true
}
config {
// image = "registry.service.dc1.consul:5000/openpyn:latest"
// image = "qmcgaw/gluetun"
image = "qmcgaw/gluetun"
memory_hard_limit = "1024"
ports = ["http","https","guac"]
cap_add = [
"NET_ADMIN",
"NET_BIND_SERVICE",
]
#network_mode = "host"
#network_mode = "vpn"
volumes = [
"/etc/localtime:/etc/localtime",
"/mnt/syncthing/mullvad:/vpn",
]
devices = [
{
host_path = "/dev/net/tun"
container_path = "/dev/net/tun"
},
]
}
env {
VPN_SERVICE_PROVIDER="airvpn"
VPN_TYPE="wireguard"
WIREGUARD_PRIVATE_KEY="SPpgnVCsOnz+zzorzTiOmvDF8GElgp27dSIeYAq43Vk="
WIREGUARD_PRESHARED_KEY="TBb2gU8pSTSG6lT4QYr7pzNWN7wZEn0yeG4VDTtuy50="
WIREGUARD_ADDRESSES="10.186.222.247/32"
SERVER_COUNTRIES="Canada"
SERVER_CITIES="Montreal"
FIREWALL_VPN_INPUT_PORTS = "53304"
HEALTH_TARGET_ADDRESS="1.1.1.1"
HEALTH_SUCCESS_WAIT_DURATION="30s"
HEALTH_VPN_DURATION_INITIAL="3600s"
HEALTH_VPN_DURATION_ADDITION="600s"
// HEALTH_TARGET_ADDRESS="cloudflare.com:443"
##Mullvad
#VPNSP = "mullvad"
#VPN_TYPE = "wireguard"
#COUNTRY = "Canada"
#CITY = "Toronto"
#FIREWALL_VPN_INPUT_PORTS = "56987"
#WIREGUARD_PRIVATE_KEY = "iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8="
#WIREGUARD_ADDRESS = "10.64.141.217/32"
HTTPPROXY = "on"
SHADOWSOCKS_PASSWORD = "farscape5"
SHADOWSOCKS = "on"
DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
DOT = "off"
WEBUI_PORT=8080
}
resources {
        cpu = 100 # 100 MHz
        memory = 250 # 250 MB
}
    } #end task slskd-vpn
task "nicotine" {
driver = "docker"
config {
image = "ghcr.io/fletchto99/nicotine-plus-docker:latest"
memory_hard_limit = "1024"
network_mode = "container:slskd-vpn-${NOMAD_ALLOC_ID}"
volumes = [
"/mnt/Public/Downloads/news/slskd/config:/config",
"/mnt/Public/Downloads/news/slskd/shared:/shared",
"/mnt/Public/Downloads/news/slskd/downloads/:/downloads",
"/mnt/Public/Downloads/news/slskd/incomplete/:/incomplete",
]
}
env {
PUID=1000
PGID=1000
TZ="Etc/UTC"
LISTENING_PORT="53304"
}
}
network {
// mbits = 100
port "http" {
static = 5000
to = 5000
}
port "https" {
static = 5001
to = 5001
}
port "guac" {
static = 6080
to = 6080
}
}
}
}

158
nomad_backup/sonarr.hcl Normal file
View File

@@ -0,0 +1,158 @@
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
job "sonarr" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "amd64"
}
update {
stagger = "10s"
max_parallel = 1
}
group "sonarr" {
count = 1
restart {
attempts = 2
interval = "1m"
delay = "10s"
mode = "fail"
}
task "sonarr" {
      // driver = "raw_exec"
      // config {
      //   command = "docker"
      //   args = ["run",
      //     "--rm",
      //     "--name=sonarr",
      //     "-e", "PUID=1000",
      //     "-e", "PGID=1000",
      //     "-e", "TZ=EST5EDT",
      //     "-p", "8989:8989",
      //     "-v", "/mnt/syncthing/sonarrv3:/config",
      //     "-v", "/mnt/Public/Downloads/tv:/tv",
      //     "-v", "/mnt/Public/Downloads/news:/downloads",
      //     "--cpuset-cpus", "4-7",
      //     "linuxserver/sonarr:preview"]
      // }
driver = "docker"
config {
image = "linuxserver/sonarr:develop"
ports = ["http"]
// dns_servers = ["192.168.1.1", "1.1.1.1"]
memory_hard_limit = "2048"
// cpuset_cpus = "4-7"
volumes = [
"/mnt/Public/Downloads/news:/downloads",
"/mnt/Public/Downloads/tv:/tv",
// "/mnt/configs/sonarr:/config",
"/mnt/Public/configs/sonarr_pg:/config",
// "local/config.xml:/config/config.xml"
]
// "/mnt/gv0/sonarr:/config",
force_pull = false
}
service {
name = "${TASKGROUP}"
tags = ["sonarr", "tools"]
port = "http"
check {
type = "script"
name = "check_up"
command = "/local/healthcheck.sh"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
template {
data = <<EOH
#!/bin/bash
/usr/bin/curl -f "http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Eo '<ApiKey>(.*)</ApiKey>' /config/config.xml | sed -e 's/<[^>]*>//g')"
EOH
destination = "local/healthcheck.sh"
perms = "0755"
}
template {
data = "---\nkey: {{ key \"ovpn-client\" }}"
destination = "local/file.yml"
change_mode = "restart"
}
template {
data = <<EOH
<Config>
<LogLevel>Info</LogLevel>
<Port>8989</Port>
<UrlBase>/sonarr</UrlBase>
<BindAddress>*</BindAddress>
<SslPort>9898</SslPort>
<EnableSsl>False</EnableSsl>
<ApiKey>1632787062fb47a9a6eb4c88e32b3ff3</ApiKey>
<AuthenticationMethod>None</AuthenticationMethod>
<Branch>develop</Branch>
<LaunchBrowser>True</LaunchBrowser>
<SslCertHash/>
<UpdateMechanism>Docker</UpdateMechanism>
<AnalyticsEnabled>False</AnalyticsEnabled>
<UpdateScriptPath>/config/restart.sh</UpdateScriptPath>
<InstanceName>Sonarr</InstanceName>
<PostgresUser>postgres</PostgresUser>
<PostgresPassword>postgres</PostgresPassword>
<PostgresPort>5432</PostgresPort>
<PostgresHost>master.postgres.service.dc1.consul</PostgresHost>
</Config>
EOH
destination = "local/config.xml"
perms = "0755"
}
env {
// SHARE = "Public;/mount/Public;yes;no;yes;all;none;;Public"
# GLOBAL = "socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536"
# PERMISSIONS = "true"
# WORKGROUP = "WORKGROUP"
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
}
# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
        memory = 256 # 256 MB
}
# Specify configuration related to log rotation
logs {
max_files = 12
max_file_size = 15
}
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
kill_timeout = "10s"
} #End main task
network {
// mbits = 100
port "http" {
static = 8989
to = 8989
}
}
}
}

226
nomad_backup/traefik.hcl Normal file
View File

@@ -0,0 +1,226 @@
job "traefik" {
datacenters = ["dc1"]
type = "system"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
update {
stagger = "10s"
max_parallel = 1
healthy_deadline = "5m"
}
group "traefik" {
count = 1
restart {
attempts = 6
interval = "1m"
delay = "10s"
mode = "delay"
}
task "traefik" {
driver = "docker"
config {
image = "traefik:2.9"
// network_mode = "host"
args = [
// "--api.dashboard",
// "--providers.consulcatalog.defaultRule=Host(`{{ .Name }}.service.dc1.consul`)",
// "--providers.consulcatalog.endpoint.address=${attr.unique.network.ip-address}:8500",
// "--providers.consulcatalog.exposedbydefault=true",
// "--metrics=true",
// "--metrics.prometheus=true",
// "--metrics.prometheus.entryPoint=web",
// "--entryPoints.web.address=:80",
// "--entryPoints.websecure.address=:443",
// "--entryPoints.openvpn.address=:1194/udp",
"--configFile=/local/file.yml",
// "--certificatesresolvers.myresolver.acme.email=stuart.stent@gmail.com",
// "--certificatesresolvers.myresolver.acme.storage=/acmecert/acme.json",
// "--certificatesresolvers.myresolver.acme.tlschallenge=true",
// "--certificatesresolvers.myresolver-int.acme.email=stuart.stent@gmail.com",
// "--certificatesresolvers.myresolver-int.acme.storage=/acmecert/acme.json",
// "--certificatesresolvers.myresolver-int.acme.tlschallenge=true",
// "--certificatesresolvers.myresolver-int.acme.dnschallenge=true",
// "--certificatesresolvers.myresolver-int.acme.dnschallenge.provider=duckdns",
"--accesslog=true",
// "--serversTransport.insecureSkipVerify=true",
]
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/mnt/mnt/configs/letsencrypt:/acmecert/",
]
// dns_servers = ["192.168.4.1", "192.168.4.250"]
ports = ["traefik", "traefikhttps","traefikui"]
memory_hard_limit = 20480
}
env {
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
DUCKDNS_TOKEN="e4b5ca33-1f4d-494b-b06d-6dd4600df662"
}
template {
left_delimiter = "[["
right_delimiter = "]]"
data = <<EOH
serversTransport:
  insecureSkipVerify: true
entryPoints:
web:
address: :80
websecure:
address: :443
log:
level: INFO
accessLog:
fields:
names:
RequestPath: keep
filters:
retryAttempts: true
minDuration: "10ms"
metrics:
prometheus:
addRoutersLabels: true
addServicesLabels: true
api:
dashboard: true
insecure: true
providers:
consulCatalog:
exposedByDefault: true
refreshInterval: 30s
defaultRule: "Host(`{{ .Name }}.service.dc1.consul`)"
endpoint:
address: "[[env "attr.unique.network.ip-address"]]:8500"
file:
filename: /local/tls.yml
EOH
destination = "local/file.yml"
}
template {
data = <<EOH
tls:
certificates:
- certFile: /local/duckdns_fullchain.pem
keyFile: /local/duckdns_privkey.pem
- certFile: /local/dedyn_fullchain.pem
keyFile: /local/dedyn_privkey.pem
stores:
default:
defaultCertificate:
certFile: /local/duckdns_fullchain.pem
keyFile: /local/duckdns_privkey.pem
EOH
destination = "local/tls.yml"
}
// file:
// directory: /local/tls.yaml
template {
change_mode = "restart"
data = "{{ key \"letsconsul/*.fbleagh.duckdns.org/fullchain.cer\" }}"
destination = "local/duckdns_fullchain.pem"
perms = 0777
}
template {
change_mode = "noop"
data = "{{ key \"letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.key\" }}"
destination = "local/duckdns_privkey.pem"
perms = 0777
}
template {
change_mode = "restart"
data = "{{ key \"letsconsul/*.fbleagh.dedyn.io/fullchain.cer\" }}"
destination = "local/dedyn_fullchain.pem"
perms = 0777
}
template {
change_mode = "noop"
data = "{{ key \"letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.key\" }}"
destination = "local/dedyn_privkey.pem"
perms = 0777
}
service {
name = "${TASKGROUP}"
tags = [
"global",
"traefik",
"enable_gocast",
"gocast_vip=192.168.1.249/32",
"gocast_nat=tcp:443:443",
"gocast_nat=udp:443:443"]
port = "traefik"
}
service {
name = "${TASKGROUP}-ui"
tags = [
"global",
"traefik",
"traefik.http.routers.dashboard.rule=Host(`traefik-ui.service.dc1.consul`)",
"traefik.http.routers.dashboard.service=api@internal",
]
port = "traefik"
} #end service
resources {
        cpu = 256 # 256 MHz
        memory = 256 # 256 MB
} #end resources
} #end task
network {
port "traefik" {
static = 80
to = 80
}
port "traefikui" {
static = 8090
to = 8080
}
port "traefikhttps" {
static = 443
to = 443
}
}
} # end group
} # end job

107
nomad_backup/trilium.hcl Normal file
View File

@@ -0,0 +1,107 @@
job "trilium" {
datacenters = ["dc1"]
type = "service"
group "trilium" {
count = 1
# Prestart restore task
task "restore" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "litestream/litestream:latest"
args = [
"restore",
"-if-replica-exists",
"-if-db-not-exists",
"-o", "/alloc/tmp/trilium.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/trilium.db"
]
volumes = [
"/mnt/configs/trilium:/data"
]
}
}
# Main trilium task
task "trilium" {
driver = "docker"
config {
image = "triliumnext/trilium:stable"
force_pull = true # This ensures the image is always pulled
memory_hard_limit = "2048"
ports = ["http"]
volumes = [
"/mnt/configs/trilium:/home/node/trilium-data/data",
"/mnt/Public/configs/trilium/backup:/home/node/trilium-data/backup",
"/mnt/Public/configs/trilium/config:/home/node/trilium-data/config"
]
}
env {
TRILIUM_DATA_DIR="/home/node/trilium-data/data"
TRILIUM_BACKUP_DIR="/home/node/trilium-data/backup"
TRILIUM_CONFIG_INI_PATH= "/home/node/trilium-data/config/config.ini"
}
resources {
cpu = 100
memory = 128
}
service {
name = "trilium"
tags = [
"trilium",
"web",
"urlprefix-/trilium",
"tools",
"traefik.http.routers.triliumlan.rule=Host(`trilium.service.dc1.consul`)",
"traefik.http.routers.triliumwan.rule=Host(`tril.fbleagh.duckdns.org`)",
"traefik.http.routers.triliumwan.middlewares=dex@consulcatalog",
"traefik.http.routers.triliumwan.tls=true",
]
port = "http"
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
# Litestream sidecar for continuous replication
task "litestream" {
driver = "docker"
lifecycle {
hook = "poststart" # runs after main task starts
sidecar = true
}
config {
image = "litestream/litestream:latest"
args = [
"replicate",
"/alloc/tmp/trilium.db",
"sftp://root:odroid@192.168.4.63/mnt/Shares/litestream/trilium.db"
]
volumes = [
"/mnt/configs/trilium:/data"
]
}
}
network {
port "http" {
static = 6699
to = 8080
}
}
}
}

View File

@@ -0,0 +1,230 @@
job "vaultwarden" {
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "arm64"
}
group "vaultwarden" {
count = 1
task "vaultwarden" {
driver = "docker"
config {
# image = "vaultwarden/server:latest"
image = "vaultwarden/server:1.32.7"
memory_hard_limit = 2048
ports = ["http"]
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/data",
]
}
env {
// vaultwarden_SCAN_INTERVAL = "120"
// vaultwarden_MUSIC_PATH = "/music"
// vaultwarden_PODCAST_PATH = "/podcasts"
ADMIN_TOKEN = "VReYRX0RuSw3mxmGFG4+2ECY71l/wYmuD52NOWDur6e43z/inbUmJGUr5KU4wtjW"
ENABLE_DB_WAL = "false"
DATABASE_URL= "postgresql://postgres:postgres@master.postgres.service.dc1.consul:5432/vaultwarden"
}
resources {
cpu = 100 # 100 MHz
memory = 128 # 128 MB
}
service {
name = "vaultwarden"
tags = [
"vaultwarden",
"web",
"urlprefix-/vaultwarden",
"backend",
"traefik.http.routers.vaultwardenlan.rule=Host(`vaultwarden.service.dc1.consul`)",
"traefik.http.routers.vaultwardenwan.rule=Host(`vault.fbleagh.duckdns.org`)",
"traefik.http.routers.vaultwardenwan-admin.rule=(Host(`vault.fbleagh.duckdns.org`) && PathPrefix(`/admin/`))",
"traefik.http.routers.vaultwardenwan.tls=true",
// "traefik.http.routers.vaultwardenwan.tls.certresolver=myresolver-int",
"traefik.http.middlewares.vaultwardenwan-admin-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.4.0/22",
"enable_gocast",
"gocast_vip=192.168.1.246/32",
"gocast_nat=tcp:8081:8081",
"gocast_nat=udp:8081:8081"
]
// "traefik.http.middlewares.vaultwarden_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm",
// https://github.com/dani-garcia/vaultwarden/issues/676
// labels:
// - traefik.enable=true
// - traefik.docker.network=traefik
// # Define middleware of 'redirect-https', set scheme and set permanent redirect as true
// - traefik.http.middlewares.redirect-https.redirectScheme.scheme=https
// - traefik.http.middlewares.redirect-https.redirectScheme.permanent=true
// # Define service of 'bitwarden' for UI port when using entrypoint websecure (port 443)
// - traefik.http.services.bitwarden.loadbalancer.server.port=80
// - traefik.http.routers.bitwarden-https.rule=Host(`bitwarden.domain.tld`)
// - traefik.http.routers.bitwarden-https.entrypoints=websecure
// - traefik.http.routers.bitwarden-https.service=bitwarden
// # Define service of 'bitwarden' for UI port when using entrypoint web (port 80)
// - traefik.http.routers.bitwarden-http.rule=Host(`bitwarden.domain.tld`)
// - traefik.http.routers.bitwarden-http.entrypoints=web
// - traefik.http.routers.bitwarden-http.middlewares=redirect-https
// - traefik.http.routers.bitwarden-http.service=bitwarden
// # Define service of 'bitwarden-websocket' for socket port when using entrypoint websecure (port 443)
// - traefik.http.services.bitwarden-websocket.loadbalancer.server.port=3012
// - traefik.http.routers.bitwarden-websocket-https.rule=Host(`bitwarden.domain.tld`) && Path(`/notifications/hub`)
// - traefik.http.routers.bitwarden-websocket-https.entrypoints=websecure
// - traefik.http.routers.bitwarden-websocket-https.service=bitwarden-websocket
// # Define service of 'bitwarden-websocket' for socket port when using entrypoint web (port 80)
// - traefik.http.routers.bitwarden-websocket-http.rule=Host(`bitwarden.domain.tld`) && Path(`/notifications/hub`)
// - traefik.http.routers.bitwarden-websocket-http.entrypoints=web
// - traefik.http.routers.bitwarden-websocket-http.middlewares=redirect-https
// - traefik.http.routers.bitwarden-websocket-http.service=bitwarden-websocket
port = "http"
meta {
ALLOC = "${NOMAD_ALLOC_ID}"
}
check {
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
} #end vaultwarden
task "init" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "ghcr.io/sstent/rsync"
memory_hard_limit = 2048
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-avz", "--exclude='logs'", "--exclude='/run/*.sock'", "/configbackup/", "/config/", "--delete-before"]
}
resources {
        cpu = 20 # 20 MHz
        memory = 20 # 20 MB
}
} #end init task
task "finalsync" {
driver = "docker"
lifecycle {
hook = "poststop"
}
config {
// image = "pebalk/rsync"
image = "ghcr.io/sstent/rsync"
memory_hard_limit = 2048
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
"/mnt/Public/config/locks:/locks",
]
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "--exclude='logs'", "--exclude='/run/*.sock'", "/config/", "/configbackup/", "--remove-source-files"]
}
resources {
        cpu = 20 # 20 MHz
        memory = 20 # 20 MB
}
} #end finalsync task
task "sync" {
driver = "docker"
lifecycle {
hook = "poststart"
sidecar = true
}
config {
memory_hard_limit = "2048"
image = "ghcr.io/sstent/rsync"
volumes = [
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/config",
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
]
args = ["client"]
}
env {
        CRON_TASK_1 = "50 * * * * rsync -av --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /config/ /configbackup/;"
}
resources {
        cpu = 20 # 20 MHz
        memory = 20 # 20 MB
}
}
network {
// mbits = 100
port "http" {
static = 8081
to = 80
}
}
}
}
// Restore
// There is no automated restore process, to prevent accidental data loss. If you need to restore a backup, do it manually by following the steps below (assuming your backups are located at ./backup/ and your vaultwarden data is located at /var/lib/docker/volumes/vaultwarden/_data/).
// # Delete any existing sqlite3 files
// rm /var/lib/docker/volumes/vaultwarden/_data/db.sqlite3*
// # Copy the database to the vaultwarden folder
// cp ./backup/db.sqlite3 /var/lib/docker/volumes/vaultwarden/_data/db.sqlite3
// # Extract the additional folder from the archive
// tar -xzvf ./backup/data.tar.gz -C /var/lib/docker/volumes/vaultwarden/_data/
// ENV Description
// BACKUP_ADD_DATABASE 1 Set to true to include the database itself in the backup
// BACKUP_ADD_ATTACHMENTS 1 Set to true to include the attachments folder in the backup
// BACKUP_ADD_CONFIG_JSON 1 Set to true to include config.json in the backup
// BACKUP_ADD_ICON_CACHE 1 Set to true to include the icon cache folder in the backup
// BACKUP_ADD_RSA_KEY 1 Set to true to include the RSA keys in the backup
// BACKUP_ADD_SENDS 1 Set to true to include the sends folder in the backup
// BACKUP_DIR Sets the path of the backup folder inside the container
// BACKUP_DIR_PERMISSIONS Sets the permissions of the backup folder (CAUTION 2). Set to -1 to disable.
// CRONFILE Path to the cron file inside the container
// CRON_TIME Cronjob format "Minute Hour Day_of_month Month_of_year Day_of_week Year"
// DELETE_AFTER Delete old backups after X many days. Set to 0 to disable
// TIMESTAMP Set to true to append timestamp to the backup file
// GID Group ID to run the cron job with
// HEALTHCHECK_URL Set a healthcheck url like https://hc-ping.com/xyz
// LOG_LEVEL DEBUG, INFO, WARNING, ERROR, CRITICAL are supported
// LOG_DIR Path to the logfile folder inside the container
// LOG_DIR_PERMISSIONS Sets the permissions of the logfile folder. Set to -1 to disable.
// TZ Set the timezone inside the container 3
// UID User ID to run the cron job with
// VW_DATA_FOLDER 4 Set the location of the vaultwarden data folder inside the container
// VW_DATABASE_URL 4 Set the location of the vaultwarden database file inside the container
// VW_ATTACHMENTS_FOLDER 4 Set the location of the vaultwarden attachments folder inside the container
// VW_ICON_CACHE_FOLDER 4 Set the location of the vaultwarden icon cache folder inside the container
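//
// As a rough, hypothetical sketch only (not part of this job, and not settings taken
// from this cluster): the variables documented above could be wired into a backup
// sidecar task along these lines. The image name and every value shown here are
// placeholders for illustration.
//
// task "backup" {
//   driver = "docker"
//   config {
//     image = "example/vaultwarden-backup"   # placeholder image, assumption
//     volumes = [
//       "/mnt/configs/vaultwarden/data:/data",
//     ]
//   }
//   env {
//     BACKUP_ADD_DATABASE    = "true"        # include the database itself
//     BACKUP_ADD_ATTACHMENTS = "true"        # include the attachments folder
//     BACKUP_DIR             = "/backup"     # backup folder inside the container
//     CRON_TIME              = "0 3 * * * *" # six-field schedule, per the format note above
//     DELETE_AFTER           = "30"          # prune backups older than 30 days
//     TIMESTAMP              = "true"        # append a timestamp to each backup file
//   }
// }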

82
nomad_backup/wallabag.hcl Normal file
View File

@@ -0,0 +1,82 @@
job "wallabag" {
datacenters = ["dc1"]
group "wallabag" {
count = 1
task "wallabag" {
driver = "docker"
config {
image = "wallabag/wallabag:latest"
ports = ["http"]
memory_hard_limit = 2048
#health_check {
# test = ["CMD", "curl", "-f", "http://localhost:8080/health"]
# interval = "10s"
# timeout = "5s"
#}
}
env {
POSTGRES_PASSWORD="postgres"
POSTGRES_USER="postgres"
SYMFONY__ENV__DATABASE_DRIVER="pdo_pgsql"
SYMFONY__ENV__DATABASE_HOST="master.postgres.service.dc1.consul"
SYMFONY__ENV__DATABASE_NAME="wallabag"
SYMFONY__ENV__DATABASE_USER="wallabag"
SYMFONY__ENV__DATABASE_PASSWORD="wallabag"
SYMFONY__ENV__DOMAIN_NAME="https://wallabag.fbleagh.duckdns.org"
SYMFONY__ENV__DATABASE_PORT="5432"
POPULATE_DATABASE="False"
SYMFONY__ENV__MAILER_DSN= "gmail+smtp://stuart.stent@gmail.com:lsrhrzfembksmvgc@default"
SYMFONY__ENV__FROM_EMAIL="wallabag@wallabag.fbleagh.duckdns.org"
}
service {
name = "${TASKGROUP}"
tags = ["wallabag", "tools",
"traefik.http.routers.wallabaglan.rule=Host(`wallabag.service.dc1.consul`)",
"traefik.http.routers.wallabagwan.rule=Host(`wallabag.fbleagh.duckdns.org`)",
"traefik.http.routers.wallabagwan.tls=true",
]
port = "http"
#check {
# type = "script"
# name = "check_up"
# command = "/local/healthcheck.sh"
# interval = "60s"
# timeout = "5s"
# check_restart {
# limit = 3
# grace = "90s"
# ignore_warnings = false
# }
#}
}
resources {
cpu = 500
memory = 256
}
}
network {
// mbits = 100
port "http" {
to = 80
}
}
}
}

View File

@@ -0,0 +1,65 @@
job "wireguard" {
region = "global"
datacenters = ["dc1"]
type = "service"
constraint {
attribute = "${attr.cpu.arch}"
operator = "regexp"
value = "amd64"
}
group "wireguard" {
count = 1
task "wireguard" {
driver = "docker"
config {
image = "ghcr.io/wg-easy/wg-easy"
ports = ["vpn","vpn_ui"]
volumes = [
"/mnt/Public/config/wireguard:/etc/wireguard",
]
cap_add = ["NET_ADMIN","SYS_MODULE"]
sysctl = {
"net.ipv4.conf.all.src_valid_mark"="1"
"net.ipv4.ip_forward"="1"
}
}
env {
TZ = "EST5EDT"
PUID = 1000
PGID = 1000
WG_HOST="wireguard.fbleagh.duckdns.org"
WG_PORT=51820
PORT=51821
WEBUI_HOST="0.0.0.0"
WG_ALLOWED_IPS="0.0.0.0/0"
UI_TRAFFIC_STATS="true"
UI_CHART_TYPE=1
WG_DEFAULT_DNS="192.168.4.36, 8.8.8.8"
}
service {
name = "${TASKGROUP}"
port = "vpn"
}
resources {
cpu = 50
memory = 100
}
}
network {
port "vpn" {
static = 51820
to = 51820
}
port "vpn_ui" {
static = 51821
to = 51821
}
}
}
}