chore: backup infrastructure configurations [skip ci]
@@ -1,5 +1,5 @@
{
"backup_timestamp": "2025-11-23T02:00:42.913047",
"backup_timestamp": "2025-11-24T15:25:00.879116",
"total_keys": 79,
"successful_backups": 74,
"failed_backups": 5,
@@ -1 +1 @@
|
||||
{"conn_url":"postgres://192.168.4.226:5432/postgres","api_url":"http://192.168.4.226:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587880588200,"replication_state":"streaming","timeline":248}
|
||||
{"conn_url":"postgres://192.168.4.226:5432/postgres","api_url":"http://192.168.4.226:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":589299797968,"replication_state":"streaming","timeline":248}
|
||||
@@ -1 +1 @@
|
||||
{"conn_url":"postgres://192.168.4.227:5432/postgres","api_url":"http://192.168.4.227:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587880560592,"replication_state":"streaming","timeline":248}
|
||||
{"conn_url":"postgres://192.168.4.227:5432/postgres","api_url":"http://192.168.4.227:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":589299779856,"replication_state":"streaming","timeline":248}
|
||||
@@ -1 +1 @@
|
||||
{"conn_url":"postgres://192.168.4.228:5432/postgres","api_url":"http://192.168.4.228:8008/patroni","state":"running","role":"primary","version":"4.0.4","xlog_location":587880553712,"timeline":248}
|
||||
{"conn_url":"postgres://192.168.4.228:5432/postgres","api_url":"http://192.168.4.228:8008/patroni","state":"running","role":"primary","version":"4.0.4","xlog_location":589299779856,"timeline":248}
|
||||
@@ -1 +1 @@
|
||||
{"conn_url":"postgres://192.168.4.36:5432/postgres","api_url":"http://192.168.4.36:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587880830616,"replication_state":"streaming","timeline":248}
|
||||
{"conn_url":"postgres://192.168.4.36:5432/postgres","api_url":"http://192.168.4.36:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":589299713952,"replication_state":"streaming","timeline":248}
|
||||
@@ -1 +1 @@
|
||||
{"optime":587880553712,"slots":{"pg_odroid6":587880553712,"pg_odroid7":587880553712,"pg_opti1":587880553712,"pg_odroid8":587880553712},"retain_slots":["pg_odroid6","pg_odroid7","pg_odroid8","pg_opti1"]}
|
||||
{"optime":589299779856,"slots":{"pg_odroid6":589299779856,"pg_odroid7":589299779856,"pg_opti1":589299779856,"pg_odroid8":589299779856},"retain_slots":["pg_odroid6","pg_odroid7","pg_odroid8","pg_opti1"]}
|
||||
@@ -1 +1 @@
|
||||
{"connection_state": "stable", "last_state_change_time": 1763829844.4920733, "consecutive_failures": 21, "consecutive_stable_checks": 122, "last_failure_time": 1763829749.067393}
|
||||
{"connection_state": "unstable", "last_state_change_time": 1763997771.3138738, "consecutive_failures": 0, "consecutive_stable_checks": 0, "last_failure_time": 1763829749.067393}
|
||||
@@ -1 +1 @@
|
||||
{"vpn_status": "running", "last_vpn_status_change": 1763829147.0930555, "public_ip": "192.30.89.67", "last_public_ip_change": 1763829147.2012818, "public_ip_details": {"public_ip": "192.30.89.67", "region": "British Columbia", "country": "Canada", "city": "Vancouver", "location": "49.2497,-123.1193", "organization": "AS394256 Tech Futures Interactive Inc.", "postal_code": "V5Y", "timezone": "America/Vancouver"}}
|
||||
{"vpn_status": "running", "last_vpn_status_change": 1763829147.0930555, "public_ip": "139.28.218.235", "last_public_ip_change": 1763997771.0223732, "public_ip_details": {"public_ip": "139.28.218.235", "region": "Quebec", "country": "Canada", "city": "Montreal", "location": "45.499401,-73.570297", "organization": "M247 Europe SRL", "postal_code": "H3B", "timezone": "America/Toronto"}}
|
||||
@@ -68,7 +68,6 @@ job "gitea" {
|
||||
GITEA__server__SSH_PORT = "${NOMAD_HOST_PORT_ssh}" # The port exposed on the host for SSH
|
||||
|
||||
GITEA__actions__ENABLED=true
|
||||
GITEA__packages__ENABLED=true
|
||||
|
||||
# Set the user Gitea runs as inside the container (important for permissions)
|
||||
USER_UID = "1000"
|
||||
|
||||
@@ -48,6 +48,7 @@ job "gonic" {
|
||||
"traefik.http.routers.goniclan.rule=Host(`gonic.service.dc1.consul`)",
|
||||
"traefik.http.routers.gonicwan.rule=Host(`mg.fbleagh.duckdns.org`)",
|
||||
"traefik.http.routers.gonicwan.tls=true",
|
||||
"traefik.http.routers.gonicwan.tls.certresolver=myresolver"
|
||||
|
||||
]
|
||||
// "traefik.http.middlewares.gonic_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm",
|
||||
|
||||
@@ -2,7 +2,9 @@ job "grafana" {
|
||||
# region = "global"
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
# priority = 50
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.kernel.name}"
|
||||
value = "linux"
|
||||
@@ -19,23 +21,29 @@ job "grafana" {
|
||||
}
|
||||
group "grafana" {
|
||||
count = 1
|
||||
|
||||
restart {
|
||||
attempts = 2
|
||||
interval = "1m"
|
||||
delay = "10s"
|
||||
mode = "delay"
|
||||
}
|
||||
|
||||
task "grafana" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
// image = "fg2it/grafana-armhf:v5.1.4"
|
||||
image = "grafana/grafana:latest"
|
||||
ports = ["http"]
|
||||
|
||||
logging {
|
||||
type = "json-file"
|
||||
}
|
||||
|
||||
memory_hard_limit = 2048
|
||||
}
|
||||
|
||||
env {
|
||||
disable_login_form = "EST5EDT"
|
||||
PUID = 1000
|
||||
@@ -45,28 +53,25 @@ job "grafana" {
|
||||
GF_AUTH_ANONYMOUS_ORG_NAME = "Main Org."
|
||||
GF_AUTH_ANONYMOUS_ORG_ROLE = "Admin"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
url: http://prometheus.service.dc1.consul:9090
|
||||
isDefault: true
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}
|
||||
isDefault: false
|
||||
editable: true
|
||||
jsonData:
|
||||
maxLines: 1000
|
||||
isDefault:
|
||||
EOH
|
||||
|
||||
destination = "local/datasources/prometheus.yaml"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: dashboards
|
||||
type: file
|
||||
@@ -75,24 +80,28 @@ providers:
|
||||
path: /local/dashboard_definitons
|
||||
foldersFromFilesStructure: true
|
||||
EOH
|
||||
|
||||
destination = "local/dashboards/dashboards.yaml"
|
||||
}
|
||||
|
||||
template {
|
||||
data = "{{ key \"grafana_dashboards/nomad\" }}"
|
||||
destination = "local/dashboard_definitons/nomad.json"
|
||||
}
|
||||
template {
|
||||
template {
|
||||
data = "{{ key \"grafana_dashboards/thermals\" }}"
|
||||
destination = "local/dashboard_definitons/thermals.json"
|
||||
}
|
||||
template {
|
||||
template {
|
||||
data = "{{ key \"grafana_dashboards/NomadMem\" }}"
|
||||
destination = "local/dashboard_definitons/NomadMem.json"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "${TASKGROUP}"
|
||||
tags = ["global", "backend"]
|
||||
port = "http"
|
||||
|
||||
check {
|
||||
name = "alive"
|
||||
type = "http"
|
||||
@@ -100,6 +109,7 @@ providers:
|
||||
timeout = "120s"
|
||||
path = "/login"
|
||||
port = "http"
|
||||
|
||||
check_restart {
|
||||
limit = 3
|
||||
grace = "120s"
|
||||
@@ -107,17 +117,21 @@ providers:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 128 # 500 MHz
|
||||
memory = 64 # 128MB
|
||||
}
|
||||
|
||||
# Specify configuration related to log rotation
|
||||
logs {
|
||||
max_files = 10
|
||||
max_file_size = 15
|
||||
}
|
||||
|
||||
kill_timeout = "10s"
|
||||
}
|
||||
|
||||
network {
|
||||
port "http" {
|
||||
static = 3100
|
||||
|
||||
@@ -28,6 +28,8 @@ job "hass" {
|
||||
# Stagger updates every 60 seconds
|
||||
stagger = "10s"
|
||||
max_parallel = 1
|
||||
healthy_deadline = "15m"
|
||||
progress_deadline = "20m"
|
||||
}
|
||||
group "hass" {
|
||||
count = 1
|
||||
@@ -39,92 +41,92 @@ job "hass" {
|
||||
mode = "delay"
|
||||
}
|
||||
|
||||
task "init" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "prestart"
|
||||
sidecar = false
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
image_pull_timeout = "10m"
|
||||
force_pull = false
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
"/mnt/Public/config/locks:/locks"
|
||||
]
|
||||
|
||||
}
|
||||
env {
|
||||
DB_NAME = "home-assistant_v2.db"
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
// template {
|
||||
// data = <<EOH
|
||||
// dbs:
|
||||
// - path: /config/radarr.db
|
||||
// replicas:
|
||||
// - path: /configbackup
|
||||
// EOH
|
||||
|
||||
// destination = "local/litestream.yml"
|
||||
// task "init" {
|
||||
// driver = "docker"
|
||||
// lifecycle {
|
||||
// hook = "prestart"
|
||||
// sidecar = false
|
||||
// }
|
||||
}
|
||||
// config {
|
||||
// memory_hard_limit = "2048"
|
||||
// image_pull_timeout = "10m"
|
||||
// force_pull = false
|
||||
// image = "ghcr.io/sstent/rsync"
|
||||
// volumes = [
|
||||
// "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
// "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
// "/mnt/Public/config/locks:/locks"
|
||||
// ]
|
||||
|
||||
// }
|
||||
// env {
|
||||
// DB_NAME = "home-assistant_v2.db"
|
||||
// }
|
||||
// resources {
|
||||
// cpu = 20 # 500 MHz
|
||||
// memory = 20 # 128MB
|
||||
// }
|
||||
// // template {
|
||||
// // data = <<EOH
|
||||
// // dbs:
|
||||
// // - path: /config/radarr.db
|
||||
// // replicas:
|
||||
// // - path: /configbackup
|
||||
// // EOH
|
||||
|
||||
// // destination = "local/litestream.yml"
|
||||
// // }
|
||||
// }
|
||||
|
||||
|
||||
task "finalsync" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "poststop"
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
// task "finalsync" {
|
||||
// driver = "docker"
|
||||
// lifecycle {
|
||||
// hook = "poststop"
|
||||
// }
|
||||
// config {
|
||||
// memory_hard_limit = "2048"
|
||||
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
"/mnt/Public/config/locks:/locks"
|
||||
]
|
||||
// image = "ghcr.io/sstent/rsync"
|
||||
// volumes = [
|
||||
// "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
// "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
// "/mnt/Public/config/locks:/locks"
|
||||
// ]
|
||||
|
||||
// args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av","--exclude='8-20 * * *.db8-20 * * *'","--exclude='8-20 * * *.db'","--exclude='8-20 * * *.db-litestream'","--exclude='generations'","/config/","/configbackup/"]
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 128 # 128MB
|
||||
}
|
||||
}
|
||||
// // args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av","--exclude='8-20 * * *.db8-20 * * *'","--exclude='8-20 * * *.db'","--exclude='8-20 * * *.db-litestream'","--exclude='generations'","/config/","/configbackup/"]
|
||||
// }
|
||||
// resources {
|
||||
// cpu = 20 # 500 MHz
|
||||
// memory = 128 # 128MB
|
||||
// }
|
||||
// }
|
||||
|
||||
|
||||
task "sync" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "poststart"
|
||||
sidecar = true
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
// task "sync" {
|
||||
// driver = "docker"
|
||||
// lifecycle {
|
||||
// hook = "poststart"
|
||||
// sidecar = true
|
||||
// }
|
||||
// config {
|
||||
// memory_hard_limit = "2048"
|
||||
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/:/configs",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
]
|
||||
args = ["client"]
|
||||
}
|
||||
env {
|
||||
CRON_TASK_1 = "50 8-20 * * *rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;"
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
}
|
||||
// image = "ghcr.io/sstent/rsync"
|
||||
// volumes = [
|
||||
// "/mnt/configs/:/configs",
|
||||
// "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
// ]
|
||||
// args = ["client"]
|
||||
// }
|
||||
// env {
|
||||
// CRON_TASK_1 = "50 8-20 * * *rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;"
|
||||
// }
|
||||
// resources {
|
||||
// cpu = 20 # 500 MHz
|
||||
// memory = 20 # 128MB
|
||||
// }
|
||||
// }
|
||||
|
||||
|
||||
|
||||
@@ -146,6 +148,7 @@ job "hass" {
|
||||
"local/configuration.yaml:/config/configuration.yaml",
|
||||
"local/ui-lovelace.yaml:/config/ui-lovelace.yaml",
|
||||
"/mnt/configs/hass:/config",
|
||||
// "/mnt/Public/config/hass:/config",
|
||||
]
|
||||
// "local/auth_provider.homeassistant:/config/.storage/auth_provider.homeassistant"
|
||||
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
job "immich" {
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "arm"
|
||||
}
|
||||
|
||||
datacenters = ["dc1"] # Specify your datacenter
|
||||
type = "service"
|
||||
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
job "minihass" {
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "armhf"
|
||||
}
|
||||
|
||||
datacenters = ["dc1"]
|
||||
|
||||
group "smart-home" {
|
||||
|
||||
@@ -69,12 +69,7 @@ job "nginx" {
|
||||
}
|
||||
service {
|
||||
name = "nginx"
|
||||
tags = ["nginx", "web", "urlprefix-/nginx", "backend",
|
||||
"traefik.http.routers.nginxlan.rule=Host(`nginx.service.dc1.consul`)",
|
||||
"traefik.http.routers.nginxwan.rule=Host(`nginx.service.dc1.fbleagh.duckdns.org`)",
|
||||
"traefik.http.routers.nginxwan.tls=true",
|
||||
|
||||
]
|
||||
tags = ["nginx", "web", "urlprefix-/nginx", "backend"]
|
||||
port = "http"
|
||||
|
||||
check {
|
||||
|
||||
@@ -47,6 +47,59 @@ job "nzbget" {
|
||||
// }
|
||||
// }
|
||||
|
||||
task "nzbget" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "linuxserver/nzbget"
|
||||
network_mode = "container:ovpn-client-${NOMAD_ALLOC_ID}"
|
||||
|
||||
// ports = ["nzbget"]
|
||||
volumes = [
|
||||
"/mnt/Public/Downloads/news:/downloads",
|
||||
"/mnt/Public/incoming:/incomplete-downloads",
|
||||
"/mnt/configs/nzbget/nzbget:/config",
|
||||
]
|
||||
}
|
||||
|
||||
env {
|
||||
TZ = "EST5EDT"
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 500 # 500 MHz
|
||||
memory = 200 # 128MB
|
||||
}
|
||||
} #task nzbget
|
||||
|
||||
task "saznzb" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
// image = "linuxserver/sabnzbd:2.3.8-0ubuntu1jcfp118.04-ls13"
|
||||
image = "linuxserver/sabnzbd"
|
||||
network_mode = "container:ovpn-client-${NOMAD_ALLOC_ID}"
|
||||
|
||||
volumes = [
|
||||
"/mnt/Public/Downloads/news:/downloads",
|
||||
"/mnt/Public/incoming:/incomplete-downloads",
|
||||
"/mnt/configs/saznzb/saznzb:/config",
|
||||
]
|
||||
}
|
||||
|
||||
env {
|
||||
TZ = "EST5EDT"
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 500 # 500 MHz
|
||||
memory = 1024 # 128MB
|
||||
}
|
||||
}
|
||||
|
||||
task "ovpn-client" {
|
||||
driver = "docker"
|
||||
@@ -101,8 +154,8 @@ job "nzbget" {
|
||||
COUNTRY = "Canada"
|
||||
CITY = "Toronto"
|
||||
FIREWALL_VPN_INPUT_PORTS = "56987"
|
||||
WIREGUARD_PRIVATE_KEY = "2FHwQX1jxk+qeMmXUtSGRc2kKF1WHeSCyIgHNW+7akA=" #ActiveLynx
|
||||
WIREGUARD_ADDRESS = "10.66.246.4/32"
|
||||
WIREGUARD_PRIVATE_KEY = "iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8="
|
||||
WIREGUARD_ADDRESS = "10.64.141.217/32"
|
||||
|
||||
|
||||
|
||||
@@ -173,10 +226,6 @@ service {
|
||||
},
|
||||
]
|
||||
}
|
||||
env {
|
||||
REQUIRE_AUTH = "false"
|
||||
}
|
||||
|
||||
|
||||
resources {
|
||||
cpu = 64 # 500 MHz
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
job "postgres-15" {
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "arm"
|
||||
}
|
||||
|
||||
type = "system"
|
||||
datacenters = ["dc1"]
|
||||
|
||||
|
||||
@@ -3,6 +3,14 @@ job "promtail" {
|
||||
type = "system" # Runs on every node
|
||||
|
||||
group "promtail" {
|
||||
count = 1
|
||||
|
||||
# Allow spreading across multiple node pools
|
||||
constraint {
|
||||
operator = "regexp"
|
||||
attribute = "${node.pool}"
|
||||
value = "^(default|backup)$"
|
||||
}
|
||||
network {
|
||||
port "http" {
|
||||
static = 9080
|
||||
@@ -50,6 +58,44 @@ clients:
|
||||
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
# Scrape Consul server logs from journald
|
||||
- job_name: consul-server
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: consul-server
|
||||
component: server
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'consul.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Consul Template logs from journald
|
||||
- job_name: consul-template
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: consul-template
|
||||
component: template
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'consul-template.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Nomad server logs from journald
|
||||
- job_name: nomad-server
|
||||
journal:
|
||||
@@ -69,6 +115,63 @@ scrape_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape CoreDNS logs from journald
|
||||
- job_name: coredns
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: coredns
|
||||
component: dns
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'coredns.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Docker daemon logs from journald
|
||||
- job_name: docker-daemon
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: docker-daemon
|
||||
component: docker
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'docker.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Keepalived logs from journald
|
||||
- job_name: keepalived
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: keepalived
|
||||
component: ha
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'keepalived.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Nomad allocation logs
|
||||
- job_name: nomad-alloc-logs
|
||||
static_configs:
|
||||
@@ -79,12 +182,11 @@ scrape_configs:
|
||||
host: {{ env "HOSTNAME" }}
|
||||
__path__: /alloc/logs/*
|
||||
|
||||
# Exclude Promtail's own FIFO files to avoid seek errors
|
||||
pipeline_stages:
|
||||
- match:
|
||||
selector: '{job="nomad-logs"}'
|
||||
stages:
|
||||
- drop:
|
||||
expression: '.*\.promtail\.(stdout|stderr)\.fifo.*'
|
||||
selector: '{filename=~".*/\\.promtail\\.(stdout|stderr)\\.fifo"}'
|
||||
action: drop
|
||||
|
||||
# Scrape Docker container logs
|
||||
- job_name: docker
|
||||
|
||||
nomad_backup/promtail_backup.hcl (new file, 151 lines)
@@ -0,0 +1,151 @@
|
||||
job "promtail_backup" {
|
||||
node_pool = "backup"
|
||||
|
||||
datacenters = ["dc1"]
|
||||
type = "system" # Runs on every node
|
||||
|
||||
group "promtail" {
|
||||
network {
|
||||
port "http" {
|
||||
static = 9080
|
||||
}
|
||||
}
|
||||
|
||||
task "promtail" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "grafana/promtail:2.9.3"
|
||||
ports = ["http"]
|
||||
|
||||
args = [
|
||||
"-config.file=/local/promtail-config.yml",
|
||||
]
|
||||
|
||||
# Mount the Docker socket, journal, and machine-id for systemd logs
|
||||
volumes = [
|
||||
"/var/lib/docker/containers:/var/lib/docker/containers:ro",
|
||||
"/var/log/journal:/var/log/journal:ro",
|
||||
"/run/log/journal:/run/log/journal:ro",
|
||||
"/etc/machine-id:/etc/machine-id:ro",
|
||||
"/var/run/docker.sock:/var/run/docker.sock:ro",
|
||||
]
|
||||
|
||||
# Run as root to access Docker socket
|
||||
privileged = true
|
||||
}
|
||||
|
||||
env {
|
||||
HOSTNAME = "${node.unique.name}"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
server:
|
||||
http_listen_port: 9080
|
||||
grpc_listen_port: 0
|
||||
|
||||
positions:
|
||||
filename: /alloc/data/positions.yaml
|
||||
|
||||
clients:
|
||||
- url: http://{{ range service "loki" }}{{ .Address }}:{{ .Port }}{{ end }}/loki/api/v1/push
|
||||
|
||||
scrape_configs:
|
||||
# Scrape Nomad server logs from journald
|
||||
- job_name: nomad-server
|
||||
journal:
|
||||
json: false
|
||||
max_age: 12h
|
||||
path: /var/log/journal
|
||||
labels:
|
||||
job: nomad-server
|
||||
component: server
|
||||
host: {{ env "HOSTNAME" }}
|
||||
relabel_configs:
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
regex: 'nomad.service'
|
||||
action: keep
|
||||
- source_labels: ['__journal__hostname']
|
||||
target_label: 'journal_host'
|
||||
- source_labels: ['__journal__systemd_unit']
|
||||
target_label: 'unit'
|
||||
|
||||
# Scrape Nomad allocation logs
|
||||
- job_name: nomad-alloc-logs
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost
|
||||
labels:
|
||||
job: nomad-logs
|
||||
host: {{ env "HOSTNAME" }}
|
||||
__path__: /alloc/logs/*
|
||||
|
||||
pipeline_stages:
|
||||
- match:
|
||||
selector: '{job="nomad-logs"}'
|
||||
stages:
|
||||
- drop:
|
||||
expression: '.*\.promtail\.(stdout|stderr)\.fifo.*'
|
||||
|
||||
# Scrape Docker container logs
|
||||
- job_name: docker
|
||||
docker_sd_configs:
|
||||
- host: unix:///var/run/docker.sock
|
||||
refresh_interval: 5s
|
||||
|
||||
relabel_configs:
|
||||
# Add hostname label
|
||||
- replacement: {{ env "HOSTNAME" }}
|
||||
target_label: 'host'
|
||||
|
||||
# Extract container name
|
||||
- source_labels: ['__meta_docker_container_name']
|
||||
regex: '/(.*)'
|
||||
target_label: 'container'
|
||||
|
||||
# Extract Nomad job name if available
|
||||
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_job_name']
|
||||
target_label: 'nomad_job'
|
||||
|
||||
# Extract Nomad task name if available
|
||||
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_task_name']
|
||||
target_label: 'nomad_task'
|
||||
|
||||
# Extract Nomad namespace if available
|
||||
- source_labels: ['__meta_docker_container_label_com_hashicorp_nomad_namespace']
|
||||
target_label: 'nomad_namespace'
|
||||
|
||||
# Set the log path
|
||||
- source_labels: ['__meta_docker_container_id']
|
||||
target_label: '__path__'
|
||||
replacement: '/var/lib/docker/containers/$1/*.log'
|
||||
EOH
|
||||
|
||||
destination = "local/promtail-config.yml"
|
||||
change_mode = "restart"
|
||||
}
|
||||
|
||||
resources {
|
||||
cpu = 200
|
||||
memory = 128
|
||||
}
|
||||
|
||||
service {
|
||||
name = "promtail"
|
||||
port = "http"
|
||||
|
||||
tags = [
|
||||
"logging",
|
||||
]
|
||||
|
||||
check {
|
||||
type = "http"
|
||||
path = "/ready"
|
||||
interval = "10s"
|
||||
timeout = "2s"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,12 @@
|
||||
# There can only be a single job definition per file.
|
||||
# Create a job with ID and Name 'example'
|
||||
job "prowlarr" {
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "arm"
|
||||
}
|
||||
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
@@ -30,28 +36,11 @@ job "prowlarr" {
|
||||
}
|
||||
|
||||
|
||||
task "flaresolver" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "ghcr.io/flaresolverr/flaresolverr:latest"
|
||||
ports = ["http_flare"]
|
||||
dns_servers = ["192.168.4.250", "1.1.1.1"]
|
||||
memory_hard_limit = "2048"
|
||||
force_pull = false
|
||||
}
|
||||
service {
|
||||
name = "${TASKGROUP}"
|
||||
tags = ["prowlarr_pg", "tools"]
|
||||
port = "http_flare"
|
||||
}
|
||||
}
|
||||
|
||||
task "prowlarr" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "ghcr.io/linuxserver/prowlarr:develop"
|
||||
ports = ["http"]
|
||||
dns_servers = ["192.168.4.250", "1.1.1.1"]
|
||||
memory_hard_limit = "2048"
|
||||
volumes = [
|
||||
"/mnt/Public/configs/prowlarr_pg:/config",
|
||||
|
||||
@@ -3,21 +3,28 @@
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
|
||||
# priority = 50
|
||||
constraint {
|
||||
attribute = "${attr.unique.hostname}"
|
||||
operator = "regexp"
|
||||
value = "odroid.*"
|
||||
// weight = 100
|
||||
}
|
||||
affinity {
|
||||
attribute = "${attr.unique.hostname}"
|
||||
value = "odroid3"
|
||||
weight = 100
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.kernel.name}"
|
||||
value = "linux"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "regexp"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
update {
|
||||
# Stagger updates every 60 seconds
|
||||
stagger = "90s"
|
||||
max_parallel = 1
|
||||
healthy_deadline = "2m"
|
||||
healthy_deadline = "4m"
|
||||
health_check = "task_states"
|
||||
|
||||
}
|
||||
@@ -65,7 +72,6 @@ constraint {
|
||||
volumes = [
|
||||
"/etc/localtime:/etc/localtime",
|
||||
"/mnt/syncthing/mullvad:/vpn",
|
||||
"local/gluetun.toml:/gluetun/config.toml"
|
||||
|
||||
]
|
||||
devices = [
|
||||
@@ -74,37 +80,21 @@ constraint {
|
||||
container_path = "/dev/net/tun"
|
||||
},
|
||||
]
|
||||
|
||||
}
|
||||
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
[[roles]]
|
||||
name = "qbittorrent"
|
||||
# Define a list of routes with the syntax "Http-Method /path"
|
||||
routes = ["GET /v1/openvpn/status", "PUT /v1/openvpn/status", "PUT /v1/openvpn/status", "GET /v1/openvpn/portforwarded", "GET /v1/openvpn/settings", "GET /v1/dns/status", "PUT /v1/dns/status", "PUT /v1/dns/status", "GET /v1/updater/status", "PUT /v1/updater/status", "PUT /v1/updater/status", "GET /v1/publicip/ip", "GET /v1/version", "GET /v1/vpn/status", "PUT /v1/vpn/status", "GET /v1/vpn/settings", "PUT /v1/vpn/settings"]
|
||||
auth = "none"
|
||||
EOH
|
||||
|
||||
destination = "custom/auth.toml"
|
||||
}
|
||||
|
||||
|
||||
env {
|
||||
VPN_SERVICE_PROVIDER="airvpn"
|
||||
VPN_TYPE="wireguard"
|
||||
WIREGUARD_PRIVATE_KEY="EIos1A9eGCIoCHr02aOsEu8S4C0gqhNLIYF/vMykEV0="
|
||||
WIREGUARD_PRESHARED_KEY="RzPcraPA24hLFxGiB7z5JMWrtI+JBbEzvmeiEX36XWo="
|
||||
WIREGUARD_ADDRESSES="10.161.31.240/32"
|
||||
WIREGUARD_PRIVATE_KEY="EF39fCd2/ycBG88qrk0Wgzak4wDLwq9kxLpzNM2se0s="
|
||||
WIREGUARD_PRESHARED_KEY="utk1Fqp8N1T20qwof6xGxIDPnrOO49tM6+nQBCBC9OY="
|
||||
WIREGUARD_ADDRESSES="10.152.125.142/32"
|
||||
SERVER_COUNTRIES="Canada"
|
||||
SERVER_CITIES="Vancouver,Montreal,Toronto"
|
||||
FIREWALL_VPN_INPUT_PORTS = "61944,53304"
|
||||
SERVER_CITIES="Montreal"
|
||||
FIREWALL_VPN_INPUT_PORTS = "54547"
|
||||
FIREWALL_INPUT_PORTS="8080,1080"
|
||||
FIREWALL_DEBUG="on"
|
||||
HEALTH_TARGET_ADDRESS="1.1.1.1"
|
||||
HEALTH_SUCCESS_WAIT_DURATION="30s"
|
||||
HEALTH_VPN_DURATION_INITIAL="3600s"
|
||||
HEALTH_VPN_DURATION_ADDITION="600s"
|
||||
HTTP_CONTROL_SERVER_AUTH_CONFIG_FILEPATH="/custom/auth.toml"
|
||||
// HEALTH_TARGET_ADDRESS="cloudflare.com:443"
|
||||
|
||||
|
||||
@@ -121,7 +111,7 @@ auth = "none"
|
||||
SHADOWSOCKS = "on"
|
||||
DOT_PROVIDERS = "cloudflare,google,quad9,quadrant"
|
||||
DOT = "off"
|
||||
WEBUI_PORT=8080
|
||||
WEBUI_PORT=8081
|
||||
}
|
||||
|
||||
service {
|
||||
@@ -168,7 +158,7 @@ auth = "none"
|
||||
},
|
||||
]
|
||||
}
|
||||
env {REQUIRE_AUTH="false"}
|
||||
|
||||
resources {
|
||||
cpu = 64 # 500 MHz
|
||||
memory = 128 # 128MB
|
||||
@@ -176,80 +166,80 @@ auth = "none"
|
||||
} #end dante
|
||||
|
||||
|
||||
# task "init" {
|
||||
# driver = "docker"
|
||||
# lifecycle {
|
||||
# hook = "prestart"
|
||||
# sidecar = false
|
||||
# }
|
||||
# config {
|
||||
# memory_hard_limit = "2048"
|
||||
task "init" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "prestart"
|
||||
sidecar = false
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
|
||||
# image = "ghcr.io/sstent/rsync"
|
||||
# volumes = [
|
||||
# "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
# "/mnt/Public/config/locks:/locks"
|
||||
# ]
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
"/mnt/Public/config/locks:/locks"
|
||||
]
|
||||
|
||||
# args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "/configbackup/", "/config/", "--delete-before"]
|
||||
# }
|
||||
# resources {
|
||||
# cpu = 20 # 500 MHz
|
||||
# memory = 20 # 128MB
|
||||
# }
|
||||
# }
|
||||
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "/configbackup/", "/config/", "--delete-before"]
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# task "finalsync" {
|
||||
# driver = "docker"
|
||||
# lifecycle {
|
||||
# hook = "poststop"
|
||||
# }
|
||||
# config {
|
||||
# memory_hard_limit = "2048"
|
||||
task "finalsync" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "poststop"
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
|
||||
# image = "ghcr.io/sstent/rsync"
|
||||
# volumes = [
|
||||
# "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
# "/mnt/Public/config/locks:/locks"
|
||||
# ]
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}:/config",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
"/mnt/Public/config/locks:/locks"
|
||||
]
|
||||
|
||||
# args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "--delete", "/config/", "/configbackup/"]
|
||||
# }
|
||||
# resources {
|
||||
# cpu = 20 # 500 MHz
|
||||
# memory = 128 # 128MB
|
||||
# }
|
||||
# }
|
||||
args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "--delete", "/config/", "/configbackup/"]
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 128 # 128MB
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# task "sync" {
|
||||
# driver = "docker"
|
||||
# lifecycle {
|
||||
# hook = "poststart"
|
||||
# sidecar = true
|
||||
# }
|
||||
# config {
|
||||
# memory_hard_limit = "2048"
|
||||
task "sync" {
|
||||
driver = "docker"
|
||||
lifecycle {
|
||||
hook = "poststart"
|
||||
sidecar = true
|
||||
}
|
||||
config {
|
||||
memory_hard_limit = "2048"
|
||||
|
||||
# image = "ghcr.io/sstent/rsync"
|
||||
# volumes = [
|
||||
# "/mnt/configs/:/configs",
|
||||
# "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
# ]
|
||||
# args = ["client"]
|
||||
# }
|
||||
# env {
|
||||
# CRON_TASK_1 = "25 8-20 * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --delete /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;"
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/:/configs",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
]
|
||||
args = ["client"]
|
||||
}
|
||||
env {
|
||||
CRON_TASK_1 = "25 8-20 * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --delete /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;"
|
||||
|
||||
# }
|
||||
# resources {
|
||||
# cpu = 20 # 500 MHz
|
||||
# memory = 20 # 128MB
|
||||
# }
|
||||
# } #end sync
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
} #end sync
|
||||
|
||||
|
||||
task "qbittorrent" {
|
||||
@@ -257,7 +247,8 @@ auth = "none"
|
||||
|
||||
// "/mnt/Public/config/qbittorrent:/config",
|
||||
config {
|
||||
image = "linuxserver/qbittorrent:5.1.0"
|
||||
// image = "lscr.io/linuxserver/qbittorrent:latest"
|
||||
image = "linuxserver/qbittorrent:4.6.5"
|
||||
network_mode = "container:qbittorrent-vpn-${NOMAD_ALLOC_ID}"
|
||||
memory_hard_limit = 2048
|
||||
|
||||
@@ -293,6 +284,20 @@ auth = "none"
|
||||
]
|
||||
port = "qbittorrent_80"
|
||||
|
||||
check {
|
||||
type = "script"
|
||||
name = "check_up"
|
||||
command = "/bin/bash"
|
||||
args = ["/local/qbithealth.sh"]
|
||||
interval = "120s"
|
||||
timeout = "10s"
|
||||
|
||||
check_restart {
|
||||
limit = 10
|
||||
grace = "120s"
|
||||
ignore_warnings = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template {
|
||||
|
||||
@@ -41,7 +41,7 @@ constraint {
|
||||
config {
|
||||
// image = "linuxserver/radarr:nightly"
|
||||
image = "linuxserver/radarr:latest"
|
||||
dns_servers = ["192.168.4.250", "192.168.4.1"]
|
||||
dns_servers = ["${attr.unique.network.ip-address}","192.168.4.250","8.8.8.8"]
|
||||
ports = ["http"]
|
||||
memory_hard_limit = "2048"
|
||||
// cpuset_cpus = "4-7"
|
||||
|
||||
@@ -37,38 +37,21 @@ job "seaweedfsmaster" {
|
||||
mode = "delay"
|
||||
}
|
||||
|
||||
task "seaweedfsadmin" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "chrislusf/seaweedfs"
|
||||
memory_hard_limit = "2048"
|
||||
entrypoint = ["/usr/bin/weed"]
|
||||
|
||||
args = [
|
||||
"admin",
|
||||
"-masters=${NOMAD_GROUP_NAME}0.service.dc1.consul:9333,${NOMAD_GROUP_NAME}1.service.dc1.consul:9333,${NOMAD_GROUP_NAME}2.service.dc1.consul:9333",
|
||||
"-dataDir=/data",
|
||||
]
|
||||
|
||||
volumes = [
|
||||
"/mnt/Public/configs/seaweedfadmin:/data/",
|
||||
]
|
||||
|
||||
ports = ["seaweedfs_admin"]
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
task "seaweedfsmaster" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "chrislusf/seaweedfs"
|
||||
// image = "ghcr.io/sstent/seaweedfs:latest"
|
||||
// command = ""
|
||||
// entrypoint = ["/usr/bin/seaweedfs"]
|
||||
// entrypoint = ["/usr/bin/nslookup","odroid3.node.dc1.consul"]
|
||||
memory_hard_limit = "2048"
|
||||
|
||||
// dns_servers = ["192.168.1.1"]
|
||||
|
||||
// hostname = "${attr.unique.hostname}"
|
||||
// hostname = "${attr.unique.network.ip-address}"
|
||||
entrypoint = ["/usr/bin/weed"]
|
||||
|
||||
args = [
|
||||
@@ -89,9 +72,10 @@ job "seaweedfsmaster" {
|
||||
"-volume.port.grpc=19444",
|
||||
"-volume.max=100"
|
||||
]
|
||||
// "-filer.peers=${NOMAD_GROUP_NAME}0.service.dc1.consul:8877,${NOMAD_GROUP_NAME}1.service.dc1.consul:8877,${NOMAD_GROUP_NAME}2.service.dc1.consul:8877",
|
||||
|
||||
volumes = [
|
||||
"/mnt/configs/seaweedfs:/data/",
|
||||
"/data/seaweedfs/:/data/",
|
||||
]
|
||||
|
||||
ports = ["seaweedfs", "seaweedfs_high", "seaweedfs_filer", "seaweed_s3", "seaweed_webdav", "seaweedfs_filer_high","seaweedfs_vol","seaweedfs_vol_high"]
|
||||
@@ -157,9 +141,6 @@ job "seaweedfsmaster" {
|
||||
port "seaweedfs_vol_high" {
|
||||
static = "19444"
|
||||
}
|
||||
port "seaweedfs_admin" {
|
||||
static = "23646"
|
||||
}
|
||||
// port "s8080" {
|
||||
// static = 8080
|
||||
// to = 8080
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
# There can only be a single job definition per file.
|
||||
# Create a job with ID and Name 'example'
|
||||
job "slskd" {
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "arm"
|
||||
}
|
||||
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
|
||||
nomad_backup/sonarr-small.hcl (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
# There can only be a single job definition per file.
|
||||
# Create a job with ID and Name 'example'
|
||||
job "sonarr-small" {
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.kernel.name}"
|
||||
value = "linux"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "regexp"
|
||||
value = "arm64"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.unique.hostname}"
|
||||
operator = "regexp"
|
||||
value = "odroid.*"
|
||||
}
|
||||
|
||||
update {
|
||||
stagger = "10s"
|
||||
max_parallel = 1
|
||||
}
|
||||
group "sonarr-small" {
|
||||
count = 1
|
||||
|
||||
restart {
|
||||
attempts = 2
|
||||
interval = "1m"
|
||||
delay = "10s"
|
||||
mode = "fail"
|
||||
}
|
||||
|
||||
task "sonarr" {
|
||||
// driver = "raw_exec"
// config {
//   command = "docker"
//   args = ["run",
//     "--rm",
//     "--name=sonarr",
//     "-e", "PUID=1000",
//     "-e", "PGID=1000",
//     "-e", "TZ=EST5EDT",
//     "-p", "8989:8989",
//     "-v", "/mnt/syncthing/sonarrv3:/config",
//     "-v", "/mnt/Public/Downloads/tv:/tv",
//     "-v", "/mnt/Public/Downloads/news:/downloads",
//     "--cpuset-cpus","4-7",
//     "linuxserver/sonarr:preview"]
// }
|
||||
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "linuxserver/sonarr:develop"
|
||||
|
||||
ports = ["http"]
|
||||
|
||||
// dns_servers = ["192.168.1.1", "1.1.1.1"]
|
||||
memory_hard_limit = "2048"
|
||||
// cpuset_cpus = "4-7"
|
||||
|
||||
volumes = [
|
||||
"/mnt/Public/Downloads/news:/downloads",
|
||||
"/mnt/Public/Downloads/tv:/tv",
|
||||
// "/mnt/configs/sonarr:/config",
|
||||
"/mnt/Public/configs/sonarr-small:/config",
|
||||
// "local/config.xml:/config/config.xml"
|
||||
]
|
||||
|
||||
// "/mnt/gv0/sonarr:/config",
|
||||
force_pull = false
|
||||
}
|
||||
|
||||
service {
|
||||
name = "${TASKGROUP}"
|
||||
tags = ["sonarr", "tools"]
|
||||
port = "http"
|
||||
|
||||
check {
|
||||
type = "script"
|
||||
name = "check_up"
|
||||
command = "/local/healthcheck.sh"
|
||||
interval = "60s"
|
||||
timeout = "5s"
|
||||
|
||||
check_restart {
|
||||
limit = 3
|
||||
grace = "90s"
|
||||
ignore_warnings = false
|
||||
}
|
||||
}
|
||||
}
|
||||
template {
|
||||
data = <<EOH
|
||||
#!/bin/bash
|
||||
/usr/bin/curl -f "http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Eo '<ApiKey>(.*)</ApiKey>' /config/config.xml | sed -e 's/<[^>]*>//g')"
|
||||
EOH
|
||||
|
||||
destination = "local/healthcheck.sh"
|
||||
perms = "0755"
|
||||
}
|
||||
|
||||
template {
|
||||
data = "---\nkey: {{ key \"ovpn-client\" }}"
|
||||
destination = "local/file.yml"
|
||||
change_mode = "restart"
|
||||
}
|
||||
|
||||
env {
|
||||
// SHARE = "Public;/mount/Public;yes;no;yes;all;none;;Public"
|
||||
# GLOBAL = "socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536"
|
||||
# PERMISSIONS = "true"
|
||||
# WORKGROUP = "WORKGROUP"
|
||||
TZ = "EST5EDT"
|
||||
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
}
|
||||
|
||||
# We must specify the resources required for
|
||||
# this task to ensure it runs on a machine with
|
||||
# enough capacity.
|
||||
resources {
|
||||
cpu = 500 # 500 MHz
|
||||
memory = 256 # 128MB
|
||||
}
|
||||
|
||||
# Specify configuration related to log rotation
|
||||
logs {
|
||||
max_files = 10
|
||||
max_file_size = 15
|
||||
}
|
||||
|
||||
# Controls the timeout between signalling a task it will be killed
|
||||
# and killing the task. If not set a default is used.
|
||||
kill_timeout = "10s"
|
||||
} #End main task
|
||||
|
||||
network {
|
||||
// mbits = 100
|
||||
|
||||
port "http" {
|
||||
static = 8989
|
||||
to = 8989
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,11 +8,6 @@ job "sonarr" {
|
||||
attribute = "${attr.kernel.name}"
|
||||
value = "linux"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "regexp"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
update {
|
||||
stagger = "10s"
|
||||
@@ -34,23 +29,18 @@ job "sonarr" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "linuxserver/sonarr:develop"
|
||||
image = "lscr.io/linuxserver/sonarr:develop"
|
||||
|
||||
ports = ["http"]
|
||||
|
||||
// dns_servers = ["192.168.1.1", "1.1.1.1"]
|
||||
dns_servers = ["${attr.unique.network.ip-address}","192.168.4.250","8.8.8.8"]
|
||||
memory_hard_limit = "2048"
|
||||
// cpuset_cpus = "4-7"
|
||||
|
||||
volumes = [
|
||||
"/mnt/Public/Downloads/news:/downloads",
|
||||
"/mnt/Public/Downloads/tv:/tv",
|
||||
// "/mnt/configs/sonarr:/config",
|
||||
"/mnt/Public/configs/sonarr_pg:/config",
|
||||
// "local/config.xml:/config/config.xml"
|
||||
]
|
||||
|
||||
// "/mnt/gv0/sonarr:/config",
|
||||
force_pull = false
|
||||
}
|
||||
|
||||
@@ -137,7 +127,7 @@ job "sonarr" {
|
||||
|
||||
# Specify configuration related to log rotation
|
||||
logs {
|
||||
max_files = 12
|
||||
max_files = 10
|
||||
max_file_size = 15
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,11 @@
|
||||
job "traefik" {
|
||||
node_pool = "default"
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "!="
|
||||
value = "arm"
|
||||
}
|
||||
|
||||
datacenters = ["dc1"]
|
||||
type = "system"
|
||||
|
||||
|
||||
nomad_backup/traefik_backup.hcl (new file, 228 lines)
@@ -0,0 +1,228 @@
|
||||
job "traefik_backup" {
|
||||
node_pool = "backup"
|
||||
|
||||
datacenters = ["dc1"]
|
||||
type = "system"
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.kernel.name}"
|
||||
value = "linux"
|
||||
}
|
||||
|
||||
update {
|
||||
stagger = "10s"
|
||||
max_parallel = 1
|
||||
healthy_deadline = "5m"
|
||||
}
|
||||
|
||||
group "traefik" {
|
||||
count = 1
|
||||
|
||||
restart {
|
||||
attempts = 6
|
||||
interval = "1m"
|
||||
delay = "10s"
|
||||
mode = "delay"
|
||||
}
|
||||
|
||||
task "traefik" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "traefik:2.9"
|
||||
// network_mode = "host"
|
||||
|
||||
args = [
|
||||
// "--api.dashboard",
|
||||
// "--providers.consulcatalog.defaultRule=Host(`{{ .Name }}.service.dc1.consul`)",
|
||||
// "--providers.consulcatalog.endpoint.address=${attr.unique.network.ip-address}:8500",
|
||||
// "--providers.consulcatalog.exposedbydefault=true",
|
||||
// "--metrics=true",
|
||||
// "--metrics.prometheus=true",
|
||||
// "--metrics.prometheus.entryPoint=web",
|
||||
// "--entryPoints.web.address=:80",
|
||||
// "--entryPoints.websecure.address=:443",
|
||||
// "--entryPoints.openvpn.address=:1194/udp",
|
||||
"--configFile=/local/file.yml",
|
||||
// "--certificatesresolvers.myresolver.acme.email=stuart.stent@gmail.com",
|
||||
// "--certificatesresolvers.myresolver.acme.storage=/acmecert/acme.json",
|
||||
// "--certificatesresolvers.myresolver.acme.tlschallenge=true",
|
||||
// "--certificatesresolvers.myresolver-int.acme.email=stuart.stent@gmail.com",
|
||||
// "--certificatesresolvers.myresolver-int.acme.storage=/acmecert/acme.json",
|
||||
// "--certificatesresolvers.myresolver-int.acme.tlschallenge=true",
|
||||
// "--certificatesresolvers.myresolver-int.acme.dnschallenge=true",
|
||||
// "--certificatesresolvers.myresolver-int.acme.dnschallenge.provider=duckdns",
|
||||
"--accesslog=true",
|
||||
// "--serversTransport.insecureSkipVerify=true",
|
||||
]
|
||||
volumes = [
|
||||
"/var/run/docker.sock:/var/run/docker.sock",
|
||||
"/mnt/mnt/configs/letsencrypt:/acmecert/",
|
||||
]
|
||||
|
||||
// dns_servers = ["192.168.4.1", "192.168.4.250"]
|
||||
ports = ["traefik", "traefikhttps","traefikui"]
|
||||
|
||||
memory_hard_limit = 20480
|
||||
}
|
||||
|
||||
env {
|
||||
TZ = "EST5EDT"
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
DUCKDNS_TOKEN="e4b5ca33-1f4d-494b-b06d-6dd4600df662"
|
||||
}
|
||||
|
||||
template {
|
||||
left_delimiter = "[["
|
||||
right_delimiter = "]]"
|
||||
|
||||
data = <<EOH
|
||||
http:
|
||||
serversTransports:
|
||||
insecureSkipVerify: true
|
||||
|
||||
entryPoints:
|
||||
web:
|
||||
address: :80
|
||||
websecure:
|
||||
address: :443
|
||||
|
||||
log:
|
||||
level: INFO
|
||||
|
||||
accessLog:
|
||||
fields:
|
||||
names:
|
||||
RequestPath: keep
|
||||
filters:
|
||||
retryAttempts: true
|
||||
minDuration: "10ms"
|
||||
|
||||
metrics:
|
||||
prometheus:
|
||||
addRoutersLabels: true
|
||||
addServicesLabels: true
|
||||
|
||||
api:
|
||||
dashboard: true
|
||||
insecure: true
|
||||
|
||||
providers:
|
||||
consulCatalog:
|
||||
exposedByDefault: true
|
||||
refreshInterval: 30s
|
||||
defaultRule: "Host(`{{ .Name }}.service.dc1.consul`)"
|
||||
endpoint:
|
||||
address: "[[env "attr.unique.network.ip-address"]]:8500"
|
||||
file:
|
||||
filename: /local/tls.yml
|
||||
|
||||
|
||||
EOH
|
||||
|
||||
destination = "local/file.yml"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
tls:
|
||||
certificates:
|
||||
- certFile: /local/duckdns_fullchain.pem
|
||||
keyFile: /local/duckdns_privkey.pem
|
||||
- certFile: /local/dedyn_fullchain.pem
|
||||
keyFile: /local/dedyn_privkey.pem
|
||||
stores:
|
||||
default:
|
||||
defaultCertificate:
|
||||
certFile: /local/duckdns_fullchain.pem
|
||||
keyFile: /local/duckdns_privkey.pem
|
||||
|
||||
EOH
|
||||
|
||||
destination = "local/tls.yml"
|
||||
}
|
||||
|
||||
|
||||
// file:
|
||||
// directory: /local/tls.yaml
|
||||
|
||||
|
||||
template {
|
||||
change_mode = "restart"
|
||||
data = "{{ key \"letsconsul/*.fbleagh.duckdns.org/fullchain.cer\" }}"
|
||||
destination = "local/duckdns_fullchain.pem"
|
||||
perms = 0777
|
||||
}
|
||||
template {
|
||||
change_mode = "noop"
|
||||
data = "{{ key \"letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.key\" }}"
|
||||
destination = "local/duckdns_privkey.pem"
|
||||
perms = 0777
|
||||
}
|
||||
template {
|
||||
change_mode = "restart"
|
||||
data = "{{ key \"letsconsul/*.fbleagh.dedyn.io/fullchain.cer\" }}"
|
||||
destination = "local/dedyn_fullchain.pem"
|
||||
perms = 0777
|
||||
}
|
||||
template {
|
||||
change_mode = "noop"
|
||||
data = "{{ key \"letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.key\" }}"
|
||||
destination = "local/dedyn_privkey.pem"
|
||||
perms = 0777
|
||||
}
|
||||
|
||||
service {
|
||||
name = "${TASKGROUP}"
|
||||
|
||||
tags = [
|
||||
"global",
|
||||
"traefik",
|
||||
"enable_gocast",
|
||||
"gocast_vip=192.168.1.249/32",
|
||||
"gocast_nat=tcp:443:443",
|
||||
"gocast_nat=udp:443:443"]
|
||||
|
||||
|
||||
|
||||
port = "traefik"
|
||||
}
|
||||
|
||||
service {
|
||||
name = "${TASKGROUP}-ui"
|
||||
|
||||
tags = [
|
||||
"global",
|
||||
"traefik",
|
||||
"traefik.http.routers.dashboard.rule=Host(`traefik-ui.service.dc1.consul`)",
|
||||
"traefik.http.routers.dashboard.service=api@internal",
|
||||
]
|
||||
|
||||
port = "traefik"
|
||||
} #end service
|
||||
|
||||
resources {
|
||||
cpu = 256 # 500 MHz
|
||||
memory = 256 # 128MB
|
||||
} #end resources
|
||||
} #end task
|
||||
|
||||
network {
|
||||
port "traefik" {
|
||||
static = 80
|
||||
to = 80
|
||||
}
|
||||
|
||||
port "traefikui" {
|
||||
static = 8090
|
||||
to = 8080
|
||||
}
|
||||
|
||||
port "traefikhttps" {
|
||||
static = 443
|
||||
to = 443
|
||||
}
|
||||
}
|
||||
} # end group
|
||||
} # end job
|
||||
@@ -17,8 +17,7 @@ job "vaultwarden" {
|
||||
task "vaultwarden" {
|
||||
driver = "docker"
|
||||
config {
|
||||
# image = "vaultwarden/server:latest"
|
||||
image = "vaultwarden/server:1.32.7"
|
||||
image = "vaultwarden/server:latest"
|
||||
memory_hard_limit = 2048
|
||||
|
||||
ports = ["http"]
|
||||
@@ -33,7 +32,6 @@ job "vaultwarden" {
|
||||
// vaultwarden_PODCAST_PATH = "/podcasts"
|
||||
ADMIN_TOKEN = "VReYRX0RuSw3mxmGFG4+2ECY71l/wYmuD52NOWDur6e43z/inbUmJGUr5KU4wtjW"
|
||||
ENABLE_DB_WAL = "false"
|
||||
DATABASE_URL= "postgresql://postgres:postgres@master.postgres.service.dc1.consul:5432/vaultwarden"
|
||||
}
|
||||
resources {
|
||||
cpu = 100 # 100 MHz
|
||||
@@ -51,7 +49,8 @@ job "vaultwarden" {
|
||||
"traefik.http.routers.vaultwardenwan-admin.rule=(Host(`vault.fbleagh.duckdns.org`) && PathPrefix(`/admin/`))",
|
||||
"traefik.http.routers.vaultwardenwan.tls=true",
|
||||
// "traefik.http.routers.vaultwardenwan.tls.certresolver=myresolver-int",
|
||||
"traefik.http.middlewares.vaultwardenwan-admin-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.4.0/22",
|
||||
"traefik.http.middlewares.vaultwardenwan-admin-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.0/24",
|
||||
"traefik.http.routers.nginx-admin.middlewares=vaultwardenwan-admin-ipwhitelist",
|
||||
"enable_gocast",
|
||||
"gocast_vip=192.168.1.246/32",
|
||||
"gocast_nat=tcp:8081:8081",
|
||||
@@ -162,20 +161,53 @@ job "vaultwarden" {
|
||||
|
||||
image = "ghcr.io/sstent/rsync"
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/config",
|
||||
"/mnt/configs/:/configs",
|
||||
"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
]
|
||||
args = ["client"]
|
||||
}
|
||||
env {
|
||||
CRON_TASK_1 = "50 * * * * rsync -av --exclude='*.db' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /config/ /configbackup/;"
|
||||
CRON_TASK_1 = "50 * * * * rsync -av --exclude='*.db' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;"
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
}
|
||||
task "Backup" {
|
||||
driver = "docker"
|
||||
|
||||
lifecycle {
|
||||
hook = "poststart"
|
||||
sidecar = true
|
||||
}
|
||||
|
||||
config {
|
||||
image = "bruceforce/vaultwarden-backup"
|
||||
memory_hard_limit = 2048
|
||||
|
||||
volumes = [
|
||||
"/mnt/configs/${NOMAD_GROUP_NAME}/data:/data",
|
||||
// "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
|
||||
]
|
||||
|
||||
args = ["client"]
|
||||
}
|
||||
|
||||
env {
|
||||
TIMESTAMP = true
|
||||
DELETE_AFTER = 60
|
||||
CRON_TIME = "0 5 * * *"
|
||||
BACKUP_DIR = "/data/backups"
|
||||
BACKUP_DIR_PERMISSIONS = "-1"
|
||||
UID = 1000
|
||||
GID = 1000
|
||||
}
|
||||
resources {
|
||||
cpu = 20 # 500 MHz
|
||||
memory = 20 # 128MB
|
||||
}
|
||||
} #end sync task
|
||||
|
||||
network {
|
||||
// mbits = 100
|
||||
|
||||
@@ -43,6 +43,9 @@ job "wallabag" {
|
||||
"traefik.http.routers.wallabaglan.rule=Host(`wallabag.service.dc1.consul`)",
|
||||
"traefik.http.routers.wallabagwan.rule=Host(`wallabag.fbleagh.duckdns.org`)",
|
||||
"traefik.http.routers.wallabagwan.tls=true",
|
||||
// "traefik.http.routers.vaultwardenwan.tls.certresolver=myresolver-int",
|
||||
"traefik.http.middlewares.vaultwardenwan-admin-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.0/24",
|
||||
"traefik.http.routers.nginx-admin.middlewares=vaultwardenwan-admin-ipwhitelist",
|
||||
]
|
||||
port = "http"
|
||||
|
||||
|
||||
@@ -2,27 +2,31 @@ job "wireguard" {
|
||||
region = "global"
|
||||
datacenters = ["dc1"]
|
||||
type = "service"
|
||||
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
operator = "regexp"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
// constraint {
|
||||
// attribute = "${attr.cpu.arch}"
|
||||
// operator = "regexp"
|
||||
// value = "arm"
|
||||
// }
|
||||
group "wireguard" {
|
||||
count = 1
|
||||
|
||||
|
||||
task "wireguard" {
|
||||
driver = "docker"
|
||||
|
||||
config {
|
||||
image = "ghcr.io/wg-easy/wg-easy"
|
||||
ports = ["vpn","vpn_ui"]
|
||||
image = "lscr.io/linuxserver/wireguard"
|
||||
ports = ["vpn"]
|
||||
volumes = [
|
||||
"/mnt/Public/config/wireguard:/etc/wireguard",
|
||||
"/mnt/Public/config/wireguard:/config",
|
||||
"/lib/modules:/lib/modules"
|
||||
]
|
||||
cap_add = ["NET_ADMIN","SYS_MODULE"]
|
||||
// network_mode = "host"
|
||||
|
||||
// network_mode = "container:gocast-${NOMAD_ALLOC_ID}"
|
||||
sysctl = {
|
||||
"net.ipv4.conf.all.src_valid_mark"="1"
|
||||
"net.ipv4.ip_forward"="1"
|
||||
}
|
||||
|
||||
}
|
||||
@@ -30,18 +34,20 @@ constraint {
|
||||
TZ = "EST5EDT"
|
||||
PUID = 1000
|
||||
PGID = 1000
|
||||
WG_HOST="wireguard.fbleagh.duckdns.org"
|
||||
WG_PORT=51820
|
||||
PORT=51821
|
||||
WEBUI_HOST="0.0.0.0"
|
||||
WG_ALLOWED_IPS="0.0.0.0/0"
|
||||
UI_TRAFFIC_STATS="true"
|
||||
UI_CHART_TYPE=1
|
||||
WG_DEFAULT_DNS="192.168.4.36, 8.8.8.8"
|
||||
SERVERURL="wireguard.fbleagh.duckdns.org"
|
||||
SERVERPORT=51820
|
||||
PEERS="StuPhone,SurfaceGo,Surface,SurfaceGo3"
|
||||
PEERDNS="192.168.1.250,192.168.1.1,1.1.1.1"
|
||||
// INTERNAL_SUBNET= "192.168.1.0"
|
||||
ALLOWEDIPS="0.0.0.0/0"
|
||||
}
|
||||
service {
|
||||
name = "${TASKGROUP}"
|
||||
port = "vpn"
|
||||
tags = ["enable_gocast",
|
||||
"gocast_vip=192.168.1.241/32",
|
||||
"gocast_nat=tcp:51820:51820",
|
||||
"gocast_nat=udp:51820:51820"]
|
||||
}
|
||||
|
||||
resources {
|
||||
@@ -55,11 +61,6 @@ constraint {
|
||||
static = 51820
|
||||
to = 51820
|
||||
}
|
||||
port "vpn_ui" {
|
||||
static = 51821
|
||||
to = 51821
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|