chore: backup infrastructure configurations [skip ci]
@@ -6,15 +6,29 @@ job "gonic" {

  group "gonic" {
    count = 1

    volume "gonic-csi-vol" {
      type            = "csi"
      source          = "gonic-volume"
      attachment_mode = "file-system"
      access_mode     = "multi-node-multi-writer"
    }
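
    # "gonic-volume" is an external CSI volume that has to exist before this
    # job can place. A minimal volume spec might look like the sketch below
    # (plugin_id matches the juicefs0 plugin from jfs-controller.hcl and
    # jfs-node.hcl; the other values are assumptions):
    #
    #   id        = "gonic-volume"
    #   name      = "gonic-volume"
    #   type      = "csi"
    #   plugin_id = "juicefs0"
    #   capability {
    #     access_mode     = "multi-node-multi-writer"
    #     attachment_mode = "file-system"
    #   }
    #
    # registered with `nomad volume register <spec.hcl>`, or `nomad volume
    # create` if the plugin should provision the backing storage itself.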

    task "gonic" {
      driver = "docker"

      volume_mount {
        volume      = "gonic-csi-vol" # Matches the name in the volume block above
        destination = "/data"         # Where it appears inside the container
        read_only   = false
      }
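
      # Note: this CSI mount and the "/mnt/configs/gonic/data:/data" bind
      # mount in the config block below both land on /data; presumably one
      # side of this diff replaces the other, with the CSI volume taking over.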

      config {
        image = "sentriz/gonic"
        port_map {
          http = 80
        }
        volumes = [
          "/mnt/configs/gonic/data:/data",
          "/mnt/configs/gonic/cache:/cache",
          "/mnt/Public/Downloads/Clean_Music:/music/CleanMusic:ro",
          "/mnt/Public/Downloads/incoming_music:/music/incomingmusic:ro",
@@ -48,7 +62,6 @@ job "gonic" {
          "traefik.http.routers.goniclan.rule=Host(`gonic.service.dc1.consul`)",
          "traefik.http.routers.gonicwan.rule=Host(`mg.fbleagh.duckdns.org`)",
          "traefik.http.routers.gonicwan.tls=true",
          "traefik.http.routers.gonicwan.tls.certresolver=myresolver"
        ]
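
        # Two routers front the same service: goniclan answers the LAN-only
        # Consul hostname over plain HTTP, while gonicwan answers the public
        # DuckDNS hostname with TLS issued by the configured cert resolver.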
        // "traefik.http.middlewares.gonic_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm",
@@ -63,89 +76,5 @@ job "gonic" {
      }
      }
    } #end gonic

    task "init-manual" {
      driver = "docker"
      lifecycle {
        hook    = "prestart"
        sidecar = false
      }
      config {
        memory_hard_limit = "2048"

        image = "ghcr.io/sstent/rsync"
        volumes = [
          "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
          "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
          "/mnt/Public/config/locks:/locks"
        ]

        args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "--exclude=Backups", "/configbackup/", "/config/", "--delete-before"]
      }
      resources {
        cpu    = 20 # MHz
        memory = 20 # MB
      }
    }
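
    # Restore-then-backup pattern: init-manual copies the shared backup into
    # /config before gonic starts, and finalsync-manual below copies /config
    # back out on shutdown. Both wrap rsync in an exclusive flock on the same
    # lock file so the two transfers can never overlap.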

    task "finalsync-manual" {
      driver = "docker"
      lifecycle {
        hook = "poststop"
      }
      config {
        memory_hard_limit = "2048"

        image = "ghcr.io/sstent/rsync"
        volumes = [
          "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
          "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
          "/mnt/Public/config/locks:/locks"
        ]

        args = ["flock", "-x", "/locks/${NOMAD_GROUP_NAME}_rsync.lock", "rsync", "-av", "/config/", "/configbackup/"]
      }
      resources {
        cpu    = 20 # MHz
        memory = 20 # MB
      }
    }

    task "db-sync" {
      driver = "docker"
      lifecycle {
        hook    = "poststart"
        sidecar = true
      }
      config {
        memory_hard_limit = "2048"

        image = "ghcr.io/sstent/rsync"
        volumes = [
          "/mnt/configs/${NOMAD_GROUP_NAME}:/config",
          "/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup",
        ]

        // args = ["/usr/local/bin/litestream", "replicate", "-config", "/local/litestream.yml"]
      }
      resources {
        cpu    = 20 # MHz
        memory = 20 # MB
      }
      template {
        data = <<EOH
dbs:
  - path: /data/gonic.db
    replicas:
      - path: /configbackup
EOH

        destination = "local/litestream.yml"
      }
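
      # With args commented out, this sidecar only renders the Litestream
      # config; restoring the replicate args would re-enable continuous
      # streaming of the SQLite database at /data/gonic.db into the
      # /configbackup file replica.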
    } #####

  }
}
}
}

nomad_backup/jfs-controller.hcl (new file, 37 lines)
@@ -0,0 +1,37 @@
job "jfs-controller" {
  datacenters = ["dc1"]
  type        = "system"

  group "controller" {
    task "plugin" {
      driver = "docker"

      config {
        image = "juicedata/juicefs-csi-driver:v0.31.1"

        args = [
          "--endpoint=unix://csi/csi.sock",
          "--logtostderr",
          "--nodeid=test",
          "--v=5",
          "--by-process=true"
        ]

        privileged = true
      }

      csi_plugin {
        id        = "juicefs0"
        type      = "controller"
        mount_dir = "/csi"
      }
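
      # The same driver image runs in two roles: this controller instance
      # handles volume lifecycle calls, while jfs-node.hcl runs the per-host
      # node plugin that performs the actual mounts. Our reading (from the
      # JuiceFS CSI docs, not this repo) is that --by-process=true selects
      # process mount mode, which JuiceFS recommends for Nomad because there
      # is no Kubernetes around to launch mount pods.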

      resources {
        cpu    = 100
        memory = 512
      }

      env {
        POD_NAME = "csi-controller"
      }
    }
  }
}

nomad_backup/jfs-node.hcl (new file, 38 lines)
@@ -0,0 +1,38 @@
job "jfs-node" {
  datacenters = ["dc1"]
  type        = "system"

  group "nodes" {
    task "juicefs-plugin" {
      driver = "docker"

      config {
        image             = "juicedata/juicefs-csi-driver:v0.31.1"
        memory_hard_limit = 2048
        args = [
          "--endpoint=unix://csi/csi.sock",
          "--logtostderr",
          "--v=5",
          "--nodeid=test",
          "--by-process=true",
        ]

        privileged = true
      }

      csi_plugin {
        id        = "juicefs0"
        type      = "node"
        mount_dir = "/csi"
      }
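
      # Both this node plugin and the jfs-controller job must register as
      # healthy before Nomad will schedule allocations that claim juicefs0
      # volumes; `nomad plugin status juicefs0` shows the controller and
      # node counts once they come up.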

      resources {
        cpu    = 100
        memory = 100
      }

      env {
        POD_NAME   = "csi-node"
        AWS_REGION = "garage"
      }
    }
  }
}