commit 11ebd95956a661b5c936bc99a29bccf24baf6288
Author: sstent
Date:   Sat Feb 7 16:34:17 2026 -0800

    first

diff --git a/cleanup.nomad b/cleanup.nomad
new file mode 100644
index 0000000..99e3fda
--- /dev/null
+++ b/cleanup.nomad
@@ -0,0 +1,25 @@
+job "cleanup-litefs-all" {
+  datacenters = ["dc1"]
+  type        = "batch"
+
+  group "cleanup" {
+    count = 2
+    constraint {
+      attribute = "${attr.unique.hostname}"
+      operator  = "regexp"
+      value     = "odroid7|odroid8"
+    }
+
+    task "clean" {
+      driver = "docker"
+      config {
+        image = "busybox"
+        volumes = [
+          "/mnt/configs/navidrome_litefs:/mnt/data"
+        ]
+        command = "sh"
+        args    = ["-c", "rm -rf /mnt/data/* && echo \"Cleaned $(hostname)\""]
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/juicefs-controller.nomad b/juicefs-controller.nomad
new file mode 100644
index 0000000..00b4f71
--- /dev/null
+++ b/juicefs-controller.nomad
@@ -0,0 +1,38 @@
+job "jfs-controller" {
+  datacenters = ["dc1"]
+  type        = "system"
+
+  group "controller" {
+    task "plugin" {
+      driver = "docker"
+
+      config {
+        image = "juicedata/juicefs-csi-driver:v0.31.1"
+
+        args = [
+          "--endpoint=unix://csi/csi.sock",
+          "--logtostderr",
+          "--nodeid=test",
+          "--v=5",
+          "--by-process=true"
+        ]
+
+        privileged = true
+      }
+
+      csi_plugin {
+        id        = "juicefs0"
+        type      = "controller"
+        mount_dir = "/csi"
+      }
+      resources {
+        cpu    = 100
+        memory = 512
+      }
+      env {
+        POD_NAME      = "csi-controller"
+        POD_NAMESPACE = "default"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/juicefs-node.nomad b/juicefs-node.nomad
new file mode 100644
index 0000000..83c84b8
--- /dev/null
+++ b/juicefs-node.nomad
@@ -0,0 +1,63 @@
+job "jfs-node" {
+  datacenters = ["dc1"]
+  type        = "system"
+
+  group "nodes" {
+    network {
+      port "metrics" {
+        static = 9567
+        to     = 8080
+      }
+    }
+
+    service {
+      name = "juicefs-metrics"
+      port = "metrics"
+      tags = ["prometheus"]
+      check {
+        type     = "http"
+        path     = "/metrics"
+        interval = "10s"
+        timeout  = "2s"
+      }
+    }
+
+    task "juicefs-plugin" {
+      driver = "docker"
+
+      config {
+        image             = "juicedata/juicefs-csi-driver:v0.31.1"
+        memory_hard_limit = 2048
+        ports             = ["metrics"]
+        args = [
+          "--endpoint=unix://csi/csi.sock",
+          "--logtostderr",
+          "--v=5",
+          "--nodeid=${node.unique.name}",
+          "--by-process=true",
+        ]
+
+        privileged = true
+      }
+
+      csi_plugin {
+        id             = "juicefs0"
+        type           = "node"
+        mount_dir      = "/csi"
+        health_timeout = "3m"
+      }
+      resources {
+        cpu    = 100
+        memory = 100
+      }
+      env {
+        POD_NAME      = "csi-node"
+        POD_NAMESPACE = "default"
+        # Aggregates metrics from children onto the 8080 port
+        JFS_METRICS = "0.0.0.0:8080"
+        # Ensures mounts run as background processes managed by the driver
+        JFS_MOUNT_MODE = "process"
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/navidrome-juice.nomad b/navidrome-juice.nomad
new file mode 100644
index 0000000..3baa4a7
--- /dev/null
+++ b/navidrome-juice.nomad
@@ -0,0 +1,92 @@
+job "navidrome" {
+  datacenters = ["dc1"]
+  type        = "service"
+
+  constraint {
+    attribute = "${attr.unique.hostname}"
+    operator  = "regexp"
+    value     = "odroid.*"
+  }
+
+  group "navidrome" {
+    count = 1
+
+    volume "navidrome-csi-vol" {
+      type            = "csi"
+      source          = "navidrome-volume" # This must match the 'id' in your volume registration
+      attachment_mode = "file-system"
+      access_mode     = "multi-node-multi-writer"
+    }
+
+
+
+    # Main Navidrome task
+    task "navidrome" {
+      driver = "docker"
+
+      volume_mount {
+        volume      = "navidrome-csi-vol" # Matches the name in the volume block above
+        destination = "/data"             # Where it appears inside the container
+        read_only   = false
+      }
+
+
+      config {
+        image             = "ghcr.io/navidrome/navidrome:latest"
+        memory_hard_limit = "2048"
+        ports             = ["http"]
+        volumes = [
+          "/mnt/Public/Downloads/Clean_Music:/music/CleanMusic:ro",
+          "/mnt/Public/Downloads/news/slskd/downloads:/music/slskd:ro",
+          "/mnt/Public/Downloads/incoming_music:/music/incomingmusic:ro"
+        ]
+      }
+      env {
+        ND_DATAFOLDER  = "/data"
+        ND_CACHEFOLDER = "/data/cache"
+        ND_CONFIGFILE  = "/data/navidrome.toml"
+        ND_DBPATH      = "/data/navidrome.db?_busy_timeout=30000&_journal_mode=DELETE&_foreign_keys=on&synchronous=NORMAL&cache=shared&nolock=1"
+        ND_SCANSCHEDULE = "32 8-20 * * *"
+        ND_LOGLEVEL     = "trace"
+        ND_REVERSEPROXYWHITELIST  = "0.0.0.0/0"
+        ND_REVERSEPROXYUSERHEADER = "X-Forwarded-User"
+        ND_SCANNER_GROUPALBUMRELEASES = "False"
+        ND_BACKUP_PATH     = "/data/backups"
+        ND_BACKUP_SCHEDULE = "0 0 * * *"
+        ND_BACKUP_COUNT    = "7"
+      }
+      resources {
+        cpu    = 100
+        memory = 128
+      }
+      service {
+        name = "navidrome"
+        tags = [
+          "navidrome",
+          "web",
+          "urlprefix-/navidrome",
+          "tools",
+          "traefik.http.routers.navidromelan.rule=Host(`navidrome.service.dc1.consul`)",
+          "traefik.http.routers.navidromewan.rule=Host(`m.fbleagh.duckdns.org`)",
+          "traefik.http.routers.navidromewan.middlewares=dex@consulcatalog",
+          "traefik.http.routers.navidromewan.tls=true",
+        ]
+        port = "http"
+        check {
+          type     = "tcp"
+          interval = "10s"
+          timeout  = "2s"
+        }
+      }
+    }
+
+
+    network {
+      port "http" {
+        static = 4533
+        to     = 4533
+      }
+    }
+  }
+}
+
diff --git a/navidrome-litefs.nomad b/navidrome-litefs.nomad
new file mode 100644
index 0000000..0ccbed0
--- /dev/null
+++ b/navidrome-litefs.nomad
@@ -0,0 +1,184 @@
+job "navidrome-litefs" {
+  datacenters = ["dc1"]
+  type        = "service"
+
+  # We pin to Linux because LiteFS requires FUSE
+  constraint {
+    attribute = "${attr.kernel.name}"
+    value     = "linux"
+  }
+
+  group "navidrome" {
+    count = 2
+
+    constraint {
+      distinct_hosts = true
+    }
+
+    network {
+      mode = "host"
+      port "http" {}
+    }
+
+    # --- Setup Task ---
+    task "setup" {
+      driver = "docker"
+      lifecycle {
+        hook    = "prestart"
+        sidecar = false
+      }
+      config {
+        image        = "busybox"
+        command      = "mkdir"
+        args         = ["-p", "/alloc/sqlite"]
+        network_mode = "host"
+      }
+    }
+
+    # --- LiteFS Task ---
+    task "litefs" {
+      driver = "docker"
+
+      config {
+        image        = "flyio/litefs:0.5"
+        privileged   = true # Needed for FUSE
+        ports        = ["http"]
+        network_mode = "host"
+
+        # 1. Bind mount for LiteFS internal data (chunks/WAL)
+        # 2. Bind mount for the config
+        # 3. Mount the shared alloc dir so we can mount FUSE on it
+        volumes = [
+          "/mnt/configs/navidrome_litefs:/var/lib/litefs",
+          "local/litefs.yml:/etc/litefs.yml"
+        ]
+
+        mounts = [
+          {
+            type   = "bind"
+            source = "../alloc/sqlite"
+            target = "/mnt/sqlite"
+            bind_options = {
+              propagation = "shared"
+            }
+          }
+        ]
+      }
+
+      # Create the config file
+      template {
+        left_delimiter  = "[["
+        right_delimiter = "]]"
+        data = <