{
"version": 4,
"terraform_version": "1.0.5",
"serial": 23,
"lineage": "1fae1a6a-296c-42b0-091e-3bef007d21ff",
"outputs": {},
"resources": [
{
"mode": "managed",
"type": "nomad_job",
"name": "Job",
"provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]",
"instances": [
{
"index_key": "nomad_jobs/enabled/consulbackup.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "consulbackup",
"jobspec": "job \"consulbackup\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n periodic {\n // Launch every 20 seconds\n cron = \"0 8 * * *\"\n time_zone = \"America/New_York\"\n\n // Do not allow overlapping runs.\n prohibit_overlap = true\n }\n group \"consulbackup\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"consulbackup\" {\n driver = \"raw_exec\"\n\n config {\n command = \"/bin/bash\"\n args = [\"-c\", \"/usr/local/bin/consul kv export \u003e /mnt/Public/config/consul_kv_backup.json\"]\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n \n } #task consulbackup\n } #group\n} #job\n",
"json": null,
"modify_index": "5127602",
"name": "consulbackup",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "consulbackup",
"task": [
{
"driver": "raw_exec",
"meta": {},
"name": "consulbackup",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "batch"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/dex.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"20a9eb63-8f7a-7018-825e-1e529d3b0746"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "auth",
"jobspec": "job \"auth\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"auth\" {\n count = 1\n\n task \"fwdauth\" {\n driver = \"docker\"\n\n config {\n // image = \"npawelek/traefik-forward-auth\"\n image = \"thomseddon/traefik-forward-auth:2-arm\"\n\n port_map {\n auth = 4181\n }\n\n volumes = [\n \"/etc/localtime:/etc/localtime:ro\",\n ]\n }\n\n env {\n PROVIDERS_GOOGLE_CLIENT_ID = \"807888907647-uog95jmiolsuh6ql1t8jm53l1jvuajck.apps.googleusercontent.com\"\n PROVIDERS_GOOGLE_CLIENT_SECRET = \"B8bDri5mFvGv-Ghzbt8fLj4W\"\n SECRET = \"ladskfdjmqwermnnbasfnmldas\"\n CONFIG = \"/local/config.ini\"\n LIFETIME = \"31536000\"\n WHITELIST = \"stuart.stent@gmail.com,stephen.bunt@gmail.com\"\n\n // AUTH_HOST = \"fwdauth.fbleagh.duckdns.org\"\n COOKIE_DOMAIN = \"fbleagh.duckdns.org\"\n }\n\n template {\n data = \"{{ key \\\"Dex\\\" }}\"\n destination = \"local/config.ini\"\n change_mode = \"restart\"\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n\n network {\n port \"auth\" {\n static = 4181\n }\n }\n }\n\n service {\n name = \"dex\"\n\n tags = [\n \"fwdauth\",\n \"web\",\n \"traefik.http.routers.dex.rule=Host(`fwdauth.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.dex.entrypoints=websecure\",\n \"traefik.http.routers.dex.tls=true\",\n \"traefik.http.routers.dex.tls.certresolver=myresolver\",\n \"traefik.http.middlewares.dex.forwardauth.address=http://dex.service.dc1.consul:4181\",\n \"traefik.http.middlewares.dex.forwardauth.trustForwardHeader=true\",\n \"traefik.http.middlewares.dex.forwardauth.authResponseHeaders=X-Forwarded-User\",\n \"traefik.http.routers.auth.middlewares=dex\",\n \"traefik.http.routers.traefik-forward-auth.middlewares=dex\",\n ]\n\n port = \"auth\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n } #end Dex\n }\n}\n",
"json": null,
"modify_index": "5077852",
"name": "auth",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "auth",
"task": [
{
"driver": "docker",
"meta": {},
"name": "fwdauth",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/fitbit.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "fitbit",
"jobspec": "job \"fitbit\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n periodic {\n // Launch every 20 seconds\n cron = \"0 8 * * *\"\n time_zone = \"America/New_York\"\n\n // Do not allow overlapping runs.\n prohibit_overlap = true\n }\n group \"fitbit\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"fitbit\" {\n driver = \"docker\"\n\n config {\n image = \"registry.service.dc1.consul:5000/fitbit:latest\"\n memory_hard_limit = 1024\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n } #task fitbit\n } #group\n} #job\n",
"json": null,
"modify_index": "5127613",
"name": "fitbit",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "fitbit",
"task": [
{
"driver": "docker",
"meta": {},
"name": "fitbit",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "batch"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/freshrss.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"b35ee750-39ba-21b6-61dc-0e4525f8b59b"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "freshrss",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"freshrss\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid1\"\n weight = 80\n }\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"freshrss\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"25 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n\n task \"freshrss\" {\n driver = \"docker\"\n\n config {\n image = \"linuxserver/freshrss\"\n\n ports = [\"freshrss\"]\n\n volumes = [\n \"/mnt/configs/freshrss:/config\",\n ]\n }\n\n service {\n name = \"${TASKGROUP}\"\n\n tags = [\n \"freshrss\",\n \"tools\",\n \"traefik.http.routers.freshlan.rule=Host(`freshrss.service.dc1.consul`)\",\n \"traefik.http.routers.freshwan.rule=Host(`fbleagh-rss.ignorelist.com`)\",\n \"traefik.http.routers.freshwan.rule=Host(`rss.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.freshwan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.freshwan.tls=true\",\n \"traefik.http.routers.freshwan.tls.options=default\",\n \"traefik.http.routers.freshwan.tls.certresolver=myresolver\",\n ]\n\n // \"traefik.http.middlewares.fresh_auth.basicauth.users=fbleagh:$2y$05$ug6n0zTAXE1A7yP4EOZJn.eO5dMhAGVvOH.FJgimbWH5/QQPPGez6\",\n // \"traefik.http.routers.freshwan.middlewares=fresh_auth\",\n // \"traefik.http.routers.freshwan.tls=true\",\n // \"traefik.http.routers.freshwan.tls.certresolver=myresolver\",\n\n port = \"freshrss\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 
1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 128 # 500 MHz\n memory = 128 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"freshrss\" {\n to = 80\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127619",
"name": "freshrss",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "freshrss",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "freshrss",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/garminexport.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "garminexport",
"jobspec": "job \"garminexport\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n\n periodic {\n // Launch every 20 seconds\n cron = \"1 7 * * *\"\n time_zone = \"America/New_York\"\n // Do not allow overlapping runs.\n prohibit_overlap = true\n }\n\n\n group \"garminexport\" {\n count = 1\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n \n\n task \"garminexport\" {\n driver = \"docker\"\n config {\n image = \"registry.service.dc1.consul:5000/garminexport:latest\"\n args = [\"--backup-dir=/activities\",\"--password=Farscape5\",\"fbleagh\",\"--log-level\",\"INFO\",\"--ignore-errors\",\"--max-retries=1\",\n \"-f\",\"tcx\",\n \"-f\",\"fit\",\n \"-f\",\"gpx\",\n \"-f\",\"json_summary\",\n \"-f\",\"json_details\"\n ]\n volumes = [\n \"/mnt/Public/Garmin:/activities\",\n ]\n memory_hard_limit = 2048\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n } #task garminexport\n\n\n } #group\n } #job\n",
"json": null,
"modify_index": "5127603",
"name": "garminexport",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "garminexport",
"task": [
{
"driver": "docker",
"meta": {},
"name": "garminexport",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "batch"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/gitea.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"5392fdc4-aa90-e752-9d84-5b6ed4ea783b"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "gitea",
"jobspec": "job \"gitea\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n group \"gitea\" {\n count = 1\n\n restart {\n attempts = 3\n delay = \"20s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"gitea.db\"\n PUID=1000\n PGID=0\n DBCHMOD=\"0777\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/gitea.db\n replicas:\n - path: /configbackup\n EOH\n\n destination = \"local/litestream.yml\"\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n // memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n \n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/gitea.db\n replicas:\n - path: /configbackup\n EOH\n\n destination = \"local/litestream.yml\"\n }\n\n } #####\n\n\n task \"gitea\" {\n driver = \"docker\"\n\n config {\n image = \"registry.service.dc1.consul:5000/gitea\"\n memory_hard_limit = 2048\n volumes = [\n \"/mnt/configs/gitea:/data\",\n \"/etc/timezone:/etc/timezone:ro\",\n \"/etc/localtime:/etc/localtime:ro\"\n ]\n\n ports = [\"http\",\"ssh\"]\n }\n\nenv {\n USER_UID=1000\n USER_GID=1000\n // GITEA_CUSTOM=\"/data/custom/\"\n}\n\n\n service {\n name = \"gitea\"\n\n tags = [\n \"metrics\",\n ]\n\n port = \"http\"\n\n // check {\n // type = \"http\"\n // path = \"/metrics/\"\n // interval = \"10s\"\n // timeout = \"2s\"\n // }\n }\n\n resources {\n cpu = 50\n memory = 10\n }\n } #task\n\n network {\n port 
\"http\" {\n to = 3000\n }\n port \"ssh\" {\n static = 2222\n to = 2222\n }\n }\n } #group\n}\n",
"json": null,
"modify_index": "5127618",
"name": "gitea",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "gitea",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "db-sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "gitea",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/gocast.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"3e08b211-d5c3-1299-2668-ffef65f24db8",
"c305ef2b-d28b-0139-dfa1-929dcfcf3b87",
"c6e3f41d-89e9-033b-2277-6ad6f32c314e",
"0515ae06-926c-8280-4b1a-ae6a39f402f0"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "gocast",
"jobspec": "job \"gocast\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"10s\"\n max_parallel = 3\n }\n group \"gocast\" {\n count = 1\n\n restart {\n attempts = 99\n interval = \"1h\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"gocast\" {\n driver = \"docker\"\n\n config {\n // image = \"homeassistant/armhf-homeassistant:latest\"\n image = \"ghcr.io/sstent/gocast\"\n ports = [\"http\"]\n network_mode = \"host\"\n cap_add = [\"NET_ADMIN\"]\n args = [\"-config=/local/config.yaml\", \"-logtostderr\"]\n }\n env {\n CONSUL_NODE = \"${node.unique.name}\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"homeassistant\"]\n port = \"http\"\n\n }\n\n template {\n data = \u003c\u003cEOH\nagent:\n # http server listen addr\n listen_addr: :9080\n # Interval for health check\n monitor_interval: 10s\n # Time to flush out inactive apps\n cleanup_timer: 15m\n # Consul api addr for dynamic discovery\n consul_addr: http://127.0.0.1:8500/v1\n # interval to query consul for app discovery\n consul_query_interval: 5m\n\nbgp:\n local_as: 64512 \n remote_as: 64512 \n # override the peer IP to use instead of auto discovering\n communities:\n - asn:nnnn\n - asn:nnnn\n origin: igp\n\n# optional list of apps to register on startup\napps:\n - name: app1\n vip: 192.168.1.240/32\n vip_config:\n # additional per VIP BGP communities\n bgp_communities: [ aaaa:bbbb ]\n monitor: port:tcp:6000\n EOH\n\n destination = \"local/config.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\n{\n \"data\": {\n \"users\": [\n {\n \"password\": \"JDJiJDEyJGRjLjQ5WkNkbzg3Q0xmcHkzM2EyTC5RWWJrRDlRTFBRdUhjanNoeS81SUlPN0taYzFUcEVh\",\n \"username\": \"fbleagh\"\n }\n ]\n },\n \"key\": \"auth_provider.homeassistant\",\n \"version\": 1\n}\n EOH\n\n destination = \"local/auth_provider.homeassistant\"\n }\n\n resources {\n cpu = 256 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 9080\n to = 9080\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127614",
"name": "gocast",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "gocast",
"task": [
{
"driver": "docker",
"meta": {},
"name": "gocast",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/gotify.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"cb2fc110-c177-ed6e-806e-f0c30cc186da",
"5c69d598-6e7d-9385-5607-b1666b021a3b"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "gotify",
"jobspec": "job \"gotify\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"gotify\" {\n count = 1\n restart {\n attempts = 5\n interval = \"4m\"\n delay = \"30s\"\n mode = \"fail\"\n }\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"gotify.db\"\n PUID=1000\n PGID=0\n DBCHMOD=\"0777\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n\n\n\n\n\n\n\n task \"gotify\" {\n driver = \"docker\"\n\n config {\n image = \"gotify/server-arm7:2.0\"\n ports = [\"http\", \"https\"]\n\n volumes = [\"/mnt/configs/gotify:/app/data\", ]\n }\n env {\n// GOTIFY_SERVER_PORT=80\n// GOTIFY_SERVER_KEEPALIVEPERIODSECONDS=0\n// GOTIFY_SERVER_LISTENADDR=\"0.0.0.0\"\n// GOTIFY_SERVER_SSL_ENABLED=false\n// GOTIFY_SERVER_SSL_REDIRECTTOHTTPS=true\n// #GOTIFY_SERVER_SSL_LISTENADDR=\n// GOTIFY_SERVER_SSL_PORT=443\n// #GOTIFY_SERVER_SSL_CERTFILE=\n// #GOTIFY_SERVER_SSL_CERTKEY=\n// GOTIFY_SERVER_SSL_LETSENCRYPT_ENABLED=false\n// GOTIFY_SERVER_SSL_LETSENCRYPT_ACCEPTTOS=false\n// GOTIFY_SERVER_SSL_LETSENCRYPT_CACHE=\"certs\"\n// # lists are a little weird but do-able (:\n// # GOTIFY_SERVER_SSL_LETSENCRYPT_HOSTS=- mydomain.tld\\n- myotherdomain.tld\n// GOTIFY_SERVER_RESPONSEHEADERS=\"X-Custom-Header: \\\"custom value\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWORIGINS=\"- \\\".+.example.com\\\"\\n- \\\"otherdomain.com\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWMETHODS=\"- \\\"GET\\\"\\n- \\\"POST\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWHEADERS=\"- \\\"Authorization\\\"\\n- \\\"content-type\\\"\"\n// # GOTIFY_SERVER_STREAM_ALLOWEDORIGINS=\"- \\\".+.example.com\\\"\\n- \\\"otherdomain.com\\\"\"\n// GOTIFY_SERVER_STREAM_PINGPERIODSECONDS=45\nGOTIFY_DATABASE_DIALECT=\"sqlite3\"\nGOTIFY_DATABASE_CONNECTION=\"data/gotify.db\"\n// GOTIFY_DEFAULTUSER_NAME=\"admin\"\n// GOTIFY_DEFAULTUSER_PASS=\"admin\"\n// GOTIFY_PASSSTRENGTH=10\n// GOTIFY_UPLOADEDIMAGESDIR=\"data/images\"\n// GOTIFY_PLUGINSDIR=\"data/plugins\"\n }\n\n 
resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n }\n service {\n name = \"gotify\"\n tags = [\"gotify\", \"web\", \"urlprefix-/gotify\", \"backend\",\n \"traefik.http.routers.gotifylan.rule=Host(`gotify.service.dc1.consul`)\",\n \"traefik.http.routers.gotifywan.rule=Host(`gotify.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.gotifywan.tls=true\",\n \"traefik.http.routers.gotifywan.tls.certresolver=myresolver\",\n ]\n\n\n port = \"http\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n network {\n port \"http\" {\n to = 80\n }\n\n port \"https\" {\n to = 443\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5136663",
"name": "gotify",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "gotify",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "gotify",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/grafana.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"0ffa7f59-0532-4b89-055d-a7af237108b0"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "grafana",
"jobspec": "job \"grafana\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"60s\"\n max_parallel = 1\n }\n group \"grafana\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"grafana\" {\n driver = \"docker\"\n\n config {\n // image = \"fg2it/grafana-armhf:v5.1.4\"\n image = \"grafana/grafana:latest\"\n ports = [\"http\"]\n\n logging {\n type = \"json-file\"\n }\n\n memory_hard_limit = 2048\n }\n\n env {\n disable_login_form = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n GF_PATHS_PROVISIONING = \"/local/\"\n GF_AUTH_ANONYMOUS_ENABLED = true\n GF_AUTH_ANONYMOUS_ORG_NAME = \"Main Org.\"\n GF_AUTH_ANONYMOUS_ORG_ROLE = \"Admin\"\n }\n\n template {\n data = \u003c\u003cEOH\napiVersion: 1\n\ndatasources:\n - name: Prometheus\n type: prometheus\n url: http://prometheus.service.dc1.consul:9090\n isDefault:\n EOH\n\n destination = \"local/datasources/prometheus.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\napiVersion: 1\n\nproviders:\n- name: dashboards\n type: file\n updateIntervalSeconds: 30\n options:\n path: /local/dashboard_definitons\n foldersFromFilesStructure: true\n EOH\n\n destination = \"local/dashboards/dashboards.yaml\"\n }\n\n template {\n data = \"{{ key \\\"grafana_dashboards/nomad\\\" }}\"\n destination = \"local/dashboard_definitons/nomad.json\"\n }\n template {\n data = \"{{ key \\\"grafana_dashboards/thermals\\\" }}\"\n destination = \"local/dashboard_definitons/thermals.json\"\n }\n template {\n data = \"{{ key \\\"grafana_dashboards/NomadMem\\\" }}\"\n destination = \"local/dashboard_definitons/NomadMem.json\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"backend\"]\n port = \"http\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"60s\"\n timeout = \"120s\"\n path = \"/login\"\n port = \"http\"\n\n check_restart {\n limit = 3\n grace = \"120s\"\n ignore_warnings = false\n }\n }\n }\n\n resources {\n cpu = 128 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 3100\n to = 3000\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5134431",
"name": "grafana",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "grafana",
"task": [
{
"driver": "docker",
"meta": {},
"name": "grafana",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/hass.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"67e6bb90-faa6-2e28-0965-46fb1ab5ba29",
"49081f7d-7656-34fe-a995-cff15355bd33",
"ecdf88e2-eb3d-a9ec-5900-40f790d7c68a",
"4a35e955-d271-80d8-6aaa-f8d266acc9f7"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "hass",
"jobspec": "job \"hass\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"10s\"\n max_parallel = 3\n }\n group \"hass\" {\n count = 1\n\n restart {\n attempts = 99\n interval = \"1h\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"hass\" {\n driver = \"docker\"\n\n config {\n // image = \"homeassistant/armhf-homeassistant:latest\"\n image = \"homeassistant/armhf-homeassistant:0.71.0\"\n ports = [\"http\"]\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n \"local/configuration.yaml:/config/configuration.yaml\",\n ]\n\n // \"local/auth_provider.homeassistant:/config/.storage/auth_provider.homeassistant\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"homeassistant\", \"tools\",\"logo=home-assistant\"]\n port = \"http\"\n\n // check {\n // name = \"hass-alive\"\n // type = \"http\"\n // type = \"script\"\n // command = \"curl -sS http://localhost:8123\"\n // interval = \"120s\"\n // timeout = \"15s\"\n // path = \"/api/\"\n // port = \"http\"\n\n // check_restart {\n // limit = 10\n // grace = \"90s\"\n // ignore_warnings = false\n // }\n // }\n }\n\n template {\n data = \u003c\u003cEOH\n homeassistant:\n name: Our_House\n latitude: 40.7654\n longitude: -73.8175\n elevation: 26\n unit_system: metric\n time_zone: America/New_York\n frontend:\n config:\n http:\n sun:\n automation:\n - alias: LightsAtSunset\n trigger:\n platform: sun\n event: sunset\n action:\n service: switch.turn_on\n entity_id: switch.lampdrawers\n wemo:\n static:\n - 192.168.99.200 # StuBed\n - 192.168.99.201 # LampDrawers\n - 192.168.99.202 # BigLamp\n - 192.168.99.203 # TallTree\n - 192.168.99.204 # ShortTree\n - 192.168.99.205 # TallTree\n\n EOH\n\n destination = \"local/configuration.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\n{\n \"data\": {\n \"users\": [\n {\n \"password\": \"JDJiJDEyJGRjLjQ5WkNkbzg3Q0xmcHkzM2EyTC5RWWJrRDlRTFBRdUhjanNoeS81SUlPN0taYzFUcEVh\",\n \"username\": \"fbleagh\"\n }\n ]\n },\n \"key\": \"auth_provider.homeassistant\",\n \"version\": 1\n}\n EOH\n\n destination = \"local/auth_provider.homeassistant\"\n }\n\n resources {\n cpu = 256 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 8123\n to = 8123\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127617",
"name": "hass",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "hass",
"task": [
{
"driver": "docker",
"meta": {},
"name": "hass",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/lidarr.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"a29f8509-df3c-529f-fb7f-5f129defb800"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "lidarr",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"lidarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"lidarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"lidarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// // memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// 
\"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n\n\n task \"lidarr\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/lidarr:develop\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n\n ports = [\"http\"]\n\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/music:/music\",\n \"/mnt/Archive/seeding:/archive\",\n \"/mnt/configs/lidarr:/config\",\n ]\n }\n\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"lidarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 256 # 500 MHz\n memory = 200 # 128MB\n\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 8686\n to = 8686\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127609",
"name": "lidarr",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "lidarr",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "lidarr",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/lufi.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"36b8cbd7-2278-2a63-2183-1cc4349b7e3d"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "lufi",
"jobspec": "job \"lufi\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n group \"lufi\" {\n count = 1\n task \"lufi\" {\n driver = \"docker\"\n config {\n image = \"ghcr.io/sstent/lufi:latest\"\n memory_hard_limit = \"2048\"\n ports = [\"http\"]\n volumes = [\n \"/mnt/configs/lufi:/config\",\n \"/mnt/Archive/files:/files\",\n ]\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 256 # 128 MB\n\n }\n service {\n name = \"lufi\"\n tags = [ \n \"lufi\",\n \"web\",\n \"urlprefix-/lufi\",\n \"backend\",\n \"traefik.http.routers.lufilan.rule=Host(`lufi.service.dc1.consul`)\",\n \"traefik.http.routers.lufiwan.rule=Host(`fd.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.lufiwan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.lufiwan.tls=true\",\n \"traefik.http.routers.lufiwan.tls.certresolver=myresolver\"\n ]\n // \"traefik.http.middlewares.lufi_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm\",\n // \"traefik.http.middlewares.lufi_auth.basicauth.users=ShieldWrinklySquirePulseEcosphereCoroner:$2y$05$ogdqaYki8pEqVan4S7YvHOTGdB7W3j5Qv3sSKnij1Xy8yuRJ5gbpi\",\n // \"traefik.http.routers.lufiwan.middlewares=lufi_auth\",\n port = \"http\"\n meta {\n ALLOC = \"${NOMAD_ALLOC_ID}\"\n }\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n network {\n port \"http\" {\n to = 8081\n }\n }\n\n\n }\n}",
"json": null,
"modify_index": "5125816",
"name": "lufi",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "lufi",
"task": [
{
"driver": "docker",
"meta": {},
"name": "lufi",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/navidrome.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"294f016d-95f3-1c3c-0569-c60c75198a10"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "navidrome",
"jobspec": "job \"navidrome\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"navidrome\" {\n count = 1\n\n task \"navidrome\" {\n driver = \"docker\"\n\n config {\n // image = \"registry.service.dc1.consul:5000/navidrome:latest\"\n image = \"deluan/navidrome:latest\"\n memory_hard_limit = \"2048\"\n\n ports = [\"http\"]\n\n volumes = [\n \"/mnt/configs/navidrome:/data\",\n \"/mnt/Public/Downloads/music:/music:ro\",\n ]\n }\n\n env {\n ND_SCANINTERVAL = \"1m\"\n ND_LOGLEVEL = \"debug\"\n ND_REVERSEPROXYWHITELIST = \"0.0.0.0/0\"\n ND_REVERSEPROXYUSERHEADER = \"X-Forwarded-User\"\n\n //ND_BASEURL = \"\"\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 128 # 128 MB\n }\n\n service {\n name = \"navidrome\"\n\n tags = [\n \"navidrome\",\n \"web\",\n \"urlprefix-/navidrome\",\n \"tools\",\n \"traefik.http.routers.navidromelan.rule=Host(`navidrome.service.dc1.consul`)\",\n \"traefik.http.routers.navidromewan.rule=Host(`fbleagh-m.ignorelist.com`)\",\n \"traefik.http.routers.navidromewan.rule=Host(`m.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.navidromewan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.navidromewan.tls=true\",\n \"traefik.http.routers.navidromewan.tls.certresolver=myresolver\",\n ]\n\n // \"traefik.http.routers.navidromewan.middlewares=navidrome_auth\",\n\n // \"traefik.http.middlewares.navidrome_auth.basicauth.users=ShieldWrinklySquirePulseEcosphereCoroner:$2y$05$ogdqaYki8pEqVan4S7YvHOTGdB7W3j5Qv3sSKnij1Xy8yuRJ5gbpi\",\n // \"traefik.http.middlewares.navidrome_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm\",\n port = \"http\"\n meta {\n ALLOC = \"${NOMAD_ALLOC_ID}\"\n }\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n task \"init-manual\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync-manual\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n \n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/navidrome.db\n replicas:\n - 
path: /configbackup\n EOH\n\n destination = \"local/litestream.yml\"\n }\n\n } #####\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"32 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n network {\n port \"http\" {\n static = 4533\n to = 4533\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127637",
"name": "navidrome",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "navidrome",
"task": [
{
"driver": "docker",
"meta": {},
"name": "navidrome",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "init-manual",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync-manual",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "db-sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/nginx.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"abd3342c-8ced-8097-73e0-61783bb899b2"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "nginx",
"jobspec": "job \"nginx\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"nginx\" {\n count = 1\n\n task \"nginx\" {\n driver = \"docker\"\n\n config {\n image = \"nginx\"\n ports = [\"http\", \"https\"]\n\n volumes = [\n \"custom/default.conf:/etc/nginx/conf.d/default.conf\",\n ]\n }\n\n template {\n data = \u003c\u003cEOH\n server {\n listen 8080;\n server_name nginx.service.dc1.consul;\n location / {\n root /local/data;\n }\n }\n EOH\n\n destination = \"custom/default.conf\"\n }\n artifact {\n source = \"git::https://github.com/WalkxCode/dashboard-icons.git\"\n destination = \"local/data/repo\"\n}\n # consul kv put features/demo 'Consul Rocks!'\n // \u003ca href=\"http://{{.Name}}.service.dc1.consul:{{ .Port }}\" target=\"_blank\"\u003e{{.Name}}service.dc1.consul:{{ .Port }}\u003c/a\u003e\n // \u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e{{.Name}}service.dc1.consul\u003c/a\u003e\n\n template {\n data = \u003c\u003cEOH\n \u003cp\u003eLocal Services\u003c/p\u003e\n \u003ctable style=\"width:100%\"\u003e\n \u003ctr\u003e\n \u003cth\u003eService Name\u003c/th\u003e\n \u003cth\u003eTraefik\u003c/th\u003e\n \u003cth\u003eDirect\u003c/th\u003e\n \u003c/tr\u003e\n {{range services}}\n {{range $i, $s :=service .Name}}\n {{ if eq $i 0 }}\n\n\u003ctr\u003e\n \u003ctd\u003e{{.Name}}\u003c/td\u003e\n \u003ctd\u003e\u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e{{.Name}}.service.dc1.consul\u003c/a\u003e\u003c/td\u003e\n \u003ctd\u003e\u003ca href=\"http://{{.Name}}.service.dc1.consul:{{.Port}}\" target=\"_blank\"\u003e{{.Name}}.service.dc1.consul:{{.Port}}\u003c/a\u003e\u003c/td\u003e\n \u003c/tr\u003e\n {{end}}\n {{end}}\n {{end}}\n \u003c/table\u003e\n \u003cp\u003e\n Node Environment Information: \u003cbr /\u003e\n node_id: {{ env \"node.unique.id\" }} \u003cbr/\u003e\n datacenter: {{ env \"NOMAD_DC\" }}\n \u003c/p\u003e\n EOH\n\n destination = \"local/data/index.html\"\n change_mode = \"noop\"\n }\n template {\n data = \u003c\u003cEOH\n\u003c!DOCTYPE html\u003e\n\u003chtml\u003e\n\u003ctitle\u003eDashboard\u003c/title\u003e\n\u003cmeta name=\"viewport\" content=\"width=device-width, initial-scale=1\"\u003e\n#\u003cmeta http-equiv=\"refresh\" content=\"120\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://www.w3schools.com/w3css/4/w3.css\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://www.w3schools.com/lib/w3-theme-blue-grey.css\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\"\u003e\n\u003cbody class=\"w3-theme-l4\"\u003e\n\n\u003cdiv style=\"min-width:60px\"\u003e\n\u003cdiv class=\"w3-bar w3-large w3-theme-d4\"\u003e\n \u003ca href=\"#\" class=\"w3-bar-item w3-button\"\u003e\u003ci class=\"fa fa-bars\"\u003e\u003c/i\u003e\u003c/a\u003e\n \u003cspan class=\"w3-bar-item\"\u003eDashboard\u003c/span\u003e\n \u003ca href=\"#\" class=\"w3-bar-item w3-button w3-right\"\u003e\u003ci class=\"fa fa-search\"\u003e\u003c/i\u003e\u003c/a\u003e\n\u003c/div\u003e\n\u003cdiv class=\"w3-container w3-content w3-padding\"\u003e\n \u003cp class=\"w3-opacity\"\u003e\u003cb\u003eTools\u003c/b\u003e\u003c/p\u003e \n\n\n {{range services}}\n {{range $i, $s :=service .Name}}\n {{ if eq $i 0 }}\n {{if .Tags | contains \"tools\"}}\n\u003cdiv class=\"w3-col\"\u003e\n\u003cdiv class=\"w3-row\"\u003e\n \u003cdiv class=\"w3-white w3-center w3-cell-padding 
w3-card w3-mobile w3-margin\" style=\"width=80%\"\u003e\n \u003cp class=\"w3-text-blue\"\u003e\u003cb\u003e{{.Name}}\u003c/b\u003e\u003c/p\u003e\n {{$iconname := .Name}}\n {{range $tag, $services := service .Name | byTag }}{{if $tag | regexMatch \"logo=*\"}}{{$iconname = index ($tag | split \"=\") 1}}{{end}}{{end}}\n \u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e\u003cimg src=\"./repo/png/{{$iconname}}.png\" alt=\"HTML tutorial\" class=\"w3-margin\" style=\"width:84px;height:84px;\"\u003e\u003c/a\u003e\n \u003c/div\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n {{end}}{{end}}{{end}}{{end}}\n\n\n\u003c/div\u003e\n \u003cp\u003e\n Node Environment Information: \u003cbr /\u003e\n node_id: {{ env \"node.unique.id\" }} \u003cbr/\u003e\n datacenter: {{ env \"NOMAD_DC\" }}\n \u003c/p\u003e\n\u003c/div\u003e\n\n\u003c/body\u003e\n\u003c/html\u003e\n EOH\n\n destination = \"local/data/newindex.html\"\n change_mode = \"noop\"\n }\n resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n }\n service {\n name = \"nginx\"\n tags = [\"nginx\", \"web\", \"urlprefix-/nginx\", \"backend\"]\n port = \"http\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n network {\n port \"http\" {\n to = 8080\n }\n\n port \"https\" {\n to = 443\n }\n }\n }\n}\n",
          "json": null,
          "modify_index": "5127627",
          "name": "nginx",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "nginx",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "nginx",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/nodeexporter.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "067db5bf-cb0a-226d-50c4-ba12083713b6",
            "ac791771-d035-0af4-bef8-da3c31d07cfd",
            "c82b219a-0ebc-6043-cb17-06a0b8db9b5b",
            "fa620815-4478-b593-a980-7b028e776bde"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "node-exporter",
"jobspec": "job \"node-exporter\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n group \"node-exporter\" {\n count = 1\n\n restart {\n attempts = 3\n delay = \"20s\"\n mode = \"delay\"\n }\n\n task \"node-exporter\" {\n driver = \"docker\"\n\n config {\n #image = \"anzevalher/node-exporter\"\n image = \"prom/node-exporter\"\n\n force_pull = true\n\n args = [\"--collector.cpu\",\"--collector.filesystem\",\"--collector.meminfo\",\"--collector.thermal_zone\",\"--collector.disable-defaults\",]\n volumes = [\n \"/proc:/host/proc\",\n \"/sys:/host/sys\",\n \"/:/rootfs\",\n ]\n\n port_map {\n http = 9100\n }\n }\n\n service {\n name = \"node-exporter\"\n\n tags = [\n \"metrics\",\n ]\n\n port = \"http\"\n\n check {\n type = \"http\"\n path = \"/metrics/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 20\n memory = 20\n\n network {\n port \"http\" {\n static = \"9100\"\n }\n }\n }\n }\n }\n}\n",
          "json": null,
          "modify_index": "5136279",
          "name": "node-exporter",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "node-exporter",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "node-exporter",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "system"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/ovpn-openpyn.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "ovpn-client",
"jobspec": "job \"ovpn-client\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"90s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n group \"ovpn-client\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n // task \"init-trigger\" {\n // driver = \"docker\"\n\n // lifecycle {\n // hook = \"prestart\"\n // }\n\n // config {\n // image = \"curlimages/curl\"\n // args = [\"--request\", \"PUT\", \"--data\", \"${NOMAD_ALLOC_ID}\", \"http://${attr.unique.network.ip-address}:8500/v1/kv/${NOMAD_GROUP_NAME}\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n // }\n\n task \"nzbget\" {\n driver = \"docker\"\n\n config {\n image = \"linuxserver/nzbget\"\n network_mode = \"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n\n // ports = [\"nzbget\"]\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/incoming:/incomplete-downloads\",\n \"/mnt/configs/ovpn-client/nzbget:/config\",\n ]\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 200 # 128MB\n }\n } #task nzbget\n\n // task \"saznzb\" {\n // driver = \"docker\"\n\n // config {\n // // image = \"linuxserver/sabnzbd:2.3.8-0ubuntu1jcfp118.04-ls13\"\n // image = \"linuxserver/sabnzbd\"\n // network_mode = \"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n\n // volumes = [\n // \"/mnt/Public/Downloads/news:/downloads\",\n // \"/mnt/Public/incoming:/incomplete-downloads\",\n // \"/mnt/configs/ovpn-client/saznzb:/config\",\n // ]\n // }\n\n // env {\n // TZ = \"EST5EDT\"\n // PUID = 1000\n // PGID = 1000\n // }\n\n // resources {\n // cpu = 100 # 500 MHz\n // memory = 512 # 128MB\n // }\n // }\n\n task \"ovpn-client\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = true\n }\n\n config {\n // image = \"registry.service.dc1.consul:5000/openpyn:latest\"\n image = \"qmcgaw/gluetun\"\n // memory_hard_limit = \"200\"\n\n ports = [\n \"shadowsocks\",\n \"nzbget\",\n \"http_proxy\",\n \"http_admin\",\n \"socks\"\n ]\n\n cap_add = [\n \"NET_ADMIN\",\n \"NET_BIND_SERVICE\",\n ]\n\n #network_mode = \"host\"\n #network_mode = \"vpn\"\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n ]\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n\n env {\n // VPNFLAGS = \"us --max-load 70 --top-servers 10 --pings 5\"\n // VPNFLAGS = \"nl --max-load 70 --top-servers 10 --pings 5\"\n VPNSP = \"nordvpn\"\n OPENVPN_USER = \"stuart.stent@gmail.com\"\n OPENVPN_PASSWORD = \"drRp4mQBVU6awAFOk9lO\"\n REGION = \"Netherlands\"\n HTTPPROXY = \"on\"\n SHADOWSOCKS_PASSWORD = \"farscape5\"\n SHADOWSOCKS = \"off\"\n }\n\n service {\n name = \"${TASKGROUP}-admin\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"http_admin\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"shadowsocks\"\n }\n\n service {\n name = \"nzbget\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"nzbget\"\n }\n\n resources {\n cpu = 100 # 500 MHz\n memory = 100 # 128MB\n }\n } #task ovpn\n task \"dante\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/sstent/dante:latest\"\n network_mode = 
\"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n memory_hard_limit = 256\n\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n service {\n name = \"socks-nord\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"socks\"\n \n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/usr/bin/curl\"\n args = [\"--proxy\", \"socks5://localhost:1080\",\"http://neverssl.com/\"]\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n }\n }\n resources {\n cpu = 64 # 500 MHz\n memory = 128 # 128MB\n }\n }\n ######################################################################\n ######################################################################\n ######################################################################\n\n task \"init\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n task \"finalsync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststop\"\n }\n\n config {\n // image = \"pebalk/rsync\"\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"/config/\", \"/configbackup/\"]\n }\n\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end finalsync task\n task \"sync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n\n config {\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n args = [\"client\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n env {\n CRON_TASK_1 = \"*/20 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n } #end sync task\n\n ######################################################################\n ######################################################################\n ######################################################################\n\n network {\n port \"shadowsocks\" {\n static = \"8338\"\n to = \"8388\"\n }\n\n port \"http_proxy\" {\n static = \"8888\"\n to = \"8888\"\n }\n port \"http_admin\" {\n static = \"8000\"\n to = \"8000\"\n }\n\n port \"socks\" {\n static = \"1080\"\n to = \"1080\"\n }\n\n port \"nzbget\" {\n static = \"6789\"\n to = \"6789\"\n }\n }\n }\n}\n",
          "json": null,
          "modify_index": "5136713",
          "name": "ovpn-client",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "ovpn-client",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "nzbget",
                  "volume_mounts": null
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "ovpn-client",
                  "volume_mounts": null
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "dante",
                  "volume_mounts": null
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": null
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": null
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": null
                }
              ],
              "volumes": null
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/prometheus.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "4d382d41-9038-f25a-37da-3a32515775fb",
            "be48b4d9-8e41-b723-57b6-564bc522c2ec"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "prometheus",
"jobspec": "job \"prometheus\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n # constraint {\n # \tattribute = \"${attr.unique.hostname}\"\n # operator = \"regexp\"\n # value = \"pi.*\"\n # }\n\n update {\n # Stagger updates every 60 seconds\n stagger = \"60s\"\n max_parallel = 1\n }\n group \"prometheus\" {\n count = 2\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"prometheus\" {\n driver = \"docker\"\n\n config {\n image = \"prom/prometheus\"\nargs = [\"--web.enable-admin-api\",\"--config.file=/etc/prometheus/prometheus.yml\"]\n \n\n ports = [\"http\"]\n memory_hard_limit = \"512\"\n volumes = [\n \"local/prometheus.yml:/etc/prometheus/prometheus.yml\",\n \"local/alerts.yml:/etc/prometheus/alerts.yml\",\n ]\n\n // \"/mnt/gv0/prom_data:/data\",\n // \"/etc/localtime:/etc/localtime\",\n\n logging {\n type = \"json-file\"\n }\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"prometheus\"]\n port = \"http\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"10s\"\n timeout = \"120s\"\n path = \"/status\"\n port = \"http\"\n }\n }\n\n template {\n change_mode = \"signal\"\n change_signal = \"SIGHUP\"\n data = \"{{ key \\\"prometheus_yml\\\" }}\"\n destination = \"local/prometheus.yml\"\n }\n\n template {\n change_mode = \"restart\"\n \n destination = \"local/alerts.yml\"\n data = \"{{ key \\\"alerts\\\" }}\"\n }\n resources {\n cpu = 500 # 500 MHz\n memory = 48 # 128MB\n }\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n kill_timeout = \"10s\"\n } ## end prometheus\n\n task \"alertmanager\" {\n driver = \"docker\"\n\n config {\n image = \"prom/alertmanager\"\n\n ports = [\"alerthttp\"]\n\n // volumes = [\n // \"local/alertmanager.yml:/etc/prometheus/prometheus.yml\",\n // ]\n args = [\"--config.file=/local/alertmanager.yml\"]\n // \"/mnt/gv0/prom_data:/data\",\n // \"/etc/localtime:/etc/localtime\",\n\n logging {\n type = \"json-file\"\n }\n }\n\n service {\n name = \"${TASK}\"\n tags = [\"global\", \"prometheus\"]\n port = \"alerthttp\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"60s\"\n timeout = \"120s\"\n path = \"/status\"\n port = \"http\"\n }\n }\n\n template {\n data = \u003c\u003cEOH\nglobal:\nreceivers:\n - name: default-receiver\n - name: gotify-webhook\n webhook_configs:\n - url: \"http://prometheus.service.dc1.consul:9094/gotify_webhook\"\nroute:\n group_wait: 10s\n group_interval: 5m\n receiver: gotify-webhook\n repeat_interval: 3h\n\nEOH\n\n destination = \"local/alertmanager.yml\"\n }\n resources {\n cpu = 128 # 500 MHz\n memory = 48 # 128MB\n }\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n kill_timeout = \"10s\"\n } ## end alertmanager\n\n task \"gotifybridge\" {\n driver = \"docker\"\n config {\n image = \"ghcr.io/druggeri/alertmanager_gotify_bridge\"\n ports = [\"gotifybridge\"]\n args = [\"--debug\"]\n }\n env {\n GOTIFY_TOKEN=\"Ajdp.V.rvrXGoJd\"\n GOTIFY_ENDPOINT=\"http://gotify.service.dc1.consul/message\"\n }\n}\n\n network {\n port \"http\" {\n static = 9090\n to = 9090\n }\n port \"alerthttp\" {\n static = 9093\n to = 9093\n }\n port \"gotifybridge\" {\n static = 9094\n to = 8080\n }\n }\n }\n}\n",
          "json": null,
          "modify_index": "5135086",
          "name": "prometheus",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 2,
              "meta": {},
              "name": "prometheus",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "prometheus",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "alertmanager",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "gotifybridge",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/prowlarr.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "376eaf67-5433-a5da-3e28-f4002777a80a"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "prowlarr",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"prowlarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n // affinity {\n // attribute = \"${attr.unique.hostname}\"\n // value = \"odroid2\"\n // weight = 100\n // }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"prowlarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n // args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n\n }\n env {\n DB_NAME=\"prowlarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/prowlarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n// task \"db-init\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"prestart\"\n// sidecar = false\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n \n// args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 20 # 128MB\n// }\n\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --exclude='*.db-litestream' --exclude='generations'/configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// 
sidecar = true\n// }\n// config {\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n// ]\n \n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/prowlarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n // } #####\n\n task \"prowlarr\" {\n // driver = \"raw_exec\" // config { // command = \"docker\" // args = [\"run\", // \"--rm\", // \"--name=prowlarr\", // \"-e\", \"PUID=1000\", // \"-e\", \"PGID=1000\", // \"-e\", \"TZ=EST5EDT\", // \"-p\", \"8989:8989\", // \"-v\", \"/mnt/syncthing/prowlarrv3:/config\", // \"-v\", \"/mnt/Public/Downloads/tv:/tv\", // \"-v\", \"/mnt/Public/Downloads/news:/downloads\", // \"--cpuset-cpus\",\"4-7\", // \"linuxserver/prowlarr:preview\"] // }\n\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/prowlarr:develop\"\n\n ports = [\"http\"]\n\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/configs/prowlarr:/config\",\n ]\n\n // \"/mnt/gv0/prowlarr:/config\",\n force_pull = false\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"prowlarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n \n // export API=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\n // curl -f \"http://localhost:9696/api/v1/system/status?apikey=$API\"\n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n } #End main task\n\n network {\n // mbits = 100\n\n port \"http\" {\n static = 9696\n to = 9696\n }\n }\n }\n}\n",
          "json": null,
          "modify_index": "5127612",
          "name": "prowlarr",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "prowlarr",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "prowlarr",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/qbittorrent.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "54da0c06-5ad3-d72e-8091-4147f8bd58cc"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "qbittorrent",
"jobspec": "job \"qbittorrent\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid*\"\n // weight = 100\n }\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n update {\n # Stagger updates every 60 seconds\n stagger = \"90s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n\n group \"qbittorrent\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"qbittorrent-vpn\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = true\n }\n\n config {\n // image = \"registry.service.dc1.consul:5000/openpyn:latest\"\n image = \"qmcgaw/gluetun\"\n\n memory_hard_limit = \"1024\"\n ports = [\n \"shadowsocks\",\n \"http_proxy\",\n \"http_admin\",\n \"qbittorrent_51413\",\n \"qbittorrent_80\",\n \"socks\"\n ]\n\n cap_add = [\n \"NET_ADMIN\",\n \"NET_BIND_SERVICE\",\n ]\n\n #network_mode = \"host\"\n #network_mode = \"vpn\"\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n \"/mnt/syncthing/mullvad:/vpn\",\n\n ]\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n\n env {\n VPNSP = \"mullvad\"\n VPN_TYPE = \"wireguard\"\n COUNTRY = \"Canada\"\n CITY = \"Toronto\"\n FIREWALL_VPN_INPUT_PORTS = \"56987\"\n WIREGUARD_PRIVATE_KEY = \"iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8=\"\n WIREGUARD_ADDRESS = \"10.64.141.217/32\"\n HTTPPROXY = \"on\"\n SHADOWSOCKS_PASSWORD = \"farscape5\"\n SHADOWSOCKS = \"on\"\n }\n\n service {\n name = \"qbittorrent\"\n tags = [\"global\", \"tools\"]\n port = \"qbittorrent_80\"\n }\n\n service {\n name = \"${TASKGROUP}-admin\"\n tags = [\"global\"]\n port = \"http_admin\"\n }\n\n\n resources {\n cpu = 100 # 500 MHz\n memory = 250 # 128MB\n }\n } #task ovpn\n\n task \"dante\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/sstent/dante-wg:latest\"\n network_mode = \"container:qbittorrent-vpn-${NOMAD_ALLOC_ID}\"\n memory_hard_limit = 256\n\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n service {\n name = \"socks-mullvad\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"socks\"\n \n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/usr/bin/curl\"\n args = [\"--proxy\", \"socks5://localhost:1080\",\"http://neverssl.com/\"]\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n }\n }\n resources {\n cpu = 64 # 500 MHz\n memory = 128 # 128MB\n }\n } #end dante\n\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n 
\"/mnt/Public/config/locks:/locks\"\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\", \"--delete\", \"/config/\", \"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"25 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --delete /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n\n task \"qbittorrent\" {\n driver = \"docker\"\n\n // \"/mnt/Public/config/qbittorrent:/config\",\n\n config {\n image = \"linuxserver/qbittorrent\"\n network_mode = \"container:qbittorrent-vpn-${NOMAD_ALLOC_ID}\"\n memory_hard_limit = 2048\n\n #cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news/qbittorrent:/downloads/\",\n \"/mnt/configs/qbittorrent:/config\",\n \"/mnt/Archive/seeding:/archiveseeding\",\n ]\n // ulimit {\n // nproc = \"4242\"\n // nofile = \"2048:4096\"\n // }\n sysctl = {\n \"net.core.somaxconn\" = \"4096\"\n }\n }\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n resources {\n cpu = 128 # 500 MHz\n memory = 256 # 128MB\n }\n } #task resilio-sync\n\n network {\n // mode = \"bridge\"\n port \"qbittorrent_51413\" {\n static = 6881\n to = 6881\n }\n\n port \"shadowsocks\" {\n static = \"8388\"\n to = \"8388\"\n }\n port \"socks\" {\n static = \"1080\"\n to = \"1080\"\n }\n\n port \"http_proxy\" {\n static = \"8888\"\n to = \"8888\"\n }\n port \"http_admin\" {\n static = \"8000\"\n to = \"8000\"\n }\n\n\n port \"qbittorrent_80\" {\n static = 8080\n to = 8080\n }\n\n }\n } #group \"au-proxy\"\n}\n\n// }\n\n",
          "json": null,
          "modify_index": "5081280",
          "name": "qbittorrent",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "qbittorrent",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "qbittorrent-vpn",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "dante",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "qbittorrent",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/radarr.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "fa5a91f0-505e-5258-b924-917b15eb6ee7"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "radarr",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"radarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid2\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"radarr\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"radarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/radarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// memory_hard_limit = \"256\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// 
}\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/radarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n task \"radarr\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/radarr:nightly\"\n image = \"linuxserver/radarr:latest\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/movies:/movies\",\n \"/mnt/configs/radarr:/config\",\n \"/mnt/Archive/Movies:/archive\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"radarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n\n // network {\n // port \"radarr\" {\n // static = \"7878\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } #Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 7878\n to = 7878\n }\n }\n } #Group\n}\n",
          "json": null,
          "modify_index": "5127623",
          "name": "radarr",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "radarr",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "radarr",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/readarr.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "b145f13c-b7e7-44f5-5cfc-7ce67c1cdb26"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "readarr",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"readarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"readarr\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"3m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync:latest\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"readarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// - name: minio_replica\n// url: s3://litestream/readarr\n// endpoint: http://minio.service.dc1.consul:9000\n// access-key-id: minio\n// secret-access-key: minio123\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n// task \"db-init\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"prestart\"\n// sidecar = false\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n \n// args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 20 # 128MB\n// }\n\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n\n// }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync:v0.3.5\"\n// volumes = [\n// 
\"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n// ]\n \n// args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// env{\n// SLEEPTIME=\"1m\"\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// - name: minio_replica\n// url: s3://litestream/readarr\n// endpoint: http://minio.service.dc1.consul:9000\n// access-key-id: minio\n// secret-access-key: minio123\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n task \"readarr\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/readarr:nightly\"\n image = \"hotio/readarr:nightly-0.1.0.963\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/books2:/books\",\n \"/mnt/Public/Downloads/PublicCalibreLibrary:/PublicCalibreLibrary\",\n \"/mnt/configs/readarr:/config\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"readarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n DEBUG = \"yes\"\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n\n // network {\n // port \"readarr\" {\n // static = \"8787\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } #Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 8787\n to = 8787\n }\n }\n } #Group\n}\n",
          "json": null,
          "modify_index": "5128092",
          "name": "readarr",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "readarr",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "readarr",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/readarrAudio.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "9cc929bc-f746-752a-422f-444d0eb773c7"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "readarrAudio",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"readarrAudio\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"readarrAudio\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"3m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"readarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"readarrAudio\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/readarr:nightly\"\n image = \"hotio/readarr:nightly-0.1.0.963\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/audiobooks:/books\",\n \"/mnt/configs/readarrAudio:/config\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"readarr\", \"tools\",\"logo=readarr\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = 
\"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n DEBUG = \"yes\"\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 160 # 128MB\n\n // network {\n // port \"readarr\" {\n // static = \"8787\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } #Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 8787\n to = 8787\n }\n }\n } #Group\n}\n",
          "json": null,
          "modify_index": "5127608",
          "name": "readarrAudio",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "readarrAudio",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "init",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "finalsync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "sync",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "readarrAudio",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/registry.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "3c34c9f3-0b41-43f4-73f4-eeb93eb833cd"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "registry",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"registry\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"registry\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n\n\n task \"registryui\" {\n driver = \"docker\"\n config {\n image = \"joxit/docker-registry-ui:latest\"\n ports = [\"httpgui\"]\n }\n\n env{\n REGISTRY_TITLE=\"My Private Docker Registry\"\n REGISTRY_URL=\"https://registry.service.dc1.consul:5000\"\n NGINX_PROXY_PASS_URL=\"https://registry.service.dc1.consul:5000\"\n SINGLE_REGISTRY=\"true\"\n }\n\n\n\n \n\n }\n\n task \"registry\" {\n driver = \"docker\"\n\n config {\n image = \"registry:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"/mnt/Public/config/registry/certs:/data/certs\",\n \"/mnt/Public/config/registry/registry:/var/lib/registry\"\n ]\n }\n\n\n env {\n REGISTRY_HTTP_TLS_KEY = \"/data/certs/domain.key\"\n REGISTRY_HTTP_TLS_CERTIFICATE = \"/data/certs/domain.crt\"\n REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin=\"['*']\"\n // REGISTRY_PROXY_REMOTEURL = \"https://registry-1.docker.io\"\n // REGISTRY_PROXY_USERNAME = \"fbleagh\"\n // REGISTRY_PROXY_PASSWORD = \"aad31d60-4340-4adc-a21d-fac4942c2fb8\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"registry\"]\n port = \"httpgui\"\n\n check {\n name = \"alive\"\n type = \"http\"\n port = \"http\"\n protocol = \"https\"\n tls_skip_verify = true\n interval = \"10s\"\n timeout = \"10s\"\n path = \"/\"\n }\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n task \"registry-cache\" {\n driver = \"docker\"\n\n config {\n image = \"registry\"\n\n ports = [\"httpcache\"]\n\n volumes = [\n \"/mnt/Public/config/registry-cache:/data\",\n ]\n }\n\n env {\n REGISTRY_HTTP_TLS_KEY = \"/data/certs/domain.key\"\n REGISTRY_HTTP_TLS_CERTIFICATE = \"/data/certs/domain.crt\"\n\n // REGISTRY_HTTP_TLS_LETSENCRYPT_CACHEFILE = \"/data/certs/letsencrypt.crt\"\n\n // REGISTRY_HTTP_TLS_LETSENCRYPT_EMAIL = \"stuart.stent@gmail.com\"\n // REGISTRY_HTTP_TLS_LETSENCRYPT_HOSTS = \"[regcache.fbleagh.duckdns.org]\"\n REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY = \"/data/registry\"\n REGISTRY_PROXY_REMOTEURL = \"https://registry-1.docker.io\"\n REGISTRY_PROXY_USERNAME = \"fbleagh\"\n REGISTRY_PROXY_PASSWORD = \"aad31d60-4340-4adc-a21d-fac4942c2fb8\"\n }\n\n service {\n name = \"${TASKGROUP}-cache\"\n tags = [\"registry\"]\n port = \"httpcache\"\n\n check {\n name = \"alive\"\n type = \"http\"\n port = \"http\"\n protocol = \"https\"\n tls_skip_verify = true\n interval = \"10s\"\n timeout = \"10s\"\n path = \"/\"\n }\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 5000\n to = 5000\n }\n\n port \"httpcache\" {\n static = 5001\n to = 5000\n }\n port \"httpgui\" {\n to = 80\n }\n }\n }\n}\n\n// openssl req \\\n// -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \\\n// -addext \"subjectAltName = DNS:http://registry-cache.service.dc1.consul\" \\\n// -x509 -days 365 -out certs/domain.crt\n//\n///\n// Copy to \n\n",
          "json": null,
          "modify_index": "4988360",
          "name": "registry",
          "namespace": "default",
          "policy_override": null,
          "purge_on_destroy": null,
          "region": "global",
          "task_groups": [
            {
              "count": 1,
              "meta": {},
              "name": "registry",
              "task": [
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "registryui",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "registry",
                  "volume_mounts": []
                },
                {
                  "driver": "docker",
                  "meta": {},
                  "name": "registry-cache",
                  "volume_mounts": []
                }
              ],
              "volumes": []
            }
          ],
          "timeouts": null,
          "type": "service"
        },
        "sensitive_attributes": [],
        "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
      },
      {
        "index_key": "nomad_jobs/enabled/sonarr.nomad",
        "schema_version": 0,
        "attributes": {
          "allocation_ids": [
            "75e37c6b-57a8-bcdb-f8de-02f4d9a3f420"
          ],
          "datacenters": [
            "dc1"
          ],
          "deployment_id": null,
          "deployment_status": null,
          "deregister_on_destroy": true,
          "deregister_on_id_change": true,
          "detach": true,
          "hcl2": [],
          "id": "sonarr",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"sonarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid1\"\n weight = 100\n }\n\n // constraint {\n // \tattribute = \"${attr.unique.hostname}\"\n // operator = \"=\"\n // value = \"sync\"\n // }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n group \"sonarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"sonarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/sonarr.db\n replicas:\n - path: /configbackup\n snapshot-interval: 2h\n validation-interval: 6h\n EOH\n\n destination = \"local/litestream.yml\"\n }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"256\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n 
\"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/sonarr.db\n replicas:\n - path: /configbackup\n snapshot-interval: 2h\n validation-interval: 6h\n EOH\n\n destination = \"local/litestream.yml\"\n }\n\n } #####\n\n task \"sonarr\" {\n // driver = \"raw_exec\" // config { // command = \"docker\" // args = [\"run\", // \"--rm\", // \"--name=sonarr\", // \"-e\", \"PUID=1000\", // \"-e\", \"PGID=1000\", // \"-e\", \"TZ=EST5EDT\", // \"-p\", \"8989:8989\", // \"-v\", \"/mnt/syncthing/sonarrv3:/config\", // \"-v\", \"/mnt/Public/Downloads/tv:/tv\", // \"-v\", \"/mnt/Public/Downloads/news:/downloads\", // \"--cpuset-cpus\",\"4-7\", // \"linuxserver/sonarr:preview\"] // }\n\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/sonarr:latest\"\n\n ports = [\"http\"]\n\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/tv:/tv\",\n \"/mnt/configs/sonarr:/config\",\n ]\n\n // \"/mnt/gv0/sonarr:/config\",\n force_pull = false\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"sonarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n } #End main task\n\n network {\n // mbits = 100\n\n port \"http\" {\n static = 8989\n to = 8989\n }\n }\n }\n}\n",
"json": null,
"modify_index": "5127611",
"name": "sonarr",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "sonarr",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "db-sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sonarr",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/traefik.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"7d42d2ef-739f-7516-f597-fe0c7764f1df",
"1c08c688-2d3d-f768-3073-0ca1b738d7a6",
"64bf5dd8-ab7f-bb69-3c06-308783324a9f",
"d6ffd29e-e301-fdb0-7173-2a6810295b35"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "traefik",
"jobspec": "job \"traefik\" {\n datacenters = [\"dc1\"]\n type = \"system\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.cpu.arch}\"\n value = \"arm\"\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n\n group \"traefik\" {\n count = 1\n\n restart {\n attempts = 6\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:2.3\"\n network_mode = \"host\"\n\n args = [\n \"--api.dashboard\",\n \"--providers.consulcatalog.defaultRule=Host(`{{ .Name }}.service.dc1.consul`)\",\n \"--providers.consulcatalog.endpoint.address=${attr.unique.network.ip-address}:8500\",\n \"--providers.consulcatalog.exposedbydefault=true\",\n \"--metrics=true\",\n \"--metrics.prometheus=true\",\n \"--metrics.prometheus.entryPoint=web\",\n \"--entryPoints.web.address=:80\",\n \"--entryPoints.websecure.address=:443\",\n \"--entryPoints.openvpn.address=:1194/udp\",\n \"--certificatesresolvers.myresolver.acme.email=stuart.stent@gmail.com\",\n \"--certificatesresolvers.myresolver.acme.storage=/acmecert/acme.json\",\n \"--certificatesresolvers.myresolver.acme.tlschallenge=true\",\n \"--accesslog=true\",\n ]\n\n volumes = [\n \"/var/run/docker.sock:/var/run/docker.sock\",\n \"/mnt/gv0/letsencrypt:/acmecert/\",\n ]\n\n dns_servers = [\"192.168.1.1\", \"192.168.1.250\"]\n ports = [\"traefik\", \"traefikhttps\"]\n\n memory_hard_limit = 128\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n template {\n data = \u003c\u003cEOH\ndebug = true\n[log]\n level = \"DEBUG\"\n\n[metrics]\n [metrics.prometheus]\n addRoutersLabels = true\n addServicesLabels = true\n\n[api]\ndashboard = true\ninsecure = false\n\n[providers.consulCatalog]\nexposedByDefault = true\ndefaultRule='Host(`{{ .routerName }}.mydomain.com`)'\n [providers.consulCatalog.endpoint]\n address = \"{{env \"attr.unique.network.ip-address\"}}:8500\"\n\nEOH\n\n destination = \"local/file.yml\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n\n tags = [\n \"global\",\n \"traefik\",\n ]\n\n port = \"traefik\"\n }\n\n service {\n name = \"${TASKGROUP}-ui\"\n\n tags = [\n \"global\",\n \"traefik\",\n \"traefik.http.routers.dashboard.rule=Host(`traefik-ui.service.dc1.consul`)\",\n \"traefik.http.routers.dashboard.service=api@internal\",\n ]\n\n port = \"traefik\"\n } #end service\n\n resources {\n cpu = 256 # 500 MHz\n memory = 10 # 128MB\n } #end resources\n } #end task\n\n network {\n port \"traefik\" {\n static = 80\n to = 80\n }\n\n // port \"traefikui\" {\n // static = 8840\n // to = 8080\n // }\n\n port \"traefikhttps\" {\n static = 443\n to = 443\n }\n }\n } # end group\n} # end job\n",
"json": null,
"modify_index": "5076422",
"name": "traefik",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "traefik",
"task": [
{
"driver": "docker",
"meta": {},
"name": "traefik",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "system"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/unifi.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"8962f8be-4cae-9b3a-2b6c-2f62af0e800d"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "unifi",
"jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"unifi\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n affinity {\n attribute = \"${attr.unique.hostname}\"\n operator = \"=\"\n value = \"odroid2\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"unifi\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n // docker create \\\n // --name unifi \\\n // -v \u003c/path/to/appdata\u003e:/config \\\n // -v \u003cpath/to/tvseries\u003e:/tv \\\n // -v \u003cpath/to/downloadclient-downloads\u003e:/downloads \\\n // lsioarmhf/unifi\n task \"init\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n\n task \"finalsync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststop\"\n }\n\n config {\n // image = \"pebalk/rsync\"\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"/config/\", \"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end finalsync task\n\n task \"sync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n\n config {\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n args = [\"client\"]\n }\n\n env {\n CRON_TASK_1 = \"*/10 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync task\n\n task \"unifi\" {\n driver = \"docker\"\n\n config {\n image = \"linuxserver/unifi-controller:LTS\"\n network_mode = \"host\"\n memory_hard_limit = 2048\n ports = [\"unifi_8080\", \"unifi_8081\", \"unifi_8443\", \"unifi_8843\", \"unifi_8880\", \"unifi_6789\", \"unifi_udp_3478\", \"unifi_udp_10001\", \"unifi_udp_1900\"]\n\n volumes = [\n \"/data/unifi:/config\",\n ]\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"unifi\"]\n port = \"unifi_8443\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 128 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n } #task\n\n network {\n port \"unifi_8080\" {\n static = \"8080\"\n to = \"8080\"\n }\n\n port \"unifi_8081\" {\n static = \"8081\"\n to = \"8081\"\n }\n\n port \"unifi_8443\" {\n static = \"8443\"\n to = \"8443\"\n }\n\n port \"unifi_8843\" {\n static = \"8843\"\n to = \"8843\"\n }\n\n port \"unifi_8880\" {\n static = \"8880\"\n to = \"8880\"\n }\n\n port \"unifi_6789\" {\n static = \"6789\"\n to = \"6789\"\n }\n\n port \"unifi_udp_3478\" {\n static = \"3478\"\n to = \"3478\"\n }\n\n port \"unifi_udp_1900\" {\n static = \"1900\"\n to = \"1900\"\n }\n\n port \"unifi_udp_10001\" {\n static = \"10001\"\n to = \"10001\"\n }\n }\n } #group\n}\n",
"json": null,
"modify_index": "5127615",
"name": "unifi",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "unifi",
"task": [
{
"driver": "docker",
"meta": {},
"name": "init",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "finalsync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "sync",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "unifi",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
},
{
"index_key": "nomad_jobs/enabled/wireguard.nomad",
"schema_version": 0,
"attributes": {
"allocation_ids": [
"35e39061-8a70-e271-bcca-ef94673256bc"
],
"datacenters": [
"dc1"
],
"deployment_id": null,
"deployment_status": null,
"deregister_on_destroy": true,
"deregister_on_id_change": true,
"detach": true,
"hcl2": [],
"id": "wireguard",
"jobspec": "job \"wireguard\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid*\"\n }\n group \"wireguard\" {\n count = 1\n\n task \"portfwd\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"registry.service.dc1.consul:5000/portfwd\"\n }\n env {\n IP_ADDR = \"${attr.unique.network.ip-address}\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n\n\n task \"wireguard\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/wireguard\"\n ports = [\"vpn\"]\n volumes = [\n \"/mnt/Public/config/wireguard:/config\",\n \"/lib/modules:/lib/modules\"\n ]\n cap_add = [\"NET_ADMIN\",\"SYS_MODULE\"]\n sysctl = {\n \"net.ipv4.conf.all.src_valid_mark\"=\"1\"\n }\n\n }\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n SERVERURL=\"wireguard.fbleagh.duckdns.org\"\n SERVERPORT=51820 \n PEERS=\"StuPhone,SurfaceGo,Surface\" \n PEERDNS=\"${attr.unique.network.ip-address},192.168.1.1,1.1.1.1\"\n // INTERNAL_SUBNET= \"192.168.1.0\" \n ALLOWEDIPS=\"0.0.0.0/0\"\n }\n service {\n name = \"${TASKGROUP}\"\n port = \"vpn\"\n }\n\n resources {\n cpu = 50\n memory = 100\n }\n }\n\n network {\n port \"vpn\" {\n static = 51820\n to = 51820\n }\n }\n }\n}\n",
"json": null,
"modify_index": "4990566",
"name": "wireguard",
"namespace": "default",
"policy_override": null,
"purge_on_destroy": null,
"region": "global",
"task_groups": [
{
"count": 1,
"meta": {},
"name": "wireguard",
"task": [
{
"driver": "docker",
"meta": {},
"name": "portfwd",
"volume_mounts": []
},
{
"driver": "docker",
"meta": {},
"name": "wireguard",
"volume_mounts": []
}
],
"volumes": []
}
],
"timeouts": null,
"type": "service"
},
"sensitive_attributes": [],
"private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0="
}
]
}
]
}