diff --git a/consul_backup/Authelia b/consul_backup/Authelia new file mode 100644 index 0000000..f789878 --- /dev/null +++ b/consul_backup/Authelia @@ -0,0 +1,74 @@ +--- +############################################################### +# Authelia configuration # +############################################################### + +host: 0.0.0.0 +port: 9091 +# This secret can also be set using the env variables AUTHELIA_JWT_SECRET_FILE +jwt_secret: StifleUnifierRimmedHeadlampCornmealBobcat +default_redirection_url: https://fbleagh.duckdns.org +totp: + issuer: authelia.com +log_level: debug +# duo_api: +# hostname: api-123456789.example.com +# integration_key: ABCDEF +# # This secret can also be set using the env variables AUTHELIA_DUO_API_SECRET_KEY_FILE +# secret_key: 1234567890abcdefghifjkl + +authentication_backend: + file: + path: /config/users_database.yml + +access_control: + default_policy: deny + rules: + # Rules applied to everyone + - domain: "fbleagh.duckdns.org" + policy: one_factor + - domain: "m.fbleagh.duckdns.org" + policy: bypass + resources: + - "^/rest.*" + - domain: "rss.fbleagh.duckdns.org" + policy: bypass + resources: + - "^/api." + - domain: "*.fbleagh.duckdns.org" + policy: one_factor + + +session: + name: authelia_session + # This secret can also be set using the env variables AUTHELIA_SESSION_SECRET_FILE + secret: UpstreamClutterSlumPreplanCompactedBackspin + expiration: 3600 # 1 hour + inactivity: 300 # 5 minutes + domain: fbleagh.duckdns.org # Should match whatever your root protected domain is + + + +regulation: + max_retries: 3 + find_time: 120 + ban_time: 300 + +storage: + local: + path: /config/db.sqlite3 + +notifier: + disable_startup_check: false + filesystem: + filename: /config/notification.txt + +# notifier: +# smtp: +# username: test +# # This secret can also be set using the env variables AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE +# password: password +# host: mail.gmail.com +# port: 25 +# sender: admin@example.com +# ... 
\ No newline at end of file diff --git a/consul_backup/Dex b/consul_backup/Dex new file mode 100644 index 0000000..cced317 --- /dev/null +++ b/consul_backup/Dex @@ -0,0 +1,16 @@ +log-level = debug +whitelist = stuart.stent@gmail.com,stephen.bunt@gmail.com,Stephen.Bunt@gmail.com,inavinay@gmail.com,sdoltonz@gmail.com,andrewsayer@gmail.com + +rule.music.action = allow +rule.music.rule = Host(`m.fbleagh.duckdns.org`) && PathPrefix(`/rest/`) + +rule.musicdedyn.action = allow +rule.musicdedyn.rule = Host(`m.fbleagh.dedyn.io`) && PathPrefix(`/rest/`) + +rule.rss.action = allow +rule.rss.rule = Host(`rss.fbleagh.duckdns.org`) && PathPrefix(`/api/`) +rule.rss.whitelist = stuart.stent@gmail.com +rule.rssdedyn.action = allow +rule.rssdedyn.rule = Host(`rss.fbleagh.dedyn.io`) && PathPrefix(`/api/`) +rule.rssdedyn.whitelist = stuart.stent@gmail.com + diff --git a/consul_backup/MiniHass/config b/consul_backup/MiniHass/config new file mode 100644 index 0000000..2567e78 --- /dev/null +++ b/consul_backup/MiniHass/config @@ -0,0 +1 @@ +{"tplink_ip": "192.168.4.52", "tv_ip": "192.168.4.51", "tv_mac": "c0:d7:aa:1d:a6:7e"} \ No newline at end of file diff --git a/consul_backup/MiniHass/tv_credentials/192_168_4_51 b/consul_backup/MiniHass/tv_credentials/192_168_4_51 new file mode 100644 index 0000000..ed35f38 --- /dev/null +++ b/consul_backup/MiniHass/tv_credentials/192_168_4_51 @@ -0,0 +1 @@ +08408673506401e6f0546171c15cd181 \ No newline at end of file diff --git a/consul_backup/MinioAuth b/consul_backup/MinioAuth new file mode 100644 index 0000000..0ecb6cd --- /dev/null +++ b/consul_backup/MinioAuth @@ -0,0 +1 @@ +minio:minio123 \ No newline at end of file diff --git a/consul_backup/Rclone_conf b/consul_backup/Rclone_conf new file mode 100644 index 0000000..f48245b --- /dev/null +++ b/consul_backup/Rclone_conf @@ -0,0 +1,10 @@ +[minio] +type = s3 +provider = Minio +env_auth = false +access_key_id = minio +secret_access_key = minio123 +endpoint = http://minio.service.dc1.consul:9000 
+location_constraint = +server_side_encryption = +acl = public-read-write \ No newline at end of file diff --git a/consul_backup/access_token b/consul_backup/access_token new file mode 100644 index 0000000..b09ab90 --- /dev/null +++ b/consul_backup/access_token @@ -0,0 +1 @@ +eyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiIyMkJRTTkiLCJzdWIiOiIyRk5WTkYiLCJpc3MiOiJGaXRiaXQiLCJ0eXAiOiJhY2Nlc3NfdG9rZW4iLCJzY29wZXMiOiJyc29jIHJlY2cgcnNldCByb3h5IHJudXQgcnBybyByc2xlIHJjZiByYWN0IHJyZXMgcmxvYyByd2VpIHJociBydGVtIiwiZXhwIjoxNzM3NDkzMjIzLCJpYXQiOjE3Mzc0NjQ0MjN9.xkeiYOsdjCnL1mj3iN0wRYxPusCKItCdwcY2c8bcw4k \ No newline at end of file diff --git a/consul_backup/accessexpires_in_token b/consul_backup/accessexpires_in_token new file mode 100644 index 0000000..b913d7e --- /dev/null +++ b/consul_backup/accessexpires_in_token @@ -0,0 +1 @@ +28800 \ No newline at end of file diff --git a/consul_backup/alerts b/consul_backup/alerts new file mode 100644 index 0000000..5f03796 --- /dev/null +++ b/consul_backup/alerts @@ -0,0 +1,66 @@ +--- +groups: +- name: hosts + rules: + - alert: NodeDown + expr: up{job="node-exporter"} == 0 + for: 5s + labels: + severity: critical + annotations: + summary: "{{$labels.node}} Down" + description: "{{$labels.node}} is down" + + - alert: Disk High + expr: nomad_client_host_disk_used_percent{disk="/dev/sda1"} > 80 + for: 5m + labels: + severity: critical + annotations: + summary: "{{$labels.host}} Disk Usage > 80%" + description: "{{$labels.host}} disk usage at {{ $value }}%" + + - alert: NomadJobFailed + expr: increase(nomad_nomad_job_summary_failed[1h]) > 0 + for: 60m + labels: + severity: warning + annotations: + summary: Nomad job failed (instance {{ $labels.instance }}) + description: "Nomad job failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: NomadJobLost + expr: increase(nomad_nomad_job_summary_lost[1h]) > 0 + for: 60m + labels: + severity: warning + annotations: + summary: Nomad job lost (instance {{ $labels.instance }}) + description: "Nomad job 
lost\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: NomadJobQueued + expr: nomad_nomad_job_summary_queued > 0 + for: 2m + labels: + severity: warning + annotations: + summary: Nomad job queued (instance {{ $labels.instance }}) + description: "Nomad job queued\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: NomadBlockedEvaluation + expr: nomad_nomad_blocked_evals_total_blocked > 0 + for: 10m + labels: + severity: warning + annotations: + summary: Nomad blocked evaluation (instance {{ $labels.instance }}) + description: "Nomad blocked evaluation\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + - alert: ConsulServiceHealthcheckFailed + expr: consul_catalog_service_node_healthy == 0 + for: 10m + labels: + severity: critical + annotations: + summary: Consul service healthcheck failed (instance {{ $labels.instance }}) + description: "Service: `{{ $labels.service_name }}` Healthcheck: `{{ $labels.service_id }}`\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" diff --git a/consul_backup/configs/qbitmanage b/consul_backup/configs/qbitmanage new file mode 100644 index 0000000..a409fe5 --- /dev/null +++ b/consul_backup/configs/qbitmanage @@ -0,0 +1,199 @@ +# This is an example configuration file that documents all the options. +# It will need to be modified for your specific use case. +# Please refer to the link below for more details on how to set up the configuration file +# https://github.com/StuffAnThings/qbit_manage/wiki/Config-Setup + +commands: + # The commands defined below will IGNORE any commands used in command line and docker env variables. 
+ dry_run: False + cross_seed: False + recheck: False + cat_update: False + tag_update: False + rem_unregistered: False + tag_tracker_error: False + rem_orphaned: True + tag_nohardlinks: False + skip_cleanup: False + +qbt: + # qBittorrent parameters + host: "qbittorrent.service.dc1.consul:8080" + #user: "username" + #pass: "password" + +settings: + force_auto_tmm: False # Will force qBittorrent to enable Automatic Torrent Management for each torrent. + tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker. + ignoreTags_OnUpdate: # When running tag-update function, it will update torrent tags for a given torrent even if the torrent has one or more of the tags defined here. + - noHL + - issue + - cross-seed +directory: + # Do not remove these + # Cross-seed var: # Output directory of cross-seed + # root_dir var: # Root downloads directory used to check for orphaned files, noHL, and RecycleBin. + # remote_dir var: # Path of docker host mapping of root_dir. + # Must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker + # recycle_bin var: # Path of the RecycleBin folder. Default location is set to remote_dir/.RecycleBin + # torrents_dir var: # Path of the your qbittorrent torrents directory. Required for `save_torrents` attribute in recyclebin + + cross_seed: "/downloads/cross_seed/" + root_dir: "/downloads/" + recycle_bin: "/downloads/.RecycleBin/" + #torrents_dir: "/qbittorrent/data/BT_backup" + orphaned_dir: "/downloads/orphaned_data" + +cat: + # Category & Path Parameters + # : # Path of your save directory. + movies: "/data/torrents/Movies" + tv: "/data/torrents/TV" + +cat_change: + # This moves all the torrents from one category to another category. This executes on --cat-update + # WARNING: if the paths are different and Default Torrent Management Mode is set to automatic the files could be moved !!! 
+ # : + Radarr-HD.cross-seed: movies-hd + Radarr-UHD.cross-seed: movies-uhd + movies-hd.cross-seed: movies-hd + movies-uhd.cross-seed: movies-uhd + + +tracker: + # Tag Parameters + # : # This is the keyword in the tracker url + # Set tag name. Can be a list of tags or a single tag + # tag: + # Will set the torrent Maximum share ratio until torrent is stopped from seeding/uploading. -2 means the global limit should be used, -1 means no limit. + # max_ratio: 5.0 + # Will set the torrent Maximum seeding time (min) until torrent is stopped from seeding. -2 means the global limit should be used, -1 means no limit. + # max_seeding_time: 129600 + # Will limit the upload speed KiB/s (KiloBytes/second) (-1 sets the limit to infinity) + # limit_upload_speed: 150 + # Set this to the notifiarr react name. This is used to add indexer reactions to the notifications sent by Notifiarr + # notifiarr: + animebytes.tv: + tag: AnimeBytes + notifiarr: animebytes + avistaz: + tag: + - Avistaz + - tag2 + - tag3 + max_ratio: 5.0 + max_seeding_time: 129600 + limit_upload_speed: 150 + notifiarr: avistaz + beyond-hd: + tag: [Beyond-HD, tag2, tag3] + notifiarr: beyondhd + blutopia: + tag: Blutopia + notifiarr: blutopia + cartoonchaos: + tag: CartoonChaos + digitalcore: + tag: DigitalCore + max_ratio: 5.0 + notifiarr: digitalcore + gazellegames: + tag: GGn + limit_upload_speed: 150 + hdts: + tag: HDTorrents + max_seeding_time: 129600 + landof.tv: + tag: BroadcasTheNet + notifiarr: broadcasthenet + myanonamouse: + tag: MaM + passthepopcorn: + tag: PassThePopcorn + notifiarr: passthepopcorn + privatehd: + tag: PrivateHD + notifiarr: + tleechreload: + tag: TorrentLeech + notifiarr: torrentleech + torrentdb: + tag: TorrentDB + notifiarr: torrentdb + torrentleech: + tag: TorrentLeech + notifiarr: torrentleech + tv-vault: + tag: TV-Vault + +nohardlinks: + # Tag Movies/Series that are not hard linked + # Mandatory to fill out directory parameter above to use this function (root_dir/remote_dir) + # 
This variable should be set to your category name of your completed movies/completed series in qbit. Acceptable variable can be any category you would like to tag if there are no hardlinks found + movies-completed: + # exclude_tags var: Will exclude the following tags when searching through the category. + exclude_tags: + - Beyond-HD + - AnimeBytes + - MaM + # cleanup var: WARNING!! Setting this as true Will remove and delete contents of any torrents that are in paused state and has the NoHL tag + cleanup: false + # max_ratio var: Will set the torrent Maximum share ratio until torrent is stopped from seeding/uploading + max_ratio: 4.0 + # max seeding time var: Will set the torrent Maximum seeding time (min) until torrent is stopped from seeding + max_seeding_time: 86400 + # Limit Upload Speed var: Will limit the upload speed KiB/s (KiloBytes/second) (`-1` : No Limit) + limit_upload_speed: + # min seeding time var: Will ensure that torrent is not deleted by cleanup variable if torrent does not meet minimum seeding time (min). + min_seeding_time: 43200 + + # Can have additional categories set with separate ratio/seeding times defined. + series-completed: + # exclude_tags var: Will exclude the following tags when searching through the category. + exclude_tags: + - Beyond-HD + - BroadcasTheNet + # cleanup var: WARNING!! Setting this as true Will remove and delete contents of any torrents that are in paused state and has the NoHL tag + cleanup: false + # max_ratio var: Will set the torrent Maximum share ratio until torrent is stopped from seeding/uploading + max_ratio: 4.0 + # max seeding time var: Will set the torrent Maximum seeding time (min) until torrent is stopped from seeding + max_seeding_time: 86400 + # Limit Upload Speed var: Will limit the upload speed KiB/s (KiloBytes/second) (`-1` : No Limit) + limit_upload_speed: + # min seeding time var: Will ensure that torrent is not deleted by cleanup variable if torrent does not meet minimum seeding time (min). 
+ min_seeding_time: 43200 + +recyclebin: + # Recycle Bin method of deletion will move files into the recycle bin (Located in /root_dir/.RecycleBin) instead of directly deleting them in qbit + # By default the Recycle Bin will be emptied on every run of the qbit_manage script if empty_after_x_days is defined. + enabled: false + # empty_after_x_days var: + # Will automatically remove all files and folders in recycle bin after x days. (Checks every script run) + # If this variable is not defined it, the RecycleBin will never be emptied. + # WARNING: Setting this variable to 0 will delete all files immediately upon script run! + empty_after_x_days: 60 + # save_torrents var: + # If this option is set to true you MUST fill out the torrents_dir in the directory attribute. + # This will save a copy of your .torrent and .fastresume file in the recycle bin before deleting it from qbittorrent + save_torrents: true + # split_by_category var: + # This will split the recycle bin folder by the save path defined in the `cat` attribute + # and add the base folder name of the recycle bin that was defined in the `recycle_bin` sub-attribute under directory. + split_by_category: false + +orphaned: + # Orphaned files are those in the root_dir download directory that are not referenced by any active torrents. + # Will automatically remove all files and folders in orphaned data after x days. (Checks every script run) + # If this variable is not defined it, the orphaned data will never be emptied. + # WARNING: Setting this variable to 0 will delete all files immediately upon script run! + empty_after_x_days: 60 + # File patterns that will not be considered orphaned files. 
Handy for generated files that aren't part of the torrent but belong with the torrent's files + exclude_patterns: + - "**/.DS_Store" + - "**/Thumbs.db" + - "**/@eaDir" + - "/data/torrents/temp/**" + - "**/*.!qB" + - "/downloads/torrentfiles/*" + diff --git a/consul_backup/configs/sonarr/job_name b/consul_backup/configs/sonarr/job_name new file mode 100644 index 0000000..1e714ed --- /dev/null +++ b/consul_backup/configs/sonarr/job_name @@ -0,0 +1 @@ +sonarr \ No newline at end of file diff --git a/consul_backup/dns/hosts/nas.fbleagh.duckdns.org b/consul_backup/dns/hosts/nas.fbleagh.duckdns.org new file mode 100644 index 0000000..844f7ab --- /dev/null +++ b/consul_backup/dns/hosts/nas.fbleagh.duckdns.org @@ -0,0 +1 @@ +192.168.4.109 \ No newline at end of file diff --git a/consul_backup/expires_at b/consul_backup/expires_at new file mode 100644 index 0000000..9dab3e1 --- /dev/null +++ b/consul_backup/expires_at @@ -0,0 +1 @@ +1737493223.8591053 \ No newline at end of file diff --git a/consul_backup/foo b/consul_backup/foo new file mode 100644 index 0000000..1572178 --- /dev/null +++ b/consul_backup/foo @@ -0,0 +1 @@ +'hello consul' \ No newline at end of file diff --git a/consul_backup/functions/torrent_tagger b/consul_backup/functions/torrent_tagger new file mode 100644 index 0000000..760e5ef --- /dev/null +++ b/consul_backup/functions/torrent_tagger @@ -0,0 +1,59 @@ +import qbittorrentapi +import logging +import sys + +#Creating and Configuring Logger +Log_Format = "%(levelname)s %(asctime)s - %(message)s" +logging.basicConfig(stream = sys.stdout, + format = Log_Format, + level = logging.INFO) + +logger = logging.getLogger() + +#Testing our Logger + +qbt_client = qbittorrentapi.Client( + host='qbittorrent.service.dc1.consul', + port=8080, + username='admin', + password='adminadmin', +) +try: + qbt_client.auth_log_in() +except qbittorrentapi.LoginFailed as e: + print(e) + sys.exit(1) + +# display qBittorrent info + +logger.info(f'qBittorrent: 
{qbt_client.app.version}') +logger.info(f'qBittorrent Web API: {qbt_client.app.web_api_version}') +# logger.info(f'Web API Supported: {qbittorrentapi.supported_app_versions}') + +for k,v in qbt_client.app.build_info.items(): print(f'{k}: {v}') +for torrent in qbt_client.torrents_info(): + tags = [word.strip() for word in torrent.tags.split(',')] + + if "812415449bd161ba71e1a30fb5450e3d346a0594" == torrent.hash: + logger.info(f'Deleting: {torrent.name}') + qbt_client.torrents_delete(delete_files=True, torrent_hashes=torrent.hash) + continue + + + if "/downloads/" in torrent.save_path and "PR2100" not in tags: + logger.info(f'Tagging as PR2100: {torrent.name}') + qbt_client.torrents_add_tags(tags="PR2100",torrent_hashes=torrent.hash) + qbt_client.torrents_remove_tags(tags="WDMYCLOUD",torrent_hashes=torrent.hash) + + if "/archiveseeding/" in torrent.save_path and "WDMYCLOUD" not in tags: + logger.info(f'Tagging as WDMYCLOUD: {torrent.name}') + qbt_client.torrents_remove_tags(tags="PR2100",torrent_hashes=torrent.hash) + qbt_client.torrents_add_tags(tags="WDMYCLOUD",torrent_hashes=torrent.hash) + + if torrent.trackers[0]["msg"] != "This torrent is private" and len(torrent.trackers) == 4: + if "rawkbawx" not in torrent.trackers[3]["url"]: + logger.info(f'Tagging as Private: {torrent.name} {torrent.hash}') + qbt_client.torrents_set_share_limits(ratio_limit=1,seeding_time_limit=1440,inactive_seeding_time_limit=-1,torrent_hashes=torrent.hash) + qbt_client.torrents_add_tags(tags="PublicTracker",torrent_hashes=torrent.hash) + +sys.exit(0) diff --git a/consul_backup/functions/torrent_tidy b/consul_backup/functions/torrent_tidy new file mode 100644 index 0000000..dd7aa8e --- /dev/null +++ b/consul_backup/functions/torrent_tidy @@ -0,0 +1,87 @@ +import qbittorrentapi +from pprint import pprint + +# instantiate a Client using the appropriate WebUI configuration +qbt_client = qbittorrentapi.Client( + host='qbittorrent.service.dc1.consul', + port=8080, + username='admin', + 
password='adminadmin', +) + +# the Client will automatically acquire/maintain a logged-in state +# in line with any request. therefore, this is not strictly necessary; +# however, you may want to test the provided login credentials. +try: + qbt_client.auth_log_in() +except qbittorrentapi.LoginFailed as e: + print(e) + +# display qBittorrent info +print(f'qBittorrent: {qbt_client.app.version}') +print(f'qBittorrent Web API: {qbt_client.app.web_api_version}') +for k,v in qbt_client.app.build_info.items(): print(f'{k}: {v}') + + +# Creating an empty dictionary +trackermap = {} + +# Adding list as value +trackermap["Docspedia"] = ["http://science.docspedia.world:2710/f200baf50d45595c269b7b2d8c475a56/announce"] +trackermap["MMA"] = ["http://a.mma-tracker.org:2710/ed6d78535267e979de36ec2401999d3a/announce"] +trackermap["IPT"] = ["http://127.0.0.1.stackoverflow.tech/cc7288bf91565af486c8e4bad2b63a37/announce" + "http://routing.bgp.technology/cc7288bf91565af486c8e4bad2b63a37/announce", + "http://async.empirehost.me/cc7288bf91565af486c8e4bad2b63a37/announce", + ] +trackermap["Anthelion"] = ["https://tracker.anthelion.me:34001/LmD45Qf7p0MVgYkPm1Uogc8wNqDtvsjF/announce"] +trackermap["Cathode"] = ["https://signal.cathode-ray.tube/yebawgmvnvojwjnfw2a1qr5wg3pqwe4o/announce"] +trackermap["RedSeeding"] = ["https://flacsfor.me/f08a15129e4276f609c8b99abb746195/announce"] +trackermap["cinemaz"] = ["https://tracker.cinemaz.to/50500ba3815e18c837cd753ceb0080e3/announce"] +trackermap["iMetal"] = ["http://metal.iplay.ro/announce.php?passkey=2b4d98fe0f4b7325a15e5654961498ea"] +trackermap["rawk"] = ["http://rawkbawx.rocks:2710/announce","http://rawkbawx.rocks:2710/f7903677d2c030b89b69799f4bd9edbd/announce"] +trackermap["torrentleech"] = ["https://tracker.tleechreload.org/a/3d6cde5fd3bf1a375f3466d40f9ee9bb/announce" + ,"https://tracker.torrentleech.org/a/3d6cde5fd3bf1a375f3466d40f9ee9bb/announce"] +trackermap["MyAnonamouse"] = 
["https://t.myanonamouse.net/tracker.php/VPRYYAL-WpTwnr9G9aIN6044YVZ7x8Ao/announce"] +trackermap["Nebulance"] = ["https://tracker.nebulance.io/edcd6847fb3c31fd9958dd7144f0ea15/announce"] +trackermap["Orpheus_seeding"] = ["https://home.opsfet.ch/EAvBpDtmBtbziuydzwzhasgqAxrCqFwo/announce"] +trackermap["tvchaos"] = ["https://tvchaosuk.com/announce/cbaade5ac5612edf854b295153a60e6b"] +trackermap["filelist"] = ["http://reactor.filelist.io/98ece6e971fe7e89a0c86a00c20c1037/announce", + "http://reactor.flro.org/98ece6e971fe7e89a0c86a00c20c1037/announce"] + + + +categories_to_tidy = ["radarr","tv-sonarr","lidar","readarr","readarrAudio","Uncategorized"] + +#ensutre cats exist +torrents_cats = qbt_client.torrents_categories() + +for tracker in trackermap: + if tracker not in torrents_cats: + + savepath = "/downloads/seeding/" + tracker + qbt_client.torrents_create_category(name=tracker, save_path=savepath) + if tracker in torrents_cats: + savepath = "/downloads/seeding/" + tracker + if torrents_cats[tracker]["savePath"] != savepath: + # print(tracker) + # print(torrents_cats[tracker]["savePath"]) + qbt_client.torrents_edit_category(name=tracker, save_path=savepath) + +# retrieve and show all torrents +for torrent in qbt_client.torrents_info(): + # pprint(torrent.category) + for messycat in categories_to_tidy: + if messycat == torrent.category: + # pprint(torrent["name"]) + # pprint(torrent.trackers) + for tracker in torrent.trackers: + for knowntracker in trackermap: + if tracker["url"] in trackermap[knowntracker]: + name = torrent["name"] + pprint(f"{knowntracker} detected: {name}") + #seeding_time in seconds + if int(torrent.seeding_time) > 86400: + pprint(f"Moving {name} to {knowntracker}") + qbt_client.torrents_set_category(category=knowntracker,torrent_hashes=torrent.hash) + else: + pprint(f"seedtime {name} to {knowntracker}") \ No newline at end of file diff --git a/consul_backup/garage/admin_token b/consul_backup/garage/admin_token new file mode 100644 index 
0000000..6aa747f --- /dev/null +++ b/consul_backup/garage/admin_token @@ -0,0 +1 @@ +your-admin-token \ No newline at end of file diff --git a/consul_backup/garage/metrics_token b/consul_backup/garage/metrics_token new file mode 100644 index 0000000..a4b8dc0 --- /dev/null +++ b/consul_backup/garage/metrics_token @@ -0,0 +1 @@ +your-metrics-token \ No newline at end of file diff --git a/consul_backup/garage/rpc_secret b/consul_backup/garage/rpc_secret new file mode 100644 index 0000000..974b6da --- /dev/null +++ b/consul_backup/garage/rpc_secret @@ -0,0 +1 @@ +a656794c861ee8f9870050f9c4d271182f4f384d68784ce5ea501185566e1f8c \ No newline at end of file diff --git a/consul_backup/gitea/instance_url b/consul_backup/gitea/instance_url new file mode 100644 index 0000000..5990aa4 --- /dev/null +++ b/consul_backup/gitea/instance_url @@ -0,0 +1 @@ +https://gitea.service.dc1.fbleagh.duckdns.org \ No newline at end of file diff --git a/consul_backup/gitea/runner_token b/consul_backup/gitea/runner_token new file mode 100644 index 0000000..ff1ecd4 --- /dev/null +++ b/consul_backup/gitea/runner_token @@ -0,0 +1 @@ +jwNHqpUReAdJBIFkVqhEpi8OFYIJypCczi2rIMml \ No newline at end of file diff --git a/consul_backup/gitea_app_ini b/consul_backup/gitea_app_ini new file mode 100644 index 0000000..b7c98a4 --- /dev/null +++ b/consul_backup/gitea_app_ini @@ -0,0 +1,75 @@ +APP_NAME = Gitea: Git with a cup of tea +RUN_USER = root +RUN_MODE = prod + +[oauth2] +JWT_SECRET = kkj2kyhM1zFeAzxBeal4ldqTk8GByno3ZGzTXOaTpxM + +[security] +INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE2MzA2OTA0ODB9.msVfikOWJX7Po0V8x9AJDGYJUuQNy60gCq9N7ZWx_2s +INSTALL_LOCK = true +SECRET_KEY = MjBF7z9rRsvKRdJDVc8hbx5ywH2IrUofn3hC5sGGK6joHsq7syGpqJBxb8OUj02c +PASSWORD_HASH_ALGO = pbkdf2 + +[database] +DB_TYPE = sqlite3 +HOST = 127.0.0.1:3306 +NAME = gitea +USER = gitea +PASSWD = +SCHEMA = +SSL_MODE = disable +CHARSET = utf8 +PATH = /data/gitea.db +LOG_SQL = false + +[repository] +ROOT = 
/data/gitea-repositories +ENABLE_PUSH_CREATE_USER = true + +[server] +SSH_DOMAIN = gitea.service.dc1.consul +DOMAIN = gitea.service.dc1.consul +HTTP_PORT = 3000 +ROOT_URL = http://gitea.service.dc1.consul/ +DISABLE_SSH = false +START_SSH_SERVER = true +SSH_PORT = 2222 +LFS_START_SERVER = true +LFS_CONTENT_PATH = /data/lfs +LFS_JWT_SECRET = U89qoyVUCrRNCJu4iAtBc0Sh_LPJJA4DiVAkqqEWRy8 +OFFLINE_MODE = false +APP_DATA_PATH = /data + +[mailer] +ENABLED = false + +[service] +REGISTER_EMAIL_CONFIRM = false +ENABLE_NOTIFY_MAIL = false +DISABLE_REGISTRATION = false +ALLOW_ONLY_EXTERNAL_REGISTRATION = false +ENABLE_CAPTCHA = false +REQUIRE_SIGNIN_VIEW = false +DEFAULT_KEEP_EMAIL_PRIVATE = false +DEFAULT_ALLOW_CREATE_ORGANIZATION = false +DEFAULT_ENABLE_TIMETRACKING = false +NO_REPLY_ADDRESS = + +[picture] +DISABLE_GRAVATAR = false +ENABLE_FEDERATED_AVATAR = true + +[openid] +ENABLE_OPENID_SIGNIN = false +ENABLE_OPENID_SIGNUP = false + +[session] +PROVIDER = file + +[log] +MODE = console +LEVEL = info +ROOT_PATH = /data/log +ROUTER = console + diff --git a/consul_backup/grafana_dashboards/NomadMem b/consul_backup/grafana_dashboards/NomadMem new file mode 100644 index 0000000..8de0bc8 --- /dev/null +++ b/consul_backup/grafana_dashboards/NomadMem @@ -0,0 +1,406 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Nomad Jobs metrics", + "editable": true, + "gnetId": 6281, + "graphTooltip": 0, + "id": 4, + "iteration": 1632354194441, + "links": [], + "panels": [ + { + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 9, + "panels": [], + "repeat": "host", + "title": "$host", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 10, + "x": 0, + "y": 1 + }, + "id": 2, + "links": [], + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "avg(nomad_client_allocs_cpu_total_percent{host=~\"$host\"}) by(exported_job, task)/8", + "format": "time_series", + "interval": "", + "intervalFactor": 5, + "legendFormat": "{{task}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage Percent", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, 
+ "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 8, + "x": 10, + "y": 1 + }, + "id": 6, + "links": [], + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "expr": "avg(nomad_client_allocs_memory_rss{host=~\"$host\"}) by(exported_job, task)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{task}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "RSS", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 7, + "links": [], + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "(\navg by(exported_job) (nomad_client_allocs_memory_cache{host=~\"$host\"}) )", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{task}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Cache", + "type": "stat" + } + ], + "schemaVersion": 30, + "style": 
"dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "dc1", + "value": "dc1" + }, + "datasource": "Prometheus", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "DC", + "multi": false, + "name": "datacenter", + "options": [], + "query": { + "query": "label_values(nomad_client_uptime, datacenter)", + "refId": "Prometheus-datacenter-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "odroid1.node.dc1.consul", + "odroid2.node.dc1.consul", + "odroid3.node.dc1.consul", + "odroid4.node.dc1.consul" + ], + "value": [ + "odroid1.node.dc1.consul", + "odroid2.node.dc1.consul", + "odroid3.node.dc1.consul", + "odroid4.node.dc1.consul" + ] + }, + "datasource": "Prometheus", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Host", + "multi": true, + "name": "host", + "options": [], + "query": { + "query": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, host)", + "refId": "Prometheus-host-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Nomad JobsNEW", + "uid": "TvqbbhViz", + "version": 2 +} \ No newline at end of file diff --git a/consul_backup/grafana_dashboards/nomad b/consul_backup/grafana_dashboards/nomad new file mode 100644 index 
0000000..a21b59d --- /dev/null +++ b/consul_backup/grafana_dashboards/nomad @@ -0,0 +1,972 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Nomad built-in telemetry https://github.com/dereknex/nomad-grafana", + "editable": true, + "gnetId": 6278, + "graphTooltip": 1, + "iteration": 1632433053988, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 24, + "panels": [], + "repeat": "node", + "title": "Node Stats - $node", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 1, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 86400 + } + ] + }, + "unit": "dtdurations" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 0, + "y": 1 + }, + "id": 4, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "nomad_client_uptime{datacenter=~\"$datacenter\", node=~\"$node\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Uptime($node)", + 
"type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 1 + }, + "id": 33, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "nomad_client_allocated_cpu{datacenter=~\"$datacenter\", node=~\"$node\"}/(nomad_client_unallocated_cpu{datacenter=~\"$datacenter\", node=~\"$node\"}+nomad_client_allocated_cpu{datacenter=~\"$datacenter\", node=~\"$node\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "CPU Alloc", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 4, + "y": 1 + }, + "id": 74, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": 
"value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.5", + "repeatDirection": "v", + "targets": [ + { + "exemplar": true, + "expr": "nomad_client_allocated_memory{datacenter=~\"$datacenter\", node=~\"$node\"}/(nomad_client_unallocated_memory{datacenter=~\"$datacenter\", node=~\"$node\"}+nomad_client_allocated_memory{datacenter=~\"$datacenter\", node=~\"$node\"})", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "title": "Mem Alloc", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 6, + "y": 1 + }, + "id": 73, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "(avg by(instance)(nomad_client_host_cpu_total{datacenter=~\"$datacenter\", node=~\"$node\"}))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + 
"legendFormat": "Total", + "refId": "A" + }, + { + "expr": "(avg by(instance)(nomad_client_host_cpu_system{datacenter=~\"$datacenter\", node=~\"$node\"}))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "System", + "refId": "B" + }, + { + "expr": "(avg by(instance)(nomad_client_host_cpu_user{datacenter=~\"$datacenter\", node=~\"$node\"}))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "User", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU ($node)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbits" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 11, + "y": 1 + }, + "id": 103, + "links": [], + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.5", + "targets": [ + { + "exemplar": true, + "expr": "(sum by(instance)(nomad_client_allocs_memory_usage{node=~\"$node\"}))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Allocated", + "refId": "A" + }, + { + "exemplar": true, + "expr": "(sum 
by(instance)(nomad_client_allocs_memory_allocated{node=~\"$node\"}))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "used", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Mem - $node", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 15, + "y": 1 + }, + "id": 76, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.4", + "targets": [ + { + "exemplar": true, + "expr": "node_thermal_zone_temp{node=~\"$node\"}", + "interval": "", + "intervalFactor": 3, + "legendFormat": "{{zone}}", + "refId": "A" + } + ], + "title": "Temps - $node", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 4, + "gradientMode": "opacity", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + 
}, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "0" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "text", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 78, + "options": { + "legend": { + "calcs": [ + "max", + "min", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.4", + "targets": [ + { + "exemplar": true, + "expr": "node_cpu_scaling_frequency_hertz{node=~\"$node\"}/1000/1000\n", + "interval": "", + "intervalFactor": 10, + "legendFormat": "{{cpu}}", + "refId": "A" + } + ], + "type": "timeseries" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 2, + "y": 4 + }, + "id": 48, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.5", + "repeat": null, + "targets": [ + { + "expr": 
"nomad_client_host_disk_used_percent{datacenter=~\"$datacenter\", node=~\"$node\", disk=\"/dev/sda1\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Disk Used (Root)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 2, + "x": 4, + "y": 4 + }, + "id": 131, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.5", + "repeatDirection": "v", + "targets": [ + { + "exemplar": true, + "expr": "nomad_client_host_memory_used{datacenter=~\"$datacenter\", node=~\"$node\"}/nomad_client_host_memory_total{datacenter=~\"$datacenter\", node=~\"$node\"}", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "title": "Mem Used", + "type": "stat" + } + ], + "refresh": "30s", + "schemaVersion": 30, + "style": "dark", + "tags": [ + "nomad", + "host", + "cluster" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "dc1", + "value": "dc1" + }, + "datasource": "Prometheus", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "DC", + "multi": false, + "name": "datacenter", + "options": [], + "query": { + 
"query": "label_values(nomad_client_uptime, datacenter)", + "refId": "Prometheus-datacenter-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": "Prometheus", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Host", + "multi": true, + "name": "host", + "options": [], + "query": { + "query": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, host)", + "refId": "Prometheus-host-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "odroid1", + "odroid2", + "odroid3", + "odroid4" + ], + "value": [ + "odroid1", + "odroid2", + "odroid3", + "odroid4" + ] + }, + "datasource": "Prometheus", + "definition": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, node)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "Node", + "multi": true, + "name": "node", + "options": [], + "query": { + "query": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, node)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "NomadDashboard", + "uid": "CiP3mZVik", + 
"version": 1 +} \ No newline at end of file diff --git a/consul_backup/grafana_dashboards/thermals b/consul_backup/grafana_dashboards/thermals new file mode 100644 index 0000000..c7f0d75 --- /dev/null +++ b/consul_backup/grafana_dashboards/thermals @@ -0,0 +1,353 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 5, + "iteration": 1632162223791, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "repeat": "instance", + "title": "Thermals - $instance", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 4, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.1.4", + "targets": [ + { + "exemplar": true, + "expr": "node_thermal_zone_temp{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{zone}}", + "refId": "A" + } + ], + "title": "$instance", + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, 
+ "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 4, + "y": 1 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.4", + "targets": [ + { + "exemplar": true, + "expr": "node_thermal_zone_temp{instance=~\"$instance\"}", + "interval": "", + "intervalFactor": 3, + "legendFormat": "{{zone}}", + "refId": "A" + } + ], + "title": "$instance", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "0" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "text", + "mode": "fixed" + } + } + 
] + } + ] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 12, + "y": 1 + }, + "id": 20, + "options": { + "legend": { + "calcs": [ + "max", + "min", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.4", + "targets": [ + { + "exemplar": true, + "expr": "node_cpu_scaling_frequency_hertz{instance=~\"$instance\"}/1000/1000\n", + "interval": "", + "intervalFactor": 10, + "legendFormat": "{{cpu}}", + "refId": "A" + } + ], + "title": "CPU Freq - $instance", + "type": "timeseries" + }, + { + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 2, + "title": "Row title", + "type": "row" + } + ], + "refresh": "30s", + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": [ + "192.168.1.221:9100", + "192.168.1.222:9100", + "192.168.1.223:9100", + "192.168.1.224:9100" + ], + "value": [ + "192.168.1.221:9100", + "192.168.1.222:9100", + "192.168.1.223:9100", + "192.168.1.224:9100" + ] + }, + "datasource": "Prometheus", + "definition": "label_values(node_thermal_zone_temp, instance)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "instance", + "options": [], + "query": { + "query": "label_values(node_thermal_zone_temp, instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Thermals", + "uid": "bSxgAGRgz", + "version": 11 +} \ No newline at end of file diff --git a/consul_backup/homeassistant/configuration.yml b/consul_backup/homeassistant/configuration.yml new file mode 100644 index 0000000..dd71f1d --- /dev/null +++ b/consul_backup/homeassistant/configuration.yml @@ -0,0 +1,96 @@ +homeassistant: + 
name: Our_House + latitude: 40.7654 + longitude: -73.8175 + elevation: 26 + unit_system: metric + time_zone: America/New_York + auth_providers: + - type: trusted_networks + trusted_networks: + - 127.0.0.1 + - ::1 + - 192.168.1.0/24 + - 192.168.4.0/22 + allow_bypass_login: true + - type: homeassistant +frontend: +lovelace: + mode: yaml +config: +zeroconf: +http: +sun: +mobile_app: +ecobee: + api_key: hstTGuQedckV2an8XDiLSmWA4GlZ1Hy5 +sensor: + - platform: time_date + display_options: + - 'date_time' +automation: + - alias: bedroomblindsup + trigger: + platform: time + at: "06:50" #military time + action: + entity_id: cover.bedroom + service: cover.open_cover + - alias: bedroomblindsdown + trigger: + platform: time + at: "09:40" #military time + action: + entity_id: cover.bedroom + service: cover.close_cover + - alias: harddrives_on + trigger: + platform: time + at: "09:30" #military time + action: + entity_id: switch.harddrives + service: switch.turn_on + - alias: harddrives_off + trigger: + platform: time + at: "21:45" #military time + action: + entity_id: switch.harddrives + service: switch.turn_off + - alias: LightsAtSunset + trigger: + platform: sun + event: sunset + action: + service: switch.turn_on + entity_id: switch.lampdrawers +tplink: + discovery: true +wemo: + static: + - 192.168.99.200 # StuBed + - 192.168.99.201 # LampDrawers + - 192.168.99.202 # BigLamp + - 192.168.99.203 # TallTree + - 192.168.99.204 # ShortTree + - 192.168.99.205 # TallTree + - 192.168.99.210 # Harddrives +switch: + - platform: template + switches: + living_room_screen_off: + friendly_name: 'living_room_screen_off' + turn_off: + - service: webostv.command + data: + { + "entity_id": "media_player.lg_webos_smart_tv", + "command": "com.webos.service.tvpower/power/turnOffScreen" + } + turn_on: + - service: webostv.command + data: + { + "entity_id": "media_player.lg_webos_smart_tv", + "command": "com.webos.service.tvpower/power/turnOnScreen" + } \ No newline at end of file diff --git 
a/consul_backup/homeassistant/lovelace.yml b/consul_backup/homeassistant/lovelace.yml new file mode 100644 index 0000000..ab1e8eb --- /dev/null +++ b/consul_backup/homeassistant/lovelace.yml @@ -0,0 +1,76 @@ +title: Our_House +views: + - path: default_view + title: Home + cards: + - type: entities + entities: + - entity: switch.thinlampstudesk + - entity: switch.living_room_screen_off + title: Switch + - path: servers + title: Servers + cards: + - type: entities + entities: + - entity: switch.odroid1 + - entity: switch.odroid2 + - entity: switch.odroid3 + - entity: switch.odroid4 + - entity: switch.odroid5 + - entity: switch.harddrives + title: Servers + - path: all + title: blinds + cards: + - type: entities + entities: + - entity: cover.lounge_farleft + - entity: cover.lounge_left + - entity: cover.lounge_right + - entity: cover.lounge_farright + title: Blinds + - type: button + name: Close Right + show_state: false + tap_action: + action: call-service + service: scene.turn_on + data: + entity_id: scene.close_right + - type: button + name: Close Left + show_state: false + tap_action: + action: call-service + service: scene.turn_on + data: + entity_id: scene.close_left + - type: button + name: All Close + show_state: false + tap_action: + action: call-service + service: scene.turn_on + data: + entity_id: scene.all_close + - type: button + name: All Open + show_state: false + tap_action: + action: call-service + service: scene.turn_on + data: + entity_id: scene.all_open + - type: sensor + entity: sensor.date_time + title: Time + - type: entities + entities: + - automation.bedroomblindsup + - automation.lightsatsunset + - automation.harddrives_on + - automation.harddrives_off + title: Automations + - type: media-control + entity: media_player.lg_webos_smart_tv \ No newline at end of file diff --git a/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.cer b/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.cer new file mode 100644 index 
0000000..a81abd7 --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.cer @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIECDCCA46gAwIBAgIRAOI5ocMIin5u5Gc2BvGVfkkwCgYIKoZIzj0EAwMwSzEL +MAkGA1UEBhMCQVQxEDAOBgNVBAoTB1plcm9TU0wxKjAoBgNVBAMTIVplcm9TU0wg +RUNDIERvbWFpbiBTZWN1cmUgU2l0ZSBDQTAeFw0yMzEyMzEwMDAwMDBaFw0yNDAz +MzAyMzU5NTlaMB0xGzAZBgNVBAMMEiouZmJsZWFnaC5kZWR5bi5pbzBZMBMGByqG +SM49AgEGCCqGSM49AwEHA0IABLwyO8TC7ENV77I2CIG5YR7xahnyTE4oE13Hpulb +XhPluUn0ovz51D8Fk9t162e+h+8NljRVbEzKg8OH42NbI0ejggJ/MIICezAfBgNV +HSMEGDAWgBQPa+ZLzjlHrvZ+kB558DCRkshfozAdBgNVHQ4EFgQU/q5zRHcinAcH +DqsW0ah+pt29Bz4wDgYDVR0PAQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwHQYDVR0l +BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMEkGA1UdIARCMEAwNAYLKwYBBAGyMQEC +Ak4wJTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwCAYGZ4EM +AQIBMIGIBggrBgEFBQcBAQR8MHowSwYIKwYBBQUHMAKGP2h0dHA6Ly96ZXJvc3Ns +LmNydC5zZWN0aWdvLmNvbS9aZXJvU1NMRUNDRG9tYWluU2VjdXJlU2l0ZUNBLmNy +dDArBggrBgEFBQcwAYYfaHR0cDovL3plcm9zc2wub2NzcC5zZWN0aWdvLmNvbTCC +AQUGCisGAQQB1nkCBAIEgfYEgfMA8QB2AHb/iD8KtvuVUcJhzPWHujS0pM27Kdxo +Qgqf5mdMWjp0AAABjL0tTCoAAAQDAEcwRQIhAMuzR0SfI6ZCCgrq9es6jIxLSOoF +PWltmxXb3UKrfvX2AiA6ANYjox6S3IGgM6oqZvYtr4hxvUOnYRU9GuwbU4n+wgB3 +ADtTd3U+LbmAToswWwb+QDtn2E/D9Me9AA0tcm/h+tQXAAABjL0tS84AAAQDAEgw +RgIhAJ/vVIARfM+nq64kzQ0x/QD2O+lAaucIDh9eBF8EquDiAiEA26IXr/li5tMr +rgak8jgpN2YIycs3qaCZ++RnzSQqSBkwHQYDVR0RBBYwFIISKi5mYmxlYWdoLmRl +ZHluLmlvMAoGCCqGSM49BAMDA2gAMGUCMDpYUA6nlJ0sDnNsJVggNZOFTeSJJo9N +9Tpqde9bXVFN1vrC34Pjozwzk7wA4CfxDAIxAPAw1BtGF8EVRQuMm3A9hkqzv9s8 +12b1OKqrDPqREYmTZF4nafvtkHuuzvzB1voTJQ== +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.key b/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.key new file mode 100644 index 0000000..60e2e6b --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.dedyn.io/*.fbleagh.dedyn.io.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIEJs9e+wI54/HX12ueakomnBgNq9O0Vb9ad95Q3XzWUioAoGCCqGSM49 
+AwEHoUQDQgAEvDI7xMLsQ1XvsjYIgblhHvFqGfJMTigTXcem6VteE+W5SfSi/PnU +PwWT23XrZ76H7w2WNFVsTMqDw4fjY1sjRw== +-----END EC PRIVATE KEY----- diff --git a/consul_backup/letsconsul/*.fbleagh.dedyn.io/ca.cer b/consul_backup/letsconsul/*.fbleagh.dedyn.io/ca.cer new file mode 100644 index 0000000..a0f1644 --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.dedyn.io/ca.cer @@ -0,0 +1,44 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAwygAwIBAgIQI7dt48G7KxpRlh4I6rdk6DAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMTMw +MDAwMDAwWhcNMzAwMTI5MjM1OTU5WjBLMQswCQYDVQQGEwJBVDEQMA4GA1UEChMH +WmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBFQ0MgRG9tYWluIFNlY3VyZSBTaXRl +IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAENkFhFytTJe2qypTk1tpIV+9QuoRk +gte7BRvWHwYk9qUznYzn8QtVaGOCMBBfjWXsqqivl8q1hs4wAYl03uNOXgFu7iZ7 +zFP6I6T3RB0+TR5fZqathfby47yOCZiAJI4go4IBdTCCAXEwHwYDVR0jBBgwFoAU +OuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYEFA9r5kvOOUeu9n6QHnnwMJGS +yF+jMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAEGzAZMA0GCysGAQQBsjEBAgJO +MAgGBmeBDAECATBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVz +dC5jb20vVVNFUlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYI +KwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5j +b20vVVNFUlRydXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6 +Ly9vY3NwLnVzZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDZwAwZAIwJHBUDwHJQN3I +VNltVMrICMqYQ3TYP/TXqV9t8mG5cAomG2MwqIsxnL937Gewf6WIAjAlrauksO6N +UuDdDXyd330druJcZJx0+H5j5cFOYBaGsKdeGW7sCMaR2PsDFKGllas= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIQVmcdBOpPmUxvEIFHWdJ1lDANBgkqhkiG9w0BAQwFADB7 +MQswCQYDVQQGEwJHQjEbMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYD +VQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UE +AwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTE5MDMxMjAwMDAwMFoXDTI4 
+MTIzMTIzNTk1OVowgYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5 +MRQwEgYDVQQHEwtKZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBO +ZXR3b3JrMS4wLAYDVQQDEyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEGqxUWqn5aCPnetUkb1PGWthL +q8bVttHmc3Gu3ZzWDGH926CJA7gFFOxXzu5dP+Ihs8731Ip54KODfi2X0GHE8Znc +JZFjq38wo7Rw4sehM5zzvy5cU7Ffs30yf4o043l5o4HyMIHvMB8GA1UdIwQYMBaA +FKARCiM+lvEH7OKvKe+CpX/QMKS0MB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1 +xmNjmjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zARBgNVHSAECjAI +MAYGBFUdIAAwQwYDVR0fBDwwOjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5j +b20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNAYIKwYBBQUHAQEEKDAmMCQG +CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQEM +BQADggEBABns652JLCALBIAdGN5CmXKZFjK9Dpx1WywV4ilAbe7/ctvbq5AfjJXy +ij0IckKJUAfiORVsAYfZFhr1wHUrxeZWEQff2Ji8fJ8ZOd+LygBkc7xGEJuTI42+ +FsMuCIKchjN0djsoTI0DQoWz4rIjQtUfenVqGtF8qmchxDM6OW1TyaLtYiKou+JV +bJlsQ2uRl9EMC5MCHdK8aXdJ5htN978UeAOwproLtOGFfy/cQjutdAFI3tZs4RmY +CV4Ks2dH/hzg1cEo70qLRDEmBDeNiXQ2Lu+lIg+DdEmSx/cQwgwp+7e9un/jX9Wf +8qn0dNW44bOwgeThpWOjzOoEeJBuv/c= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/*.fbleagh.dedyn.io/fullchain.cer b/consul_backup/letsconsul/*.fbleagh.dedyn.io/fullchain.cer new file mode 100644 index 0000000..a1672f5 --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.dedyn.io/fullchain.cer @@ -0,0 +1,68 @@ +-----BEGIN CERTIFICATE----- +MIIECDCCA46gAwIBAgIRAOI5ocMIin5u5Gc2BvGVfkkwCgYIKoZIzj0EAwMwSzEL +MAkGA1UEBhMCQVQxEDAOBgNVBAoTB1plcm9TU0wxKjAoBgNVBAMTIVplcm9TU0wg +RUNDIERvbWFpbiBTZWN1cmUgU2l0ZSBDQTAeFw0yMzEyMzEwMDAwMDBaFw0yNDAz +MzAyMzU5NTlaMB0xGzAZBgNVBAMMEiouZmJsZWFnaC5kZWR5bi5pbzBZMBMGByqG +SM49AgEGCCqGSM49AwEHA0IABLwyO8TC7ENV77I2CIG5YR7xahnyTE4oE13Hpulb +XhPluUn0ovz51D8Fk9t162e+h+8NljRVbEzKg8OH42NbI0ejggJ/MIICezAfBgNV +HSMEGDAWgBQPa+ZLzjlHrvZ+kB558DCRkshfozAdBgNVHQ4EFgQU/q5zRHcinAcH +DqsW0ah+pt29Bz4wDgYDVR0PAQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwHQYDVR0l 
+BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMEkGA1UdIARCMEAwNAYLKwYBBAGyMQEC +Ak4wJTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwCAYGZ4EM +AQIBMIGIBggrBgEFBQcBAQR8MHowSwYIKwYBBQUHMAKGP2h0dHA6Ly96ZXJvc3Ns +LmNydC5zZWN0aWdvLmNvbS9aZXJvU1NMRUNDRG9tYWluU2VjdXJlU2l0ZUNBLmNy +dDArBggrBgEFBQcwAYYfaHR0cDovL3plcm9zc2wub2NzcC5zZWN0aWdvLmNvbTCC +AQUGCisGAQQB1nkCBAIEgfYEgfMA8QB2AHb/iD8KtvuVUcJhzPWHujS0pM27Kdxo +Qgqf5mdMWjp0AAABjL0tTCoAAAQDAEcwRQIhAMuzR0SfI6ZCCgrq9es6jIxLSOoF +PWltmxXb3UKrfvX2AiA6ANYjox6S3IGgM6oqZvYtr4hxvUOnYRU9GuwbU4n+wgB3 +ADtTd3U+LbmAToswWwb+QDtn2E/D9Me9AA0tcm/h+tQXAAABjL0tS84AAAQDAEgw +RgIhAJ/vVIARfM+nq64kzQ0x/QD2O+lAaucIDh9eBF8EquDiAiEA26IXr/li5tMr +rgak8jgpN2YIycs3qaCZ++RnzSQqSBkwHQYDVR0RBBYwFIISKi5mYmxlYWdoLmRl +ZHluLmlvMAoGCCqGSM49BAMDA2gAMGUCMDpYUA6nlJ0sDnNsJVggNZOFTeSJJo9N +9Tpqde9bXVFN1vrC34Pjozwzk7wA4CfxDAIxAPAw1BtGF8EVRQuMm3A9hkqzv9s8 +12b1OKqrDPqREYmTZF4nafvtkHuuzvzB1voTJQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhTCCAwygAwIBAgIQI7dt48G7KxpRlh4I6rdk6DAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMTMw +MDAwMDAwWhcNMzAwMTI5MjM1OTU5WjBLMQswCQYDVQQGEwJBVDEQMA4GA1UEChMH +WmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBFQ0MgRG9tYWluIFNlY3VyZSBTaXRl +IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAENkFhFytTJe2qypTk1tpIV+9QuoRk +gte7BRvWHwYk9qUznYzn8QtVaGOCMBBfjWXsqqivl8q1hs4wAYl03uNOXgFu7iZ7 +zFP6I6T3RB0+TR5fZqathfby47yOCZiAJI4go4IBdTCCAXEwHwYDVR0jBBgwFoAU +OuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYEFA9r5kvOOUeu9n6QHnnwMJGS +yF+jMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAEGzAZMA0GCysGAQQBsjEBAgJO +MAgGBmeBDAECATBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVz +dC5jb20vVVNFUlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYI +KwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5j 
+b20vVVNFUlRydXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6 +Ly9vY3NwLnVzZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDZwAwZAIwJHBUDwHJQN3I +VNltVMrICMqYQ3TYP/TXqV9t8mG5cAomG2MwqIsxnL937Gewf6WIAjAlrauksO6N +UuDdDXyd330druJcZJx0+H5j5cFOYBaGsKdeGW7sCMaR2PsDFKGllas= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIQVmcdBOpPmUxvEIFHWdJ1lDANBgkqhkiG9w0BAQwFADB7 +MQswCQYDVQQGEwJHQjEbMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYD +VQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UE +AwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTE5MDMxMjAwMDAwMFoXDTI4 +MTIzMTIzNTk1OVowgYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5 +MRQwEgYDVQQHEwtKZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBO +ZXR3b3JrMS4wLAYDVQQDEyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEGqxUWqn5aCPnetUkb1PGWthL +q8bVttHmc3Gu3ZzWDGH926CJA7gFFOxXzu5dP+Ihs8731Ip54KODfi2X0GHE8Znc +JZFjq38wo7Rw4sehM5zzvy5cU7Ffs30yf4o043l5o4HyMIHvMB8GA1UdIwQYMBaA +FKARCiM+lvEH7OKvKe+CpX/QMKS0MB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1 +xmNjmjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zARBgNVHSAECjAI +MAYGBFUdIAAwQwYDVR0fBDwwOjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5j +b20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNAYIKwYBBQUHAQEEKDAmMCQG +CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQEM +BQADggEBABns652JLCALBIAdGN5CmXKZFjK9Dpx1WywV4ilAbe7/ctvbq5AfjJXy +ij0IckKJUAfiORVsAYfZFhr1wHUrxeZWEQff2Ji8fJ8ZOd+LygBkc7xGEJuTI42+ +FsMuCIKchjN0djsoTI0DQoWz4rIjQtUfenVqGtF8qmchxDM6OW1TyaLtYiKou+JV +bJlsQ2uRl9EMC5MCHdK8aXdJ5htN978UeAOwproLtOGFfy/cQjutdAFI3tZs4RmY +CV4Ks2dH/hzg1cEo70qLRDEmBDeNiXQ2Lu+lIg+DdEmSx/cQwgwp+7e9un/jX9Wf +8qn0dNW44bOwgeThpWOjzOoEeJBuv/c= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.cer b/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.cer new file mode 100644 index 0000000..f7e09ea --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.cer @@ -0,0 
+1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDvDCCA0KgAwIBAgISBSkxFMnJGPCskIgEO0Mk6EKmMAoGCCqGSM49BAMDMDIx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF +NzAeFw0yNTExMjAxNTU1MDVaFw0yNjAyMTgxNTU1MDRaMCAxHjAcBgNVBAMMFSou +ZmJsZWFnaC5kdWNrZG5zLm9yZzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEMp +g8Wlki34WWSjip7jID1y5+Cj0VNJ5ASEWpfdz0UZaPWyysNeAPJskQoFN7kTvEhb +NWRDMb+PnJoI+r4dKZyjggJIMIICRDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHZq +MAGYe8+YzwC4/4L9xSaUEidqMB8GA1UdIwQYMBaAFK5IntyHHUSgb9qi5WB0BHjC +nACAMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcwAoYWaHR0cDovL2U3LmkubGVu +Y3Iub3JnLzBHBgNVHREEQDA+ghUqLmZibGVhZ2guZHVja2Rucy5vcmeCJWdpdGVh +LnNlcnZpY2UuZGMxLmZibGVhZ2guZHVja2Rucy5vcmcwEwYDVR0gBAwwCjAIBgZn +gQwBAgEwLQYDVR0fBCYwJDAioCCgHoYcaHR0cDovL2U3LmMubGVuY3Iub3JnLzM5 +LmNybDCCAQIGCisGAQQB1nkCBAIEgfMEgfAA7gB2AMs49xWJfIShRF9bwd37yW7y +mlnNRwppBYWwyxTDFFjnAAABmqIvidQAAAQDAEcwRQIgGTOO3akMZ6o3l3It0uAJ +/lSNdxz+DJKS+w8uke4kQvUCIQDKcSlkC7ByBXKYNxiUJTwgwwLrUqV6WHE+hxE1 +tGNvPQB0AA5XlLzzrqk+MxssmQez95Dfm8I9cTIl3SGpJaxhxU4hAAABmqIvib0A +AAQDAEUwQwIgM2SNEpVheNImpowlGvrb5bvzOb86JgQCdWgmOlHhqycCH1Fdsxnj +poDzuPBguZmyjXH+7mXtiWfgICrGuNkAiWgwCgYIKoZIzj0EAwMDaAAwZQIwFEW8 +I6lBUKaEP7SDj0oljwGvC1W9H+/vti+glXHH1rhiW6UVtatYIW2S44/wi6XyAjEA +kLO3IOeXjFOhiix0cxcizUBAPCmSYPF6B7T1i1mg3VQwFQ6pmImhYVdRl3s47sCB +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.key b/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.key new file mode 100644 index 0000000..d0484cc --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.duckdns.org/*.fbleagh.duckdns.org.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIEctj4JDhGfmH9dAKmqCgYGNake1XwGr9GpUzEbO62+loAoGCCqGSM49 +AwEHoUQDQgAEQymDxaWSLfhZZKOKnuMgPXLn4KPRU0nkBIRal93PRRlo9bLKw14A +8myRCgU3uRO8SFs1ZEMxv4+cmgj6vh0pnA== +-----END EC PRIVATE KEY----- diff --git a/consul_backup/letsconsul/*.fbleagh.duckdns.org/ca.cer 
b/consul_backup/letsconsul/*.fbleagh.duckdns.org/ca.cer new file mode 100644 index 0000000..049e753 --- /dev/null +++ b/consul_backup/letsconsul/*.fbleagh.duckdns.org/ca.cer @@ -0,0 +1,27 @@ + +-----BEGIN CERTIFICATE----- +MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw +WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg +RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST +CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef +QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw +gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD +ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4 +wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g +BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu +Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD +aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF +h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG +yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr +OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o +yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S +M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ +UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq +Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I +tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ +YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty ++VUwFj9tmWxyR/M= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/*.fbleagh.duckdns.org/fullchain.cer b/consul_backup/letsconsul/*.fbleagh.duckdns.org/fullchain.cer new file mode 100644 index 0000000..3c61280 --- /dev/null +++ 
b/consul_backup/letsconsul/*.fbleagh.duckdns.org/fullchain.cer @@ -0,0 +1,49 @@ +-----BEGIN CERTIFICATE----- +MIIDvDCCA0KgAwIBAgISBSkxFMnJGPCskIgEO0Mk6EKmMAoGCCqGSM49BAMDMDIx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJF +NzAeFw0yNTExMjAxNTU1MDVaFw0yNjAyMTgxNTU1MDRaMCAxHjAcBgNVBAMMFSou +ZmJsZWFnaC5kdWNrZG5zLm9yZzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEMp +g8Wlki34WWSjip7jID1y5+Cj0VNJ5ASEWpfdz0UZaPWyysNeAPJskQoFN7kTvEhb +NWRDMb+PnJoI+r4dKZyjggJIMIICRDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHZq +MAGYe8+YzwC4/4L9xSaUEidqMB8GA1UdIwQYMBaAFK5IntyHHUSgb9qi5WB0BHjC +nACAMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcwAoYWaHR0cDovL2U3LmkubGVu +Y3Iub3JnLzBHBgNVHREEQDA+ghUqLmZibGVhZ2guZHVja2Rucy5vcmeCJWdpdGVh +LnNlcnZpY2UuZGMxLmZibGVhZ2guZHVja2Rucy5vcmcwEwYDVR0gBAwwCjAIBgZn +gQwBAgEwLQYDVR0fBCYwJDAioCCgHoYcaHR0cDovL2U3LmMubGVuY3Iub3JnLzM5 +LmNybDCCAQIGCisGAQQB1nkCBAIEgfMEgfAA7gB2AMs49xWJfIShRF9bwd37yW7y +mlnNRwppBYWwyxTDFFjnAAABmqIvidQAAAQDAEcwRQIgGTOO3akMZ6o3l3It0uAJ +/lSNdxz+DJKS+w8uke4kQvUCIQDKcSlkC7ByBXKYNxiUJTwgwwLrUqV6WHE+hxE1 +tGNvPQB0AA5XlLzzrqk+MxssmQez95Dfm8I9cTIl3SGpJaxhxU4hAAABmqIvib0A +AAQDAEUwQwIgM2SNEpVheNImpowlGvrb5bvzOb86JgQCdWgmOlHhqycCH1Fdsxnj +poDzuPBguZmyjXH+7mXtiWfgICrGuNkAiWgwCgYIKoZIzj0EAwMDaAAwZQIwFEW8 +I6lBUKaEP7SDj0oljwGvC1W9H+/vti+glXHH1rhiW6UVtatYIW2S44/wi6XyAjEA +kLO3IOeXjFOhiix0cxcizUBAPCmSYPF6B7T1i1mg3VQwFQ6pmImhYVdRl3s47sCB +-----END CERTIFICATE----- + +-----BEGIN CERTIFICATE----- +MIIEVzCCAj+gAwIBAgIRAKp18eYrjwoiCWbTi7/UuqEwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjQwMzEzMDAwMDAw +WhcNMjcwMzEyMjM1OTU5WjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg +RW5jcnlwdDELMAkGA1UEAxMCRTcwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARB6AST +CFh/vjcwDMCgQer+VtqEkz7JANurZxLP+U9TCeioL6sp5Z8VRvRbYk4P1INBmbef +QHJFHCxcSjKmwtvGBWpl/9ra8HW0QDsUaJW2qOJqceJ0ZVFT3hbUHifBM/2jgfgw 
+gfUwDgYDVR0PAQH/BAQDAgGGMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD +ATASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSuSJ7chx1EoG/aouVgdAR4 +wpwAgDAfBgNVHSMEGDAWgBR5tFnme7bl5AFzgAiIyBpY9umbbjAyBggrBgEFBQcB +AQQmMCQwIgYIKwYBBQUHMAKGFmh0dHA6Ly94MS5pLmxlbmNyLm9yZy8wEwYDVR0g +BAwwCjAIBgZngQwBAgEwJwYDVR0fBCAwHjAcoBqgGIYWaHR0cDovL3gxLmMubGVu +Y3Iub3JnLzANBgkqhkiG9w0BAQsFAAOCAgEAjx66fDdLk5ywFn3CzA1w1qfylHUD +aEf0QZpXcJseddJGSfbUUOvbNR9N/QQ16K1lXl4VFyhmGXDT5Kdfcr0RvIIVrNxF +h4lqHtRRCP6RBRstqbZ2zURgqakn/Xip0iaQL0IdfHBZr396FgknniRYFckKORPG +yM3QKnd66gtMst8I5nkRQlAg/Jb+Gc3egIvuGKWboE1G89NTsN9LTDD3PLj0dUMr +OIuqVjLB8pEC6yk9enrlrqjXQgkLEYhXzq7dLafv5Vkig6Gl0nuuqjqfp0Q1bi1o +yVNAlXe6aUXw92CcghC9bNsKEO1+M52YY5+ofIXlS/SEQbvVYYBLZ5yeiglV6t3S +M6H+vTG0aP9YHzLn/KVOHzGQfXDP7qM5tkf+7diZe7o2fw6O7IvN6fsQXEQQj8TJ +UXJxv2/uJhcuy/tSDgXwHM8Uk34WNbRT7zGTGkQRX0gsbjAea/jYAoWv0ZvQRwpq +Pe79D/i7Cep8qWnA+7AE/3B3S/3dEEYmc0lpe1366A/6GEgk3ktr9PEoQrLChs6I +tu3wnNLB2euC8IKGLQFpGtOO/2/hiAKjyajaBP25w1jF0Wl8Bbqne3uZ2q1GyPFJ +YRmT7/OXpmOH/FVLtwS+8ng1cAmpCujPwteJZNcDG0sF2n/sc0+SQf49fdyUK0ty ++VUwFj9tmWxyR/M= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.dedyn.io/ca.cer b/consul_backup/letsconsul/fbleagh.dedyn.io/ca.cer new file mode 100644 index 0000000..a0f1644 --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.dedyn.io/ca.cer @@ -0,0 +1,44 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAwygAwIBAgIQI7dt48G7KxpRlh4I6rdk6DAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMTMw +MDAwMDAwWhcNMzAwMTI5MjM1OTU5WjBLMQswCQYDVQQGEwJBVDEQMA4GA1UEChMH +WmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBFQ0MgRG9tYWluIFNlY3VyZSBTaXRl +IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAENkFhFytTJe2qypTk1tpIV+9QuoRk +gte7BRvWHwYk9qUznYzn8QtVaGOCMBBfjWXsqqivl8q1hs4wAYl03uNOXgFu7iZ7 +zFP6I6T3RB0+TR5fZqathfby47yOCZiAJI4go4IBdTCCAXEwHwYDVR0jBBgwFoAU 
+OuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYEFA9r5kvOOUeu9n6QHnnwMJGS +yF+jMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAEGzAZMA0GCysGAQQBsjEBAgJO +MAgGBmeBDAECATBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVz +dC5jb20vVVNFUlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYI +KwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5j +b20vVVNFUlRydXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6 +Ly9vY3NwLnVzZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDZwAwZAIwJHBUDwHJQN3I +VNltVMrICMqYQ3TYP/TXqV9t8mG5cAomG2MwqIsxnL937Gewf6WIAjAlrauksO6N +UuDdDXyd330druJcZJx0+H5j5cFOYBaGsKdeGW7sCMaR2PsDFKGllas= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIQVmcdBOpPmUxvEIFHWdJ1lDANBgkqhkiG9w0BAQwFADB7 +MQswCQYDVQQGEwJHQjEbMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYD +VQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UE +AwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTE5MDMxMjAwMDAwMFoXDTI4 +MTIzMTIzNTk1OVowgYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5 +MRQwEgYDVQQHEwtKZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBO +ZXR3b3JrMS4wLAYDVQQDEyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEGqxUWqn5aCPnetUkb1PGWthL +q8bVttHmc3Gu3ZzWDGH926CJA7gFFOxXzu5dP+Ihs8731Ip54KODfi2X0GHE8Znc +JZFjq38wo7Rw4sehM5zzvy5cU7Ffs30yf4o043l5o4HyMIHvMB8GA1UdIwQYMBaA +FKARCiM+lvEH7OKvKe+CpX/QMKS0MB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1 +xmNjmjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zARBgNVHSAECjAI +MAYGBFUdIAAwQwYDVR0fBDwwOjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5j +b20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNAYIKwYBBQUHAQEEKDAmMCQG +CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQEM +BQADggEBABns652JLCALBIAdGN5CmXKZFjK9Dpx1WywV4ilAbe7/ctvbq5AfjJXy +ij0IckKJUAfiORVsAYfZFhr1wHUrxeZWEQff2Ji8fJ8ZOd+LygBkc7xGEJuTI42+ +FsMuCIKchjN0djsoTI0DQoWz4rIjQtUfenVqGtF8qmchxDM6OW1TyaLtYiKou+JV +bJlsQ2uRl9EMC5MCHdK8aXdJ5htN978UeAOwproLtOGFfy/cQjutdAFI3tZs4RmY 
+CV4Ks2dH/hzg1cEo70qLRDEmBDeNiXQ2Lu+lIg+DdEmSx/cQwgwp+7e9un/jX9Wf +8qn0dNW44bOwgeThpWOjzOoEeJBuv/c= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.cer b/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.cer new file mode 100644 index 0000000..91497fe --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.cer @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCA4mgAwIBAgIQWtkE2yzjtUSkH+96px28OTAKBggqhkjOPQQDAzBLMQsw +CQYDVQQGEwJBVDEQMA4GA1UEChMHWmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBF +Q0MgRG9tYWluIFNlY3VyZSBTaXRlIENBMB4XDTIzMDMwODAwMDAwMFoXDTIzMDYw +NjIzNTk1OVowGzEZMBcGA1UEAxMQZmJsZWFnaC5kZWR5bi5pbzBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABGqkxVFQH7yCqOZP0MQ8WjZLYX319DcG1eArZ7C9zJ5I +nELzasTTPiFMh2SB8AcNSRPYtX+6CyoBjIXPgOWYvRCjggJ9MIICeTAfBgNVHSME +GDAWgBQPa+ZLzjlHrvZ+kB558DCRkshfozAdBgNVHQ4EFgQU01SDptDSomC9SRKU ++bY0XJck8zkwDgYDVR0PAQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMEkGA1UdIARCMEAwNAYLKwYBBAGyMQECAk4w +JTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwCAYGZ4EMAQIB +MIGIBggrBgEFBQcBAQR8MHowSwYIKwYBBQUHMAKGP2h0dHA6Ly96ZXJvc3NsLmNy +dC5zZWN0aWdvLmNvbS9aZXJvU1NMRUNDRG9tYWluU2VjdXJlU2l0ZUNBLmNydDAr +BggrBgEFBQcwAYYfaHR0cDovL3plcm9zc2wub2NzcC5zZWN0aWdvLmNvbTCCAQUG +CisGAQQB1nkCBAIEgfYEgfMA8QB2AK33vvp8/xDIi509nB4+GGq0Zyldz7EMJMqF +hjTr3IKKAAABhsKcwf4AAAQDAEcwRQIgA6y7EukoNVTev0mEXlbOtIhJROjFIuLl +X0qvXlq9WLICIQDpopPhM16oT6IkpEwGkjnbmho18JAl7/MAbFpDNi89KwB3AHoy +jFTYty22IOo44FIe6YQWcDIThU070ivBOlejUutSAAABhsKcwmIAAAQDAEgwRgIh +AMQvXj0sJ+80JoPLb1Y5J9dDMnB1ahIl1gourbCqbC2oAiEA1PKQu47rlUAdiDDm +j3nEQK01u2HjQVjxVa7VQ8WNLT8wGwYDVR0RBBQwEoIQZmJsZWFnaC5kZWR5bi5p +bzAKBggqhkjOPQQDAwNoADBlAjEA6qTxxO89gKy5v7uYCJix2an2y2GiPhmg2jbC ++1JMSGAmA6R9NgF3vQLPkYlGexShAjBAgiOuRzx2RTtv/JI84xXyTPBkTunQ1ikH +9fsxIA8PkZP/Fgj0ctifN5Lxy7gmZgI= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.key 
b/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.key new file mode 100644 index 0000000..13c42db --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.dedyn.io/fbleagh.dedyn.io.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIOODcSkLPIoEqIwrFc7Uw6Mvy4FQlIMqVp01CYDV6HRqoAoGCCqGSM49 +AwEHoUQDQgAEaqTFUVAfvIKo5k/QxDxaNkthffX0NwbV4CtnsL3MnkicQvNqxNM+ +IUyHZIHwBw1JE9i1f7oLKgGMhc+A5Zi9EA== +-----END EC PRIVATE KEY----- diff --git a/consul_backup/letsconsul/fbleagh.dedyn.io/fullchain.cer b/consul_backup/letsconsul/fbleagh.dedyn.io/fullchain.cer new file mode 100644 index 0000000..1185ab6 --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.dedyn.io/fullchain.cer @@ -0,0 +1,68 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCA4mgAwIBAgIQWtkE2yzjtUSkH+96px28OTAKBggqhkjOPQQDAzBLMQsw +CQYDVQQGEwJBVDEQMA4GA1UEChMHWmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBF +Q0MgRG9tYWluIFNlY3VyZSBTaXRlIENBMB4XDTIzMDMwODAwMDAwMFoXDTIzMDYw +NjIzNTk1OVowGzEZMBcGA1UEAxMQZmJsZWFnaC5kZWR5bi5pbzBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABGqkxVFQH7yCqOZP0MQ8WjZLYX319DcG1eArZ7C9zJ5I +nELzasTTPiFMh2SB8AcNSRPYtX+6CyoBjIXPgOWYvRCjggJ9MIICeTAfBgNVHSME +GDAWgBQPa+ZLzjlHrvZ+kB558DCRkshfozAdBgNVHQ4EFgQU01SDptDSomC9SRKU ++bY0XJck8zkwDgYDVR0PAQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYw +FAYIKwYBBQUHAwEGCCsGAQUFBwMCMEkGA1UdIARCMEAwNAYLKwYBBAGyMQECAk4w +JTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwCAYGZ4EMAQIB +MIGIBggrBgEFBQcBAQR8MHowSwYIKwYBBQUHMAKGP2h0dHA6Ly96ZXJvc3NsLmNy +dC5zZWN0aWdvLmNvbS9aZXJvU1NMRUNDRG9tYWluU2VjdXJlU2l0ZUNBLmNydDAr +BggrBgEFBQcwAYYfaHR0cDovL3plcm9zc2wub2NzcC5zZWN0aWdvLmNvbTCCAQUG +CisGAQQB1nkCBAIEgfYEgfMA8QB2AK33vvp8/xDIi509nB4+GGq0Zyldz7EMJMqF +hjTr3IKKAAABhsKcwf4AAAQDAEcwRQIgA6y7EukoNVTev0mEXlbOtIhJROjFIuLl +X0qvXlq9WLICIQDpopPhM16oT6IkpEwGkjnbmho18JAl7/MAbFpDNi89KwB3AHoy +jFTYty22IOo44FIe6YQWcDIThU070ivBOlejUutSAAABhsKcwmIAAAQDAEgwRgIh +AMQvXj0sJ+80JoPLb1Y5J9dDMnB1ahIl1gourbCqbC2oAiEA1PKQu47rlUAdiDDm +j3nEQK01u2HjQVjxVa7VQ8WNLT8wGwYDVR0RBBQwEoIQZmJsZWFnaC5kZWR5bi5p 
+bzAKBggqhkjOPQQDAwNoADBlAjEA6qTxxO89gKy5v7uYCJix2an2y2GiPhmg2jbC ++1JMSGAmA6R9NgF3vQLPkYlGexShAjBAgiOuRzx2RTtv/JI84xXyTPBkTunQ1ikH +9fsxIA8PkZP/Fgj0ctifN5Lxy7gmZgI= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhTCCAwygAwIBAgIQI7dt48G7KxpRlh4I6rdk6DAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMTMw +MDAwMDAwWhcNMzAwMTI5MjM1OTU5WjBLMQswCQYDVQQGEwJBVDEQMA4GA1UEChMH +WmVyb1NTTDEqMCgGA1UEAxMhWmVyb1NTTCBFQ0MgRG9tYWluIFNlY3VyZSBTaXRl +IENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAENkFhFytTJe2qypTk1tpIV+9QuoRk +gte7BRvWHwYk9qUznYzn8QtVaGOCMBBfjWXsqqivl8q1hs4wAYl03uNOXgFu7iZ7 +zFP6I6T3RB0+TR5fZqathfby47yOCZiAJI4go4IBdTCCAXEwHwYDVR0jBBgwFoAU +OuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYEFA9r5kvOOUeu9n6QHnnwMJGS +yF+jMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAiBgNVHSAEGzAZMA0GCysGAQQBsjEBAgJO +MAgGBmeBDAECATBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVz +dC5jb20vVVNFUlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYI +KwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5j +b20vVVNFUlRydXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6 +Ly9vY3NwLnVzZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDZwAwZAIwJHBUDwHJQN3I +VNltVMrICMqYQ3TYP/TXqV9t8mG5cAomG2MwqIsxnL937Gewf6WIAjAlrauksO6N +UuDdDXyd330druJcZJx0+H5j5cFOYBaGsKdeGW7sCMaR2PsDFKGllas= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIQVmcdBOpPmUxvEIFHWdJ1lDANBgkqhkiG9w0BAQwFADB7 +MQswCQYDVQQGEwJHQjEbMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYD +VQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UE +AwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTE5MDMxMjAwMDAwMFoXDTI4 +MTIzMTIzNTk1OVowgYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5 +MRQwEgYDVQQHEwtKZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBO +ZXR3b3JrMS4wLAYDVQQDEyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0 
+aG9yaXR5MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEGqxUWqn5aCPnetUkb1PGWthL +q8bVttHmc3Gu3ZzWDGH926CJA7gFFOxXzu5dP+Ihs8731Ip54KODfi2X0GHE8Znc +JZFjq38wo7Rw4sehM5zzvy5cU7Ffs30yf4o043l5o4HyMIHvMB8GA1UdIwQYMBaA +FKARCiM+lvEH7OKvKe+CpX/QMKS0MB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1 +xmNjmjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zARBgNVHSAECjAI +MAYGBFUdIAAwQwYDVR0fBDwwOjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5j +b20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNAYIKwYBBQUHAQEEKDAmMCQG +CCsGAQUFBzABhhhodHRwOi8vb2NzcC5jb21vZG9jYS5jb20wDQYJKoZIhvcNAQEM +BQADggEBABns652JLCALBIAdGN5CmXKZFjK9Dpx1WywV4ilAbe7/ctvbq5AfjJXy +ij0IckKJUAfiORVsAYfZFhr1wHUrxeZWEQff2Ji8fJ8ZOd+LygBkc7xGEJuTI42+ +FsMuCIKchjN0djsoTI0DQoWz4rIjQtUfenVqGtF8qmchxDM6OW1TyaLtYiKou+JV +bJlsQ2uRl9EMC5MCHdK8aXdJ5htN978UeAOwproLtOGFfy/cQjutdAFI3tZs4RmY +CV4Ks2dH/hzg1cEo70qLRDEmBDeNiXQ2Lu+lIg+DdEmSx/cQwgwp+7e9un/jX9Wf +8qn0dNW44bOwgeThpWOjzOoEeJBuv/c= +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.duckdns.org/ca.cer b/consul_backup/letsconsul/fbleagh.duckdns.org/ca.cer new file mode 100644 index 0000000..6626b9c --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.duckdns.org/ca.cer @@ -0,0 +1,31 @@ + +-----BEGIN CERTIFICATE----- +MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw +WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg +RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP +R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx +sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm +NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg +Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG +/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB 
+Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA +FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw +AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw +Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB +gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W +PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl +ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz +CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm +lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4 +avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2 +yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O +yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids +hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+ +HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv +MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX +nLRbwHOoq7hHwg== +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.cer b/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.cer new file mode 100644 index 0000000..3e21a26 --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.cer @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEKTCCAxGgAwIBAgISBFI4R7wyurqkuEa8OoOaWmktMA0GCSqGSIb3DQEBCwUA +MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD +EwJSMzAeFw0yNDAyMjcxNDM5MzNaFw0yNDA1MjcxNDM5MzJaMB4xHDAaBgNVBAMT +E2ZibGVhZ2guZHVja2Rucy5vcmcwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQg +GhGMUACI6cinDLdFVX4YMRJsJMqHrroKmjVLpcb7k4GZpp5IZ/5ik/O5RnLyhhDU +qJbszmOfNhxooCV24Xfio4ICFjCCAhIwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBS/ +JmOInfWkqR+OBCYBP4qPBNLo8DAfBgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+d +ixTCxjBVBggrBgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxl 
+bmNyLm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL3IzLmkubGVuY3Iub3JnLzAeBgNV +HREEFzAVghNmYmxlYWdoLmR1Y2tkbnMub3JnMBMGA1UdIAQMMAowCAYGZ4EMAQIB +MIIBBQYKKwYBBAHWeQIEAgSB9gSB8wDxAHcAO1N3dT4tuYBOizBbBv5AO2fYT8P0 +x70ADS1yb+H61BcAAAGN6ziiQgAABAMASDBGAiEAyN/Q4W+6a68JUJqPJDziB5SX +OhgQcwdFL3GKuJ+8BPACIQDM3b6jFUMO/39+Yy7JDNUEfv7Fy93lZqxamlzm1BR6 +qgB2AKLiv9Ye3i8vB6DWTm03p9xlQ7DGtS6i2reK+Jpt9RfYAAABjes4okEAAAQD +AEcwRQIhALWq457YbcZQvY7tDxRAza1x9SkWyPYpo4BZZqKuqnq+AiBXnC2+O/yh +czGL/VJ5/JGva6KlgmtPGZZY9Rj4bJfw/TANBgkqhkiG9w0BAQsFAAOCAQEAsA8U +URxOF+bE4dZtSy3rPGE7txh9hJ61Qs/Qb+GjX5zVm7FVqtl+JvB7KQO4C81ckHak +kIWA6u59oCRHZ+rQcHadbghQYSN3LX3cFJGnf5oOfeln0CpKWdR7aSdsd4KjNHA3 +hzMTZsaAb5iq6kvs045mpEKCOAaahPgA5w3FUVuFHTrFon0qw++U3Wy+QdBi4eR3 +auVLLzMWMSQrrVbm8MyRt0i0A/1sqM+uhe4D6rnaHGp84AW5yKwRAqzs8UydHyDr +YKlYfXzqD9gOJl1qrpIg9u90oEVIRQzqEWJByHrDf/dc++ub41stftLNAL6GWDbT +80uKExuPI0424OIWFw== +-----END CERTIFICATE----- diff --git a/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.key b/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.key new file mode 100644 index 0000000..9ba6fa8 --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.duckdns.org/fbleagh.duckdns.org.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIDTRN7o5o3K8DpFpas9m3+lxK7DgDTRDnfwtt7ReuL4loAoGCCqGSM49 +AwEHoUQDQgAEIBoRjFAAiOnIpwy3RVV+GDESbCTKh666Cpo1S6XG+5OBmaaeSGf+ +YpPzuUZy8oYQ1KiW7M5jnzYcaKAlduF34g== +-----END EC PRIVATE KEY----- diff --git a/consul_backup/letsconsul/fbleagh.duckdns.org/fullchain.cer b/consul_backup/letsconsul/fbleagh.duckdns.org/fullchain.cer new file mode 100644 index 0000000..f61c51d --- /dev/null +++ b/consul_backup/letsconsul/fbleagh.duckdns.org/fullchain.cer @@ -0,0 +1,56 @@ +-----BEGIN CERTIFICATE----- +MIIEKTCCAxGgAwIBAgISBFI4R7wyurqkuEa8OoOaWmktMA0GCSqGSIb3DQEBCwUA +MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD +EwJSMzAeFw0yNDAyMjcxNDM5MzNaFw0yNDA1MjcxNDM5MzJaMB4xHDAaBgNVBAMT 
+E2ZibGVhZ2guZHVja2Rucy5vcmcwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQg +GhGMUACI6cinDLdFVX4YMRJsJMqHrroKmjVLpcb7k4GZpp5IZ/5ik/O5RnLyhhDU +qJbszmOfNhxooCV24Xfio4ICFjCCAhIwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBS/ +JmOInfWkqR+OBCYBP4qPBNLo8DAfBgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+d +ixTCxjBVBggrBgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxl +bmNyLm9yZzAiBggrBgEFBQcwAoYWaHR0cDovL3IzLmkubGVuY3Iub3JnLzAeBgNV +HREEFzAVghNmYmxlYWdoLmR1Y2tkbnMub3JnMBMGA1UdIAQMMAowCAYGZ4EMAQIB +MIIBBQYKKwYBBAHWeQIEAgSB9gSB8wDxAHcAO1N3dT4tuYBOizBbBv5AO2fYT8P0 +x70ADS1yb+H61BcAAAGN6ziiQgAABAMASDBGAiEAyN/Q4W+6a68JUJqPJDziB5SX +OhgQcwdFL3GKuJ+8BPACIQDM3b6jFUMO/39+Yy7JDNUEfv7Fy93lZqxamlzm1BR6 +qgB2AKLiv9Ye3i8vB6DWTm03p9xlQ7DGtS6i2reK+Jpt9RfYAAABjes4okEAAAQD +AEcwRQIhALWq457YbcZQvY7tDxRAza1x9SkWyPYpo4BZZqKuqnq+AiBXnC2+O/yh +czGL/VJ5/JGva6KlgmtPGZZY9Rj4bJfw/TANBgkqhkiG9w0BAQsFAAOCAQEAsA8U +URxOF+bE4dZtSy3rPGE7txh9hJ61Qs/Qb+GjX5zVm7FVqtl+JvB7KQO4C81ckHak +kIWA6u59oCRHZ+rQcHadbghQYSN3LX3cFJGnf5oOfeln0CpKWdR7aSdsd4KjNHA3 +hzMTZsaAb5iq6kvs045mpEKCOAaahPgA5w3FUVuFHTrFon0qw++U3Wy+QdBi4eR3 +auVLLzMWMSQrrVbm8MyRt0i0A/1sqM+uhe4D6rnaHGp84AW5yKwRAqzs8UydHyDr +YKlYfXzqD9gOJl1qrpIg9u90oEVIRQzqEWJByHrDf/dc++ub41stftLNAL6GWDbT +80uKExuPI0424OIWFw== +-----END CERTIFICATE----- + +-----BEGIN CERTIFICATE----- +MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw +WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg +RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP +R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx +sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm +NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg +Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG 
+/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB +Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA +FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw +AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw +Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB +gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W +PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl +ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz +CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm +lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4 +avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2 +yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O +yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids +hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+ +HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv +MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX +nLRbwHOoq7hHwg== +-----END CERTIFICATE----- diff --git a/consul_backup/metadata.json b/consul_backup/metadata.json index 074934d..38fa4c1 100644 --- a/consul_backup/metadata.json +++ b/consul_backup/metadata.json @@ -1,8 +1,7 @@ { - "backup_timestamp": "2025-11-22T17:50:56.601241", - "total_keys": 0, - "successful_backups": 0, - "failed_backups": 0, - "consul_address": "http://192.168.4.250:4646", - "status": "empty_kv_store" + "backup_timestamp": "2025-11-22T09:57:22.836671", + "total_keys": 79, + "successful_backups": 74, + "failed_backups": 5, + "consul_address": "http://consul.service.dc1.consul:8500" } \ No newline at end of file diff --git a/consul_backup/nginx b/consul_backup/nginx new file mode 100644 index 0000000..be0465d --- /dev/null +++ b/consul_backup/nginx @@ -0,0 +1 @@ +ee2c95d2-3341-ab8b-0067-b663febe56cb \ No newline at end of file diff --git 
a/consul_backup/nginx_index b/consul_backup/nginx_index new file mode 100644 index 0000000..7a6d998 --- /dev/null +++ b/consul_backup/nginx_index @@ -0,0 +1,42 @@ + + +Dashboard + + + + + + +
+
+ + Dashboard + +
+
+

Tools

+ + + {{range services}} + {{range $i, $s :=service .Name}} + {{ if eq $i 0 }} + {{if .Tags | contains "tools"}} +
+

{{.Name}}

+ {{$iconname := .Name}} + {{range $tag, $services := service .Name | byTag }}{{if $tag | regexMatch "logo=*"}}{{$iconname = index ($tag | split "=") 1}}{{end}}{{end}} + HTML tutorial +
+ {{end}}{{end}}{{end}}{{end}} + + +
+

+ Node Environment Information:
+ node_id: {{ env "node.unique.id" }}
+ datacenter: {{ env "NOMAD_DC" }} +

+
+ + + \ No newline at end of file diff --git a/consul_backup/nomad/postgres/config b/consul_backup/nomad/postgres/config new file mode 100644 index 0000000..7bca08c --- /dev/null +++ b/consul_backup/nomad/postgres/config @@ -0,0 +1 @@ +{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":1048576,"postgresql":{"use_pg_rewind":true,"use_slots":true,"parameters":{"timezone":"UTC","max_connections":100,"shared_preload_libraries":"pg_stat_statements","shared_buffers":"64MB","work_mem":"16MB","effective_cache_size":"512MB","tcp_keepalives_idle":300,"timescaledb.telemetry_level":false}}} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/failover b/consul_backup/nomad/postgres/failover new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/consul_backup/nomad/postgres/failover @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/history b/consul_backup/nomad/postgres/history new file mode 100644 index 0000000..fa4c90b --- /dev/null +++ b/consul_backup/nomad/postgres/history @@ -0,0 +1 @@ +[[1,100663456,"no recovery target specified","2023-11-22T14:54:03.917160+00:00","pg-odroid8"],[2,117440672,"no recovery target specified","2023-11-22T14:54:24.270462+00:00","pg-odroid8"],[3,134217888,"no recovery target specified","2023-11-22T14:54:55.000427+00:00","pg-odroid8"],[4,53314174768,"no recovery target specified","2024-01-19T18:25:00.418034+00:00","pg-odroid5"],[5,145734093624,"no recovery target specified","2024-04-10T15:36:16.021244+00:00","pg-odroid6"],[6,145734220176,"no recovery target specified","2024-04-10T15:37:20.554160+00:00","pg-odroid8"],[7,145868225648,"no recovery target specified","2024-04-10T18:37:28.673762+00:00","pg-odroid5"],[8,145911155312,"no recovery target specified","2024-04-10T19:07:56.388325+00:00","pg-odroid8"],[9,145963488664,"no recovery target specified","2024-04-10T19:48:27.471045+00:00","pg-odroid8"],[10,197199837088,"no recovery target 
specified","2024-05-28T09:44:54.248336+00:00","pg-odroid5"],[11,285017495384,"no recovery target specified","2024-08-20T15:56:09.323339+00:00","pg-odroid5"],[12,285128296656,"no recovery target specified","2024-08-20T18:38:02.849375+00:00","pg-odroid8"],[13,285145563296,"no recovery target specified","2024-08-20T18:39:40.239689+00:00","pg-odroid8"],[14,285162340512,"no recovery target specified","2024-08-20T18:41:47.511937+00:00","pg-odroid8"],[15,285179117728,"no recovery target specified"],[16,285195894944,"no recovery target specified","2024-08-20T18:46:15.849316+00:00","pg-odroid8"],[17,285212672160,"no recovery target specified","2024-08-20T18:47:28.161998+00:00","pg-odroid8"],[18,285246226592,"no recovery target specified","2024-08-20T18:59:05.195106+00:00","pg-odroid8"],[19,285263003808,"no recovery target specified","2024-08-20T19:04:11.441998+00:00","pg-odroid8"],[20,285263886904,"no recovery target specified","2024-08-20T19:09:34.595897+00:00","pg-odroid6"],[21,285279781024,"no recovery target specified","2024-08-20T19:11:34.043102+00:00","pg-odroid6"],[22,285313335456,"no recovery target specified","2024-08-20T19:25:24.800781+00:00","pg-odroid6"],[23,285330112672,"no recovery target specified","2024-08-20T19:29:04.145698+00:00","pg-odroid6"],[24,285346889888,"no recovery target specified","2024-08-20T19:30:20.102163+00:00","pg-odroid6"],[25,285363667104,"no recovery target specified","2024-08-20T19:33:57.034897+00:00","pg-odroid6"],[26,285397221536,"no recovery target specified","2024-08-20T19:55:48.258175+00:00","pg-odroid6"],[27,285430775968,"no recovery target specified","2024-08-20T20:04:32.489640+00:00","pg-odroid6"],[28,285447553184,"no recovery target specified","2024-08-20T20:06:58.382332+00:00","pg-odroid6"],[29,285464330400,"no recovery target specified","2024-08-20T20:08:50.312989+00:00","pg-odroid6"],[30,285481107616,"no recovery target specified","2024-08-20T20:10:38.771433+00:00","pg-odroid6"],[31,285497884832,"no recovery target 
specified","2024-08-20T20:12:07.516702+00:00","pg-odroid6"],[32,285514662048,"no recovery target specified","2024-08-20T20:19:29.254260+00:00","pg-odroid6"],[33,299830577256,"no recovery target specified","2024-09-04T16:28:41.656898+00:00","pg-odroid6"],[34,299944591936,"no recovery target specified","2024-09-04T19:23:33.941710+00:00","pg-odroid6"],[35,299958135512,"no recovery target specified","2024-09-04T19:37:25.226900+00:00","pg-odroid5"],[36,299958153080,"no recovery target specified","2024-09-04T19:38:17.426424+00:00","pg-odroid8"],[37,299959378976,"no recovery target specified","2024-09-04T19:39:48.779276+00:00","pg-odroid5"],[38,299963735048,"no recovery target specified","2024-09-04T19:42:50.612400+00:00","pg-odroid8"],[39,299966741144,"no recovery target specified","2024-09-04T19:45:22.125366+00:00","pg-odroid7"],[40,299966890480,"no recovery target specified","2024-09-04T19:45:56.502777+00:00","pg-odroid7"],[41,299976622240,"no recovery target specified","2024-09-04T19:46:21.003800+00:00","pg-odroid7"],[42,299993399456,"no recovery target specified"],[44,300005897288,"no recovery target specified","2024-09-04T19:50:34.839147+00:00","pg-odroid7"],[45,300007823328,"no recovery target specified","2024-09-04T19:53:58.032952+00:00","pg-odroid7"],[46,300012403136,"no recovery target specified","2024-09-04T19:58:27.998599+00:00","pg-odroid7"],[47,300012552728,"no recovery target specified","2024-09-04T19:59:30.765842+00:00","pg-odroid7"],[48,300012667200,"no recovery target specified","2024-09-04T20:00:27.792807+00:00","pg-odroid7"],[49,300012855960,"no recovery target specified","2024-09-04T20:02:27.671086+00:00","pg-odroid7"],[50,300013651480,"no recovery target specified","2024-09-04T20:03:26.142169+00:00","pg-odroid7"],[51,300013651880,"no recovery target specified","2024-09-04T20:04:08.728422+00:00","pg-odroid7"],[52,300014037512,"no recovery target specified","2024-09-04T20:07:07.985958+00:00","pg-odroid7"],[53,300014184904,"no recovery target 
specified","2024-09-04T20:07:40.323687+00:00","pg-odroid7"],[54,300014185304,"no recovery target specified","2024-09-04T20:08:12.833426+00:00","pg-odroid7"],[55,300014185704,"no recovery target specified","2024-09-04T20:08:45.107155+00:00","pg-odroid7"],[56,300017646896,"no recovery target specified","2024-09-04T20:11:09.682921+00:00","pg-odroid7"],[57,300077285536,"no recovery target specified","2024-09-04T21:04:11.429796+00:00","pg-odroid7"],[58,300078125664,"no recovery target specified","2024-09-04T21:06:40.565980+00:00","pg-odroid7"],[59,300078126064,"no recovery target specified"],[60,300078126464,"no recovery target specified","2024-09-04T21:08:09.658868+00:00","pg-odroid7"],[61,300088657584,"no recovery target specified","2024-09-04T21:12:12.032158+00:00","pg-odroid7"],[62,300090413936,"no recovery target specified","2024-09-04T21:18:54.270204+00:00","pg-odroid7"],[63,300099182896,"no recovery target specified","2024-09-04T21:32:16.493126+00:00","pg-odroid7"],[64,300099183296,"no recovery target specified","2024-09-04T21:32:49.663018+00:00","pg-odroid7"],[65,300103813624,"no recovery target specified","2024-09-04T21:39:11.424612+00:00","pg-odroid7"],[66,300104125440,"no recovery target specified","2024-09-04T21:40:15.384206+00:00","pg-odroid7"],[67,300104636976,"no recovery target specified","2024-09-04T21:41:13.731481+00:00","pg-odroid7"],[68,300105605720,"no recovery target specified","2024-09-04T21:44:45.955364+00:00","pg-odroid7"],[69,300106642064,"no recovery target specified","2024-09-04T21:46:48.194194+00:00","pg-odroid7"],[70,300110839968,"no recovery target specified","2024-09-04T21:48:50.341010+00:00","pg-odroid7"],[71,300111876496,"no recovery target specified","2024-09-04T21:53:12.527622+00:00","pg-odroid7"],[72,300127617184,"no recovery target specified","2024-09-04T21:57:07.140681+00:00","pg-odroid7"],[73,300129726880,"no recovery target specified","2024-09-04T22:01:13.906407+00:00","pg-odroid7"],[74,300130294648,"no recovery target 
specified"],[75,300130295048,"no recovery target specified","2024-09-04T22:03:10.832909+00:00","pg-odroid7"],[76,300130295448,"no recovery target specified","2024-09-04T22:04:12.756352+00:00","pg-odroid7"],[77,300130552752,"no recovery target specified","2024-09-04T22:06:44.800804+00:00","pg-odroid7"],[78,300135553992,"no recovery target specified","2024-09-04T22:11:43.201389+00:00","pg-odroid7"],[79,300137003624,"no recovery target specified","2024-09-04T22:13:18.586689+00:00","pg-odroid7"],[80,300137004024,"no recovery target specified","2024-09-04T22:14:06.073328+00:00","pg-odroid7"],[81,300137665896,"no recovery target specified","2024-09-04T22:16:50.850485+00:00","pg-odroid7"],[82,300137945088,"no recovery target specified"],[83,300137945488,"no recovery target specified","2024-09-04T22:18:18.123334+00:00","pg-odroid7"],[84,300138388664,"no recovery target specified","2024-09-04T22:19:44.608140+00:00","pg-odroid7"],[85,300139292448,"no recovery target specified","2024-09-04T22:21:57.339515+00:00","pg-odroid7"],[86,300139517744,"no recovery target specified","2024-09-04T22:24:28.259900+00:00","pg-odroid7"],[87,300140419848,"no recovery target specified","2024-09-04T22:26:56.824155+00:00","pg-odroid7"],[88,300144394400,"no recovery target specified","2024-09-04T22:30:24.313409+00:00","pg-odroid7"],[89,300145056240,"no recovery target specified","2024-09-04T22:34:34.859528+00:00","pg-odroid7"],[90,300145892360,"no recovery target specified","2024-09-04T22:40:37.240125+00:00","pg-odroid7"],[91,300146069608,"no recovery target specified"],[92,300146070008,"no recovery target specified"],[93,300146070408,"no recovery target specified","2024-09-04T22:43:03.555225+00:00","pg-odroid7"],[94,300146448256,"no recovery target specified","2024-09-04T22:47:17.263862+00:00","pg-odroid7"],[95,300146568600,"no recovery target specified","2024-09-04T22:49:10.265587+00:00","pg-odroid7"],[96,300146833528,"no recovery target 
specified","2024-09-04T22:53:05.373624+00:00","pg-odroid7"],[97,300147010216,"no recovery target specified","2024-09-04T22:54:33.250156+00:00","pg-odroid7"],[98,300147040464,"no recovery target specified","2024-09-04T22:55:45.581897+00:00","pg-odroid7"],[99,300197458368,"no recovery target specified","2024-09-04T23:08:17.544300+00:00","pg-odroid7"],[100,300197469168,"no recovery target specified","2024-09-04T23:08:49.834772+00:00","pg-odroid7"],[101,300198111000,"no recovery target specified","2024-09-04T23:10:18.585379+00:00","pg-odroid7"],[102,300198581400,"no recovery target specified","2024-09-04T23:11:45.843640+00:00","pg-odroid7"],[103,300198748992,"no recovery target specified","2024-09-04T23:12:17.901888+00:00","pg-odroid7"],[104,300205502016,"no recovery target specified","2024-09-04T23:14:42.063714+00:00","pg-odroid7"],[105,319975063712,"no recovery target specified","2024-09-24T13:29:44.309425+00:00","pg-odroid7"],[106,319976602400,"no recovery target specified","2024-09-24T13:41:53.858739+00:00","pg-odroid7"],[107,323016834888,"no recovery target specified","2024-09-27T16:42:03.259135+00:00","pg-odroid7"],[108,323112403336,"no recovery target specified","2024-09-27T22:32:57.344571+00:00","pg-odroid5"],[109,338583102360,"no recovery target specified","2024-10-13T23:31:07.453823+00:00","pg-odroid7"],[110,341558601040,"no recovery target specified","2024-10-16T23:10:29.770621+00:00","pg-odroid7"],[111,341651065792,"no recovery target specified","2024-10-17T00:43:14.722842+00:00","pg-odroid7"],[112,341848734096,"no recovery target specified","2024-10-17T05:00:23.856057+00:00","pg-odroid8"],[113,343175244952,"no recovery target specified","2024-10-18T12:05:18.238880+00:00","pg-odroid8"],[114,343177953440,"no recovery target specified","2024-10-18T12:08:04.961575+00:00","pg-odroid8"],[115,344318138400,"no recovery target specified","2024-10-19T16:31:45.720104+00:00","pg-odroid8"],[116,344318804128,"no recovery target 
specified","2024-10-19T16:32:01.193112+00:00","pg-odroid8"],[117,344331534920,"no recovery target specified","2024-10-19T16:48:43.238372+00:00","pg-odroid8"],[118,344378282296,"no recovery target specified","2024-10-19T17:13:12.439214+00:00","pg-odroid8"],[119,346224769080,"no recovery target specified","2024-10-21T14:06:46.047151+00:00","pg-odroid8"],[120,350291493024,"no recovery target specified","2024-10-25T10:03:25.936310+00:00","pg-odroid8"],[121,350392156320,"no recovery target specified","2024-10-25T12:48:24.869028+00:00","pg-odroid8"],[122,350425710752,"no recovery target specified","2024-10-25T13:32:29.096586+00:00","pg-odroid8"],[123,350442487968,"no recovery target specified","2024-10-25T13:40:00.366408+00:00","pg-odroid8"],[124,350508670392,"no recovery target specified","2024-10-25T15:29:41.138250+00:00","pg-odroid5"],[125,350512485408,"no recovery target specified","2024-10-25T15:38:24.297736+00:00","pg-odroid5"],[126,350513980848,"no recovery target specified","2024-10-25T15:44:26.607351+00:00","pg-odroid5"],[127,350514151488,"no recovery target specified","2024-10-25T15:47:11.428070+00:00","pg-odroid5"],[128,350526374048,"no recovery target specified","2024-10-25T15:48:45.245157+00:00","pg-odroid5"],[129,350543151264,"no recovery target specified","2024-10-25T15:50:55.788352+00:00","pg-odroid5"],[130,350559928480,"no recovery target specified","2024-10-25T15:51:47.935257+00:00","pg-odroid5"],[131,350593482912,"no recovery target specified","2024-10-25T16:23:23.749017+00:00","pg-odroid5"],[132,357765940160,"no recovery target specified","2024-11-01T14:56:35.876599+00:00","pg-odroid7"],[133,357807685792,"no recovery target specified","2024-11-01T15:35:34.494282+00:00","pg-odroid7"],[134,388639428168,"no recovery target specified","2024-12-05T17:31:27.559391+00:00","pg-odroid6"],[135,388644208800,"no recovery target specified","2024-12-05T17:36:51.009660+00:00","pg-odroid6"],[136,389568717472,"no recovery target 
specified","2024-12-06T16:40:34.234905+00:00","pg-odroid7"],[137,393297532776,"no recovery target specified","2024-12-10T06:22:36.318994+00:00","pg-odroid8"],[138,401940563536,"no recovery target specified","2024-12-20T00:25:02.812144+00:00","pg-odroid8"],[139,401948541088,"no recovery target specified","2024-12-20T00:35:38.691440+00:00","pg-odroid8"],[140,402145508968,"no recovery target specified","2024-12-20T06:09:53.781244+00:00","pg-odroid8"],[141,402201138432,"no recovery target specified","2024-12-20T07:31:05.846267+00:00","pg-odroid8"],[142,411613672296,"no recovery target specified","2024-12-31T04:33:04.612763+00:00","pg-odroid6"],[143,411627091096,"no recovery target specified","2024-12-31T04:34:30.901567+00:00","pg-odroid6"],[144,411629688384,"no recovery target specified"],[145,411629689520,"no recovery target specified"],[146,411629689920,"no recovery target specified"],[147,411629772744,"no recovery target specified"],[148,411629811992,"no recovery target specified","2024-12-31T04:41:59.222631+00:00","pg-odroid6"],[149,411630497240,"no recovery target specified","2024-12-31T04:45:22.138010+00:00","pg-odroid6"],[150,411630497640,"no recovery target specified","2024-12-31T04:48:10.595466+00:00","pg-odroid6"],[151,412971172000,"no recovery target specified","2025-01-01T17:12:46.492771+00:00","pg-odroid6"],[152,413055345648,"no recovery target specified"],[153,413055346048,"no recovery target specified","2025-01-01T19:27:40.440563+00:00","pg-odroid6"],[154,413058205680,"no recovery target specified","2025-01-01T19:39:35.580703+00:00","pg-odroid8"],[155,414699225248,"no recovery target specified","2025-01-03T16:23:36.983037+00:00","pg-odroid8"],[156,414723076112,"no recovery target specified","2025-01-03T16:44:20.924165+00:00","pg-odroid8"],[157,416328881640,"no recovery target specified","2025-01-05T13:03:54.500006+00:00","pg-odroid8"],[158,416328905848,"no recovery target specified","2025-01-05T13:04:58.252111+00:00","pg-odroid8"],[159,417244613128,"no 
recovery target specified","2025-01-06T13:05:12.832072+00:00","pg-odroid6"],[160,418498498872,"no recovery target specified","2025-01-07T22:24:22.832518+00:00","pg-odroid7"],[161,418870865344,"no recovery target specified","2025-01-08T06:54:27.113847+00:00","pg-odroid6"],[162,418889475728,"no recovery target specified","2025-01-08T07:33:49.748319+00:00","pg-odroid6"],[163,418928961784,"no recovery target specified","2025-01-08T08:20:14.491624+00:00","pg-odroid7"],[164,418943860896,"no recovery target specified","2025-01-08T08:20:45.689337+00:00","pg-odroid7"],[165,418949541728,"no recovery target specified","2025-01-08T08:29:33.054190+00:00","pg-odroid7"],[166,418950966928,"no recovery target specified","2025-01-08T08:40:55.603296+00:00","pg-odroid7"],[167,419081757952,"no recovery target specified","2025-01-08T13:03:39.688514+00:00","pg-odroid6"],[168,420637650976,"no recovery target specified","2025-01-10T13:09:53.011440+00:00","pg-odroid7"],[169,420642935984,"no recovery target specified","2025-01-10T13:16:01.750098+00:00","pg-odroid6"],[170,421719213656,"no recovery target specified","2025-01-12T00:00:36.698739+00:00","pg-odroid7"],[171,422138973160,"no recovery target specified","2025-01-12T13:04:59.773871+00:00","pg-odroid6"],[172,422272075000,"no recovery target specified","2025-01-12T17:32:42.204294+00:00","pg-odroid7"],[173,422418656568,"no recovery target specified","2025-01-12T23:10:57.371061+00:00","pg-odroid6"],[174,424392232936,"no recovery target specified","2025-01-15T13:06:17.545058+00:00","pg-odroid7"],[175,424725961656,"no recovery target specified","2025-01-16T00:03:24.375798+00:00","pg-odroid7"],[176,424732000416,"no recovery target specified","2025-01-16T00:06:40.906594+00:00","pg-odroid7"],[177,424739526992,"no recovery target specified","2025-01-16T00:37:46.505026+00:00","pg-odroid6"],[178,424788921384,"no recovery target specified","2025-01-16T01:34:23.247817+00:00","pg-odroid6"],[179,425852863208,"no recovery target 
specified","2025-01-17T12:47:41.800804+00:00","pg-odroid7"],[180,425861561968,"no recovery target specified","2025-01-17T13:04:01.605318+00:00","pg-odroid6"],[181,425863647672,"no recovery target specified","2025-01-17T13:07:32.345343+00:00","pg-odroid6"],[182,426306639784,"no recovery target specified","2025-01-18T03:18:13.002697+00:00","pg-odroid7"],[183,426309073536,"no recovery target specified","2025-01-18T03:28:08.848371+00:00","pg-odroid6"],[184,427000053672,"no recovery target specified","2025-01-19T00:02:43.341669+00:00","pg-odroid7"],[185,427002456472,"no recovery target specified","2025-01-19T00:13:09.249279+00:00","pg-odroid6"],[186,428720339824,"no recovery target specified","2025-01-21T00:28:25.572907+00:00","pg-odroid6"],[187,428954982800,"no recovery target specified","2025-01-21T07:08:07.305203+00:00","pg-odroid6"],[188,428975154496,"no recovery target specified","2025-01-21T07:39:36.044816+00:00","pg-odroid6"],[189,428975154776,"no recovery target specified","2025-01-21T07:40:29.908727+00:00","pg-odroid7"],[190,428990047752,"no recovery target specified","2025-01-21T07:51:16.388138+00:00","pg-odroid6"],[191,429106301736,"no recovery target specified","2025-01-21T09:01:01.119137+00:00","pg-odroid6"],[192,429110539008,"no recovery target specified","2025-01-21T09:02:33.668151+00:00","pg-odroid6"],[193,429117470176,"no recovery target specified","2025-01-21T09:05:32.049889+00:00","pg-odroid6"],[194,429194739872,"no recovery target specified","2025-01-21T10:33:11.200259+00:00","pg-odroid6"],[195,429286312432,"no recovery target specified","2025-01-21T13:07:32.821582+00:00","pg-odroid7"],[196,429292117784,"no recovery target specified","2025-01-21T13:16:57.679872+00:00","pg-odroid7"],[197,429293290328,"no recovery target specified","2025-01-21T13:31:00.973138+00:00","pg-odroid7"],[198,429293687688,"no recovery target specified","2025-01-21T13:36:22.379996+00:00","pg-odroid6"],[199,429330581256,"no recovery target 
specified","2025-01-21T14:19:16.329363+00:00","pg-odroid7"],[200,429345734816,"no recovery target specified","2025-01-21T14:19:43.870835+00:00","pg-odroid7"],[201,429348780496,"no recovery target specified","2025-01-21T14:24:47.479125+00:00","pg-odroid7"],[202,429375011704,"no recovery target specified","2025-01-21T14:37:15.887552+00:00","pg-odroid7"],[203,429375136776,"no recovery target specified","2025-01-21T15:10:34.234558+00:00","pg-odroid7"],[204,429384204512,"no recovery target specified","2025-01-21T15:38:08.960604+00:00","pg-odroid7"],[205,429406283136,"no recovery target specified","2025-01-21T15:45:20.667312+00:00","pg-odroid7"],[206,429412843680,"no recovery target specified","2025-01-21T15:47:57.183620+00:00","pg-odroid7"],[207,429414535720,"no recovery target specified","2025-01-21T15:57:44.883942+00:00","pg-odroid6"],[208,429422821896,"no recovery target specified","2025-01-21T16:20:12.737914+00:00","pg-odroid6"],[209,429471066632,"no recovery target specified","2025-01-30T22:40:49.498589+00:00","pg-odroid6"],[210,429547061408,"no recovery target specified","2025-01-30T23:24:18.662338+00:00","pg-odroid6"],[211,429563838624,"no recovery target specified","2025-01-31T01:45:21.130482+00:00","pg-odroid6"],[212,430163168312,"no recovery target specified","2025-02-02T15:26:44.448725+00:00","pg-odroid8"],[213,437131827112,"no recovery target specified","2025-02-20T17:36:27.493501+00:00","pg-opti1"],[214,439196061952,"no recovery target specified","2025-02-26T16:09:54.225011+00:00","pg-odroid6"],[215,440296461000,"no recovery target specified","2025-03-01T20:40:16.402555+00:00","pg-opti1"],[216,440589297872,"no recovery target specified","2025-03-02T19:11:26.456004+00:00","pg-odroid8"],[217,440589368408,"no recovery target specified","2025-03-02T19:13:18.740965+00:00","pg-odroid6"],[218,443651205392,"no recovery target specified","2025-03-10T09:33:34.980592+00:00","pg-odroid6"],[219,443651439624,"no recovery target 
specified","2025-03-10T09:36:17.025944+00:00","pg-odroid6"],[220,455323824992,"no recovery target specified","2025-04-07T14:37:30.548057+00:00","pg-odroid8"],[221,470601788080,"no recovery target specified","2025-05-05T09:36:03.089566+00:00","pg-odroid8"],[222,478310936136,"no recovery target specified","2025-05-18T17:45:14.794671+00:00","pg-odroid7"],[223,478311042472,"no recovery target specified","2025-05-18T17:49:58.343370+00:00","pg-odroid7"],[224,502147164672,"no recovery target specified","2025-07-01T14:03:51.162497+00:00","pg-odroid7"],[225,502772329328,"no recovery target specified","2025-07-02T16:34:55.854741+00:00","pg-odroid8"],[226,503451535816,"no recovery target specified","2025-07-03T16:32:39.421837+00:00","pg-odroid6"],[227,519722169880,"no recovery target specified","2025-07-31T19:27:02.878423+00:00","pg-odroid7"],[228,526768200984,"no recovery target specified","2025-08-12T21:29:14.773135+00:00","pg-odroid7"],[229,526920788832,"no recovery target specified","2025-08-13T05:04:16.577449+00:00","pg-odroid7"],[230,527089795232,"no recovery target specified","2025-08-13T10:19:40.196086+00:00","pg-odroid7"],[231,527106572448,"no recovery target specified","2025-08-13T11:06:03.851247+00:00","pg-odroid7"],[232,527123349664,"no recovery target specified","2025-08-13T11:46:27.304525+00:00","pg-odroid7"],[233,527126466656,"no recovery target specified","2025-08-13T12:24:29.685493+00:00","pg-odroid7"],[234,527128611944,"no recovery target specified","2025-08-13T12:35:31.771070+00:00","pg-odroid7"],[235,527156904096,"no recovery target specified","2025-08-13T13:30:00.545652+00:00","pg-odroid7"],[236,527844769952,"no recovery target specified","2025-08-14T18:24:17.712613+00:00","pg-odroid7"],[237,527862997048,"no recovery target specified","2025-08-14T21:46:50.503772+00:00","pg-odroid7"],[238,527862997448,"no recovery target specified","2025-08-14T21:47:31.706072+00:00","pg-odroid7"],[239,527876193184,"no recovery target 
specified","2025-08-14T23:48:04.022797+00:00","pg-odroid7"],[240,546090202296,"no recovery target specified","2025-09-14T13:00:37.056676+00:00","pg-opti1"],[241,564346966400,"no recovery target specified","2025-10-16T09:09:35.871181+00:00","pg-odroid8"],[242,586180615232,"no recovery target specified","2025-11-20T15:22:48.220735+00:00","pg-odroid8"],[243,586180934352,"no recovery target specified","2025-11-20T15:26:51.638859+00:00","pg-opti1"],[244,586268539872,"no recovery target specified","2025-11-20T17:56:05.502481+00:00","pg-odroid7"],[245,586910485512,"no recovery target specified","2025-11-21T15:14:27.325190+00:00","pg-odroid7"],[246,587601003000,"no recovery target specified","2025-11-22T16:16:34.031787+00:00","pg-opti1"],[247,587607173504,"no recovery target specified","2025-11-22T16:31:04.735188+00:00","pg-odroid8"]] \ No newline at end of file diff --git a/consul_backup/nomad/postgres/initialize b/consul_backup/nomad/postgres/initialize new file mode 100644 index 0000000..25410c4 --- /dev/null +++ b/consul_backup/nomad/postgres/initialize @@ -0,0 +1 @@ +7304297315619373086 \ No newline at end of file diff --git a/consul_backup/nomad/postgres/leader b/consul_backup/nomad/postgres/leader new file mode 100644 index 0000000..9b9b6b7 --- /dev/null +++ b/consul_backup/nomad/postgres/leader @@ -0,0 +1 @@ +pg-odroid8 \ No newline at end of file diff --git a/consul_backup/nomad/postgres/members/pg-odroid6 b/consul_backup/nomad/postgres/members/pg-odroid6 new file mode 100644 index 0000000..b550096 --- /dev/null +++ b/consul_backup/nomad/postgres/members/pg-odroid6 @@ -0,0 +1 @@ +{"conn_url":"postgres://192.168.4.226:5432/postgres","api_url":"http://192.168.4.226:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587649285064,"replication_state":"streaming","timeline":248} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/members/pg-odroid7 b/consul_backup/nomad/postgres/members/pg-odroid7 new file mode 100644 
index 0000000..a123bc3 --- /dev/null +++ b/consul_backup/nomad/postgres/members/pg-odroid7 @@ -0,0 +1 @@ +{"conn_url":"postgres://192.168.4.227:5432/postgres","api_url":"http://192.168.4.227:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587649284200,"replication_state":"streaming","timeline":248} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/members/pg-odroid8 b/consul_backup/nomad/postgres/members/pg-odroid8 new file mode 100644 index 0000000..9ba82fb --- /dev/null +++ b/consul_backup/nomad/postgres/members/pg-odroid8 @@ -0,0 +1 @@ +{"conn_url":"postgres://192.168.4.228:5432/postgres","api_url":"http://192.168.4.228:8008/patroni","state":"running","role":"primary","version":"4.0.4","xlog_location":587649284200,"timeline":248} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/members/pg-opti1 b/consul_backup/nomad/postgres/members/pg-opti1 new file mode 100644 index 0000000..d164e0e --- /dev/null +++ b/consul_backup/nomad/postgres/members/pg-opti1 @@ -0,0 +1 @@ +{"conn_url":"postgres://192.168.4.36:5432/postgres","api_url":"http://192.168.4.36:8008/patroni","state":"running","role":"replica","version":"4.0.4","xlog_location":587649285816,"replication_state":"streaming","timeline":248} \ No newline at end of file diff --git a/consul_backup/nomad/postgres/status b/consul_backup/nomad/postgres/status new file mode 100644 index 0000000..ef1759a --- /dev/null +++ b/consul_backup/nomad/postgres/status @@ -0,0 +1 @@ +{"optime":587649284200,"slots":{"pg_odroid6":587649284200,"pg_odroid7":587649284200,"pg_opti1":587649284200,"pg_odroid8":587649284200},"retain_slots":["pg_odroid6","pg_odroid7","pg_odroid8","pg_opti1"]} \ No newline at end of file diff --git a/consul_backup/ovpn-client b/consul_backup/ovpn-client new file mode 100644 index 0000000..1b304b1 --- /dev/null +++ b/consul_backup/ovpn-client @@ -0,0 +1 @@ +8ab24a39-8747-2d2b-3789-9316e77035cc \ No newline at end of file diff --git 
a/consul_backup/prometheus_yml b/consul_backup/prometheus_yml new file mode 100644 index 0000000..2be587f --- /dev/null +++ b/consul_backup/prometheus_yml @@ -0,0 +1,90 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + scrape_timeout: 12s + external_labels: + monitor: 'example' +rule_files: + - "alerts.yml" +alerting: + alertmanagers: + - scheme: http + static_configs: + - targets: + - "alertmanager.service.dc1.consul:9093" +scrape_configs: + - job_name: postgres-job + metrics_path: /metrics + scheme: http + static_configs: + - targets: ['postgres.service.dc1.consul:8008'] + - job_name: minio-job + metrics_path: /minio/v2/metrics/cluster + scheme: http + static_configs: + - targets: ['minio.service.dc1.consul:9000'] + - job_name: 'traefik' + static_configs: + - targets: + - 'traefik.service.dc1.consul:80' + - job_name: 'consulservices' + consul_sd_configs: + - server: 'consul.service.dc1.consul:8500' + services: + - cadvisor + relabel_configs: + - source_labels: [__meta_consul_service] + target_label: job + - source_labels: [__meta_consul_node] + regex: (.+) + target_label: node + replacement: '${1}' + - job_name: nomad_metrics + params: + format: + - prometheus + scrape_interval: 10s + scrape_timeout: 10s + metrics_path: /v1/metrics + consul_sd_configs: + - server: 'consul.service.dc1.consul:8500' + datacenter: 'dc1' + tag_separator: ',' + scheme: http + services: + - nomad-client + relabel_configs: + - source_labels: [__meta_consul_tags] + separator: ; + regex: (.*)http(.*) + replacement: $1 + action: keep + - source_labels: [__meta_consul_address] + separator: ; + regex: (.*) + target_label: __meta_consul_service_address + replacement: $1 + action: replace + - source_labels: [__meta_consul_node] + regex: (.+) + target_label: node + replacement: '${1}' + - job_name: node_exporter + params: + format: + - prometheus + scrape_interval: 10s + scrape_timeout: 10s + metrics_path: /metrics + consul_sd_configs: + - server: 'consul.service.dc1.consul:8500' + 
datacenter: 'dc1' + tag_separator: ',' + scheme: http + services: + - node-exporter + relabel_configs: + - source_labels: [__meta_consul_node] + regex: (.+) + target_label: node + replacement: '${1}' \ No newline at end of file diff --git a/consul_backup/qbitcheck/connection_monitor/remediation b/consul_backup/qbitcheck/connection_monitor/remediation new file mode 100644 index 0000000..d7c6e7b --- /dev/null +++ b/consul_backup/qbitcheck/connection_monitor/remediation @@ -0,0 +1 @@ +{"state": null, "start_time": null, "stabilization_checks": 0} \ No newline at end of file diff --git a/consul_backup/qbitcheck/connection_monitor/stability b/consul_backup/qbitcheck/connection_monitor/stability new file mode 100644 index 0000000..6b920f8 --- /dev/null +++ b/consul_backup/qbitcheck/connection_monitor/stability @@ -0,0 +1 @@ +{"start_time": null} \ No newline at end of file diff --git a/consul_backup/qbitcheck/connection_monitor/state b/consul_backup/qbitcheck/connection_monitor/state new file mode 100644 index 0000000..4059104 --- /dev/null +++ b/consul_backup/qbitcheck/connection_monitor/state @@ -0,0 +1 @@ +{"connection_state": "stable", "last_state_change_time": 1763829844.4920733, "consecutive_failures": 21, "consecutive_stable_checks": 122, "last_failure_time": 1763829749.067393} \ No newline at end of file diff --git a/consul_backup/qbitcheck/connection_monitor/vpn b/consul_backup/qbitcheck/connection_monitor/vpn new file mode 100644 index 0000000..9ffd145 --- /dev/null +++ b/consul_backup/qbitcheck/connection_monitor/vpn @@ -0,0 +1 @@ +{"vpn_status": "running", "last_vpn_status_change": 1763829147.0930555, "public_ip": "192.30.89.67", "last_public_ip_change": 1763829147.2012818, "public_ip_details": {"public_ip": "192.30.89.67", "region": "British Columbia", "country": "Canada", "city": "Vancouver", "location": "49.2497,-123.1193", "organization": "AS394256 Tech Futures Interactive Inc.", "postal_code": "V5Y", "timezone": "America/Vancouver"}} \ No newline at end 
of file diff --git a/consul_backup/refresh_token b/consul_backup/refresh_token new file mode 100644 index 0000000..f18fb89 --- /dev/null +++ b/consul_backup/refresh_token @@ -0,0 +1 @@ +cd4bdd286cd7aef23033995ffcbd77f5059aefb13638966e059d752d59bce851 \ No newline at end of file diff --git a/consul_backup/scripts/acme.sh b/consul_backup/scripts/acme.sh new file mode 100644 index 0000000..7128de9 --- /dev/null +++ b/consul_backup/scripts/acme.sh @@ -0,0 +1,122 @@ +#!/bin/sh +set -x +ls -la +pwd +whoami + +# Define active DuckDNS variables +DUCKDNS_DOMAIN="*.fbleagh.duckdns.org" +DUCKDNS_BASE="fbleagh.duckdns.org" +# CONSUL_URL is set externally by Nomad: ${attr.unique.network.ip-address} + +# --- ACME Setup (Switching BACK to Let's Encrypt) --- +# Using a specific email to register a fresh account context +/usr/local/bin/acme.sh --register-account --server letsencrypt -m "admin-${DUCKDNS_BASE}@mailinator.com" +/usr/local/bin/acme.sh --set-default-ca --server letsencrypt +sleep 10 + +# --- 🎯 Dynamic Service Discovery --- +SERVICES="" +CHALLENGE_ALIAS_ARGS="" + +for service in $(curl -s "http://$CONSUL_URL:8500/v1/catalog/services" | jq -r 'to_entries[] | select(.value | index("sslcert")) | .key'); do + SERVICE_DOMAIN="${service}.service.dc1.fbleagh.duckdns.org" + SERVICES="$SERVICES -d $SERVICE_DOMAIN" + + # Correct pipe syntax for service domain alias + CHALLENGE_ALIAS_ARGS="$CHALLENGE_ALIAS_ARGS --challenge-alias $SERVICE_DOMAIN|$DUCKDNS_BASE" +done + +# Build the complete domain list for logging/reference +DOMAIN_ARGS="-d $DUCKDNS_DOMAIN $SERVICES" + +echo "Requesting certificate for: $DOMAIN_ARGS" +echo "--- Starting 2-STEP ISSUANCE (Back to Let's Encrypt) ---" + +# ---------------------------------------------------------------------------------- +## ➡️ STEP 1: Issue Wildcard Domain +# Note: We removed the explicit alias for the wildcard to simplify the request. +# The dns_duckdns hook naturally handles the root domain for the wildcard. 
+echo "--- STEP 1: Issuing Wildcard Domain ---" +/usr/local/bin/acme.sh --insecure --issue --dns dns_duckdns \ + -d "$DUCKDNS_DOMAIN" \ + --ecc --force --dnssleep 60 + +ACME_STATUS_1=$? +if [ $ACME_STATUS_1 -ne 0 ]; then + echo "🚨 ERROR: acme.sh STEP 1 (Wildcard) failed with exit code $ACME_STATUS_1." + # If step 1 fails on Let's Encrypt, we cannot proceed. + exit $ACME_STATUS_1 +fi +echo "✅ STEP 1 (Wildcard) completed successfully." +# ---------------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------------- +## ➡️ STEP 2: Issue Wildcard + Service Domains +echo "--- STEP 2: Issuing Wildcard + Service Domains ---" +/usr/local/bin/acme.sh --insecure --issue --dns dns_duckdns \ + -d "$DUCKDNS_DOMAIN" \ + $SERVICES \ + $CHALLENGE_ALIAS_ARGS \ + --ecc --force --dnssleep 60 + +ACME_STATUS_2=$? +if [ $ACME_STATUS_2 -ne 0 ]; then + echo "🚨 ERROR: acme.sh STEP 2 (Wildcard + Services) failed with exit code $ACME_STATUS_2." + exit $ACME_STATUS_2 +fi +echo "✅ STEP 2 (Wildcard + Services) completed successfully. Proceeding to upload." +# ---------------------------------------------------------------------------------- + +# --- Upload to Consul --- +CERT_DIR="/acme.sh/${DUCKDNS_DOMAIN}_ecc" +CONSUL_KV_PATH="/v1/kv/letsconsul/${DUCKDNS_DOMAIN}" + +upload_to_consul () { + FILE_PATH="$1" + KV_KEY="$2" + + CURL_RESPONSE=$(curl -XPUT --data-binary "@$FILE_PATH" "http://$CONSUL_URL:8500$KV_KEY" 2>&1) + + if echo "$CURL_RESPONSE" | grep -q "^true$"; then + echo "✅ Consul upload SUCCESS: $KV_KEY" + else + echo "❌ Consul upload FAILED: $KV_KEY" + echo "--- Full CURL Output ---" + echo "$CURL_RESPONSE" + exit 1 + fi +} + +echo "Starting Consul Uploads for ${DUCKDNS_DOMAIN}..." 
+ +upload_to_consul "$CERT_DIR/${DUCKDNS_DOMAIN}.key" "${CONSUL_KV_PATH}/${DUCKDNS_DOMAIN}.key" +upload_to_consul "$CERT_DIR/fullchain.cer" "${CONSUL_KV_PATH}/fullchain.cer" +upload_to_consul "$CERT_DIR/${DUCKDNS_DOMAIN}.cer" "${CONSUL_KV_PATH}/${DUCKDNS_DOMAIN}.cer" +upload_to_consul "$CERT_DIR/ca.cer" "${CONSUL_KV_PATH}/ca.cer" + +# --- Verification --- +echo "--- Starting Certificate and Consul Verification ---" + +echo "1. Checking SANs in local fullchain.cer..." +if [ -f "$CERT_DIR/fullchain.cer" ]; then + openssl x509 -in "$CERT_DIR/fullchain.cer" -text -noout | grep -A1 "X509v3 Subject Alternative Name" + echo "✅ Local SAN check complete." +else + echo "❌ ERROR: fullchain.cer not found." + exit 1 +fi + +CONSUL_CHECK_KEY="${CONSUL_KV_PATH}/fullchain.cer" +echo "2. Checking existence of ${CONSUL_CHECK_KEY} in Consul..." +CONSUL_METADATA=$(curl -s "http://$CONSUL_URL:8500${CONSUL_CHECK_KEY}?stale&keys&index=1") +CERT_VALUE_LENGTH=$(echo "$CONSUL_METADATA" | jq -r '.[0].Value' | wc -c) + +if [ "$CERT_VALUE_LENGTH" -gt 100 ]; then + echo "✅ Consul upload verification SUCCESS." +else + echo "❌ Consul upload verification FAILED." 
+    exit 1
+fi
+
+exit 0
\ No newline at end of file
diff --git a/consul_backup/scripts/letsconsul b/consul_backup/scripts/letsconsul
new file mode 100644
index 0000000..c75e40c
--- /dev/null
+++ b/consul_backup/scripts/letsconsul
@@ -0,0 +1,27 @@
+#!/bin/sh
+set -e
+set -x
+
+
+ls -ld $(find /etc/letsencrypt)
+
+apk add curl
+#$LETSCONSUL_DOMAIN
+certbot certonly \
+    -v \
+    --cert-name $LETSCONSUL_DOMAIN \
+    --preferred-challenges dns \
+    --authenticator dns-duckdns \
+    --dns-duckdns-token e4b5ca33-1f4d-494b-b06d-6dd4600df662 \
+    --dns-duckdns-propagation-seconds 60 \
+    --non-interactive \
+    --agree-tos \
+    -d "*.${LETSCONSUL_DOMAIN}" --email "stuart.stent@gmail.com"
+
+curl -XPUT --data-binary "@/etc/letsencrypt/live/$LETSCONSUL_DOMAIN/privkey.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$LETSCONSUL_DOMAIN/privkey
+curl -XPUT --data-binary "@/etc/letsencrypt/live/$LETSCONSUL_DOMAIN/fullchain.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$LETSCONSUL_DOMAIN/fullchain
+curl -XPUT --data-binary "@/etc/letsencrypt/live/$LETSCONSUL_DOMAIN/cert.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$LETSCONSUL_DOMAIN/cert
+curl -XPUT --data-binary "@/etc/letsencrypt/live/$LETSCONSUL_DOMAIN/chain.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$LETSCONSUL_DOMAIN/chain
+
+#curl -XPUT --data-binary @/var/log/letsencrypt/letsencrypt.log http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/letsencrypt.log
+/bin/sleep 600
\ No newline at end of file
diff --git a/consul_backup/scripts/letsconsul_auto b/consul_backup/scripts/letsconsul_auto
new file mode 100644
index 0000000..ade323c
--- /dev/null
+++ b/consul_backup/scripts/letsconsul_auto
@@ -0,0 +1,40 @@
+#!/bin/sh
+set -e
+set -x
+
+IFS=";"
+
+apk add curl
+for group in $LETSCONSUL_DOMAINS
+do
+    echo
+    echo "-----------------------"
+    IFS=" "
+    domains=""
+    domain_main=""
+    for domain in $group
+    do
+        if [ "$domain_main" = "" ]
+        then
+            domain_main="$domain"
+        fi
+        domains="$domains -d $domain"
+    done
+
+    echo
certbot certonly \
+    -vvv \
+    --test-cert \
+    --preferred-challenges dns \
+    --authenticator dns-duckdns \
+    --dns-duckdns-token e4b5ca33-1f4d-494b-b06d-6dd4600df662 \
+    --dns-duckdns-propagation-seconds 120 \
+    --non-interactive \
+    --agree-tos \
+    --renew-by-default \
+    $domains --email "stuart.stent@gmail.com"
+
+    echo curl -XPUT --data-bin "@/etc/letsencrypt/live/\*.${domain_main}/privkey.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$domain_main/privkey
+    echo curl -XPUT --data-bin "@/etc/letsencrypt/live/\*.${domain_main}/fullchain.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$domain_main/fullchain
+done
+
+echo curl -XPUT --data-bin @/var/log/letsencrypt/letsencrypt.log http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/letsencrypt.log
diff --git a/consul_backup/scripts/letsconsul_fixed b/consul_backup/scripts/letsconsul_fixed
new file mode 100644
index 0000000..bf32540
--- /dev/null
+++ b/consul_backup/scripts/letsconsul_fixed
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e
+set -x
+
+IFS=";"
+
+apk add curl
+    echo certbot certonly \
+    -vvv \
+    --test-cert \
+    --preferred-challenges dns \
+    --authenticator dns-duckdns \
+    --dns-duckdns-token e4b5ca33-1f4d-494b-b06d-6dd4600df662 \
+    --dns-duckdns-propagation-seconds 120 \
+    --non-interactive \
+    --agree-tos \
+    --renew-by-default \
+    *.fbleagh-int.duckdns.org --email "stuart.stent@gmail.com"
+
+    echo curl -XPUT --data-bin "@/etc/letsencrypt/live/\*.fbleagh-int.duckdns.org/privkey.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$domain_main/privkey
+    echo curl -XPUT --data-bin "@/etc/letsencrypt/live/\*.fbleagh-int.duckdns.org/fullchain.pem" http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/$domain_main/fullchain
+
+
+echo curl -XPUT --data-bin @/var/log/letsencrypt/letsencrypt.log http://$LETSCONSUL_CONSUL_URL:8500/v1/kv/letsconsul/letsencrypt.log
diff --git a/consul_backup/seaweedfs/config/filer_nodes b/consul_backup/seaweedfs/config/filer_nodes
new file mode 100644
index
0000000..f8c8881 --- /dev/null +++ b/consul_backup/seaweedfs/config/filer_nodes @@ -0,0 +1 @@ +odroid6,odroid7,odroid8 \ No newline at end of file diff --git a/consul_backup/seaweedfs/config/master_nodes b/consul_backup/seaweedfs/config/master_nodes new file mode 100644 index 0000000..f8c8881 --- /dev/null +++ b/consul_backup/seaweedfs/config/master_nodes @@ -0,0 +1 @@ +odroid6,odroid7,odroid8 \ No newline at end of file diff --git a/consul_backup/seaweedfs/config/volume_size b/consul_backup/seaweedfs/config/volume_size new file mode 100644 index 0000000..9a03714 --- /dev/null +++ b/consul_backup/seaweedfs/config/volume_size @@ -0,0 +1 @@ +10 \ No newline at end of file diff --git a/consul_backup/sheets_creds b/consul_backup/sheets_creds new file mode 100644 index 0000000..079e087 --- /dev/null +++ b/consul_backup/sheets_creds @@ -0,0 +1 @@ +{"installed":{"client_id":"182877671696-qj1oq6pi50s6v7nk16m59ulmg28klo0r.apps.googleusercontent.com","project_id":"quickstart-1588344492360","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"oSI3LMMY9caNiGgH0NKSO3oS","redirect_uris":["urn:ietf:wg:oauth:2.0:oob","http://localhost"]}} \ No newline at end of file diff --git a/consul_backup/sheets_token b/consul_backup/sheets_token new file mode 100644 index 0000000..fd99253 --- /dev/null +++ b/consul_backup/sheets_token @@ -0,0 +1 @@ +{"access_token":"ya29.a0ARrdaM9Cp7ib2ELAhWSmJWJIHN7xQe3CIuo7q-g1w4szCwTtcdPmSpfhMYX2Hes9GZP9RTernaEf6AaocBtGcv468yYnlbNOAQXBmizVUj12ZEY851NJU9UGwHl7JaB8NbfX5nGz1ndjzzBZoDN-drEHzIvf","token_type":"Bearer","refresh_token":"1//0duM7hhKdeXdmCgYIARAAGA0SNwF-L9Ir4W6QgqnyQ4RVIaHleOqkicvJTUc-Vd827_x1WabNQfP-S1P-WSDMC59XqHjn8uMEirQ","expiry":"2021-10-20T14:45:39.828013486-04:00"} \ No newline at end of file diff --git a/consul_backup/terraform_state b/consul_backup/terraform_state new file mode 100644 index 
0000000..7860474 --- /dev/null +++ b/consul_backup/terraform_state @@ -0,0 +1,1598 @@ +{ + "version": 4, + "terraform_version": "1.0.5", + "serial": 23, + "lineage": "1fae1a6a-296c-42b0-091e-3bef007d21ff", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "nomad_job", + "name": "Job", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"]", + "instances": [ + { + "index_key": "nomad_jobs/enabled/consulbackup.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "consulbackup", + "jobspec": "job \"consulbackup\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n periodic {\n // Launch every 20 seconds\n cron = \"0 8 * * *\"\n time_zone = \"America/New_York\"\n\n // Do not allow overlapping runs.\n prohibit_overlap = true\n }\n group \"consulbackup\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"consulbackup\" {\n driver = \"raw_exec\"\n\n config {\n command = \"/bin/bash\"\n args = [\"-c\", \"/usr/local/bin/consul kv export \u003e /mnt/Public/config/consul_kv_backup.json\"]\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n \n } #task consulbackup\n } #group\n} #job\n", + "json": null, + "modify_index": "5127602", + "name": "consulbackup", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "consulbackup", + "task": [ + { + "driver": "raw_exec", + "meta": {}, + "name": "consulbackup", + "volume_mounts": [] 
+ } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "batch" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/dex.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "20a9eb63-8f7a-7018-825e-1e529d3b0746" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "auth", + "jobspec": "job \"auth\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"auth\" {\n count = 1\n\n task \"fwdauth\" {\n driver = \"docker\"\n\n config {\n // image = \"npawelek/traefik-forward-auth\"\n image = \"thomseddon/traefik-forward-auth:2-arm\"\n\n port_map {\n auth = 4181\n }\n\n volumes = [\n \"/etc/localtime:/etc/localtime:ro\",\n ]\n }\n\n env {\n PROVIDERS_GOOGLE_CLIENT_ID = \"807888907647-uog95jmiolsuh6ql1t8jm53l1jvuajck.apps.googleusercontent.com\"\n PROVIDERS_GOOGLE_CLIENT_SECRET = \"B8bDri5mFvGv-Ghzbt8fLj4W\"\n SECRET = \"ladskfdjmqwermnnbasfnmldas\"\n CONFIG = \"/local/config.ini\"\n LIFETIME = \"31536000\"\n WHITELIST = \"stuart.stent@gmail.com,stephen.bunt@gmail.com\"\n\n // AUTH_HOST = \"fwdauth.fbleagh.duckdns.org\"\n COOKIE_DOMAIN = \"fbleagh.duckdns.org\"\n }\n\n template {\n data = \"{{ key \\\"Dex\\\" }}\"\n destination = \"local/config.ini\"\n change_mode = \"restart\"\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n\n network {\n port \"auth\" {\n static = 4181\n }\n }\n }\n\n service {\n name = \"dex\"\n\n tags = [\n \"fwdauth\",\n \"web\",\n \"traefik.http.routers.dex.rule=Host(`fwdauth.fbleagh.duckdns.org`)\",\n 
\"traefik.http.routers.dex.entrypoints=websecure\",\n \"traefik.http.routers.dex.tls=true\",\n \"traefik.http.routers.dex.tls.certresolver=myresolver\",\n \"traefik.http.middlewares.dex.forwardauth.address=http://dex.service.dc1.consul:4181\",\n \"traefik.http.middlewares.dex.forwardauth.trustForwardHeader=true\",\n \"traefik.http.middlewares.dex.forwardauth.authResponseHeaders=X-Forwarded-User\",\n \"traefik.http.routers.auth.middlewares=dex\",\n \"traefik.http.routers.traefik-forward-auth.middlewares=dex\",\n ]\n\n port = \"auth\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n } #end Dex\n }\n}\n", + "json": null, + "modify_index": "5077852", + "name": "auth", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "auth", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "fwdauth", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/fitbit.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "fitbit", + "jobspec": "job \"fitbit\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n periodic {\n // Launch every 20 seconds\n cron = \"0 8 * * *\"\n time_zone = \"America/New_York\"\n\n // Do not allow overlapping runs.\n 
prohibit_overlap = true\n }\n group \"fitbit\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"fitbit\" {\n driver = \"docker\"\n\n config {\n image = \"registry.service.dc1.consul:5000/fitbit:latest\"\n memory_hard_limit = 1024\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n } #task fitbit\n } #group\n} #job\n", + "json": null, + "modify_index": "5127613", + "name": "fitbit", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "fitbit", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "fitbit", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "batch" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/freshrss.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "b35ee750-39ba-21b6-61dc-0e4525f8b59b" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "freshrss", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"freshrss\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid1\"\n weight = 80\n }\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"freshrss\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n 
memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"25 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n\n task \"freshrss\" {\n driver = \"docker\"\n\n config {\n image = \"linuxserver/freshrss\"\n\n ports = [\"freshrss\"]\n\n volumes = [\n \"/mnt/configs/freshrss:/config\",\n ]\n }\n\n service {\n name = \"${TASKGROUP}\"\n\n tags = [\n \"freshrss\",\n \"tools\",\n \"traefik.http.routers.freshlan.rule=Host(`freshrss.service.dc1.consul`)\",\n \"traefik.http.routers.freshwan.rule=Host(`fbleagh-rss.ignorelist.com`)\",\n 
\"traefik.http.routers.freshwan.rule=Host(`rss.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.freshwan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.freshwan.tls=true\",\n \"traefik.http.routers.freshwan.tls.options=default\",\n \"traefik.http.routers.freshwan.tls.certresolver=myresolver\",\n ]\n\n // \"traefik.http.middlewares.fresh_auth.basicauth.users=fbleagh:$2y$05$ug6n0zTAXE1A7yP4EOZJn.eO5dMhAGVvOH.FJgimbWH5/QQPPGez6\",\n // \"traefik.http.routers.freshwan.middlewares=fresh_auth\",\n // \"traefik.http.routers.freshwan.tls=true\",\n // \"traefik.http.routers.freshwan.tls.certresolver=myresolver\",\n\n port = \"freshrss\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 128 # 500 MHz\n memory = 128 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"freshrss\" {\n to = 80\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127619", + "name": "freshrss", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "freshrss", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "freshrss", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/garminexport.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "garminexport", + "jobspec": "job \"garminexport\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"batch\"\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n\n periodic {\n // Launch every 20 seconds\n cron = \"1 7 * * *\"\n time_zone = \"America/New_York\"\n // Do not allow overlapping runs.\n prohibit_overlap = true\n }\n\n\n group \"garminexport\" {\n count = 1\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n \n\n task \"garminexport\" {\n driver = \"docker\"\n config {\n image = 
\"registry.service.dc1.consul:5000/garminexport:latest\"\n args = [\"--backup-dir=/activities\",\"--password=Farscape5\",\"fbleagh\",\"--log-level\",\"INFO\",\"--ignore-errors\",\"--max-retries=1\",\n \"-f\",\"tcx\",\n \"-f\",\"fit\",\n \"-f\",\"gpx\",\n \"-f\",\"json_summary\",\n \"-f\",\"json_details\"\n ]\n volumes = [\n \"/mnt/Public/Garmin:/activities\",\n ]\n memory_hard_limit = 2048\n }\n\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n } #task garminexport\n\n\n } #group\n } #job\n", + "json": null, + "modify_index": "5127603", + "name": "garminexport", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "garminexport", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "garminexport", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "batch" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/gitea.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "5392fdc4-aa90-e752-9d84-5b6ed4ea783b" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "gitea", + "jobspec": "job \"gitea\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n group \"gitea\" {\n count = 1\n\n restart {\n attempts = 3\n delay = \"20s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = 
\"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"gitea.db\"\n PUID=1000\n PGID=0\n DBCHMOD=\"0777\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/gitea.db\n replicas:\n - path: /configbackup\n EOH\n\n destination = \"local/litestream.yml\"\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n // memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n 
\"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n \n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/gitea.db\n replicas:\n - path: /configbackup\n EOH\n\n destination = \"local/litestream.yml\"\n }\n\n } #####\n\n\n task \"gitea\" {\n driver = \"docker\"\n\n config {\n image = \"registry.service.dc1.consul:5000/gitea\"\n memory_hard_limit = 2048\n volumes = [\n \"/mnt/configs/gitea:/data\",\n \"/etc/timezone:/etc/timezone:ro\",\n \"/etc/localtime:/etc/localtime:ro\"\n ]\n\n ports = [\"http\",\"ssh\"]\n }\n\nenv {\n USER_UID=1000\n USER_GID=1000\n // GITEA_CUSTOM=\"/data/custom/\"\n}\n\n\n service {\n name = \"gitea\"\n\n tags = [\n \"metrics\",\n ]\n\n port = \"http\"\n\n // check {\n // type = \"http\"\n // path = \"/metrics/\"\n // interval = \"10s\"\n // timeout = \"2s\"\n // }\n }\n\n resources {\n cpu = 50\n memory = 10\n }\n } #task\n\n network {\n port \"http\" {\n to = 3000\n }\n port \"ssh\" {\n static = 2222\n to = 2222\n }\n }\n } #group\n}\n", + "json": null, + "modify_index": "5127618", + "name": "gitea", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "gitea", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "db-sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "gitea", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/gocast.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "3e08b211-d5c3-1299-2668-ffef65f24db8", + "c305ef2b-d28b-0139-dfa1-929dcfcf3b87", + "c6e3f41d-89e9-033b-2277-6ad6f32c314e", + "0515ae06-926c-8280-4b1a-ae6a39f402f0" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "gocast", + "jobspec": "job \"gocast\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"10s\"\n max_parallel = 3\n }\n group \"gocast\" {\n count = 1\n\n restart {\n attempts = 99\n interval = \"1h\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"gocast\" {\n driver = \"docker\"\n\n config {\n // image = \"homeassistant/armhf-homeassistant:latest\"\n image = \"ghcr.io/sstent/gocast\"\n ports = [\"http\"]\n network_mode = \"host\"\n cap_add = [\"NET_ADMIN\"]\n args = [\"-config=/local/config.yaml\", \"-logtostderr\"]\n }\n env {\n CONSUL_NODE = \"${node.unique.name}\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"homeassistant\"]\n port = \"http\"\n\n }\n\n template {\n data = \u003c\u003cEOH\nagent:\n # http server listen addr\n listen_addr: :9080\n # Interval for health check\n monitor_interval: 10s\n # Time to flush out inactive apps\n cleanup_timer: 15m\n # Consul api addr for dynamic discovery\n consul_addr: http://127.0.0.1:8500/v1\n # interval to query consul for app discovery\n 
consul_query_interval: 5m\n\nbgp:\n local_as: 64512 \n remote_as: 64512 \n # override the peer IP to use instead of auto discovering\n communities:\n - asn:nnnn\n - asn:nnnn\n origin: igp\n\n# optional list of apps to register on startup\napps:\n - name: app1\n vip: 192.168.1.240/32\n vip_config:\n # additional per VIP BGP communities\n bgp_communities: [ aaaa:bbbb ]\n monitor: port:tcp:6000\n EOH\n\n destination = \"local/config.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\n{\n \"data\": {\n \"users\": [\n {\n \"password\": \"JDJiJDEyJGRjLjQ5WkNkbzg3Q0xmcHkzM2EyTC5RWWJrRDlRTFBRdUhjanNoeS81SUlPN0taYzFUcEVh\",\n \"username\": \"fbleagh\"\n }\n ]\n },\n \"key\": \"auth_provider.homeassistant\",\n \"version\": 1\n}\n EOH\n\n destination = \"local/auth_provider.homeassistant\"\n }\n\n resources {\n cpu = 256 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 9080\n to = 9080\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127614", + "name": "gocast", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "gocast", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "gocast", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "system" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/gotify.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "cb2fc110-c177-ed6e-806e-f0c30cc186da", + "5c69d598-6e7d-9385-5607-b1666b021a3b" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + 
"deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "gotify", + "jobspec": "job \"gotify\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"gotify\" {\n count = 1\n restart {\n attempts = 5\n interval = \"4m\"\n delay = \"30s\"\n mode = \"fail\"\n }\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"gotify.db\"\n PUID=1000\n PGID=0\n DBCHMOD=\"0777\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ 
/configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n\n\n\n\n\n\n\n task \"gotify\" {\n driver = \"docker\"\n\n config {\n image = \"gotify/server-arm7:2.0\"\n ports = [\"http\", \"https\"]\n\n volumes = [\"/mnt/configs/gotify:/app/data\", ]\n }\n env {\n// GOTIFY_SERVER_PORT=80\n// GOTIFY_SERVER_KEEPALIVEPERIODSECONDS=0\n// GOTIFY_SERVER_LISTENADDR=\"0.0.0.0\"\n// GOTIFY_SERVER_SSL_ENABLED=false\n// GOTIFY_SERVER_SSL_REDIRECTTOHTTPS=true\n// #GOTIFY_SERVER_SSL_LISTENADDR=\n// GOTIFY_SERVER_SSL_PORT=443\n// #GOTIFY_SERVER_SSL_CERTFILE=\n// #GOTIFY_SERVER_SSL_CERTKEY=\n// GOTIFY_SERVER_SSL_LETSENCRYPT_ENABLED=false\n// GOTIFY_SERVER_SSL_LETSENCRYPT_ACCEPTTOS=false\n// GOTIFY_SERVER_SSL_LETSENCRYPT_CACHE=\"certs\"\n// # lists are a little weird but do-able (:\n// # GOTIFY_SERVER_SSL_LETSENCRYPT_HOSTS=- mydomain.tld\\n- myotherdomain.tld\n// GOTIFY_SERVER_RESPONSEHEADERS=\"X-Custom-Header: \\\"custom value\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWORIGINS=\"- \\\".+.example.com\\\"\\n- \\\"otherdomain.com\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWMETHODS=\"- \\\"GET\\\"\\n- \\\"POST\\\"\"\n// # GOTIFY_SERVER_CORS_ALLOWHEADERS=\"- \\\"Authorization\\\"\\n- \\\"content-type\\\"\"\n// # GOTIFY_SERVER_STREAM_ALLOWEDORIGINS=\"- \\\".+.example.com\\\"\\n- \\\"otherdomain.com\\\"\"\n// GOTIFY_SERVER_STREAM_PINGPERIODSECONDS=45\nGOTIFY_DATABASE_DIALECT=\"sqlite3\"\nGOTIFY_DATABASE_CONNECTION=\"data/gotify.db\"\n// GOTIFY_DEFAULTUSER_NAME=\"admin\"\n// GOTIFY_DEFAULTUSER_PASS=\"admin\"\n// GOTIFY_PASSSTRENGTH=10\n// GOTIFY_UPLOADEDIMAGESDIR=\"data/images\"\n// GOTIFY_PLUGINSDIR=\"data/plugins\"\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n }\n service {\n name = \"gotify\"\n tags = [\"gotify\", \"web\", \"urlprefix-/gotify\", \"backend\",\n \"traefik.http.routers.gotifylan.rule=Host(`gotify.service.dc1.consul`)\",\n \"traefik.http.routers.gotifywan.rule=Host(`gotify.fbleagh.duckdns.org`)\",\n 
\"traefik.http.routers.gotifywan.tls=true\",\n \"traefik.http.routers.gotifywan.tls.certresolver=myresolver\",\n ]\n\n\n port = \"http\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n network {\n port \"http\" {\n to = 80\n }\n\n port \"https\" {\n to = 443\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5136663", + "name": "gotify", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "gotify", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "gotify", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/grafana.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "0ffa7f59-0532-4b89-055d-a7af237108b0" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "grafana", + "jobspec": "job \"grafana\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"60s\"\n max_parallel = 1\n }\n group \"grafana\" {\n count = 1\n\n restart {\n attempts = 2\n interval = 
\"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"grafana\" {\n driver = \"docker\"\n\n config {\n // image = \"fg2it/grafana-armhf:v5.1.4\"\n image = \"grafana/grafana:latest\"\n ports = [\"http\"]\n\n logging {\n type = \"json-file\"\n }\n\n memory_hard_limit = 2048\n }\n\n env {\n disable_login_form = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n GF_PATHS_PROVISIONING = \"/local/\"\n GF_AUTH_ANONYMOUS_ENABLED = true\n GF_AUTH_ANONYMOUS_ORG_NAME = \"Main Org.\"\n GF_AUTH_ANONYMOUS_ORG_ROLE = \"Admin\"\n }\n\n template {\n data = \u003c\u003cEOH\napiVersion: 1\n\ndatasources:\n - name: Prometheus\n type: prometheus\n url: http://prometheus.service.dc1.consul:9090\n isDefault:\n EOH\n\n destination = \"local/datasources/prometheus.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\napiVersion: 1\n\nproviders:\n- name: dashboards\n type: file\n updateIntervalSeconds: 30\n options:\n path: /local/dashboard_definitons\n foldersFromFilesStructure: true\n EOH\n\n destination = \"local/dashboards/dashboards.yaml\"\n }\n\n template {\n data = \"{{ key \\\"grafana_dashboards/nomad\\\" }}\"\n destination = \"local/dashboard_definitons/nomad.json\"\n }\n template {\n data = \"{{ key \\\"grafana_dashboards/thermals\\\" }}\"\n destination = \"local/dashboard_definitons/thermals.json\"\n }\n template {\n data = \"{{ key \\\"grafana_dashboards/NomadMem\\\" }}\"\n destination = \"local/dashboard_definitons/NomadMem.json\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"backend\"]\n port = \"http\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"60s\"\n timeout = \"120s\"\n path = \"/login\"\n port = \"http\"\n\n check_restart {\n limit = 3\n grace = \"120s\"\n ignore_warnings = false\n }\n }\n }\n\n resources {\n cpu = 128 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 3100\n 
to = 3000\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5134431", + "name": "grafana", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "grafana", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "grafana", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/hass.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "67e6bb90-faa6-2e28-0965-46fb1ab5ba29", + "49081f7d-7656-34fe-a995-cff15355bd33", + "ecdf88e2-eb3d-a9ec-5900-40f790d7c68a", + "4a35e955-d271-80d8-6aaa-f8d266acc9f7" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "hass", + "jobspec": "job \"hass\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"10s\"\n max_parallel = 3\n }\n group \"hass\" {\n count = 1\n\n restart {\n attempts = 99\n interval = \"1h\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"hass\" {\n driver = \"docker\"\n\n config {\n // image = \"homeassistant/armhf-homeassistant:latest\"\n image = \"homeassistant/armhf-homeassistant:0.71.0\"\n ports = [\"http\"]\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n \"local/configuration.yaml:/config/configuration.yaml\",\n ]\n\n 
// \"local/auth_provider.homeassistant:/config/.storage/auth_provider.homeassistant\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"homeassistant\", \"tools\",\"logo=home-assistant\"]\n port = \"http\"\n\n // check {\n // name = \"hass-alive\"\n // type = \"http\"\n // type = \"script\"\n // command = \"curl -sS http://localhost:8123\"\n // interval = \"120s\"\n // timeout = \"15s\"\n // path = \"/api/\"\n // port = \"http\"\n\n // check_restart {\n // limit = 10\n // grace = \"90s\"\n // ignore_warnings = false\n // }\n // }\n }\n\n template {\n data = \u003c\u003cEOH\n homeassistant:\n name: Our_House\n latitude: 40.7654\n longitude: -73.8175\n elevation: 26\n unit_system: metric\n time_zone: America/New_York\n frontend:\n config:\n http:\n sun:\n automation:\n - alias: LightsAtSunset\n trigger:\n platform: sun\n event: sunset\n action:\n service: switch.turn_on\n entity_id: switch.lampdrawers\n wemo:\n static:\n - 192.168.99.200 # StuBed\n - 192.168.99.201 # LampDrawers\n - 192.168.99.202 # BigLamp\n - 192.168.99.203 # TallTree\n - 192.168.99.204 # ShortTree\n - 192.168.99.205 # TallTree\n\n EOH\n\n destination = \"local/configuration.yaml\"\n }\n\n template {\n data = \u003c\u003cEOH\n{\n \"data\": {\n \"users\": [\n {\n \"password\": \"JDJiJDEyJGRjLjQ5WkNkbzg3Q0xmcHkzM2EyTC5RWWJrRDlRTFBRdUhjanNoeS81SUlPN0taYzFUcEVh\",\n \"username\": \"fbleagh\"\n }\n ]\n },\n \"key\": \"auth_provider.homeassistant\",\n \"version\": 1\n}\n EOH\n\n destination = \"local/auth_provider.homeassistant\"\n }\n\n resources {\n cpu = 256 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 8123\n to = 8123\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127617", + "name": "hass", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ 
+ { + "count": 1, + "meta": {}, + "name": "hass", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "hass", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "system" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/lidarr.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "a29f8509-df3c-529f-fb7f-5f129defb800" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "lidarr", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"lidarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"lidarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"lidarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// 
validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 
500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// // memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n\n\n task \"lidarr\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/lidarr:develop\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n\n ports = [\"http\"]\n\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/music:/music\",\n \"/mnt/Archive/seeding:/archive\",\n \"/mnt/configs/lidarr:/config\",\n ]\n }\n\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"lidarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # 
PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 256 # 500 MHz\n memory = 200 # 128MB\n\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 8686\n to = 8686\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127609", + "name": "lidarr", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "lidarr", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "lidarr", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/lufi.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "36b8cbd7-2278-2a63-2183-1cc4349b7e3d" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "lufi", + "jobspec": "job \"lufi\" {\n 
datacenters = [\"dc1\"]\n type = \"service\"\n group \"lufi\" {\n count = 1\n task \"lufi\" {\n driver = \"docker\"\n config {\n image = \"ghcr.io/sstent/lufi:latest\"\n memory_hard_limit = \"2048\"\n ports = [\"http\"]\n volumes = [\n \"/mnt/configs/lufi:/config\",\n \"/mnt/Archive/files:/files\",\n ]\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 256 # 128 MB\n\n }\n service {\n name = \"lufi\"\n tags = [ \n \"lufi\",\n \"web\",\n \"urlprefix-/lufi\",\n \"backend\",\n \"traefik.http.routers.lufilan.rule=Host(`lufi.service.dc1.consul`)\",\n \"traefik.http.routers.lufiwan.rule=Host(`fd.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.lufiwan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.lufiwan.tls=true\",\n \"traefik.http.routers.lufiwan.tls.certresolver=myresolver\"\n ]\n // \"traefik.http.middlewares.lufi_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm\",\n // \"traefik.http.middlewares.lufi_auth.basicauth.users=ShieldWrinklySquirePulseEcosphereCoroner:$2y$05$ogdqaYki8pEqVan4S7YvHOTGdB7W3j5Qv3sSKnij1Xy8yuRJ5gbpi\",\n // \"traefik.http.routers.lufiwan.middlewares=lufi_auth\",\n port = \"http\"\n meta {\n ALLOC = \"${NOMAD_ALLOC_ID}\"\n }\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n network {\n port \"http\" {\n to = 8081\n }\n }\n\n\n }\n}", + "json": null, + "modify_index": "5125816", + "name": "lufi", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "lufi", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "lufi", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/navidrome.nomad", + 
"schema_version": 0, + "attributes": { + "allocation_ids": [ + "294f016d-95f3-1c3c-0569-c60c75198a10" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "navidrome", + "jobspec": "job \"navidrome\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"navidrome\" {\n count = 1\n\n task \"navidrome\" {\n driver = \"docker\"\n\n config {\n // image = \"registry.service.dc1.consul:5000/navidrome:latest\"\n image = \"deluan/navidrome:latest\"\n memory_hard_limit = \"2048\"\n\n ports = [\"http\"]\n\n volumes = [\n \"/mnt/configs/navidrome:/data\",\n \"/mnt/Public/Downloads/music:/music:ro\",\n ]\n }\n\n env {\n ND_SCANINTERVAL = \"1m\"\n ND_LOGLEVEL = \"debug\"\n ND_REVERSEPROXYWHITELIST = \"0.0.0.0/0\"\n ND_REVERSEPROXYUSERHEADER = \"X-Forwarded-User\"\n\n //ND_BASEURL = \"\"\n }\n\n resources {\n cpu = 100 # 100 MHz\n memory = 128 # 128 MB\n }\n\n service {\n name = \"navidrome\"\n\n tags = [\n \"navidrome\",\n \"web\",\n \"urlprefix-/navidrome\",\n \"tools\",\n \"traefik.http.routers.navidromelan.rule=Host(`navidrome.service.dc1.consul`)\",\n \"traefik.http.routers.navidromewan.rule=Host(`fbleagh-m.ignorelist.com`)\",\n \"traefik.http.routers.navidromewan.rule=Host(`m.fbleagh.duckdns.org`)\",\n \"traefik.http.routers.navidromewan.middlewares=dex@consulcatalog\",\n \"traefik.http.routers.navidromewan.tls=true\",\n \"traefik.http.routers.navidromewan.tls.certresolver=myresolver\",\n ]\n\n // \"traefik.http.routers.navidromewan.middlewares=navidrome_auth\",\n\n // \"traefik.http.middlewares.navidrome_auth.basicauth.users=ShieldWrinklySquirePulseEcosphereCoroner:$2y$05$ogdqaYki8pEqVan4S7YvHOTGdB7W3j5Qv3sSKnij1Xy8yuRJ5gbpi\",\n // 
\"traefik.http.middlewares.navidrome_auth.basicauth.users=admin:$2y$05$cJGONoS0BFTeBUXqmETikeI14QhLDKIDkYuPdA1umIOC129grVMAm\",\n port = \"http\"\n meta {\n ALLOC = \"${NOMAD_ALLOC_ID}\"\n }\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n task \"init-manual\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync-manual\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n args = [\"flock\", \"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n \n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/navidrome.db\n replicas:\n - path: /configbackup\n EOH\n\n destination = 
\"local/litestream.yml\"\n }\n\n } #####\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"32 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n network {\n port \"http\" {\n static = 4533\n to = 4533\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127637", + "name": "navidrome", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "navidrome", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "navidrome", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "init-manual", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync-manual", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "db-sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/nginx.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "abd3342c-8ced-8097-73e0-61783bb899b2" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": 
"nginx", + "jobspec": "job \"nginx\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n group \"nginx\" {\n count = 1\n\n task \"nginx\" {\n driver = \"docker\"\n\n config {\n image = \"nginx\"\n ports = [\"http\", \"https\"]\n\n volumes = [\n \"custom/default.conf:/etc/nginx/conf.d/default.conf\",\n ]\n }\n\n template {\n data = \u003c\u003cEOH\n server {\n listen 8080;\n server_name nginx.service.dc1.consul;\n location / {\n root /local/data;\n }\n }\n EOH\n\n destination = \"custom/default.conf\"\n }\n artifact {\n source = \"git::https://github.com/WalkxCode/dashboard-icons.git\"\n destination = \"local/data/repo\"\n}\n # consul kv put features/demo 'Consul Rocks!'\n // \u003ca href=\"http://{{.Name}}.service.dc1.consul:{{ .Port }}\" target=\"_blank\"\u003e{{.Name}}service.dc1.consul:{{ .Port }}\u003c/a\u003e\n // \u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e{{.Name}}service.dc1.consul\u003c/a\u003e\n\n template {\n data = \u003c\u003cEOH\n \u003cp\u003eLocal Services\u003c/p\u003e\n \u003ctable style=\"width:100%\"\u003e\n \u003ctr\u003e\n \u003cth\u003eService Name\u003c/th\u003e\n \u003cth\u003eTraefik\u003c/th\u003e\n \u003cth\u003eDirect\u003c/th\u003e\n \u003c/tr\u003e\n {{range services}}\n {{range $i, $s :=service .Name}}\n {{ if eq $i 0 }}\n\n\u003ctr\u003e\n \u003ctd\u003e{{.Name}}\u003c/td\u003e\n \u003ctd\u003e\u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e{{.Name}}.service.dc1.consul\u003c/a\u003e\u003c/td\u003e\n \u003ctd\u003e\u003ca href=\"http://{{.Name}}.service.dc1.consul:{{.Port}}\" target=\"_blank\"\u003e{{.Name}}.service.dc1.consul:{{.Port}}\u003c/a\u003e\u003c/td\u003e\n \u003c/tr\u003e\n {{end}}\n {{end}}\n {{end}}\n \u003c/table\u003e\n \u003cp\u003e\n Node Environment Information: \u003cbr /\u003e\n node_id: {{ env \"node.unique.id\" }} \u003cbr/\u003e\n datacenter: {{ 
env \"NOMAD_DC\" }}\n \u003c/p\u003e\n EOH\n\n destination = \"local/data/index.html\"\n change_mode = \"noop\"\n }\n template {\n data = \u003c\u003cEOH\n\u003c!DOCTYPE html\u003e\n\u003chtml\u003e\n\u003ctitle\u003eDashboard\u003c/title\u003e\n\u003cmeta name=\"viewport\" content=\"width=device-width, initial-scale=1\"\u003e\n#\u003cmeta http-equiv=\"refresh\" content=\"120\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://www.w3schools.com/w3css/4/w3.css\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://www.w3schools.com/lib/w3-theme-blue-grey.css\"\u003e\n\u003clink rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css\"\u003e\n\u003cbody class=\"w3-theme-l4\"\u003e\n\n\u003cdiv style=\"min-width:60px\"\u003e\n\u003cdiv class=\"w3-bar w3-large w3-theme-d4\"\u003e\n \u003ca href=\"#\" class=\"w3-bar-item w3-button\"\u003e\u003ci class=\"fa fa-bars\"\u003e\u003c/i\u003e\u003c/a\u003e\n \u003cspan class=\"w3-bar-item\"\u003eDashboard\u003c/span\u003e\n \u003ca href=\"#\" class=\"w3-bar-item w3-button w3-right\"\u003e\u003ci class=\"fa fa-search\"\u003e\u003c/i\u003e\u003c/a\u003e\n\u003c/div\u003e\n\u003cdiv class=\"w3-container w3-content w3-padding\"\u003e\n \u003cp class=\"w3-opacity\"\u003e\u003cb\u003eTools\u003c/b\u003e\u003c/p\u003e \n\n\n {{range services}}\n {{range $i, $s :=service .Name}}\n {{ if eq $i 0 }}\n {{if .Tags | contains \"tools\"}}\n\u003cdiv class=\"w3-col\"\u003e\n\u003cdiv class=\"w3-row\"\u003e\n \u003cdiv class=\"w3-white w3-center w3-cell-padding w3-card w3-mobile w3-margin\" style=\"width=80%\"\u003e\n \u003cp class=\"w3-text-blue\"\u003e\u003cb\u003e{{.Name}}\u003c/b\u003e\u003c/p\u003e\n {{$iconname := .Name}}\n {{range $tag, $services := service .Name | byTag }}{{if $tag | regexMatch \"logo=*\"}}{{$iconname = index ($tag | split \"=\") 1}}{{end}}{{end}}\n \u003ca href=\"http://{{.Name}}.service.dc1.consul\" target=\"_blank\"\u003e\u003cimg 
src=\"./repo/png/{{$iconname}}.png\" alt=\"HTML tutorial\" class=\"w3-margin\" style=\"width:84px;height:84px;\"\u003e\u003c/a\u003e\n \u003c/div\u003e\n\u003c/div\u003e\n\u003c/div\u003e\n {{end}}{{end}}{{end}}{{end}}\n\n\n\u003c/div\u003e\n \u003cp\u003e\n Node Environment Information: \u003cbr /\u003e\n node_id: {{ env \"node.unique.id\" }} \u003cbr/\u003e\n datacenter: {{ env \"NOMAD_DC\" }}\n \u003c/p\u003e\n\u003c/div\u003e\n\n\u003c/body\u003e\n\u003c/html\u003e\n EOH\n\n destination = \"local/data/newindex.html\"\n change_mode = \"noop\"\n }\n resources {\n cpu = 100 # 100 MHz\n memory = 64 # 128 MB\n }\n service {\n name = \"nginx\"\n tags = [\"nginx\", \"web\", \"urlprefix-/nginx\", \"backend\"]\n port = \"http\"\n\n check {\n type = \"tcp\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n }\n\n network {\n port \"http\" {\n to = 8080\n }\n\n port \"https\" {\n to = 443\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127627", + "name": "nginx", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "nginx", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "nginx", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/nodeexporter.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "067db5bf-cb0a-226d-50c4-ba12083713b6", + "ac791771-d035-0af4-bef8-da3c31d07cfd", + "c82b219a-0ebc-6043-cb17-06a0b8db9b5b", + "fa620815-4478-b593-a980-7b028e776bde" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "node-exporter", + 
"jobspec": "job \"node-exporter\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"system\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n group \"node-exporter\" {\n count = 1\n\n restart {\n attempts = 3\n delay = \"20s\"\n mode = \"delay\"\n }\n\n task \"node-exporter\" {\n driver = \"docker\"\n\n config {\n #image = \"anzevalher/node-exporter\"\n image = \"prom/node-exporter\"\n\n force_pull = true\n\n args = [\"--collector.cpu\",\"--collector.filesystem\",\"--collector.meminfo\",\"--collector.thermal_zone\",\"--collector.disable-defaults\",]\n volumes = [\n \"/proc:/host/proc\",\n \"/sys:/host/sys\",\n \"/:/rootfs\",\n ]\n\n port_map {\n http = 9100\n }\n }\n\n service {\n name = \"node-exporter\"\n\n tags = [\n \"metrics\",\n ]\n\n port = \"http\"\n\n check {\n type = \"http\"\n path = \"/metrics/\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n resources {\n cpu = 20\n memory = 20\n\n network {\n port \"http\" {\n static = \"9100\"\n }\n }\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5136279", + "name": "node-exporter", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "node-exporter", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "node-exporter", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "system" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/ovpn-openpyn.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + 
"deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "ovpn-client", + "jobspec": "job \"ovpn-client\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n update {\n # Stagger updates every 60 seconds\n stagger = \"90s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n group \"ovpn-client\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n // task \"init-trigger\" {\n // driver = \"docker\"\n\n // lifecycle {\n // hook = \"prestart\"\n // }\n\n // config {\n // image = \"curlimages/curl\"\n // args = [\"--request\", \"PUT\", \"--data\", \"${NOMAD_ALLOC_ID}\", \"http://${attr.unique.network.ip-address}:8500/v1/kv/${NOMAD_GROUP_NAME}\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n // }\n\n task \"nzbget\" {\n driver = \"docker\"\n\n config {\n image = \"linuxserver/nzbget\"\n network_mode = \"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n\n // ports = [\"nzbget\"]\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/incoming:/incomplete-downloads\",\n \"/mnt/configs/ovpn-client/nzbget:/config\",\n ]\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 200 # 128MB\n }\n } #task nzbget\n\n // task \"saznzb\" {\n // driver = \"docker\"\n\n // config {\n // // image = \"linuxserver/sabnzbd:2.3.8-0ubuntu1jcfp118.04-ls13\"\n // image = \"linuxserver/sabnzbd\"\n // network_mode = \"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n\n // volumes = [\n // \"/mnt/Public/Downloads/news:/downloads\",\n // \"/mnt/Public/incoming:/incomplete-downloads\",\n // \"/mnt/configs/ovpn-client/saznzb:/config\",\n // ]\n // }\n\n // env {\n // TZ = \"EST5EDT\"\n // PUID = 
1000\n // PGID = 1000\n // }\n\n // resources {\n // cpu = 100 # 500 MHz\n // memory = 512 # 128MB\n // }\n // }\n\n task \"ovpn-client\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = true\n }\n\n config {\n // image = \"registry.service.dc1.consul:5000/openpyn:latest\"\n image = \"qmcgaw/gluetun\"\n // memory_hard_limit = \"200\"\n\n ports = [\n \"shadowsocks\",\n \"nzbget\",\n \"http_proxy\",\n \"http_admin\",\n \"socks\"\n ]\n\n cap_add = [\n \"NET_ADMIN\",\n \"NET_BIND_SERVICE\",\n ]\n\n #network_mode = \"host\"\n #network_mode = \"vpn\"\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n ]\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n\n env {\n // VPNFLAGS = \"us --max-load 70 --top-servers 10 --pings 5\"\n // VPNFLAGS = \"nl --max-load 70 --top-servers 10 --pings 5\"\n VPNSP = \"nordvpn\"\n OPENVPN_USER = \"stuart.stent@gmail.com\"\n OPENVPN_PASSWORD = \"drRp4mQBVU6awAFOk9lO\"\n REGION = \"Netherlands\"\n HTTPPROXY = \"on\"\n SHADOWSOCKS_PASSWORD = \"farscape5\"\n SHADOWSOCKS = \"off\"\n }\n\n service {\n name = \"${TASKGROUP}-admin\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"http_admin\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"shadowsocks\"\n }\n\n service {\n name = \"nzbget\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"nzbget\"\n }\n\n resources {\n cpu = 100 # 500 MHz\n memory = 100 # 128MB\n }\n } #task ovpn\n task \"dante\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/sstent/dante:latest\"\n network_mode = \"container:ovpn-client-${NOMAD_ALLOC_ID}\"\n memory_hard_limit = 256\n\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n service {\n name = \"socks-nord\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"socks\"\n \n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/usr/bin/curl\"\n args = [\"--proxy\", 
\"socks5://localhost:1080\",\"http://neverssl.com/\"]\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n }\n }\n resources {\n cpu = 64 # 500 MHz\n memory = 128 # 128MB\n }\n }\n ######################################################################\n ######################################################################\n ######################################################################\n\n task \"init\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n task \"finalsync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststop\"\n }\n\n config {\n // image = \"pebalk/rsync\"\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"/config/\", \"/configbackup/\"]\n }\n\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end finalsync task\n task \"sync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n\n config {\n image = \"ghcr.io/sstent/rsync:v0.3.5\"\n memory_hard_limit = \"2048\"\n\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n args = [\"client\"]\n }\n 
resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n env {\n CRON_TASK_1 = \"*/20 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n } #end sync task\n\n ######################################################################\n ######################################################################\n ######################################################################\n\n network {\n port \"shadowsocks\" {\n static = \"8338\"\n to = \"8388\"\n }\n\n port \"http_proxy\" {\n static = \"8888\"\n to = \"8888\"\n }\n port \"http_admin\" {\n static = \"8000\"\n to = \"8000\"\n }\n\n port \"socks\" {\n static = \"1080\"\n to = \"1080\"\n }\n\n port \"nzbget\" {\n static = \"6789\"\n to = \"6789\"\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5136713", + "name": "ovpn-client", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "ovpn-client", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "nzbget", + "volume_mounts": null + }, + { + "driver": "docker", + "meta": {}, + "name": "ovpn-client", + "volume_mounts": null + }, + { + "driver": "docker", + "meta": {}, + "name": "dante", + "volume_mounts": null + }, + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": null + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": null + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": null + } + ], + "volumes": null + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/prometheus.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + 
"4d382d41-9038-f25a-37da-3a32515775fb", + "be48b4d9-8e41-b723-57b6-564bc522c2ec" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "prometheus", + "jobspec": "job \"prometheus\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n\n # constraint {\n # \tattribute = \"${attr.unique.hostname}\"\n # operator = \"regexp\"\n # value = \"pi.*\"\n # }\n\n update {\n # Stagger updates every 60 seconds\n stagger = \"60s\"\n max_parallel = 1\n }\n group \"prometheus\" {\n count = 2\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"prometheus\" {\n driver = \"docker\"\n\n config {\n image = \"prom/prometheus\"\nargs = [\"--web.enable-admin-api\",\"--config.file=/etc/prometheus/prometheus.yml\"]\n \n\n ports = [\"http\"]\n memory_hard_limit = \"512\"\n volumes = [\n \"local/prometheus.yml:/etc/prometheus/prometheus.yml\",\n \"local/alerts.yml:/etc/prometheus/alerts.yml\",\n ]\n\n // \"/mnt/gv0/prom_data:/data\",\n // \"/etc/localtime:/etc/localtime\",\n\n logging {\n type = \"json-file\"\n }\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"global\", \"prometheus\"]\n port = \"http\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"10s\"\n timeout = \"120s\"\n path = \"/status\"\n port = \"http\"\n }\n }\n\n template {\n change_mode = \"signal\"\n change_signal = \"SIGHUP\"\n data = \"{{ key \\\"prometheus_yml\\\" }}\"\n destination = \"local/prometheus.yml\"\n }\n\n template {\n change_mode = \"restart\"\n \n destination = \"local/alerts.yml\"\n data = \"{{ key \\\"alerts\\\" }}\"\n }\n resources {\n cpu = 500 # 500 MHz\n memory = 48 
# 128MB\n }\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n kill_timeout = \"10s\"\n } ## end prometheus\n\n task \"alertmanager\" {\n driver = \"docker\"\n\n config {\n image = \"prom/alertmanager\"\n\n ports = [\"alerthttp\"]\n\n // volumes = [\n // \"local/alertmanager.yml:/etc/prometheus/prometheus.yml\",\n // ]\n args = [\"--config.file=/local/alertmanager.yml\"]\n // \"/mnt/gv0/prom_data:/data\",\n // \"/etc/localtime:/etc/localtime\",\n\n logging {\n type = \"json-file\"\n }\n }\n\n service {\n name = \"${TASK}\"\n tags = [\"global\", \"prometheus\"]\n port = \"alerthttp\"\n\n check {\n name = \"alive\"\n type = \"http\"\n interval = \"60s\"\n timeout = \"120s\"\n path = \"/status\"\n port = \"http\"\n }\n }\n\n template {\n data = \u003c\u003cEOH\nglobal:\nreceivers:\n - name: default-receiver\n - name: gotify-webhook\n webhook_configs:\n - url: \"http://prometheus.service.dc1.consul:9094/gotify_webhook\"\nroute:\n group_wait: 10s\n group_interval: 5m\n receiver: gotify-webhook\n repeat_interval: 3h\n\nEOH\n\n destination = \"local/alertmanager.yml\"\n }\n resources {\n cpu = 128 # 500 MHz\n memory = 48 # 128MB\n }\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n kill_timeout = \"10s\"\n } ## end alertmanager\n\n task \"gotifybridge\" {\n driver = \"docker\"\n config {\n image = \"ghcr.io/druggeri/alertmanager_gotify_bridge\"\n ports = [\"gotifybridge\"]\n args = [\"--debug\"]\n }\n env {\n GOTIFY_TOKEN=\"Ajdp.V.rvrXGoJd\"\n GOTIFY_ENDPOINT=\"http://gotify.service.dc1.consul/message\"\n }\n}\n\n network {\n port \"http\" {\n static = 9090\n to = 9090\n }\n port \"alerthttp\" {\n static = 9093\n to = 9093\n }\n port \"gotifybridge\" {\n static = 9094\n to = 8080\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5135086", + "name": "prometheus", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + 
"task_groups": [ + { + "count": 2, + "meta": {}, + "name": "prometheus", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prometheus", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "alertmanager", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "gotifybridge", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/prowlarr.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "376eaf67-5433-a5da-3e28-f4002777a80a" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "prowlarr", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"prowlarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n // affinity {\n // attribute = \"${attr.unique.hostname}\"\n // value = \"odroid2\"\n // weight = 100\n // }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"prowlarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n // args = [\"flock\", 
\"-x\",\"/locks/${NOMAD_GROUP_NAME}_rsync.lock\",\"rsync\",\"-av\",\"--exclude=Backups\",\"/configbackup/\",\"/config/\",\"--delete-before\"]\n\n }\n env {\n DB_NAME=\"prowlarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/prowlarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n// task \"db-init\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"prestart\"\n// sidecar = false\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n \n// args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 20 # 128MB\n// }\n\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n 
\"/mnt/configs/${NOMAD_GROUP_NAME}/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --exclude='*.db-litestream' --exclude='generations'/configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n// ]\n \n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/prowlarr.db\n// replicas:\n// - path: /configbackup\n// snapshot-interval: 2h\n// validation-interval: 6h\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n // } #####\n\n task \"prowlarr\" {\n // driver = \"raw_exec\" // config { // command = \"docker\" // args = [\"run\", // \"--rm\", // \"--name=prowlarr\", // \"-e\", \"PUID=1000\", // \"-e\", \"PGID=1000\", // \"-e\", \"TZ=EST5EDT\", // \"-p\", \"8989:8989\", // \"-v\", \"/mnt/syncthing/prowlarrv3:/config\", // \"-v\", \"/mnt/Public/Downloads/tv:/tv\", // \"-v\", \"/mnt/Public/Downloads/news:/downloads\", // \"--cpuset-cpus\",\"4-7\", // \"linuxserver/prowlarr:preview\"] // }\n\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/prowlarr:develop\"\n\n ports = [\"http\"]\n\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/configs/prowlarr:/config\",\n ]\n\n // \"/mnt/gv0/prowlarr:/config\",\n force_pull = false\n }\n\n service {\n name = 
\"${TASKGROUP}\"\n tags = [\"prowlarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n \n // export API=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\n // curl -f \"http://localhost:9696/api/v1/system/status?apikey=$API\"\n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n } #End main task\n\n network {\n // mbits = 100\n\n port \"http\" {\n static = 9696\n to = 9696\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127612", + "name": "prowlarr", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "prowlarr", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "prowlarr", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/qbittorrent.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "54da0c06-5ad3-d72e-8091-4147f8bd58cc" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "qbittorrent", + "jobspec": "job \"qbittorrent\" {\n # region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n\n # priority = 50\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid*\"\n // weight = 100\n }\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n update {\n # Stagger updates every 60 seconds\n stagger = \"90s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n\n group \"qbittorrent\" {\n count = 1\n\n restart {\n attempts = 8\n interval = \"20m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task 
\"qbittorrent-vpn\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = true\n }\n\n config {\n // image = \"registry.service.dc1.consul:5000/openpyn:latest\"\n image = \"qmcgaw/gluetun\"\n\n memory_hard_limit = \"1024\"\n ports = [\n \"shadowsocks\",\n \"http_proxy\",\n \"http_admin\",\n \"qbittorrent_51413\",\n \"qbittorrent_80\",\n \"socks\"\n ]\n\n cap_add = [\n \"NET_ADMIN\",\n \"NET_BIND_SERVICE\",\n ]\n\n #network_mode = \"host\"\n #network_mode = \"vpn\"\n\n volumes = [\n \"/etc/localtime:/etc/localtime\",\n \"/mnt/syncthing/mullvad:/vpn\",\n\n ]\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n\n env {\n VPNSP = \"mullvad\"\n VPN_TYPE = \"wireguard\"\n COUNTRY = \"Canada\"\n CITY = \"Toronto\"\n FIREWALL_VPN_INPUT_PORTS = \"56987\"\n WIREGUARD_PRIVATE_KEY = \"iA64ImY2XNvml7s+HEHWNNGXeqpzFN0/KYGxhCsHLV8=\"\n WIREGUARD_ADDRESS = \"10.64.141.217/32\"\n HTTPPROXY = \"on\"\n SHADOWSOCKS_PASSWORD = \"farscape5\"\n SHADOWSOCKS = \"on\"\n }\n\n service {\n name = \"qbittorrent\"\n tags = [\"global\", \"tools\"]\n port = \"qbittorrent_80\"\n }\n\n service {\n name = \"${TASKGROUP}-admin\"\n tags = [\"global\"]\n port = \"http_admin\"\n }\n\n\n resources {\n cpu = 100 # 500 MHz\n memory = 250 # 128MB\n }\n } #task ovpn\n\n task \"dante\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/sstent/dante-wg:latest\"\n network_mode = \"container:qbittorrent-vpn-${NOMAD_ALLOC_ID}\"\n memory_hard_limit = 256\n\n devices = [\n {\n host_path = \"/dev/net/tun\"\n container_path = \"/dev/net/tun\"\n },\n ]\n }\n service {\n name = \"socks-mullvad\"\n tags = [\"global\", \"ovpn-openpyn\"]\n port = \"socks\"\n \n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/usr/bin/curl\"\n args = [\"--proxy\", \"socks5://localhost:1080\",\"http://neverssl.com/\"]\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n }\n }\n }\n resources 
{\n cpu = 64 # 500 MHz\n memory = 128 # 128MB\n }\n } #end dante\n\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\", \"--delete\", \"/config/\", \"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"25 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz --delete /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync\n\n\n task \"qbittorrent\" {\n driver = \"docker\"\n\n // \"/mnt/Public/config/qbittorrent:/config\",\n\n config {\n image = \"linuxserver/qbittorrent\"\n network_mode = \"container:qbittorrent-vpn-${NOMAD_ALLOC_ID}\"\n 
memory_hard_limit = 2048\n\n #cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news/qbittorrent:/downloads/\",\n \"/mnt/configs/qbittorrent:/config\",\n \"/mnt/Archive/seeding:/archiveseeding\",\n ]\n // ulimit {\n // nproc = \"4242\"\n // nofile = \"2048:4096\"\n // }\n sysctl = {\n \"net.core.somaxconn\" = \"4096\"\n }\n }\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n resources {\n cpu = 128 # 500 MHz\n memory = 256 # 128MB\n }\n } #task resilio-sync\n\n network {\n // mode = \"bridge\"\n port \"qbittorrent_51413\" {\n static = 6881\n to = 6881\n }\n\n port \"shadowsocks\" {\n static = \"8388\"\n to = \"8388\"\n }\n port \"socks\" {\n static = \"1080\"\n to = \"1080\"\n }\n\n port \"http_proxy\" {\n static = \"8888\"\n to = \"8888\"\n }\n port \"http_admin\" {\n static = \"8000\"\n to = \"8000\"\n }\n\n\n port \"qbittorrent_80\" {\n static = 8080\n to = 8080\n }\n\n }\n } #group \"au-proxy\"\n}\n\n// }\n\n", + "json": null, + "modify_index": "5081280", + "name": "qbittorrent", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "qbittorrent", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "qbittorrent-vpn", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "dante", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "qbittorrent", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + 
"index_key": "nomad_jobs/enabled/radarr.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "fa5a91f0-505e-5258-b924-917b15eb6ee7" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "radarr", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"radarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid2\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"radarr\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"1m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"radarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/radarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // 
}\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// memory_hard_limit = \"256\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n// // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// template 
{\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/radarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n task \"radarr\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/radarr:nightly\"\n image = \"linuxserver/radarr:latest\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/movies:/movies\",\n \"/mnt/configs/radarr:/config\",\n \"/mnt/Archive/Movies:/archive\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"radarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n\n // network {\n // port \"radarr\" {\n // static = \"7878\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } #Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 7878\n to = 7878\n }\n }\n } #Group\n}\n", + "json": null, + "modify_index": "5127623", + "name": "radarr", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": 
"radarr", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "radarr", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/readarr.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "b145f13c-b7e7-44f5-5cfc-7ce67c1cdb26" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "readarr", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"readarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"readarr\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"3m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync:latest\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"readarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = 
\u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// - name: minio_replica\n// url: s3://litestream/readarr\n// endpoint: http://minio.service.dc1.consul:9000\n// access-key-id: minio\n// secret-access-key: minio123\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n// task \"db-init\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"prestart\"\n// sidecar = false\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n// ]\n\n \n// args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 20 # 128MB\n// }\n\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/lidarr.db\n// replicas:\n// - path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n\n// }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n 
args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n// task \"db-sync\" {\n// driver = \"docker\"\n// lifecycle {\n// hook = \"poststart\"\n// sidecar = true\n// }\n// config {\n// memory_hard_limit = \"2048\"\n\n// image = \"ghcr.io/sstent/rsync:v0.3.5\"\n// volumes = [\n// \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n// \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n// ]\n \n// args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n// }\n// resources {\n// cpu = 20 # 500 MHz\n// memory = 128 # 128MB\n// }\n// env{\n// SLEEPTIME=\"1m\"\n// }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// - name: minio_replica\n// url: s3://litestream/readarr\n// endpoint: http://minio.service.dc1.consul:9000\n// access-key-id: minio\n// secret-access-key: minio123\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n\n// } #####\n\n task \"readarr\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/readarr:nightly\"\n image = \"hotio/readarr:nightly-0.1.0.963\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/books2:/books\",\n \"/mnt/Public/Downloads/PublicCalibreLibrary:/PublicCalibreLibrary\",\n \"/mnt/configs/readarr:/config\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"readarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = 
\"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n DEBUG = \"yes\"\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n\n // network {\n // port \"readarr\" {\n // static = \"8787\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } #Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 8787\n to = 8787\n }\n }\n } #Group\n}\n", + "json": null, + "modify_index": "5128092", + "name": "readarr", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "readarr", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "readarr", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/readarrAudio.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "9cc929bc-f746-752a-422f-444d0eb773c7" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + 
"deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "readarrAudio", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"readarrAudio\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"readarrAudio\" {\n count = 1\n\n restart {\n attempts = 5\n interval = \"3m\"\n delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n }\n env {\n DB_NAME=\"readarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n// template {\n// data = \u003c\u003cEOH\n// dbs:\n// - path: /config/readarr.db\n// replicas:\n// - name: file_replica\n// path: /configbackup\n// EOH\n\n// destination = \"local/litestream.yml\"\n// }\n }\n\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\" \n ]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = 
\"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\", \n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1=\"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n\n task \"readarrAudio\" {\n driver = \"docker\"\n\n config {\n // image = \"linuxserver/readarr:nightly\"\n image = \"hotio/readarr:nightly-0.1.0.963\"\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n ports = [\"http\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/audiobooks:/books\",\n \"/mnt/configs/readarrAudio:/config\",\n ]\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"readarr\", \"tools\",\"logo=readarr\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v1/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n DEBUG = \"yes\"\n }\n\n resources {\n cpu = 500 # 500 MHz\n memory = 160 # 128MB\n\n // network {\n // port \"readarr\" {\n // static = \"8787\"\n // }\n // }\n }\n\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n kill_timeout = \"10s\"\n } 
#Task\n\n network {\n // mode = \"bridge\"\n port \"http\" {\n static = 8787\n to = 8787\n }\n }\n } #Group\n}\n", + "json": null, + "modify_index": "5127608", + "name": "readarrAudio", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "readarrAudio", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "readarrAudio", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/registry.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "3c34c9f3-0b41-43f4-73f4-eeb93eb833cd" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "registry", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"registry\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid4\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"registry\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n\n\n task \"registryui\" {\n driver = \"docker\"\n config {\n image = \"joxit/docker-registry-ui:latest\"\n ports = 
[\"httpgui\"]\n }\n\n env{\n REGISTRY_TITLE=\"My Private Docker Registry\"\n REGISTRY_URL=\"https://registry.service.dc1.consul:5000\"\n NGINX_PROXY_PASS_URL=\"https://registry.service.dc1.consul:5000\"\n SINGLE_REGISTRY=\"true\"\n }\n\n\n\n \n\n }\n\n task \"registry\" {\n driver = \"docker\"\n\n config {\n image = \"registry:latest\"\n\n ports = [\"http\"]\n\n volumes = [\n \"/mnt/Public/config/registry/certs:/data/certs\",\n \"/mnt/Public/config/registry/registry:/var/lib/registry\"\n ]\n }\n\n\n env {\n REGISTRY_HTTP_TLS_KEY = \"/data/certs/domain.key\"\n REGISTRY_HTTP_TLS_CERTIFICATE = \"/data/certs/domain.crt\"\n REGISTRY_HTTP_HEADERS_Access-Control-Allow-Origin=\"['*']\"\n // REGISTRY_PROXY_REMOTEURL = \"https://registry-1.docker.io\"\n // REGISTRY_PROXY_USERNAME = \"fbleagh\"\n // REGISTRY_PROXY_PASSWORD = \"aad31d60-4340-4adc-a21d-fac4942c2fb8\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"registry\"]\n port = \"httpgui\"\n\n check {\n name = \"alive\"\n type = \"http\"\n port = \"http\"\n protocol = \"https\"\n tls_skip_verify = true\n interval = \"10s\"\n timeout = \"10s\"\n path = \"/\"\n }\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n task \"registry-cache\" {\n driver = \"docker\"\n\n config {\n image = \"registry\"\n\n ports = [\"httpcache\"]\n\n volumes = [\n \"/mnt/Public/config/registry-cache:/data\",\n ]\n }\n\n env {\n REGISTRY_HTTP_TLS_KEY = \"/data/certs/domain.key\"\n REGISTRY_HTTP_TLS_CERTIFICATE = \"/data/certs/domain.crt\"\n\n // REGISTRY_HTTP_TLS_LETSENCRYPT_CACHEFILE = \"/data/certs/letsencrypt.crt\"\n\n // REGISTRY_HTTP_TLS_LETSENCRYPT_EMAIL = \"stuart.stent@gmail.com\"\n // REGISTRY_HTTP_TLS_LETSENCRYPT_HOSTS = \"[regcache.fbleagh.duckdns.org]\"\n REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY = \"/data/registry\"\n REGISTRY_PROXY_REMOTEURL = \"https://registry-1.docker.io\"\n REGISTRY_PROXY_USERNAME = \"fbleagh\"\n REGISTRY_PROXY_PASSWORD = \"aad31d60-4340-4adc-a21d-fac4942c2fb8\"\n }\n\n service {\n name = \"${TASKGROUP}-cache\"\n tags = [\"registry\"]\n port = \"httpcache\"\n\n check {\n name = \"alive\"\n type = \"http\"\n port = \"http\"\n protocol = \"https\"\n tls_skip_verify = true\n interval = \"10s\"\n timeout = \"10s\"\n path = \"/\"\n }\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 64 # 500 MHz\n memory = 64 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n }\n\n network {\n port \"http\" {\n static = 5000\n to = 5000\n }\n\n port \"httpcache\" {\n static = 5001\n to = 5000\n }\n port \"httpgui\" {\n to = 80\n }\n }\n }\n}\n\n// openssl req \\\n// -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \\\n// -addext \"subjectAltName = DNS:http://registry-cache.service.dc1.consul\" \\\n// -x509 -days 365 -out certs/domain.crt\n//\n///\n// Copy to \n\n", + "json": null, + "modify_index": "4988360", + "name": "registry", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "registry", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "registryui", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "registry", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "registry-cache", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/sonarr.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "75e37c6b-57a8-bcdb-f8de-02f4d9a3f420" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "sonarr", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"sonarr\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n affinity {\n attribute = \"${attr.unique.hostname}\"\n value = \"odroid1\"\n weight = 100\n }\n\n // constraint {\n // \tattribute = 
\"${attr.unique.hostname}\"\n // operator = \"=\"\n // value = \"sync\"\n // }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n group \"sonarr\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n task \"init\" {\n driver = \"docker\"\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n ]\n\n }\n env {\n DB_NAME = \"sonarr.db\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/sonarr.db\n replicas:\n - path: /configbackup\n snapshot-interval: 2h\n validation-interval: 6h\n EOH\n\n destination = \"local/litestream.yml\"\n }\n }\n\n // task \"db-init\" {\n // driver = \"docker\"\n // lifecycle {\n // hook = \"prestart\"\n // sidecar = false\n // }\n // config {\n // memory_hard_limit = \"2048\"\n\n // image = \"ghcr.io/sstent/rsync\"\n // volumes = [\n // \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n // \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n // ]\n\n\n // args = [\"/usr/local/bin/litestream\", \"restore\",\"-config\",\"/local/litestream.yml\",\"/config/lidarr.db\"]\n // }\n // resources {\n // cpu = 20 # 500 MHz\n // memory = 20 # 128MB\n // }\n\n // template {\n // data = \u003c\u003cEOH\n // dbs:\n // - path: /config/lidarr.db\n // replicas:\n // - path: /configbackup\n // EOH\n\n // destination = \"local/litestream.yml\"\n // }\n\n\n // }\n\n task \"finalsync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststop\"\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\"\n 
]\n\n // args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-av\",\"--exclude='*.db*'\",\"--exclude='*.db'\",\"--exclude='*.db-litestream'\",\"--exclude='generations'\",\"/config/\",\"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n }\n\n\n task \"sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"2048\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n args = [\"client\"]\n }\n env {\n CRON_TASK_1 = \"50 * * * * rsync -av --exclude='*.db*' --exclude='*.db' --exclude='.*.db-litestream' --exclude='generations' /configs/${NOMAD_GROUP_NAME}/ /configbackup/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n }\n\n task \"db-sync\" {\n driver = \"docker\"\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n config {\n memory_hard_limit = \"256\"\n\n image = \"ghcr.io/sstent/rsync\"\n volumes = [\n \"/mnt/configs/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n // args = [\"/usr/local/bin/litestream\", \"replicate\", \"-config\",\"/local/litestream.yml\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 128 # 128MB\n }\n template {\n data = \u003c\u003cEOH\ndbs:\n - path: /config/sonarr.db\n replicas:\n - path: /configbackup\n snapshot-interval: 2h\n validation-interval: 6h\n EOH\n\n destination = \"local/litestream.yml\"\n }\n\n } #####\n\n task \"sonarr\" {\n // driver = \"raw_exec\" // config { // command = \"docker\" // args = [\"run\", // \"--rm\", // \"--name=sonarr\", // \"-e\", \"PUID=1000\", // \"-e\", \"PGID=1000\", // \"-e\", \"TZ=EST5EDT\", // \"-p\", \"8989:8989\", // \"-v\", \"/mnt/syncthing/sonarrv3:/config\", // \"-v\", \"/mnt/Public/Downloads/tv:/tv\", // \"-v\", \"/mnt/Public/Downloads/news:/downloads\", // \"--cpuset-cpus\",\"4-7\", // 
\"linuxserver/sonarr:preview\"] // }\n\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/sonarr:latest\"\n\n ports = [\"http\"]\n\n dns_servers = [\"192.168.1.1\", \"1.1.1.1\"]\n memory_hard_limit = \"2048\"\n cpuset_cpus = \"4-7\"\n\n volumes = [\n \"/mnt/Public/Downloads/news:/downloads\",\n \"/mnt/Public/Downloads/tv:/tv\",\n \"/mnt/configs/sonarr:/config\",\n ]\n\n // \"/mnt/gv0/sonarr:/config\",\n force_pull = false\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"sonarr\", \"tools\"]\n port = \"http\"\n\n check {\n type = \"script\"\n name = \"check_up\"\n command = \"/local/healthcheck.sh\"\n interval = \"60s\"\n timeout = \"5s\"\n\n check_restart {\n limit = 3\n grace = \"90s\"\n ignore_warnings = false\n } \n }\n }\n template {\n data = \u003c\u003cEOH\n#!/bin/bash\n/usr/bin/curl -f \"http://localhost:${NOMAD_PORT_http}/api/v3/system/status?apikey=$(grep -Po '\u003cApiKey\u003e\\K.*?(?=\u003c.*?\u003e)' /config/config.xml)\"\n EOH\n\n destination = \"local/healthcheck.sh\"\n perms = \"0755\"\n }\n\n template {\n data = \"---\\nkey: {{ key \\\"ovpn-client\\\" }}\"\n destination = \"local/file.yml\"\n change_mode = \"restart\"\n }\n\n env {\n // SHARE = \"Public;/mount/Public;yes;no;yes;all;none;;Public\"\n # GLOBAL = \"socket options = TCP_NODELAY IPTOS_LOWDELAY SO_RCVBUF=65536 SO_SNDBUF=65536\"\n # PERMISSIONS = \"true\"\n # WORKGROUP = \"WORKGROUP\"\n TZ = \"EST5EDT\"\n\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 500 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. 
If not set a default is used.\n kill_timeout = \"10s\"\n } #End main task\n\n network {\n // mbits = 100\n\n port \"http\" {\n static = 8989\n to = 8989\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5127611", + "name": "sonarr", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "sonarr", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "db-sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "sonarr", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/traefik.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "7d42d2ef-739f-7516-f597-fe0c7764f1df", + "1c08c688-2d3d-f768-3073-0ca1b738d7a6", + "64bf5dd8-ab7f-bb69-3c06-308783324a9f", + "d6ffd29e-e301-fdb0-7173-2a6810295b35" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "traefik", + "jobspec": "job \"traefik\" {\n datacenters = [\"dc1\"]\n type = \"system\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n\n constraint {\n attribute = \"${attr.cpu.arch}\"\n value = \"arm\"\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n healthy_deadline = \"5m\"\n }\n\n group \"traefik\" {\n count = 1\n\n restart {\n attempts = 6\n interval = \"1m\"\n 
delay = \"10s\"\n mode = \"delay\"\n }\n\n task \"traefik\" {\n driver = \"docker\"\n\n config {\n image = \"traefik:2.3\"\n network_mode = \"host\"\n\n args = [\n \"--api.dashboard\",\n \"--providers.consulcatalog.defaultRule=Host(`{{ .Name }}.service.dc1.consul`)\",\n \"--providers.consulcatalog.endpoint.address=${attr.unique.network.ip-address}:8500\",\n \"--providers.consulcatalog.exposedbydefault=true\",\n \"--metrics=true\",\n \"--metrics.prometheus=true\",\n \"--metrics.prometheus.entryPoint=web\",\n \"--entryPoints.web.address=:80\",\n \"--entryPoints.websecure.address=:443\",\n \"--entryPoints.openvpn.address=:1194/udp\",\n \"--certificatesresolvers.myresolver.acme.email=stuart.stent@gmail.com\",\n \"--certificatesresolvers.myresolver.acme.storage=/acmecert/acme.json\",\n \"--certificatesresolvers.myresolver.acme.tlschallenge=true\",\n \"--accesslog=true\",\n ]\n\n volumes = [\n \"/var/run/docker.sock:/var/run/docker.sock\",\n \"/mnt/gv0/letsencrypt:/acmecert/\",\n ]\n\n dns_servers = [\"192.168.1.1\", \"192.168.1.250\"]\n ports = [\"traefik\", \"traefikhttps\"]\n\n memory_hard_limit = 128\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n template {\n data = \u003c\u003cEOH\ndebug = true\n[log]\n level = \"DEBUG\"\n\n[metrics]\n [metrics.prometheus]\n addRoutersLabels = true\n addServicesLabels = true\n\n[api]\ndashboard = true\ninsecure = false\n\n[providers.consulCatalog]\nexposedByDefault = true\ndefaultRule='Host(`{{ .routerName }}.mydomain.com`)'\n [providers.consulCatalog.endpoint]\n address = \"{{env \"attr.unique.network.ip-address\"}}:8500\"\n\nEOH\n\n destination = \"local/file.yml\"\n }\n\n service {\n name = \"${TASKGROUP}\"\n\n tags = [\n \"global\",\n \"traefik\",\n ]\n\n port = \"traefik\"\n }\n\n service {\n name = \"${TASKGROUP}-ui\"\n\n tags = [\n \"global\",\n \"traefik\",\n \"traefik.http.routers.dashboard.rule=Host(`traefik-ui.service.dc1.consul`)\",\n \"traefik.http.routers.dashboard.service=api@internal\",\n ]\n\n 
port = \"traefik\"\n } #end service\n\n resources {\n cpu = 256 # 500 MHz\n memory = 10 # 128MB\n } #end resources\n } #end task\n\n network {\n port \"traefik\" {\n static = 80\n to = 80\n }\n\n // port \"traefikui\" {\n // static = 8840\n // to = 8080\n // }\n\n port \"traefikhttps\" {\n static = 443\n to = 443\n }\n }\n } # end group\n} # end job\n", + "json": null, + "modify_index": "5076422", + "name": "traefik", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "traefik", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "traefik", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "system" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/unifi.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "8962f8be-4cae-9b3a-2b6c-2f62af0e800d" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "unifi", + "jobspec": "# There can only be a single job definition per file.\n# Create a job with ID and Name 'example'\njob \"unifi\" {\n datacenters = [\"dc1\"]\n type = \"service\"\n\n constraint {\n attribute = \"${attr.kernel.name}\"\n value = \"linux\"\n }\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid.*\"\n }\n affinity {\n attribute = \"${attr.unique.hostname}\"\n operator = \"=\"\n value = \"odroid2\"\n weight = 100\n }\n\n update {\n stagger = \"10s\"\n max_parallel = 1\n }\n\n group \"unifi\" {\n count = 1\n\n restart {\n attempts = 2\n interval = \"1m\"\n delay = \"10s\"\n mode = \"fail\"\n }\n\n // docker create \\\n // --name unifi 
\\\n // -v \u003c/path/to/appdata\u003e:/config \\\n // -v \u003cpath/to/tvseries\u003e:/tv \\\n // -v \u003cpath/to/downloadclient-downloads\u003e:/downloads \\\n // lsioarmhf/unifi\n task \"init\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"--exclude=Backups\", \"/configbackup/\", \"/config/\", \"--delete-before\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n\n task \"finalsync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststop\"\n }\n\n config {\n // image = \"pebalk/rsync\"\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/${NOMAD_GROUP_NAME}:/config\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n \"/mnt/Public/config/locks:/locks\",\n ]\n\n args = [\"flock\", \"-x\", \"/locks/${NOMAD_GROUP_NAME}_rsync.lock\", \"rsync\", \"-avz\", \"/config/\", \"/configbackup/\"]\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end finalsync task\n\n task \"sync\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"poststart\"\n sidecar = true\n }\n\n config {\n image = \"ghcr.io/sstent/rsync\"\n memory_hard_limit = 2048\n\n volumes = [\n \"/data/:/configs\",\n \"/mnt/Public/config/${NOMAD_GROUP_NAME}:/configbackup\",\n ]\n\n args = [\"client\"]\n }\n\n env {\n CRON_TASK_1 = \"*/10 * * * * chmod a-w /configs/${NOMAD_GROUP_NAME}/ ; rsync -avz /configs/${NOMAD_GROUP_NAME}/ /configbackup/; chmod a+w /configs/${NOMAD_GROUP_NAME}/;\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end sync task\n\n task \"unifi\" {\n driver = \"docker\"\n\n config {\n image = 
\"linuxserver/unifi-controller:LTS\"\n network_mode = \"host\"\n memory_hard_limit = 2048\n ports = [\"unifi_8080\", \"unifi_8081\", \"unifi_8443\", \"unifi_8843\", \"unifi_8880\", \"unifi_6789\", \"unifi_udp_3478\", \"unifi_udp_10001\", \"unifi_udp_1900\"]\n\n volumes = [\n \"/data/unifi:/config\",\n ]\n }\n\n service {\n name = \"${TASKGROUP}\"\n tags = [\"unifi\"]\n port = \"unifi_8443\"\n }\n\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n }\n\n # We must specify the resources required for\n # this task to ensure it runs on a machine with\n # enough capacity.\n resources {\n cpu = 128 # 500 MHz\n memory = 256 # 128MB\n }\n\n # Specify configuration related to log rotation\n logs {\n max_files = 10\n max_file_size = 15\n }\n\n # Controls the timeout between signalling a task it will be killed\n # and killing the task. If not set a default is used.\n kill_timeout = \"10s\"\n } #task\n\n network {\n port \"unifi_8080\" {\n static = \"8080\"\n to = \"8080\"\n }\n\n port \"unifi_8081\" {\n static = \"8081\"\n to = \"8081\"\n }\n\n port \"unifi_8443\" {\n static = \"8443\"\n to = \"8443\"\n }\n\n port \"unifi_8843\" {\n static = \"8843\"\n to = \"8843\"\n }\n\n port \"unifi_8880\" {\n static = \"8880\"\n to = \"8880\"\n }\n\n port \"unifi_6789\" {\n static = \"6789\"\n to = \"6789\"\n }\n\n port \"unifi_udp_3478\" {\n static = \"3478\"\n to = \"3478\"\n }\n\n port \"unifi_udp_1900\" {\n static = \"1900\"\n to = \"1900\"\n }\n\n port \"unifi_udp_10001\" {\n static = \"10001\"\n to = \"10001\"\n }\n }\n } #group\n}\n", + "json": null, + "modify_index": "5127615", + "name": "unifi", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "unifi", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "init", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "finalsync", + "volume_mounts": [] + }, + { + "driver": "docker", 
+ "meta": {}, + "name": "sync", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "unifi", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + }, + { + "index_key": "nomad_jobs/enabled/wireguard.nomad", + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "35e39061-8a70-e271-bcca-ef94673256bc" + ], + "datacenters": [ + "dc1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "hcl2": [], + "id": "wireguard", + "jobspec": "job \"wireguard\" {\n region = \"global\"\n datacenters = [\"dc1\"]\n type = \"service\"\n constraint {\n attribute = \"${attr.unique.hostname}\"\n operator = \"regexp\"\n value = \"odroid*\"\n }\n group \"wireguard\" {\n count = 1\n\n task \"portfwd\" {\n driver = \"docker\"\n\n lifecycle {\n hook = \"prestart\"\n sidecar = false\n }\n\n config {\n image = \"registry.service.dc1.consul:5000/portfwd\"\n }\n env {\n IP_ADDR = \"${attr.unique.network.ip-address}\"\n }\n resources {\n cpu = 20 # 500 MHz\n memory = 20 # 128MB\n }\n } #end init task\n\n\n task \"wireguard\" {\n driver = \"docker\"\n\n config {\n image = \"ghcr.io/linuxserver/wireguard\"\n ports = [\"vpn\"]\n volumes = [\n \"/mnt/Public/config/wireguard:/config\",\n \"/lib/modules:/lib/modules\"\n ]\n cap_add = [\"NET_ADMIN\",\"SYS_MODULE\"]\n sysctl = {\n \"net.ipv4.conf.all.src_valid_mark\"=\"1\"\n }\n\n }\n env {\n TZ = \"EST5EDT\"\n PUID = 1000\n PGID = 1000\n SERVERURL=\"wireguard.fbleagh.duckdns.org\"\n SERVERPORT=51820 \n PEERS=\"StuPhone,SurfaceGo,Surface\" \n PEERDNS=\"${attr.unique.network.ip-address},192.168.1.1,1.1.1.1\"\n // INTERNAL_SUBNET= \"192.168.1.0\" \n ALLOWEDIPS=\"0.0.0.0/0\"\n }\n service {\n name = \"${TASKGROUP}\"\n port 
= \"vpn\"\n }\n\n resources {\n cpu = 50\n memory = 100\n }\n }\n\n network {\n port \"vpn\" {\n static = 51820\n to = 51820\n }\n }\n }\n}\n", + "json": null, + "modify_index": "4990566", + "name": "wireguard", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "wireguard", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "portfwd", + "volume_mounts": [] + }, + { + "driver": "docker", + "meta": {}, + "name": "wireguard", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "timeouts": null, + "type": "service" + }, + "sensitive_attributes": [], + "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDAsInVwZGF0ZSI6MzAwMDAwMDAwMDAwfX0=" + } + ] + } + ] +} diff --git a/consul_backup/testkey b/consul_backup/testkey new file mode 100644 index 0000000..6ed22e0 --- /dev/null +++ b/consul_backup/testkey @@ -0,0 +1 @@ +tesstvale \ No newline at end of file diff --git a/consul_backup/traefik/leader b/consul_backup/traefik/leader new file mode 100644 index 0000000..f82961a --- /dev/null +++ b/consul_backup/traefik/leader @@ -0,0 +1 @@ +0f37bd66-43c8-4fa2-a164-4721fd60c85e \ No newline at end of file