From 9af9846216212083bc5539ae3d5709ffdc74db38 Mon Sep 17 00:00:00 2001
From: Ben Martin
Date: Thu, 26 Sep 2024 18:17:46 +0000
Subject: [PATCH] feat(matrix): add Nomad job specification for Matrix service

feat(matrix): update resource allocation in Nomad job specification
feat(matrix): onboard element service to traefik
feat(matrix): add port configuration for Element service
chore(matrix): reformat
feat(matrix): update resource allocation in Nomad job specification
fix(matrix): minimum MemoryMB value is 10
feat(matrix): update resource allocation in Nomad job specification
feat(matrix): split server and clients into separate groups
feat(matrix): well known to be served by nginx
fix(matrix): add well known route for all hosts
feat(matrix): use separate traefik router for well known
feat(matrix): migrate config.yaml for mas
feat(matrix): divide mas config between nomad and volume
feat(matrix): split cinny and element task groups
refactor(media-centre): Migrate media-centre job spec to Nomad HCL format
fix(media-centre): remove json from resource nomad job
fix(media-centre): update media-centre job spec to use Nomad HCL format
feat(media-centre): add downloader group
  - Added a new group called "downloaders" to handle proxy tasks for
    downloading media files.
  - Configured the proxy task with necessary settings and environment
    variables.
fix(media-centre): use OPENVPN_USER env variable in proxy task
fix(media-centre): Add /dev/net/tun device to proxy task
feat(media-centre): Add resource limits to proxy task
feat(media-centre): Add Plex task to media-centre job spec
fix(media-centre): add constraints to media-centre job spec
fix(media-centre): nomad doesn't allow sharing devices
fix(media-centre): disable change config dir ownership
fix(media-centre): plex process user is set using env vars
fix(media-centre): update PLEX_GID in job spec
fix(media-centre): update PLEX_GID in job spec
fix(media-centre): update PLEX_UID in job spec
feat(media-centre): enable nvidia gpu capabilities
feat(media-centre): add Tautulli service to media-centre job spec
fix(media-centre): update tautulli volumes
feat(plextraktsync): add plextraktsync module
fix(plextraktsync): update plextraktsync job spec "type" to "batch"
feat(plextraktsync): update resource allocation
fix(plextraktsync): fix cron schedule in plextraktsync job spec
feat(nfs-csi): add nfs-csi module
chore: update .gitignore to include .env file
chore: format files
feat(seedbox): add seedbox module
feat(seedbox): add qbittorrent module and NFS volume
feat(seedbox): add timezone configuration for seedbox job
fix(seedbox): vuetorrent-lsio-mod image env var
feat(seedbox): add HTTP_PORT environment variable for qbittorrent module
feat(seedbox): update access mode for NFS volume
feat(seedbox): add node constraint for seedbox job
feat(seedbox): add subdirectories for NFS volumes
feat(seedbox): add nolock mount flag for NFS volumes
feat(seedbox): Update NFS volume configuration
feat(seedbox): update Docker image and enable force pull
feat(seedbox): pause container network definition
feat(elk): create kibana
feat(elk): update kibana cpu allocation
feat(elk): add elasticsearch container to elk job

  This commit adds a new task "elasticsearch" to the "elk" job in the
  "node" group. The task uses the "podman" driver and pulls the
  "docker.elastic.co/elasticsearch/elasticsearch:8.15.2" image with
  force pull enabled. It exposes the "transport" port and mounts the
  "/mnt/docker/elastic/elasticsearch/config" and
  "/mnt/docker/elastic/elasticsearch/data" volumes. The task is
  allocated 500 MHz of CPU and 1024 MB of memory (see the sketch below).

feat(seedbox): update resource allocation in seedbox job
fix(elk): remove ulimit from elk job
  See: https://github.com/hashicorp/nomad-driver-podman/issues/341
fix(elk): add selinuxlabel to volume mounts in elk job
refactor(modules): remove unused modules and jobspecs
refactor(elk): update CPU allocation in elk job
feat(media-centre): Plex to use host network
feat(elk): add 9200 port to es node
feat(elk): allocate more ram to node
feat(elk): allocate even more ram to node
feat(media-centre): reduce memory allocation of tautulli
feat(elk): revert memory allocation after shard tidy-up
feat(media-centre): set memory soft limit
feat(media-centre): update memory hard limit for tautulli
feat(elk): tweak node mem alloc
  See: https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html#_example_11
feat(seedbox): add memory soft limit to vpn client
feat(seedbox): update memory hard limit for vpn client
fix(matrix): increase whatsapp-bridge memory allocation
refactor(elk): update elastic and kibana image versions in elk job
feat: add latest image versions and add force pull
feat: enable force pull for all podman driver tasks
feat(matrix): increase syncv3 memory allocation
feat: migrate podman memory allocation to nomad max memory
fix: nomad max memory is defined by memory_max (see the resources sketch below)
feat(matrix): add ecs fields to task metadata
refactor(matrix): migrate shared meta to parent
refactor(matrix): update resource allocation in jobspec.nomad.hcl
refactor(matrix): update resource allocation in jobspec.nomad.hcl
refactor(matrix): update resource allocation in jobspec.nomad.hcl
refactor(plextraktsync): update resource allocation in jobspec.nomad.hcl
refactor(plextraktsync): remove task node constraint
refactor: migrate podman tasks to docker tasks
feat(elk): update ulimit for elasticsearch container
refactor(elk): update volume paths in jobspec.nomad.hcl
feat(seedbox): remove pause container
feat(elk): update kibana count in jobspec.nomad.hcl
refactor(elk): remove node constraint from kibana
refactor(elk): add spread attribute to kibana
refactor(elk): update port configuration in jobspec.nomad.hcl
refactor(dummy): migrate json jobspec to hcl
feat(dummy): update service provider to consul
fix(dummy): add port label to port definition
refactor(dummy): rename jobspec to match standard
feat(dummy): migrate to service mesh
chore(dummy): update Nomad provider version to 2.4.0
chore(dummy): update Nomad provider version to 2.4.0
feat(dummy): configure traefik
refactor(dummy): update provider to use consul instead of nomad
feat(renovate): create module for automated dependency updates
Add renovate.json
fix(renovate): increase memory allocation
feat(renovate): add GITHUB_COM_TOKEN variable
refactor(renovate): pin version
feat(renovate): enable dependency dashboard
refactor(matrix): use bridge networking for server group
refactor(matrix): update URLs to use allocated addresses
refactor(matrix): remove host.docker.internal host
fix(matrix): update SYNCV3_BINDADDR
fix(matrix): update SYNCV3_BINDADDR port to 8009
fix(elk): increase memory allocation
feat(elk): disable co-located kibana allocations
refactor(jobspec): update provider to consul for elk and media-centre services
feat(media-centre): reduce memory allocation from 4096 to 1024
fix(jobspec): replace constraints with new neto client id
feat(elk): update data volume path to use unique node name
feat(elk): migrate elastic config to nfs
feat(elk): add Nyx
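For reference, a minimal sketch of the "elasticsearch" task as the message
above describes it at that point in the history (podman driver, Elasticsearch
8.15.2, transport port, config and data volumes, 500 MHz / 1024 MB). The
container-side mount paths and exact block layout are assumptions taken from
the stock image defaults, not a copy of the original task:

    task "elasticsearch" {
      driver = "podman"

      config {
        image      = "docker.elastic.co/elasticsearch/elasticsearch:8.15.2"
        force_pull = true

        # "transport" is the inter-node port; the http port (9200) was only
        # added to the group later ("add 9200 port to es node").
        ports = ["transport"]

        volumes = [
          # host path:container path (container paths assumed from the stock image)
          "/mnt/docker/elastic/elasticsearch/config:/usr/share/elasticsearch/config",
          "/mnt/docker/elastic/elasticsearch/data:/usr/share/elasticsearch/data",
        ]
      }

      resources {
        cpu    = 500  # MHz
        memory = 1024 # MB
      }
    }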
refactor(workflows): reformat (#17)
  Reviewed-on: https://git.brmartin.co.uk/ben/cluster-state/pulls/17
fix(elk): increase memory allocation to 2048 MB
refactor(matrix): remove specific node constraint from job specification
feat(matrix): implement consul service mesh
feat(elk): use allocation index for node state location
refactor(media-centre): remove deprecated NVIDIA_DRIVER_CAPABILITIES
fix(media-centre): plex transcode dir not writable
fix(media-centre): set transcode dir to world writable
fix(media-centre): set transcode dir to world writable
feat(media-centre): replace plex transcode dir with a persistent volume
feat(media-centre): increase plex memory limit
  For caching
chore(elk): promote elastic version
feat(elk): remove force_pull option from Elasticsearch and Kibana configurations
style(jobspec): improve formatting in HCL files
feat(elk): add health check
feat(media-centre): add NVIDIA visible devices for Jellyfin and Plex
fix(media-centre): increase max memory for tautulli
feat(plugin-csi): add NFS CSI driver jobspec and main configuration
feat(main.tf): add plugin-csi module to main configuration
fix(plugin-csi): refactor NFS job specifications into separate files for controller and node
fix(plugin-csi): add NFS path variable for controller and node resources
fix(plugin-csi): add NFS path variable to controller and node job specifications
fix(plugin-csi): add provisioner name to NFS job specifications for controller and node
fix(plugin-csi): update NFS job specifications
feat(seedbox): restructure job specifications and add NFS volume registrations for media and qbittorrent config
feat(workflows): add lint workflow for Terraform and Nomad formatting
fix(seedbox): add attachment and access modes for media and qbittorrent_config volumes
feat(seedbox): remove node constraint
Update modules/seedbox/main.tf
fix(seedbox): add mount options with nolock flag for media and qbittorrent_config volumes
fix(seedbox): update share paths to use lowercase in media and qbittorrent_config volumes
fix(seedbox): remove unused device configuration from jobspec
feat(matrix): add health check configuration
feat(matrix): add health check ports for synapse, mas, and nginx
fix(matrix): remove health check configuration for synapse, mas, and nginx
feat(main.tf): remove unused and broken seedbox module
feat(renovate): use JSON log format
chore(elk): upgrade version to latest
feat(elk): use 2 kibana replicas
feat(elk): add on_update ignore option to ready check configuration
fix(elk): update volume paths to use node unique name for configuration and data
feat(matrix): add envoy_metrics port and update service metadata for Consul integration
feat(matrix): add health check configuration to synapse job
feat(matrix): add /metrics endpoint exposure for envoy_metrics
fix(matrix): update service port configurations to use static port numbers
feat(matrix): restructure ingress groups and enhance service configurations for improved routing
fix(matrix): update whatsapp bridge tokens and change push to receive ephemeral
feat(media-centre): remove node constraint from tautulli task configuration
feat(elk): onboard hestia node to nomad
feat(elk): enhance job specification with Envoy metrics and update service configurations
feat(renovate): onboard nomad docker image updates
chore(deps): update ghcr.io/renovatebot/renovate docker tag to v38.142.7
chore(jobspec): use explicit image version tags where possible
fix(jobspec): formatting
chore(deps): update busybox docker tag to v1.37.0
chore(deps): update docker.io/library/nginx docker tag to v1.27.3
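On the soft/hard memory limit messages above: in Nomad HCL the soft limit is
"memory" (what the scheduler reserves for the task) and the hard limit is
"memory_max" (the ceiling the task may burst to when memory oversubscription
is enabled on the cluster). A minimal sketch with illustrative values:

    resources {
      cpu        = 100 # MHz
      memory     = 128 # soft limit: reserved at scheduling time, in MB
      memory_max = 512 # hard limit: burst ceiling, in MB
    }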
chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39
chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39.59.0
chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39.60.0
chore(matrix): format multiline string in jobspec.nomad.hcl for improved readability
chore(secrets): refactor jobspecs to use templates for sensitive environment variables
---
 .gitea/workflows/lint.yaml                  |  30 +
 .gitea/workflows/plan-and-apply.yaml        |  73 +--
 .gitignore                                  |   1 +
 .terraform.lock.hcl                         |  28 +-
 main.tf                                     |  20 +-
 modules/dummy/jobspec.json                  |  62 --
 modules/dummy/jobspec.nomad.hcl             |  45 ++
 modules/dummy/main.tf                       |   3 +-
 modules/elk/jobspec.nomad.hcl               | 140 ++++
 modules/elk/main.tf                         |   9 +
 modules/matrix/jobspec.nomad.hcl            | 618 ++++++++++++++++++
 modules/matrix/main.tf                      |   3 +
 modules/media-centre/jobspec.json           |  84 ---
 modules/media-centre/jobspec.nomad.hcl      | 203 ++++++
 modules/media-centre/main.tf                |   3 +-
 modules/plextraktsync/jobspec.nomad.hcl     |  28 +
 modules/plextraktsync/main.tf               |   3 +
 .../plugin-csi/jobspec-controller.nomad.hcl |  28 +
 modules/plugin-csi/jobspec-nodes.nomad.hcl  |  31 +
 modules/plugin-csi/main.tf                  |   7 +
 modules/renovate/jobspec.nomad.hcl          |  44 ++
 modules/renovate/main.tf                    |   3 +
 modules/seedbox/jobspec.nomad.hcl           | 117 ++++
 modules/seedbox/main.tf                     |  59 ++
 modules/uptime/jobspec.json                 |  63 --
 modules/uptime/main.tf                      |   4 -
 provider.tf                                 |   2 +-
 renovate.json                               |  13 +
 28 files changed, 1428 insertions(+), 296 deletions(-)
 create mode 100644 .gitea/workflows/lint.yaml
 delete mode 100644 modules/dummy/jobspec.json
 create mode 100644 modules/dummy/jobspec.nomad.hcl
 create mode 100644 modules/elk/jobspec.nomad.hcl
 create mode 100644 modules/elk/main.tf
 create mode 100644 modules/matrix/jobspec.nomad.hcl
 create mode 100644 modules/matrix/main.tf
 delete mode 100644 modules/media-centre/jobspec.json
 create mode 100644 modules/media-centre/jobspec.nomad.hcl
 create mode 100644 modules/plextraktsync/jobspec.nomad.hcl
 create mode 100644 modules/plextraktsync/main.tf
 create mode 100644 modules/plugin-csi/jobspec-controller.nomad.hcl
 create mode 100644 modules/plugin-csi/jobspec-nodes.nomad.hcl
 create mode 100644 modules/plugin-csi/main.tf
 create mode 100644 modules/renovate/jobspec.nomad.hcl
 create mode 100644 modules/renovate/main.tf
 create mode 100644 modules/seedbox/jobspec.nomad.hcl
 create mode 100644 modules/seedbox/main.tf
 delete mode 100644 modules/uptime/jobspec.json
 delete mode 100644 modules/uptime/main.tf
 create mode 100644 renovate.json
diff --git a/.gitea/workflows/lint.yaml b/.gitea/workflows/lint.yaml new file mode 100644 index 0000000..74eff73 --- /dev/null +++ b/.gitea/workflows/lint.yaml @@ -0,0 +1,30 @@ +name: Lint + +on: + pull_request: + branches: + - main + push: + branches: + - main + +env: + TF_IN_AUTOMATION: true + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Setup Nomad + uses: hashicorp/setup-nomad@main + + - name: Terraform fmt + run: terraform fmt -recursive -check + + - name: Nomad fmt + run: nomad fmt -recursive -check diff --git a/.gitea/workflows/plan-and-apply.yaml b/.gitea/workflows/plan-and-apply.yaml index ea39506..02d7669 100644 --- a/.gitea/workflows/plan-and-apply.yaml +++ b/.gitea/workflows/plan-and-apply.yaml @@ -8,9 +8,6 @@ on: branches: - main -permissions: - pull-requests: write - env: TF_PLUGIN_CACHE_DIR: ${{ gitea.workspace }}/.terraform.d/plugin-cache TF_IN_AUTOMATION: true @@ -24,79 +21,31 @@ jobs: runs-on: ubuntu-latest steps: - uses:
actions/checkout@v4 - - uses: hashicorp/setup-terraform@v3 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + - name: Create Terraform Plugin Cache Dir run: mkdir -v -p $TF_PLUGIN_CACHE_DIR - - uses: actions/cache@v4 + + - name: Cache Terraform Plugins + uses: actions/cache@v4 with: path: ${{ env.TF_PLUGIN_CACHE_DIR }} key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} - - name: Terraform fmt - id: fmt - run: terraform fmt -recursive -check - continue-on-error: true + - name: Terraform Init id: init run: terraform init -input=false + - name: Terraform Validate id: validate run: terraform validate + - name: Terraform Plan id: plan run: terraform plan -out=tfplan - continue-on-error: true - - uses: actions/github-script@v7 - if: github.event_name == 'pull_request' - env: - PLAN: "terraform\n${{ steps.plan.outputs.stdout }}" - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - }) - const botComment = comments.find(comment => { - return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style') - }) - const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\` - #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` - #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` -
<details><summary>Validation Output</summary> - \`\`\`\n - ${{ steps.validate.outputs.stdout }} - \`\`\` - - </details>
- - #### Terraform Plan 📖\`${{ steps.plan.outcome }}\` - -
<details><summary>Show Plan</summary> - - \`\`\`\n - ${process.env.PLAN} - \`\`\` - - </details>
- - *Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`, Working Directory: \`${{ env.tf_actions_working_dir }}\`, Workflow: \`${{ github.workflow }}\`*`; - if (botComment) { - github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: botComment.id, - body: output - }) - } else { - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: output - }) - } - - name: Terraform apply + - name: Terraform Apply if: github.ref == 'refs/heads/main' && steps.plan.outcome == 'success' run: terraform apply -auto-approve tfplan diff --git a/.gitignore b/.gitignore index bb002f3..402a011 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ override.tf.json .terraform.tfstate.lock.info .terraformrc terraform.rc +.env diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl index 4299d9f..0f209ba 100644 --- a/.terraform.lock.hcl +++ b/.terraform.lock.hcl @@ -2,21 +2,21 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/nomad" { - version = "2.2.0" - constraints = "2.2.0" + version = "2.4.0" + constraints = "2.4.0" hashes = [ - "h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=", - "zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66", - "zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff", - "zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61", - "zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f", - "zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4", - "zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2", - "zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f", + "h1:MnNLz6rQIqiLk6EKf5XjJlE5S/wmqh+z0A4ndDHWEZA=", + "zh:0825c5d2e6cb6a92aa247366f10c74275fdf9027bdb874d374aa3a9c3983ec68", + "zh:0c939ce35dce82da62c4cc8642903b43292c9915ac1a13099885e7c89edb87ae", + "zh:23dd5d8300e7d6b42a1a55a541296227b3c054ad19dc8a6eb411ef8b2d689f5e", + "zh:26b76c1d2c25f1b9730d5b6fe0fce355a0a5f666c0818f7284d9663ee556adec", + "zh:4915e1f176c4aa910504113629dbe25120a3915e703cb8f8b637dd2d20a4ad6f", + "zh:4f9d3bb2e97c9a4a135aa9d8d65f37902a7f838655e21cc22fffc8ebab8d2d66", + "zh:51dad4566c56b9bbe0a59c25287f3e14c35b5fbfde167fdab6ae98dfc23a6ae1", + "zh:56a7f7939bc41dbcdadf1fbbc7096090a26aa38552060cef472c82181af26cc8", + "zh:68bc2c7d28e1a7de2655e194423e911433ea8f3b87ab0a54ed1833f44ef63bb5", + "zh:75f9d9c4c031c3ac83b2c2cf37163edf3b8eea9f58a379d1b83d096f0b3d98cc", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c", - "zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6", - "zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2", - "zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1", + "zh:fea4d2a0df951ab8fad4d2f8da0e2c2198e93220cf9269d5c51d80db1988ae52", ] } diff --git a/main.tf b/main.tf index 53a08dc..cab2e4b 100644 --- a/main.tf +++ b/main.tf @@ -10,6 +10,22 @@ module "media-centre" { source = "./modules/media-centre" } -module "uptime" { - source = "./modules/uptime" +module "plextraktsync" { + source = "./modules/plextraktsync" +} + +module "matrix" { + source = "./modules/matrix" +} + +module "elk" { + source = "./modules/elk" +} + +module "renovate" { + source = "./modules/renovate" +} + +module "plugin-csi" { + source = "./modules/plugin-csi" } diff --git a/modules/dummy/jobspec.json 
b/modules/dummy/jobspec.json deleted file mode 100644 index ba1f007..0000000 --- a/modules/dummy/jobspec.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "ID": "hello-world", - "Name": "hello-world", - "TaskGroups": [ - { - "Name": "servers", - "Tasks": [ - { - "Name": "web", - "Driver": "docker", - "Config": { - "image": "busybox:1", - "command": "httpd", - "args": [ - "-v", - "-f", - "-p", - "${NOMAD_PORT_www}", - "-h", - "/local" - ], - "ports": [ - "www" - ] - }, - "Templates": [ - { - "DestPath": "local/index.html", - "EmbeddedTmpl": "
<h1>Hello, Ben!</h1>
\n" - } - ], - "Resources": { - "CPU": 50, - "MemoryMB": 64 - } - } - ], - "Networks": [ - { - "DynamicPorts": [ - { - "Label": "www", - "To": 8001 - } - ] - } - ], - "Services": [ - { - "PortLabel": "www", - "Provider": "nomad", - "Name": "web", - "Tags": [ - "traefik.enable=true", - "traefik.http.routers.web.entrypoints=websecure", - "traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)" - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/modules/dummy/jobspec.nomad.hcl b/modules/dummy/jobspec.nomad.hcl new file mode 100644 index 0000000..12dc396 --- /dev/null +++ b/modules/dummy/jobspec.nomad.hcl @@ -0,0 +1,45 @@ +job "hello-world" { + datacenters = ["dc1"] + + group "servers" { + count = 1 + + task "web" { + driver = "docker" + + config { + image = "busybox:1.37.0" + command = "httpd" + args = ["-v", "-f", "-p", "${NOMAD_PORT_www}", "-h", "/local"] + ports = ["www"] + } + + template { + destination = "local/index.html" + data = "
<h1>Hello, Ben!</h1>
\n" + } + + resources { + cpu = 50 + memory = 64 + } + } + + network { + mode = "bridge" + port "www" { + to = 8001 + } + } + + service { + port = "www" + provider = "consul" + + tags = [ + "traefik.enable=true", + "traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)", + ] + } + } +} diff --git a/modules/dummy/main.tf b/modules/dummy/main.tf index 8cd9998..ad91892 100644 --- a/modules/dummy/main.tf +++ b/modules/dummy/main.tf @@ -1,4 +1,3 @@ resource "nomad_job" "dummy" { - jobspec = file("${path.module}/jobspec.json") - json = true + jobspec = file("${path.module}/jobspec.nomad.hcl") } \ No newline at end of file diff --git a/modules/elk/jobspec.nomad.hcl b/modules/elk/jobspec.nomad.hcl new file mode 100644 index 0000000..c5d29ab --- /dev/null +++ b/modules/elk/jobspec.nomad.hcl @@ -0,0 +1,140 @@ +variable "elastic_version" { + type = string +} + +job "elk" { + + group "node" { + + count = 3 + + constraint { + distinct_hosts = true + } + + network { + mode = "bridge" + port "http" { + static = 9200 + } + port "transport" { + static = 9300 + } + port "envoy_metrics" { + to = 9102 + } + } + + service { + provider = "consul" + port = "9200" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + connect { + sidecar_service { + proxy { + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + transparent_proxy { + exclude_inbound_ports = ["9200", "9300"] + exclude_outbound_ports = [9200, 9300] + } + } + } + } + } + + service { + provider = "consul" + port = "transport" + } + + task "elasticsearch" { + driver = "docker" + + config { + image = "docker.elastic.co/elasticsearch/elasticsearch:${var.elastic_version}" + + ports = ["9200", "9300"] + + volumes = [ + "/mnt/docker/elastic-${node.unique.name}/config:/usr/share/elasticsearch/config", + "/mnt/docker/elastic-${node.unique.name}/data:/usr/share/elasticsearch/data", + ] + + ulimit { + memlock = "-1:-1" + } + } + + resources { + cpu = 2000 + memory = 2048 + } + } + } + + group "kibana" { + + count = 2 + + constraint { + distinct_hosts = true + } + + network { + mode = "bridge" + port "web" { + static = 5601 + } + } + + task "kibana" { + driver = "docker" + + config { + image = "docker.elastic.co/kibana/kibana:${var.elastic_version}" + + ports = ["web"] + + volumes = [ + "/mnt/docker/elastic/kibana/config:/usr/share/kibana/config", + ] + } + + resources { + cpu = 1500 + memory = 1024 + } + + service { + tags = [ + "traefik.enable=true", + "traefik.http.routers.kibana.rule=Host(`kibana.brmartin.co.uk`)", + "traefik.http.routers.kibana.entrypoints=websecure", + ] + + port = "web" + address_mode = "host" + provider = "consul" + + check { + type = "http" + path = "/api/status" + interval = "10s" + timeout = "2s" + on_update = "ignore" + } + } + } + } +} diff --git a/modules/elk/main.tf b/modules/elk/main.tf new file mode 100644 index 0000000..eb87d49 --- /dev/null +++ b/modules/elk/main.tf @@ -0,0 +1,9 @@ +resource "nomad_job" "elk" { + jobspec = file("${path.module}/jobspec.nomad.hcl") + + hcl2 { + vars = { + "elastic_version" = "8.16.1", + } + } +} diff --git a/modules/matrix/jobspec.nomad.hcl b/modules/matrix/jobspec.nomad.hcl new file mode 100644 index 0000000..cbbfeab --- /dev/null +++ b/modules/matrix/jobspec.nomad.hcl @@ -0,0 +1,618 @@ +job "matrix" { + + meta = { + "service.type" = "matrix" + } + + group "synapse" { + + network { + mode = "bridge" + port "envoy_metrics" { + to = 9102 + } + } + + service { + provider = "consul" + port = 
"8008" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + check { + type = "http" + path = "/health" + interval = "20s" + timeout = "5s" + expose = true + } + + connect { + sidecar_service { + proxy { + config { + protocol = "http" + local_idle_timeout_ms = 120000 + } + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + transparent_proxy {} + } + } + } + } + + task "synapse" { + driver = "docker" + + config { + image = "ghcr.io/element-hq/synapse:v1.120.2" + + ports = ["8008"] + + volumes = [ + "/mnt/docker/matrix/synapse:/data", + "/mnt/docker/matrix/media_store:/media_store", + ] + } + + env = { + SYNAPSE_WORKER = "synapse.app.homeserver" + } + + template { + data = <<-EOF + id: whatsapp + url: http://matrix-whatsapp-bridge.virtual.consul + {{with nomadVar "nomad/jobs/matrix/synapse/synapse"}} + as_token="{{.as_token}}" + hs_token="{{.hs_token}}" + {{end}} + sender_localpart: ctvppZV8epjY9iUtTt0nR29e92V4nIJb + rate_limited: false + namespaces: + users: + - regex: ^@whatsappbot:brmartin\.co\.uk$ + exclusive: true + - regex: ^@whatsapp_.*:brmartin\.co\.uk$ + exclusive: true + de.sorunome.msc2409.push_ephemeral: true + receive_ephemeral: true + EOF + + destination = "local/matrix-whatsapp-registration.yaml" + } + + resources { + cpu = 500 + memory = 128 + memory_max = 256 + } + + meta = { + "service.name" = "synapse" + } + } + } + + group "whatsapp-bridge" { + + network { + mode = "bridge" + port "envoy_metrics" { + to = 9102 + } + } + + service { + provider = "consul" + port = "8082" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + connect { + sidecar_service { + proxy { + config { + protocol = "http" + } + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + transparent_proxy {} + } + } + } + } + + task "whatsapp-bridge" { + driver = "docker" + + config { + image = "dock.mau.dev/mautrix/whatsapp:v0.11.1" + + ports = ["8082"] + + volumes = [ + "/mnt/docker/matrix/whatsapp-data:/data" + ] + } + + resources { + cpu = 50 + memory = 16 + memory_max = 32 + } + + meta = { + "service.name" = "whatsapp" + } + } + } + + group "mas" { + + network { + mode = "bridge" + port "envoy_metrics" { + to = 9102 + } + } + + service { + port = "8081" + provider = "consul" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + connect { + sidecar_service { + proxy { + config { + protocol = "http" + } + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + transparent_proxy {} + } + } + } + } + + task "mas" { + driver = "docker" + + config { + image = "ghcr.io/matrix-org/matrix-authentication-service:main" + force_pull = true + + ports = ["8081"] + + volumes = [ + "/mnt/docker/matrix/synapse-mas/config.yaml:/config.yaml:ro" + ] + } + + env { + MAS_CONFIG = "/config.yaml" + } + + resources { + cpu = 100 + memory = 32 + memory_max = 64 + } + + meta = { + "service.name" = "mas" + } + } + } + + group "syncv3" { + + network { + mode = "bridge" + port "envoy_metrics" { + to = 9102 + } + } + + service { + provider = "consul" + port = "8008" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + connect { + sidecar_service { + proxy { + config { + protocol = "http" + } + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + 
transparent_proxy {} + } + } + } + } + + task "syncv3" { + driver = "docker" + + config { + image = "ghcr.io/matrix-org/sliding-sync:v0.99.19" + + ports = ["8008"] + } + + env = { + SYNCV3_SERVER = "http://synapse.service.consul" + } + + template { + data = <<-EOH + {{with nomadVar "nomad/jobs/matrix/syncv3/syncv3"}} + SYNCV3_SECRET="{{.SYNCV3_SECRET}}" + SYNCV3_DB="{{.SYNCV3_DB}}" + {{end}} + EOH + + destination = "secrets/file.env" + env = true + } + + resources { + cpu = 50 + memory = 16 + memory_max = 32 + } + + meta = { + "service.name" = "syncv3" + } + } + } + + group "nginx" { + + network { + mode = "bridge" + port "nginx" { + to = 80 + } + port "envoy_metrics" { + to = 9102 + } + } + + service { + provider = "consul" + port = "80" + + meta { + envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}" + } + + connect { + sidecar_service { + proxy { + config { + protocol = "http" + local_idle_timeout_ms = 120000 + } + expose { + path { + path = "/metrics" + protocol = "http" + local_path_port = 9102 + listener_port = "envoy_metrics" + } + } + transparent_proxy {} + } + } + } + } + + task "nginx" { + driver = "docker" + + config { + image = "docker.io/library/nginx:1.27.3-alpine" + + ports = ["80"] + + volumes = [ + "/mnt/docker/matrix/nginx/templates:/etc/nginx/templates:ro", + "/mnt/docker/matrix/nginx/html:/usr/share/nginx/html:ro", + ] + } + + env = { + NGINX_PORT = "80" + } + + resources { + cpu = 50 + memory = 16 + } + + meta = { + "service.name" = "nginx" + } + } + } + + group "synapse-ingress-group" { + + network { + mode = "bridge" + port "inbound" { + to = 8080 + } + } + + service { + port = "inbound" + tags = [ + "traefik.enable=true", + + "traefik.http.routers.synapse.rule=Host(`matrix.brmartin.co.uk`)", + "traefik.http.routers.synapse.entrypoints=websecure", + "traefik.http.routers.synapse.middlewares=synapseHeaders,synapseBuffering", + "traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowmethods=GET,POST,PUT,DELETE,OPTIONS", + "traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowheaders=Origin,X-Requested-With,Content-Type,Accept,Authorization", + "traefik.http.middlewares.synapseHeaders.headers.accesscontrolalloworiginlist=*", + "traefik.http.middlewares.synapseBuffering.buffering.maxRequestBodyBytes=1000000000", + ] + + connect { + gateway { + proxy { + config { + local_idle_timeout_ms = 120000 + } + } + ingress { + listener { + port = 8080 + protocol = "http" + service { + name = "matrix-synapse" + hosts = ["*"] + } + } + } + } + } + } + } + + group "mas-ingress-group" { + + network { + mode = "bridge" + port "inbound" { + to = 8080 + } + } + + service { + port = "inbound" + tags = [ + "traefik.enable=true", + + "traefik.http.routers.mas.rule=Host(`mas.brmartin.co.uk`) || (Host(`matrix.brmartin.co.uk`) && PathRegexp(`^/_matrix/client/(.*)/(login|logout|refresh)`))", + "traefik.http.routers.mas.entrypoints=websecure", + ] + + connect { + gateway { + ingress { + listener { + port = 8080 + protocol = "http" + service { + name = "matrix-mas" + hosts = ["*"] + } + } + } + } + } + } + } + + group "wellknown-ingress-group" { + + network { + mode = "bridge" + port "inbound" { + to = 8080 + } + } + + service { + port = "inbound" + tags = [ + "traefik.enable=true", + + "traefik.http.routers.matrixWellKnown.rule=PathPrefix(`/.well-known/matrix`)", + "traefik.http.routers.matrixWellKnown.entrypoints=websecure", + "traefik.http.routers.matrixWellKnown.middlewares=matrixWellKnown", + 
"traefik.http.middlewares.matrixWellKnown.headers.accesscontrolalloworiginlist=*", + ] + + connect { + gateway { + ingress { + listener { + port = 8080 + protocol = "http" + service { + name = "matrix-nginx" + hosts = ["*"] + } + } + } + } + } + } + } + + group "syncv3-ingress-group" { + + network { + mode = "bridge" + port "inbound" { + to = 8080 + } + } + + service { + port = "inbound" + tags = [ + "traefik.enable=true", + + "traefik.http.routers.matrixsyncv3.rule=Host(`matrix.brmartin.co.uk`) && (PathPrefix(`/client`) || PathPrefix(`/_matrix/client/unstable/org.matrix.msc3575/sync`))", + "traefik.http.routers.matrixsyncv3.entrypoints=websecure", + ] + + connect { + gateway { + ingress { + listener { + port = 8080 + protocol = "http" + service { + name = "matrix-syncv3" + hosts = ["*"] + } + } + } + } + } + } + } + + group "element" { + + network { + port "element" { + to = 80 + } + } + + task "element" { + driver = "docker" + + config { + image = "docker.io/vectorim/element-web:v1.11.87" + + ports = ["element"] + + volumes = [ + "/mnt/docker/matrix/element/config.json:/app/config.json:ro" + ] + } + + resources { + cpu = 100 + memory = 16 + } + + service { + tags = [ + "traefik.enable=true", + "traefik.http.routers.element.rule=Host(`element.brmartin.co.uk`)", + "traefik.http.routers.element.entrypoints=websecure", + ] + + port = "element" + address_mode = "host" + provider = "consul" + } + + meta = { + "service.name" = "element" + } + } + } + + group "cinny" { + + network { + port "cinny" { + to = 80 + } + } + + task "cinny" { + driver = "docker" + + config { + image = "ghcr.io/cinnyapp/cinny:v4.2.3" + + ports = ["cinny"] + + volumes = [ + "/mnt/docker/matrix/cinny/config.json:/app/config.json:ro" + ] + } + + resources { + cpu = 50 + memory = 16 + } + + service { + tags = [ + "traefik.enable=true", + "traefik.http.routers.cinny.rule=Host(`cinny.brmartin.co.uk`)", + "traefik.http.routers.cinny.entrypoints=websecure", + ] + + port = "cinny" + address_mode = "host" + provider = "consul" + } + + meta = { + "service.name" = "cinny" + } + } + } +} diff --git a/modules/matrix/main.tf b/modules/matrix/main.tf new file mode 100644 index 0000000..67964cb --- /dev/null +++ b/modules/matrix/main.tf @@ -0,0 +1,3 @@ +resource "nomad_job" "matrix" { + jobspec = file("${path.module}/jobspec.nomad.hcl") +} diff --git a/modules/media-centre/jobspec.json b/modules/media-centre/jobspec.json deleted file mode 100644 index 89044b7..0000000 --- a/modules/media-centre/jobspec.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "ID": "media-centre", - "Name": "Media Centre", - "TaskGroups": [ - { - "Name": "Media Servers", - "Tasks": [ - { - "Name": "Jellyfin", - "User": "985", - "Driver": "docker", - "Config": { - "image": "jellyfin/jellyfin", - "runtime": "nvidia", - "group_add": [ - "997" - ], - "ports": [ - "jellyfin" - ], - "mounts": [ - { - "type": "volume", - "target": "/media", - "volume_options": { - "driver_config": { - "name": "local", - "options": [ - { - "type": "nfs", - "o": "addr=martinibar.lan,nolock,soft,rw", - "device": ":/volume1/docker" - } - ] - } - } - }, - { - "type": "volume", - "target": "/config", - "source": "jellyfin-config" - } - ] - }, - "Env": { - "JELLYFIN_PublishedServerUrl": "192.168.1.5" - }, - "Resources": { - "CPU": 1200, - "MemoryMB": 4096, - "Devices": [ - { - "Name": "nvidia/gpu", - "Count": 1 - } - ] - } - } - ], - "Services": [ - { - "Name": "Jellyfin", - "Provider": "nomad", - "PortLabel": "jellyfin", - "Tags": [ - "traefik.enable=true", - 
"traefik.http.routers.jellyfin.entrypoints=websecure", - "traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)" - ] - } - ], - "Networks": [ - { - "DynamicPorts": [ - { - "Label": "jellyfin", - "To": 8096 - } - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/modules/media-centre/jobspec.nomad.hcl b/modules/media-centre/jobspec.nomad.hcl new file mode 100644 index 0000000..b73f625 --- /dev/null +++ b/modules/media-centre/jobspec.nomad.hcl @@ -0,0 +1,203 @@ +job "media-centre" { + group "jellyfin" { + task "jellyfin" { + user = "985" + driver = "docker" + + constraint { + attribute = "${node.unique.id}" + value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e" + } + + config { + image = "ghcr.io/jellyfin/jellyfin:10.10.3" + runtime = "nvidia" + group_add = ["997"] + ports = ["jellyfin"] + + mount { + type = "volume" + target = "/media" + volume_options { + driver_config { + name = "local" + options { + type = "nfs" + o = "addr=martinibar.lan,nolock,soft,rw" + device = ":/volume1/docker" + } + } + } + } + + mount { + type = "volume" + target = "/config" + source = "jellyfin-config" + } + } + + env { + JELLYFIN_PublishedServerUrl = "https://jellyfin.brmartin.co.uk" + NVIDIA_DRIVER_CAPABILITIES = "all" + NVIDIA_VISIBLE_DEVICES = "all" + } + + resources { + cpu = 1200 + memory = 4096 + } + } + + service { + name = "Jellyfin" + provider = "consul" + port = "jellyfin" + tags = [ + "traefik.enable=true", + "traefik.http.routers.jellyfin.entrypoints=websecure", + "traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)" + ] + } + + network { + port "jellyfin" { + to = 8096 + } + } + } + + group "plex" { + task "plex" { + driver = "docker" + + constraint { + attribute = "${node.unique.id}" + value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e" + } + + config { + image = "plexinc/pms-docker:latest" + runtime = "nvidia" + ports = ["plex"] + network_mode = "host" + + mount { + type = "volume" + target = "/data" + volume_options { + driver_config { + name = "local" + options { + type = "nfs" + o = "addr=martinibar.lan,nolock,soft,rw" + device = ":/volume1/docker" + } + } + } + } + + mount { + type = "volume" + target = "/share" + volume_options { + driver_config { + name = "local" + options { + type = "nfs" + o = "addr=martinibar.lan,nolock,soft,rw" + device = ":/volume1/Share" + } + } + } + } + + mount { + type = "volume" + target = "/config" + source = "plex-config" + } + + mount { + type = "volume" + target = "/transcode" + source = "plex-transcode" + } + } + + env { + TZ = "Europe/London" + CHANGE_CONFIG_DIR_OWNERSHIP = "false" + PLEX_UID = "990" + PLEX_GID = "997" + NVIDIA_DRIVER_CAPABILITIES = "all" + NVIDIA_VISIBLE_DEVICES = "all" + } + + resources { + cpu = 1200 + memory = 4096 + } + } + + service { + name = "Plex" + provider = "consul" + port = "plex" + tags = [ + "traefik.enable=true", + "traefik.http.routers.plex.entrypoints=websecure", + "traefik.http.routers.plex.rule=Host(`plex.brmartin.co.uk`)" + ] + } + + network { + port "plex" { + static = 32400 + } + } + } + + group "tautulli" { + task "tautulli" { + driver = "docker" + + config { + image = "ghcr.io/tautulli/tautulli:v2.15.0" + ports = ["tautulli"] + + volumes = [ + "/mnt/docker/downloads/config/tautulli:/config", + ] + } + + env { + PUID = "994" + PGID = "997" + TZ = "Europe/London" + } + + resources { + cpu = 100 + memory = 128 + memory_max = 256 + } + } + + service { + provider = "consul" + port = "tautulli" + tags = [ + "traefik.enable=true", + "traefik.http.routers.tautulli.entrypoints=websecure", + 
"traefik.http.routers.tautulli.rule=Host(`tautulli.brmartin.co.uk`)" + ] + } + + network { + port "tautulli" { + to = 8181 + } + } + } +} diff --git a/modules/media-centre/main.tf b/modules/media-centre/main.tf index f285e4d..f35b8ca 100644 --- a/modules/media-centre/main.tf +++ b/modules/media-centre/main.tf @@ -1,4 +1,3 @@ resource "nomad_job" "media-centre" { - jobspec = file("${path.module}/jobspec.json") - json = true + jobspec = file("${path.module}/jobspec.nomad.hcl") } diff --git a/modules/plextraktsync/jobspec.nomad.hcl b/modules/plextraktsync/jobspec.nomad.hcl new file mode 100644 index 0000000..8e2ace9 --- /dev/null +++ b/modules/plextraktsync/jobspec.nomad.hcl @@ -0,0 +1,28 @@ +job "plextraktsync" { + type = "batch" + + periodic { + crons = ["0 0/2 * * *"] + prohibit_overlap = true + } + + + group "plextraktsync" { + task "plextraktsync" { + driver = "docker" + + config { + image = "ghcr.io/taxel/plextraktsync:0.32.2" + volumes = [ + "/mnt/docker/downloads/config/plextraktsync:/app/config" + ] + command = "sync" + } + + resources { + cpu = 2000 + memory = 128 + } + } + } +} diff --git a/modules/plextraktsync/main.tf b/modules/plextraktsync/main.tf new file mode 100644 index 0000000..b530264 --- /dev/null +++ b/modules/plextraktsync/main.tf @@ -0,0 +1,3 @@ +resource "nomad_job" "plextraktsync" { + jobspec = file("${path.module}/jobspec.nomad.hcl") +} diff --git a/modules/plugin-csi/jobspec-controller.nomad.hcl b/modules/plugin-csi/jobspec-controller.nomad.hcl new file mode 100644 index 0000000..c52f42b --- /dev/null +++ b/modules/plugin-csi/jobspec-controller.nomad.hcl @@ -0,0 +1,28 @@ +job "plugin-nfs-controller" { + group "controller" { + task "plugin" { + driver = "docker" + + config { + image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest" + + args = [ + "--endpoint=unix://csi/csi.sock", + "--nodeid=${attr.unique.hostname}", + "--v=5", + ] + } + + csi_plugin { + id = "nfs" + type = "controller" + mount_dir = "/csi" + } + + resources { + cpu = 100 + memory = 128 + } + } + } +} diff --git a/modules/plugin-csi/jobspec-nodes.nomad.hcl b/modules/plugin-csi/jobspec-nodes.nomad.hcl new file mode 100644 index 0000000..fed4897 --- /dev/null +++ b/modules/plugin-csi/jobspec-nodes.nomad.hcl @@ -0,0 +1,31 @@ +job "plugin-nfs-nodes" { + type = "system" + + group "nodes" { + task "plugin" { + driver = "docker" + + config { + image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest" + privileged = true + + args = [ + "--endpoint=unix://csi/csi.sock", + "--nodeid=${attr.unique.hostname}", + "--v=5", + ] + } + + csi_plugin { + id = "nfs" + type = "node" + mount_dir = "/csi" + } + + resources { + cpu = 100 + memory = 128 + } + } + } +} diff --git a/modules/plugin-csi/main.tf b/modules/plugin-csi/main.tf new file mode 100644 index 0000000..b54434c --- /dev/null +++ b/modules/plugin-csi/main.tf @@ -0,0 +1,7 @@ +resource "nomad_job" "nfs-controller" { + jobspec = file("${path.module}/jobspec-controller.nomad.hcl") +} + +resource "nomad_job" "nfs-nodes" { + jobspec = file("${path.module}/jobspec-nodes.nomad.hcl") +} diff --git a/modules/renovate/jobspec.nomad.hcl b/modules/renovate/jobspec.nomad.hcl new file mode 100644 index 0000000..1f55e2f --- /dev/null +++ b/modules/renovate/jobspec.nomad.hcl @@ -0,0 +1,44 @@ +job "renovate" { + type = "batch" + + periodic { + crons = ["0 * * * *"] + prohibit_overlap = true + } + + group "renovate" { + task "renovate" { + driver = "docker" + + config { + image = "ghcr.io/renovatebot/renovate:39.60.0" + } + + resources { + cpu = 2000 + memory = 512 + } + + env { + 
RENOVATE_PLATFORM = "gitea" + RENOVATE_AUTODISCOVER = "true" + RENOVATE_ENDPOINT = "https://git.brmartin.co.uk" + RENOVATE_GIT_AUTHOR = "Renovate Bot " + LOG_FORMAT = "json" + RENOVATE_DEPENDENCY_DASHBOARD = "true" + } + + template { + data = <<-EOH + {{with nomadVar "nomad/jobs/renovate/renovate/renovate" }} + RENOVATE_TOKEN = "{{.RENOVATE_TOKEN}}" + GITHUB_COM_TOKEN = "{{.GITHUB_COM_TOKEN}}" + {{end}} + EOH + + destination = "secrets/file.env" + env = true + } + } + } +} diff --git a/modules/renovate/main.tf b/modules/renovate/main.tf new file mode 100644 index 0000000..e2c8564 --- /dev/null +++ b/modules/renovate/main.tf @@ -0,0 +1,3 @@ +resource "nomad_job" "renovate" { + jobspec = file("${path.module}/jobspec.nomad.hcl") +} diff --git a/modules/seedbox/jobspec.nomad.hcl b/modules/seedbox/jobspec.nomad.hcl new file mode 100644 index 0000000..2e578a2 --- /dev/null +++ b/modules/seedbox/jobspec.nomad.hcl @@ -0,0 +1,117 @@ +job "seedbox" { + + group "proxy" { + + task "proxy" { + driver = "docker" + + config { + image = "docker.io/qmcgaw/gluetun:v3.39.1" + force_pull = true + + cap_add = ["NET_ADMIN"] + + sysctl = { + "net.ipv6.conf.all.disable_ipv6" = "1" + } + } + + resources { + cpu = 100 + memory = 128 + memory_max = 512 + } + + env { + VPN_SERVICE_PROVIDER = "ipvanish" + SERVER_COUNTRIES = "Switzerland" + HTTPPROXY = "on" + } + + template { + data = <<-EOH + {{with nomadVar "nomad/jobs/seedbox/proxy/proxy" }} + OPENVPN_USER = "{{.OPENVPN_USER}}" + OPENVPN_PASSWORD = "{{.OPENVPN_PASSWORD}}" + {{end}} + EOH + + destination = "secrets/file.env" + env = true + } + } + } + + group "client" { + + network { + port "qbittorrent" {} + } + + service { + port = "qbittorrent" + + provider = "consul" + + check { + type = "http" + path = "/" + interval = "10s" + timeout = "2s" + } + } + + volume "media" { + type = "csi" + source = "media" + attachment_mode = "file-system" + access_mode = "single-node-writer" + + mount_options { + mount_flags = ["nolock"] + } + } + + volume "qbittorrent_config" { + type = "csi" + source = "qbittorrent_config" + attachment_mode = "file-system" + access_mode = "single-node-writer" + + mount_options { + mount_flags = ["nolock"] + } + } + + task "qbittorrent" { + driver = "docker" + + config { + image = "ghcr.io/linuxserver/qbittorrent:5.0.2" + } + + resources { + cpu = 500 + memory = 128 + } + + env { + PUID = "991" + PGID = "997" + WEBUI_PORT = "${NOMAD_PORT_qbittorrent}" + TZ = "Europe/London" + DOCKER_MODS = "ghcr.io/vuetorrent/vuetorrent-lsio-mod:latest" + } + + volume_mount { + volume = "media" + destination = "/media" + } + + volume_mount { + volume = "qbittorrent_config" + destination = "/config" + } + } + } +} diff --git a/modules/seedbox/main.tf b/modules/seedbox/main.tf new file mode 100644 index 0000000..f0b427c --- /dev/null +++ b/modules/seedbox/main.tf @@ -0,0 +1,59 @@ +resource "nomad_job" "seedbox" { + depends_on = [ + nomad_csi_volume_registration.nfs_volume_media, + nomad_csi_volume_registration.nfs_volume_qbittorrent_config, + ] + + jobspec = file("${path.module}/jobspec.nomad.hcl") +} + +data "nomad_plugin" "nfs" { + plugin_id = "nfs" + wait_for_healthy = true +} + +resource "nomad_csi_volume_registration" "nfs_volume_media" { + depends_on = [data.nomad_plugin.nfs] + + lifecycle { + prevent_destroy = true + } + + plugin_id = "nfs" + name = "media" + volume_id = "media" + external_id = "media" + + capability { + access_mode = "single-node-writer" + attachment_mode = "file-system" + } + + context = { + "server" = "martinibar.lan", + "share" = 
"/volume1/csi/media", + } +} + +resource "nomad_csi_volume_registration" "nfs_volume_qbittorrent_config" { + depends_on = [data.nomad_plugin.nfs] + + lifecycle { + prevent_destroy = true + } + + plugin_id = "nfs" + name = "qbittorrent_config" + volume_id = "qbittorrent_config" + external_id = "qbittorrent_config" + + capability { + access_mode = "single-node-writer" + attachment_mode = "file-system" + } + + context = { + "server" = "martinibar.lan", + "share" = "/volume1/csi/qbittorrent_config", + } +} diff --git a/modules/uptime/jobspec.json b/modules/uptime/jobspec.json deleted file mode 100644 index 6faa539..0000000 --- a/modules/uptime/jobspec.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "ID": "uptime", - "Name": "Uptime", - "TaskGroups": [ - { - "Name": "Uptime Servers", - "Tasks": [ - { - "Name": "kuma", - "Driver": "docker", - "Config": { - "image": "louislam/uptime-kuma:latest", - "ports": [ - "web" - ], - "mounts": [ - { - "type": "volume", - "target": "/app/data", - "source": "kuma-data" - }, - { - "type": "bind", - "target": "/var/run/docker.sock", - "source": "/var/run/docker.sock" - } - ], - "extra_hosts": [ - "host.docker.internal:host-gateway" - ] - }, - "Resources": { - "CPU": 500, - "MemoryMB": 512 - } - } - ], - "Services": [ - { - "Name": "Kuma", - "Provider": "nomad", - "PortLabel": "web", - "Tags": [ - "traefik.enable=true", - "traefik.http.routers.kuma.entrypoints=websecure", - "traefik.http.routers.kuma.rule=Host(`status.brmartin.co.uk`)" - ] - - } - ], - "Networks": [ - { - "DynamicPorts": [ - { - "Label": "web", - "To": 3001 - } - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/modules/uptime/main.tf b/modules/uptime/main.tf deleted file mode 100644 index 2726e34..0000000 --- a/modules/uptime/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "nomad_job" "uptime" { - jobspec = file("${path.module}/jobspec.json") - json = true -} diff --git a/provider.tf b/provider.tf index 80f6028..b96c2a0 100644 --- a/provider.tf +++ b/provider.tf @@ -2,7 +2,7 @@ terraform { required_providers { nomad = { source = "hashicorp/nomad" - version = "2.2.0" + version = "2.4.0" } } } diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..8221b48 --- /dev/null +++ b/renovate.json @@ -0,0 +1,13 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "customManagers": [ + { + "customType": "regex", + "datasourceTemplate": "docker", + "fileMatch": ["\\.hcl$"], + "matchStrings": [ + "\\s*image\\s*=\\s*\\\"(?.*?):(?.*?)\\\"" + ] + } + ] +}