diff --git a/.gitea/workflows/lint.yaml b/.gitea/workflows/lint.yaml
new file mode 100644
index 0000000..74eff73
--- /dev/null
+++ b/.gitea/workflows/lint.yaml
@@ -0,0 +1,30 @@
+name: Lint
+
+on:
+ pull_request:
+ branches:
+ - main
+ push:
+ branches:
+ - main
+
+env:
+ TF_IN_AUTOMATION: true
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup Terraform
+ uses: hashicorp/setup-terraform@v3
+
+ - name: Setup Nomad
+ uses: hashicorp/setup-nomad@main
+
+ - name: Terraform fmt
+ run: terraform fmt -recursive -check
+
+ - name: Nomad fmt
+ run: nomad fmt -recursive -check
diff --git a/.gitea/workflows/plan-and-apply.yaml b/.gitea/workflows/plan-and-apply.yaml
index ea39506..02d7669 100644
--- a/.gitea/workflows/plan-and-apply.yaml
+++ b/.gitea/workflows/plan-and-apply.yaml
@@ -8,9 +8,6 @@ on:
branches:
- main
-permissions:
- pull-requests: write
-
env:
TF_PLUGIN_CACHE_DIR: ${{ gitea.workspace }}/.terraform.d/plugin-cache
TF_IN_AUTOMATION: true
@@ -24,79 +21,31 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- - uses: hashicorp/setup-terraform@v3
+
+ - name: Setup Terraform
+ uses: hashicorp/setup-terraform@v3
+
- name: Create Terraform Plugin Cache Dir
run: mkdir -v -p $TF_PLUGIN_CACHE_DIR
- - uses: actions/cache@v4
+
+ - name: Cache Terraform Plugins
+ uses: actions/cache@v4
with:
path: ${{ env.TF_PLUGIN_CACHE_DIR }}
key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }}
- - name: Terraform fmt
- id: fmt
- run: terraform fmt -recursive -check
- continue-on-error: true
+
- name: Terraform Init
id: init
run: terraform init -input=false
+
- name: Terraform Validate
id: validate
run: terraform validate
+
- name: Terraform Plan
id: plan
run: terraform plan -out=tfplan
- continue-on-error: true
- - uses: actions/github-script@v7
- if: github.event_name == 'pull_request'
- env:
- PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- script: |
- const { data: comments } = await github.rest.issues.listComments({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: context.issue.number,
- })
- const botComment = comments.find(comment => {
- return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style')
- })
- const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
- #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
- #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
- Validation Output
- \`\`\`\n
- ${{ steps.validate.outputs.stdout }}
- \`\`\`
-
-
-
- #### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
-
- Show Plan
-
- \`\`\`\n
- ${process.env.PLAN}
- \`\`\`
-
-
-
- *Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`, Working Directory: \`${{ env.tf_actions_working_dir }}\`, Workflow: \`${{ github.workflow }}\`*`;
- if (botComment) {
- github.rest.issues.updateComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- comment_id: botComment.id,
- body: output
- })
- } else {
- github.rest.issues.createComment({
- issue_number: context.issue.number,
- owner: context.repo.owner,
- repo: context.repo.repo,
- body: output
- })
- }
- - name: Terraform apply
+ - name: Terraform Apply
if: github.ref == 'refs/heads/main' && steps.plan.outcome == 'success'
run: terraform apply -auto-approve tfplan
diff --git a/.gitignore b/.gitignore
index bb002f3..402a011 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,4 @@ override.tf.json
.terraform.tfstate.lock.info
.terraformrc
terraform.rc
+.env
diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index 4299d9f..0f209ba 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -2,21 +2,21 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/nomad" {
- version = "2.2.0"
- constraints = "2.2.0"
+ version = "2.4.0"
+ constraints = "2.4.0"
hashes = [
- "h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
- "zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
- "zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
- "zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
- "zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
- "zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
- "zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
- "zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
+ "h1:MnNLz6rQIqiLk6EKf5XjJlE5S/wmqh+z0A4ndDHWEZA=",
+ "zh:0825c5d2e6cb6a92aa247366f10c74275fdf9027bdb874d374aa3a9c3983ec68",
+ "zh:0c939ce35dce82da62c4cc8642903b43292c9915ac1a13099885e7c89edb87ae",
+ "zh:23dd5d8300e7d6b42a1a55a541296227b3c054ad19dc8a6eb411ef8b2d689f5e",
+ "zh:26b76c1d2c25f1b9730d5b6fe0fce355a0a5f666c0818f7284d9663ee556adec",
+ "zh:4915e1f176c4aa910504113629dbe25120a3915e703cb8f8b637dd2d20a4ad6f",
+ "zh:4f9d3bb2e97c9a4a135aa9d8d65f37902a7f838655e21cc22fffc8ebab8d2d66",
+ "zh:51dad4566c56b9bbe0a59c25287f3e14c35b5fbfde167fdab6ae98dfc23a6ae1",
+ "zh:56a7f7939bc41dbcdadf1fbbc7096090a26aa38552060cef472c82181af26cc8",
+ "zh:68bc2c7d28e1a7de2655e194423e911433ea8f3b87ab0a54ed1833f44ef63bb5",
+ "zh:75f9d9c4c031c3ac83b2c2cf37163edf3b8eea9f58a379d1b83d096f0b3d98cc",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
- "zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
- "zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
- "zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
- "zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
+ "zh:fea4d2a0df951ab8fad4d2f8da0e2c2198e93220cf9269d5c51d80db1988ae52",
]
}
diff --git a/main.tf b/main.tf
index 53a08dc..cab2e4b 100644
--- a/main.tf
+++ b/main.tf
@@ -10,6 +10,22 @@ module "media-centre" {
source = "./modules/media-centre"
}
-module "uptime" {
- source = "./modules/uptime"
+module "plextraktsync" {
+ source = "./modules/plextraktsync"
+}
+
+module "matrix" {
+ source = "./modules/matrix"
+}
+
+module "elk" {
+ source = "./modules/elk"
+}
+
+module "renovate" {
+ source = "./modules/renovate"
+}
+
+module "plugin-csi" {
+ source = "./modules/plugin-csi"
}
diff --git a/modules/dummy/jobspec.json b/modules/dummy/jobspec.json
deleted file mode 100644
index ba1f007..0000000
--- a/modules/dummy/jobspec.json
+++ /dev/null
@@ -1,62 +0,0 @@
-{
- "ID": "hello-world",
- "Name": "hello-world",
- "TaskGroups": [
- {
- "Name": "servers",
- "Tasks": [
- {
- "Name": "web",
- "Driver": "docker",
- "Config": {
- "image": "busybox:1",
- "command": "httpd",
- "args": [
- "-v",
- "-f",
- "-p",
- "${NOMAD_PORT_www}",
- "-h",
- "/local"
- ],
- "ports": [
- "www"
- ]
- },
- "Templates": [
- {
- "DestPath": "local/index.html",
-            "EmbeddedTmpl": "<h1>Hello, Ben!</h1>\n"
- }
- ],
- "Resources": {
- "CPU": 50,
- "MemoryMB": 64
- }
- }
- ],
- "Networks": [
- {
- "DynamicPorts": [
- {
- "Label": "www",
- "To": 8001
- }
- ]
- }
- ],
- "Services": [
- {
- "PortLabel": "www",
- "Provider": "nomad",
- "Name": "web",
- "Tags": [
- "traefik.enable=true",
- "traefik.http.routers.web.entrypoints=websecure",
- "traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)"
- ]
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/modules/dummy/jobspec.nomad.hcl b/modules/dummy/jobspec.nomad.hcl
new file mode 100644
index 0000000..12dc396
--- /dev/null
+++ b/modules/dummy/jobspec.nomad.hcl
@@ -0,0 +1,45 @@
+job "hello-world" {
+ datacenters = ["dc1"]
+
+ group "servers" {
+ count = 1
+
+ task "web" {
+ driver = "docker"
+
+ config {
+ image = "busybox:1.37.0"
+ command = "httpd"
+ args = ["-v", "-f", "-p", "${NOMAD_PORT_www}", "-h", "/local"]
+ ports = ["www"]
+ }
+
+ template {
+ destination = "local/index.html"
+        data        = "<h1>Hello, Ben!</h1>\n"
+ }
+
+ resources {
+ cpu = 50
+ memory = 64
+ }
+ }
+
+ network {
+ mode = "bridge"
+ port "www" {
+ to = 8001
+ }
+ }
+
+ service {
+ port = "www"
+ provider = "consul"
+
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)",
+ ]
+ }
+ }
+}
diff --git a/modules/dummy/main.tf b/modules/dummy/main.tf
index 8cd9998..ad91892 100644
--- a/modules/dummy/main.tf
+++ b/modules/dummy/main.tf
@@ -1,4 +1,3 @@
resource "nomad_job" "dummy" {
- jobspec = file("${path.module}/jobspec.json")
- json = true
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
}
\ No newline at end of file
diff --git a/modules/elk/jobspec.nomad.hcl b/modules/elk/jobspec.nomad.hcl
new file mode 100644
index 0000000..c5d29ab
--- /dev/null
+++ b/modules/elk/jobspec.nomad.hcl
@@ -0,0 +1,140 @@
+variable "elastic_version" {
+ type = string
+}
+
+job "elk" {
+
+ group "node" {
+
+ count = 3
+
+ constraint {
+ distinct_hosts = true
+ }
+
+ network {
+ mode = "bridge"
+ port "http" {
+ static = 9200
+ }
+ port "transport" {
+ static = 9300
+ }
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "9200"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {
+ exclude_inbound_ports = ["9200", "9300"]
+ exclude_outbound_ports = [9200, 9300]
+ }
+ }
+ }
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "transport"
+ }
+
+ task "elasticsearch" {
+ driver = "docker"
+
+ config {
+ image = "docker.elastic.co/elasticsearch/elasticsearch:${var.elastic_version}"
+
+ ports = ["9200", "9300"]
+
+ volumes = [
+ "/mnt/docker/elastic-${node.unique.name}/config:/usr/share/elasticsearch/config",
+ "/mnt/docker/elastic-${node.unique.name}/data:/usr/share/elasticsearch/data",
+ ]
+
+ ulimit {
+ memlock = "-1:-1"
+ }
+ }
+
+ resources {
+ cpu = 2000
+ memory = 2048
+ }
+ }
+ }
+
+ group "kibana" {
+
+ count = 2
+
+ constraint {
+ distinct_hosts = true
+ }
+
+ network {
+ mode = "bridge"
+ port "web" {
+ static = 5601
+ }
+ }
+
+ task "kibana" {
+ driver = "docker"
+
+ config {
+ image = "docker.elastic.co/kibana/kibana:${var.elastic_version}"
+
+ ports = ["web"]
+
+ volumes = [
+ "/mnt/docker/elastic/kibana/config:/usr/share/kibana/config",
+ ]
+ }
+
+ resources {
+ cpu = 1500
+ memory = 1024
+ }
+
+ service {
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.kibana.rule=Host(`kibana.brmartin.co.uk`)",
+ "traefik.http.routers.kibana.entrypoints=websecure",
+ ]
+
+ port = "web"
+ address_mode = "host"
+ provider = "consul"
+
+ check {
+ type = "http"
+ path = "/api/status"
+ interval = "10s"
+ timeout = "2s"
+ on_update = "ignore"
+ }
+ }
+ }
+ }
+}
diff --git a/modules/elk/main.tf b/modules/elk/main.tf
new file mode 100644
index 0000000..eb87d49
--- /dev/null
+++ b/modules/elk/main.tf
@@ -0,0 +1,9 @@
+resource "nomad_job" "elk" {
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
+
+ hcl2 {
+ vars = {
+ "elastic_version" = "8.16.1",
+ }
+ }
+}
diff --git a/modules/matrix/jobspec.nomad.hcl b/modules/matrix/jobspec.nomad.hcl
new file mode 100644
index 0000000..cbbfeab
--- /dev/null
+++ b/modules/matrix/jobspec.nomad.hcl
@@ -0,0 +1,618 @@
+job "matrix" {
+
+ meta = {
+ "service.type" = "matrix"
+ }
+
+ group "synapse" {
+
+ network {
+ mode = "bridge"
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "8008"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ check {
+ type = "http"
+ path = "/health"
+ interval = "20s"
+ timeout = "5s"
+ expose = true
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ config {
+ protocol = "http"
+ local_idle_timeout_ms = 120000
+ }
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {}
+ }
+ }
+ }
+ }
+
+ task "synapse" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/element-hq/synapse:v1.120.2"
+
+ ports = ["8008"]
+
+ volumes = [
+ "/mnt/docker/matrix/synapse:/data",
+ "/mnt/docker/matrix/media_store:/media_store",
+ ]
+ }
+
+ env = {
+ SYNAPSE_WORKER = "synapse.app.homeserver"
+ }
+
+ template {
+ data = <<-EOF
+ id: whatsapp
+ url: http://matrix-whatsapp-bridge.virtual.consul
+ {{with nomadVar "nomad/jobs/matrix/synapse/synapse"}}
+        as_token: "{{.as_token}}"
+        hs_token: "{{.hs_token}}"
+ {{end}}
+ sender_localpart: ctvppZV8epjY9iUtTt0nR29e92V4nIJb
+ rate_limited: false
+ namespaces:
+ users:
+ - regex: ^@whatsappbot:brmartin\.co\.uk$
+ exclusive: true
+ - regex: ^@whatsapp_.*:brmartin\.co\.uk$
+ exclusive: true
+ de.sorunome.msc2409.push_ephemeral: true
+ receive_ephemeral: true
+ EOF
+
+ destination = "local/matrix-whatsapp-registration.yaml"
+ }
+
+ resources {
+ cpu = 500
+ memory = 128
+ memory_max = 256
+ }
+
+ meta = {
+ "service.name" = "synapse"
+ }
+ }
+ }
+
+ group "whatsapp-bridge" {
+
+ network {
+ mode = "bridge"
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "8082"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ config {
+ protocol = "http"
+ }
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {}
+ }
+ }
+ }
+ }
+
+ task "whatsapp-bridge" {
+ driver = "docker"
+
+ config {
+ image = "dock.mau.dev/mautrix/whatsapp:v0.11.1"
+
+ ports = ["8082"]
+
+ volumes = [
+ "/mnt/docker/matrix/whatsapp-data:/data"
+ ]
+ }
+
+ resources {
+ cpu = 50
+ memory = 16
+ memory_max = 32
+ }
+
+ meta = {
+ "service.name" = "whatsapp"
+ }
+ }
+ }
+
+ group "mas" {
+
+ network {
+ mode = "bridge"
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ port = "8081"
+ provider = "consul"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ config {
+ protocol = "http"
+ }
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {}
+ }
+ }
+ }
+ }
+
+ task "mas" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/matrix-org/matrix-authentication-service:main"
+ force_pull = true
+
+ ports = ["8081"]
+
+ volumes = [
+ "/mnt/docker/matrix/synapse-mas/config.yaml:/config.yaml:ro"
+ ]
+ }
+
+ env {
+ MAS_CONFIG = "/config.yaml"
+ }
+
+ resources {
+ cpu = 100
+ memory = 32
+ memory_max = 64
+ }
+
+ meta = {
+ "service.name" = "mas"
+ }
+ }
+ }
+
+ group "syncv3" {
+
+ network {
+ mode = "bridge"
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "8008"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ config {
+ protocol = "http"
+ }
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {}
+ }
+ }
+ }
+ }
+
+ task "syncv3" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/matrix-org/sliding-sync:v0.99.19"
+
+ ports = ["8008"]
+ }
+
+ env = {
+ SYNCV3_SERVER = "http://synapse.service.consul"
+ }
+
+ template {
+ data = <<-EOH
+ {{with nomadVar "nomad/jobs/matrix/syncv3/syncv3"}}
+ SYNCV3_SECRET="{{.SYNCV3_SECRET}}"
+ SYNCV3_DB="{{.SYNCV3_DB}}"
+ {{end}}
+ EOH
+
+ destination = "secrets/file.env"
+ env = true
+ }
+
+ resources {
+ cpu = 50
+ memory = 16
+ memory_max = 32
+ }
+
+ meta = {
+ "service.name" = "syncv3"
+ }
+ }
+ }
+
+ group "nginx" {
+
+ network {
+ mode = "bridge"
+ port "nginx" {
+ to = 80
+ }
+ port "envoy_metrics" {
+ to = 9102
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "80"
+
+ meta {
+ envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
+ }
+
+ connect {
+ sidecar_service {
+ proxy {
+ config {
+ protocol = "http"
+ local_idle_timeout_ms = 120000
+ }
+ expose {
+ path {
+ path = "/metrics"
+ protocol = "http"
+ local_path_port = 9102
+ listener_port = "envoy_metrics"
+ }
+ }
+ transparent_proxy {}
+ }
+ }
+ }
+ }
+
+ task "nginx" {
+ driver = "docker"
+
+ config {
+ image = "docker.io/library/nginx:1.27.3-alpine"
+
+ ports = ["80"]
+
+ volumes = [
+ "/mnt/docker/matrix/nginx/templates:/etc/nginx/templates:ro",
+ "/mnt/docker/matrix/nginx/html:/usr/share/nginx/html:ro",
+ ]
+ }
+
+ env = {
+ NGINX_PORT = "80"
+ }
+
+ resources {
+ cpu = 50
+ memory = 16
+ }
+
+ meta = {
+ "service.name" = "nginx"
+ }
+ }
+ }
+
+ group "synapse-ingress-group" {
+
+ network {
+ mode = "bridge"
+ port "inbound" {
+ to = 8080
+ }
+ }
+
+ service {
+ port = "inbound"
+ tags = [
+ "traefik.enable=true",
+
+ "traefik.http.routers.synapse.rule=Host(`matrix.brmartin.co.uk`)",
+ "traefik.http.routers.synapse.entrypoints=websecure",
+ "traefik.http.routers.synapse.middlewares=synapseHeaders,synapseBuffering",
+ "traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowmethods=GET,POST,PUT,DELETE,OPTIONS",
+ "traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowheaders=Origin,X-Requested-With,Content-Type,Accept,Authorization",
+ "traefik.http.middlewares.synapseHeaders.headers.accesscontrolalloworiginlist=*",
+ "traefik.http.middlewares.synapseBuffering.buffering.maxRequestBodyBytes=1000000000",
+ ]
+
+ connect {
+ gateway {
+ proxy {
+ config {
+ local_idle_timeout_ms = 120000
+ }
+ }
+ ingress {
+ listener {
+ port = 8080
+ protocol = "http"
+ service {
+ name = "matrix-synapse"
+ hosts = ["*"]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ group "mas-ingress-group" {
+
+ network {
+ mode = "bridge"
+ port "inbound" {
+ to = 8080
+ }
+ }
+
+ service {
+ port = "inbound"
+ tags = [
+ "traefik.enable=true",
+
+ "traefik.http.routers.mas.rule=Host(`mas.brmartin.co.uk`) || (Host(`matrix.brmartin.co.uk`) && PathRegexp(`^/_matrix/client/(.*)/(login|logout|refresh)`))",
+ "traefik.http.routers.mas.entrypoints=websecure",
+ ]
+
+ connect {
+ gateway {
+ ingress {
+ listener {
+ port = 8080
+ protocol = "http"
+ service {
+ name = "matrix-mas"
+ hosts = ["*"]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ group "wellknown-ingress-group" {
+
+ network {
+ mode = "bridge"
+ port "inbound" {
+ to = 8080
+ }
+ }
+
+ service {
+ port = "inbound"
+ tags = [
+ "traefik.enable=true",
+
+ "traefik.http.routers.matrixWellKnown.rule=PathPrefix(`/.well-known/matrix`)",
+ "traefik.http.routers.matrixWellKnown.entrypoints=websecure",
+ "traefik.http.routers.matrixWellKnown.middlewares=matrixWellKnown",
+ "traefik.http.middlewares.matrixWellKnown.headers.accesscontrolalloworiginlist=*",
+ ]
+
+ connect {
+ gateway {
+ ingress {
+ listener {
+ port = 8080
+ protocol = "http"
+ service {
+ name = "matrix-nginx"
+ hosts = ["*"]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ group "syncv3-ingress-group" {
+
+ network {
+ mode = "bridge"
+ port "inbound" {
+ to = 8080
+ }
+ }
+
+ service {
+ port = "inbound"
+ tags = [
+ "traefik.enable=true",
+
+ "traefik.http.routers.matrixsyncv3.rule=Host(`matrix.brmartin.co.uk`) && (PathPrefix(`/client`) || PathPrefix(`/_matrix/client/unstable/org.matrix.msc3575/sync`))",
+ "traefik.http.routers.matrixsyncv3.entrypoints=websecure",
+ ]
+
+ connect {
+ gateway {
+ ingress {
+ listener {
+ port = 8080
+ protocol = "http"
+ service {
+ name = "matrix-syncv3"
+ hosts = ["*"]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ group "element" {
+
+ network {
+ port "element" {
+ to = 80
+ }
+ }
+
+ task "element" {
+ driver = "docker"
+
+ config {
+ image = "docker.io/vectorim/element-web:v1.11.87"
+
+ ports = ["element"]
+
+ volumes = [
+ "/mnt/docker/matrix/element/config.json:/app/config.json:ro"
+ ]
+ }
+
+ resources {
+ cpu = 100
+ memory = 16
+ }
+
+ service {
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.element.rule=Host(`element.brmartin.co.uk`)",
+ "traefik.http.routers.element.entrypoints=websecure",
+ ]
+
+ port = "element"
+ address_mode = "host"
+ provider = "consul"
+ }
+
+ meta = {
+ "service.name" = "element"
+ }
+ }
+ }
+
+ group "cinny" {
+
+ network {
+ port "cinny" {
+ to = 80
+ }
+ }
+
+ task "cinny" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/cinnyapp/cinny:v4.2.3"
+
+ ports = ["cinny"]
+
+ volumes = [
+ "/mnt/docker/matrix/cinny/config.json:/app/config.json:ro"
+ ]
+ }
+
+ resources {
+ cpu = 50
+ memory = 16
+ }
+
+ service {
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.cinny.rule=Host(`cinny.brmartin.co.uk`)",
+ "traefik.http.routers.cinny.entrypoints=websecure",
+ ]
+
+ port = "cinny"
+ address_mode = "host"
+ provider = "consul"
+ }
+
+ meta = {
+ "service.name" = "cinny"
+ }
+ }
+ }
+}
diff --git a/modules/matrix/main.tf b/modules/matrix/main.tf
new file mode 100644
index 0000000..67964cb
--- /dev/null
+++ b/modules/matrix/main.tf
@@ -0,0 +1,3 @@
+resource "nomad_job" "matrix" {
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
+}
diff --git a/modules/media-centre/jobspec.json b/modules/media-centre/jobspec.json
deleted file mode 100644
index 89044b7..0000000
--- a/modules/media-centre/jobspec.json
+++ /dev/null
@@ -1,84 +0,0 @@
-{
- "ID": "media-centre",
- "Name": "Media Centre",
- "TaskGroups": [
- {
- "Name": "Media Servers",
- "Tasks": [
- {
- "Name": "Jellyfin",
- "User": "985",
- "Driver": "docker",
- "Config": {
- "image": "jellyfin/jellyfin",
- "runtime": "nvidia",
- "group_add": [
- "997"
- ],
- "ports": [
- "jellyfin"
- ],
- "mounts": [
- {
- "type": "volume",
- "target": "/media",
- "volume_options": {
- "driver_config": {
- "name": "local",
- "options": [
- {
- "type": "nfs",
- "o": "addr=martinibar.lan,nolock,soft,rw",
- "device": ":/volume1/docker"
- }
- ]
- }
- }
- },
- {
- "type": "volume",
- "target": "/config",
- "source": "jellyfin-config"
- }
- ]
- },
- "Env": {
- "JELLYFIN_PublishedServerUrl": "192.168.1.5"
- },
- "Resources": {
- "CPU": 1200,
- "MemoryMB": 4096,
- "Devices": [
- {
- "Name": "nvidia/gpu",
- "Count": 1
- }
- ]
- }
- }
- ],
- "Services": [
- {
- "Name": "Jellyfin",
- "Provider": "nomad",
- "PortLabel": "jellyfin",
- "Tags": [
- "traefik.enable=true",
- "traefik.http.routers.jellyfin.entrypoints=websecure",
- "traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)"
- ]
- }
- ],
- "Networks": [
- {
- "DynamicPorts": [
- {
- "Label": "jellyfin",
- "To": 8096
- }
- ]
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/modules/media-centre/jobspec.nomad.hcl b/modules/media-centre/jobspec.nomad.hcl
new file mode 100644
index 0000000..b73f625
--- /dev/null
+++ b/modules/media-centre/jobspec.nomad.hcl
@@ -0,0 +1,203 @@
+job "media-centre" {
+ group "jellyfin" {
+ task "jellyfin" {
+ user = "985"
+ driver = "docker"
+
+ constraint {
+ attribute = "${node.unique.id}"
+ value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e"
+ }
+
+ config {
+ image = "ghcr.io/jellyfin/jellyfin:10.10.3"
+ runtime = "nvidia"
+ group_add = ["997"]
+ ports = ["jellyfin"]
+
+ mount {
+ type = "volume"
+ target = "/media"
+ volume_options {
+ driver_config {
+ name = "local"
+ options {
+ type = "nfs"
+ o = "addr=martinibar.lan,nolock,soft,rw"
+ device = ":/volume1/docker"
+ }
+ }
+ }
+ }
+
+ mount {
+ type = "volume"
+ target = "/config"
+ source = "jellyfin-config"
+ }
+ }
+
+ env {
+ JELLYFIN_PublishedServerUrl = "https://jellyfin.brmartin.co.uk"
+ NVIDIA_DRIVER_CAPABILITIES = "all"
+ NVIDIA_VISIBLE_DEVICES = "all"
+ }
+
+ resources {
+ cpu = 1200
+ memory = 4096
+ }
+ }
+
+ service {
+ name = "Jellyfin"
+ provider = "consul"
+ port = "jellyfin"
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.jellyfin.entrypoints=websecure",
+ "traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)"
+ ]
+ }
+
+ network {
+ port "jellyfin" {
+ to = 8096
+ }
+ }
+ }
+
+ group "plex" {
+ task "plex" {
+ driver = "docker"
+
+ constraint {
+ attribute = "${node.unique.id}"
+ value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e"
+ }
+
+ config {
+ image = "plexinc/pms-docker:latest"
+ runtime = "nvidia"
+ ports = ["plex"]
+ network_mode = "host"
+
+ mount {
+ type = "volume"
+ target = "/data"
+ volume_options {
+ driver_config {
+ name = "local"
+ options {
+ type = "nfs"
+ o = "addr=martinibar.lan,nolock,soft,rw"
+ device = ":/volume1/docker"
+ }
+ }
+ }
+ }
+
+ mount {
+ type = "volume"
+ target = "/share"
+ volume_options {
+ driver_config {
+ name = "local"
+ options {
+ type = "nfs"
+ o = "addr=martinibar.lan,nolock,soft,rw"
+ device = ":/volume1/Share"
+ }
+ }
+ }
+ }
+
+ mount {
+ type = "volume"
+ target = "/config"
+ source = "plex-config"
+ }
+
+ mount {
+ type = "volume"
+ target = "/transcode"
+ source = "plex-transcode"
+ }
+ }
+
+ env {
+ TZ = "Europe/London"
+ CHANGE_CONFIG_DIR_OWNERSHIP = "false"
+ PLEX_UID = "990"
+ PLEX_GID = "997"
+ NVIDIA_DRIVER_CAPABILITIES = "all"
+ NVIDIA_VISIBLE_DEVICES = "all"
+ }
+
+ resources {
+ cpu = 1200
+ memory = 4096
+ }
+ }
+
+ service {
+ name = "Plex"
+ provider = "consul"
+ port = "plex"
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.plex.entrypoints=websecure",
+ "traefik.http.routers.plex.rule=Host(`plex.brmartin.co.uk`)"
+ ]
+ }
+
+ network {
+ port "plex" {
+ static = 32400
+ }
+ }
+ }
+
+ group "tautulli" {
+ task "tautulli" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/tautulli/tautulli:v2.15.0"
+ ports = ["tautulli"]
+
+ volumes = [
+ "/mnt/docker/downloads/config/tautulli:/config",
+ ]
+ }
+
+ env {
+ PUID = "994"
+ PGID = "997"
+ TZ = "Europe/London"
+ }
+
+ resources {
+ cpu = 100
+ memory = 128
+ memory_max = 256
+ }
+ }
+
+ service {
+ provider = "consul"
+ port = "tautulli"
+ tags = [
+ "traefik.enable=true",
+ "traefik.http.routers.tautulli.entrypoints=websecure",
+ "traefik.http.routers.tautulli.rule=Host(`tautulli.brmartin.co.uk`)"
+ ]
+ }
+
+ network {
+ port "tautulli" {
+ to = 8181
+ }
+ }
+ }
+}
diff --git a/modules/media-centre/main.tf b/modules/media-centre/main.tf
index f285e4d..f35b8ca 100644
--- a/modules/media-centre/main.tf
+++ b/modules/media-centre/main.tf
@@ -1,4 +1,3 @@
resource "nomad_job" "media-centre" {
- jobspec = file("${path.module}/jobspec.json")
- json = true
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
}
diff --git a/modules/plextraktsync/jobspec.nomad.hcl b/modules/plextraktsync/jobspec.nomad.hcl
new file mode 100644
index 0000000..8e2ace9
--- /dev/null
+++ b/modules/plextraktsync/jobspec.nomad.hcl
@@ -0,0 +1,28 @@
+job "plextraktsync" {
+ type = "batch"
+
+ periodic {
+ crons = ["0 0/2 * * *"]
+ prohibit_overlap = true
+ }
+
+
+ group "plextraktsync" {
+ task "plextraktsync" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/taxel/plextraktsync:0.32.2"
+ volumes = [
+ "/mnt/docker/downloads/config/plextraktsync:/app/config"
+ ]
+ command = "sync"
+ }
+
+ resources {
+ cpu = 2000
+ memory = 128
+ }
+ }
+ }
+}
diff --git a/modules/plextraktsync/main.tf b/modules/plextraktsync/main.tf
new file mode 100644
index 0000000..b530264
--- /dev/null
+++ b/modules/plextraktsync/main.tf
@@ -0,0 +1,3 @@
+resource "nomad_job" "plextraktsync" {
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
+}
diff --git a/modules/plugin-csi/jobspec-controller.nomad.hcl b/modules/plugin-csi/jobspec-controller.nomad.hcl
new file mode 100644
index 0000000..c52f42b
--- /dev/null
+++ b/modules/plugin-csi/jobspec-controller.nomad.hcl
@@ -0,0 +1,28 @@
+job "plugin-nfs-controller" {
+ group "controller" {
+ task "plugin" {
+ driver = "docker"
+
+ config {
+ image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
+
+ args = [
+ "--endpoint=unix://csi/csi.sock",
+ "--nodeid=${attr.unique.hostname}",
+ "--v=5",
+ ]
+ }
+
+ csi_plugin {
+ id = "nfs"
+ type = "controller"
+ mount_dir = "/csi"
+ }
+
+ resources {
+ cpu = 100
+ memory = 128
+ }
+ }
+ }
+}
diff --git a/modules/plugin-csi/jobspec-nodes.nomad.hcl b/modules/plugin-csi/jobspec-nodes.nomad.hcl
new file mode 100644
index 0000000..fed4897
--- /dev/null
+++ b/modules/plugin-csi/jobspec-nodes.nomad.hcl
@@ -0,0 +1,31 @@
+job "plugin-nfs-nodes" {
+ type = "system"
+
+ group "nodes" {
+ task "plugin" {
+ driver = "docker"
+
+ config {
+ image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
+ privileged = true
+
+ args = [
+ "--endpoint=unix://csi/csi.sock",
+ "--nodeid=${attr.unique.hostname}",
+ "--v=5",
+ ]
+ }
+
+ csi_plugin {
+ id = "nfs"
+ type = "node"
+ mount_dir = "/csi"
+ }
+
+ resources {
+ cpu = 100
+ memory = 128
+ }
+ }
+ }
+}
diff --git a/modules/plugin-csi/main.tf b/modules/plugin-csi/main.tf
new file mode 100644
index 0000000..b54434c
--- /dev/null
+++ b/modules/plugin-csi/main.tf
@@ -0,0 +1,7 @@
+resource "nomad_job" "nfs-controller" {
+ jobspec = file("${path.module}/jobspec-controller.nomad.hcl")
+}
+
+resource "nomad_job" "nfs-nodes" {
+ jobspec = file("${path.module}/jobspec-nodes.nomad.hcl")
+}
diff --git a/modules/renovate/jobspec.nomad.hcl b/modules/renovate/jobspec.nomad.hcl
new file mode 100644
index 0000000..1f55e2f
--- /dev/null
+++ b/modules/renovate/jobspec.nomad.hcl
@@ -0,0 +1,44 @@
+job "renovate" {
+ type = "batch"
+
+ periodic {
+ crons = ["0 * * * *"]
+ prohibit_overlap = true
+ }
+
+ group "renovate" {
+ task "renovate" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/renovatebot/renovate:39.60.0"
+ }
+
+ resources {
+ cpu = 2000
+ memory = 512
+ }
+
+ env {
+ RENOVATE_PLATFORM = "gitea"
+ RENOVATE_AUTODISCOVER = "true"
+ RENOVATE_ENDPOINT = "https://git.brmartin.co.uk"
+        RENOVATE_GIT_AUTHOR          = "Renovate Bot <renovate@brmartin.co.uk>"
+ LOG_FORMAT = "json"
+ RENOVATE_DEPENDENCY_DASHBOARD = "true"
+ }
+
+ template {
+ data = <<-EOH
+ {{with nomadVar "nomad/jobs/renovate/renovate/renovate" }}
+ RENOVATE_TOKEN = "{{.RENOVATE_TOKEN}}"
+ GITHUB_COM_TOKEN = "{{.GITHUB_COM_TOKEN}}"
+ {{end}}
+ EOH
+
+ destination = "secrets/file.env"
+ env = true
+ }
+ }
+ }
+}
diff --git a/modules/renovate/main.tf b/modules/renovate/main.tf
new file mode 100644
index 0000000..e2c8564
--- /dev/null
+++ b/modules/renovate/main.tf
@@ -0,0 +1,3 @@
+resource "nomad_job" "renovate" {
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
+}
diff --git a/modules/seedbox/jobspec.nomad.hcl b/modules/seedbox/jobspec.nomad.hcl
new file mode 100644
index 0000000..2e578a2
--- /dev/null
+++ b/modules/seedbox/jobspec.nomad.hcl
@@ -0,0 +1,117 @@
+job "seedbox" {
+
+ group "proxy" {
+
+ task "proxy" {
+ driver = "docker"
+
+ config {
+ image = "docker.io/qmcgaw/gluetun:v3.39.1"
+ force_pull = true
+
+ cap_add = ["NET_ADMIN"]
+
+ sysctl = {
+ "net.ipv6.conf.all.disable_ipv6" = "1"
+ }
+ }
+
+ resources {
+ cpu = 100
+ memory = 128
+ memory_max = 512
+ }
+
+ env {
+ VPN_SERVICE_PROVIDER = "ipvanish"
+ SERVER_COUNTRIES = "Switzerland"
+ HTTPPROXY = "on"
+ }
+
+ template {
+ data = <<-EOH
+ {{with nomadVar "nomad/jobs/seedbox/proxy/proxy" }}
+ OPENVPN_USER = "{{.OPENVPN_USER}}"
+ OPENVPN_PASSWORD = "{{.OPENVPN_PASSWORD}}"
+ {{end}}
+ EOH
+
+ destination = "secrets/file.env"
+ env = true
+ }
+ }
+ }
+
+ group "client" {
+
+ network {
+ port "qbittorrent" {}
+ }
+
+ service {
+ port = "qbittorrent"
+
+ provider = "consul"
+
+ check {
+ type = "http"
+ path = "/"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+
+ volume "media" {
+ type = "csi"
+ source = "media"
+ attachment_mode = "file-system"
+ access_mode = "single-node-writer"
+
+ mount_options {
+ mount_flags = ["nolock"]
+ }
+ }
+
+ volume "qbittorrent_config" {
+ type = "csi"
+ source = "qbittorrent_config"
+ attachment_mode = "file-system"
+ access_mode = "single-node-writer"
+
+ mount_options {
+ mount_flags = ["nolock"]
+ }
+ }
+
+ task "qbittorrent" {
+ driver = "docker"
+
+ config {
+ image = "ghcr.io/linuxserver/qbittorrent:5.0.2"
+ }
+
+ resources {
+ cpu = 500
+ memory = 128
+ }
+
+ env {
+ PUID = "991"
+ PGID = "997"
+ WEBUI_PORT = "${NOMAD_PORT_qbittorrent}"
+ TZ = "Europe/London"
+ DOCKER_MODS = "ghcr.io/vuetorrent/vuetorrent-lsio-mod:latest"
+ }
+
+ volume_mount {
+ volume = "media"
+ destination = "/media"
+ }
+
+ volume_mount {
+ volume = "qbittorrent_config"
+ destination = "/config"
+ }
+ }
+ }
+}
diff --git a/modules/seedbox/main.tf b/modules/seedbox/main.tf
new file mode 100644
index 0000000..f0b427c
--- /dev/null
+++ b/modules/seedbox/main.tf
@@ -0,0 +1,59 @@
+resource "nomad_job" "seedbox" {
+ depends_on = [
+ nomad_csi_volume_registration.nfs_volume_media,
+ nomad_csi_volume_registration.nfs_volume_qbittorrent_config,
+ ]
+
+ jobspec = file("${path.module}/jobspec.nomad.hcl")
+}
+
+data "nomad_plugin" "nfs" {
+ plugin_id = "nfs"
+ wait_for_healthy = true
+}
+
+resource "nomad_csi_volume_registration" "nfs_volume_media" {
+ depends_on = [data.nomad_plugin.nfs]
+
+ lifecycle {
+ prevent_destroy = true
+ }
+
+ plugin_id = "nfs"
+ name = "media"
+ volume_id = "media"
+ external_id = "media"
+
+ capability {
+ access_mode = "single-node-writer"
+ attachment_mode = "file-system"
+ }
+
+ context = {
+ "server" = "martinibar.lan",
+ "share" = "/volume1/csi/media",
+ }
+}
+
+resource "nomad_csi_volume_registration" "nfs_volume_qbittorrent_config" {
+ depends_on = [data.nomad_plugin.nfs]
+
+ lifecycle {
+ prevent_destroy = true
+ }
+
+ plugin_id = "nfs"
+ name = "qbittorrent_config"
+ volume_id = "qbittorrent_config"
+ external_id = "qbittorrent_config"
+
+ capability {
+ access_mode = "single-node-writer"
+ attachment_mode = "file-system"
+ }
+
+ context = {
+ "server" = "martinibar.lan",
+ "share" = "/volume1/csi/qbittorrent_config",
+ }
+}
diff --git a/modules/uptime/jobspec.json b/modules/uptime/jobspec.json
deleted file mode 100644
index 6faa539..0000000
--- a/modules/uptime/jobspec.json
+++ /dev/null
@@ -1,63 +0,0 @@
-{
- "ID": "uptime",
- "Name": "Uptime",
- "TaskGroups": [
- {
- "Name": "Uptime Servers",
- "Tasks": [
- {
- "Name": "kuma",
- "Driver": "docker",
- "Config": {
- "image": "louislam/uptime-kuma:latest",
- "ports": [
- "web"
- ],
- "mounts": [
- {
- "type": "volume",
- "target": "/app/data",
- "source": "kuma-data"
- },
- {
- "type": "bind",
- "target": "/var/run/docker.sock",
- "source": "/var/run/docker.sock"
- }
- ],
- "extra_hosts": [
- "host.docker.internal:host-gateway"
- ]
- },
- "Resources": {
- "CPU": 500,
- "MemoryMB": 512
- }
- }
- ],
- "Services": [
- {
- "Name": "Kuma",
- "Provider": "nomad",
- "PortLabel": "web",
- "Tags": [
- "traefik.enable=true",
- "traefik.http.routers.kuma.entrypoints=websecure",
- "traefik.http.routers.kuma.rule=Host(`status.brmartin.co.uk`)"
- ]
-
- }
- ],
- "Networks": [
- {
- "DynamicPorts": [
- {
- "Label": "web",
- "To": 3001
- }
- ]
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/modules/uptime/main.tf b/modules/uptime/main.tf
deleted file mode 100644
index 2726e34..0000000
--- a/modules/uptime/main.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-resource "nomad_job" "uptime" {
- jobspec = file("${path.module}/jobspec.json")
- json = true
-}
diff --git a/provider.tf b/provider.tf
index 80f6028..b96c2a0 100644
--- a/provider.tf
+++ b/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = "2.2.0"
+ version = "2.4.0"
}
}
}
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 0000000..8221b48
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,13 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "customManagers": [
+ {
+ "customType": "regex",
+ "datasourceTemplate": "docker",
+ "fileMatch": ["\\.hcl$"],
+ "matchStrings": [
+        "\\s*image\\s*=\\s*\\\"(?<depName>.*?):(?<currentValue>.*?)\\\""
+ ]
+ }
+ ]
+}