feat(matrix): add Nomad job specification for Matrix service

feat(matrix): update resource allocation in Nomad job specification

feat(matrix): onboard element service to traefik

feat(matrix): add port configuration for Element service

chore(matrix): reformat

feat(matrix): update resource allocation in Nomad job specification

fix(matrix): minimum MemoryMB value is 10

feat(matrix): update resource allocation in Nomad job specification

feat(matrix): split server and clients into separate groups

feat(matrix): well known to be served by nginx

fix(matrix): add well known route for all hosts

feat(matrix): use separate traefik router for well known

feat(matrix): migrate config.yaml for mas

feat(matrix): divide mas config between nomad and volume

feat(matrix): split cinny and element task groups

refactor(media-centre): Migrate media-centre job spec to Nomad HCL format

fix(media-centre): remove json from resource nomad job

fix(media-centre): update media-centre job spec to use Nomad HCL format

feat(media-centre): add downloader group

- Added a new group called "downloaders" to handle the proxy task used when downloading media files.
- Configured the proxy task with the necessary settings and environment variables (a sketch of the group's shape follows below).
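
The sketch below is illustrative only; the image, port, and settings are assumptions based on the later gluetun-based seedbox configuration, not the exact contents of this commit:

    group "downloaders" {
      network {
        port "proxy" {
          to = 8888
        }
      }

      task "proxy" {
        driver = "docker"

        config {
          image   = "docker.io/qmcgaw/gluetun:v3.39.1"
          cap_add = ["NET_ADMIN"]
          ports   = ["proxy"]
        }

        env {
          # Credentials such as OPENVPN_USER are supplied via environment
          # variables (and, in later commits, via Nomad templates).
          VPN_SERVICE_PROVIDER = "ipvanish"
          HTTPPROXY            = "on"
        }

        resources {
          cpu    = 100
          memory = 128
        }
      }
    }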

fix(media-centre): use OPENVPN_USER env variable in proxy task

fix(media-centre): Add /dev/net/tun device to proxy task

feat(media-centre): Add resource limits to proxy task

feat(media-centre): Add Plex task to media-centre job spec

fix(media-centre): add constraints to media-centre job spec

fix(media-centre): nomad doesn't allow sharing devices

fix(media-centre): disable change config dir ownership

fix(media-centre): plex process user is set using env vars

fix(media-centre): update PLEX_GID in job spec

fix(media-centre): update PLEX_GID in job spec

fix(media-centre): update PLEX_UID in job spec

feat(media-centre): enable nvidia gpu capabilities

feat(media-centre): add Tautulli service to media-centre job spec

fix(media-centre): update tautulli volumes

feat(plextraktsync): add plextraktsync module

fix(plextraktsync): update plextraktsync job spec "type" to "batch"

feat(plextraktsync): update resource allocation

fix(plextraktsync): fix cron schedule in plextraktsync job spec

feat(nfs-csi): add nfs-csi module

chore: update .gitignore to include .env file

chore: format files

feat(seedbox): add seedbox module

feat(seedbox): add qbittorrent module and NFS volume

feat(seedbox): add timezone configuration for seedbox job

fix(seedbox): vuetorrent-lsio-mod image env var

feat(seedbox): add HTTP_PORT environment variable for qbittorrent module

feat(seedbox): update access mode for NFS volume

feat(seedbox): add node constraint for seedbox job

feat(seedbox): add subdirectories for NFS volumes

feat(seedbox): add nolock mount flag for NFS volumes

feat(seedbox): Update NFS volume configuration

feat(seedbox): update Docker image and enable force pull

feat(seedbox): pause container network definition

feat(elk): create kibana

feat(elk): update kibana cpu allocation

feat(elk): add elasticsearch container to elk job

This commit adds a new "elasticsearch" task to the "node" group of the "elk" job. The task uses the podman driver and pulls the docker.elastic.co/elasticsearch/elasticsearch:8.15.2 image with force pull enabled. It exposes the "transport" port, mounts the /mnt/docker/elastic/elasticsearch/config and /mnt/docker/elastic/elasticsearch/data volumes, and is allocated 500 MHz of CPU and 1024 MB of memory.
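
A minimal sketch of the task as described (the container-side mount points and the surrounding job/group wrapper are assumptions; the driver was later migrated to docker):

    job "elk" {
      group "node" {
        network {
          port "transport" {}
        }

        task "elasticsearch" {
          driver = "podman"

          config {
            image      = "docker.elastic.co/elasticsearch/elasticsearch:8.15.2"
            force_pull = true
            ports      = ["transport"]
            volumes = [
              # Host paths from the commit message; container paths assume the
              # standard Elasticsearch image layout.
              "/mnt/docker/elastic/elasticsearch/config:/usr/share/elasticsearch/config",
              "/mnt/docker/elastic/elasticsearch/data:/usr/share/elasticsearch/data",
            ]
          }

          resources {
            cpu    = 500
            memory = 1024
          }
        }
      }
    }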

feat(seedbox): update resource allocation in seedbox job

fix(elk): remove ulimit from elk job

See: https://github.com/hashicorp/nomad-driver-podman/issues/341
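
The same setting was later reinstated once the tasks moved to the docker driver, which does support ulimits in the task config; a minimal sketch of that form (the real block appears in the elk jobspec further down):

    config {
      image = "docker.elastic.co/elasticsearch/elasticsearch:8.16.1"

      # Soft:hard memlock limits; "-1:-1" lifts the limit entirely.
      ulimit {
        memlock = "-1:-1"
      }
    }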

fix(elk): add selinuxlabel to volume mounts in elk job

refactor(modules): remove unused modules and jobspecs

refactor(elk): update CPU allocation in elk job

feat(media-centre): Plex to use host network

feat(elk): add 9200 port to es node

feat(elk): allocate more ram to node

feat(elk): allocate even more ram to node

feat(media-centre): reduce memory allocation of tautulli

feat(elk): revert memory allocation after shard tidy-up

feat(media-centre): set memory soft limit

feat(media-centre): update memory hard limit for tautulli

feat(elk): tweak node mem alloc

See: https://www.elastic.co/guide/en/elasticsearch/reference/current/size-your-shards.html#_example_11

feat(seedbox): add memory soft limit to vpn client

feat(seedbox): update memory hard limit for vpn client

fix(matrix): increase whatsapp-bridge memory allocation

refactor(elk): update elastic and kibana image versions in elk job

feat: add latest image versions and add force pull

feat: enable force pull for all podman driver tasks

feat(matrix): increase syncv3 memory allocation

feat: migrate podman memory allocation to nomad max memory

fix: nomad max memory is defined by memory_max
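
In Nomad's resources block, the scheduled reservation stays in memory while the former podman-level hard limit maps to memory_max; a minimal sketch:

    resources {
      cpu        = 50
      memory     = 16 # soft reservation used for scheduling
      memory_max = 32 # hard cap the task may burst up to
    }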

feat(matrix): add ecs fields to task metadata

refactor(matrix): migrate shared meta to parent

refactor(matrix): update resource allocation in jobspec.nomad.hcl

refactor(matrix): update resource allocation in jobspec.nomad.hcl

refactor(matrix): update resource allocation in jobspec.nomad.hcl

refactor(plextraktsync): update resource allocation in jobspec.nomad.hcl

refactor(plextraktsync): remove task node constraint

refactor: migrate podman tasks to docker tasks

feat(elk): update ulimit for elasticsearch container

refactor(elk): update volume paths in jobspec.nomad.hcl

feat(seedbox): remove pause container

feat(elk): update kibana count in jobspec.nomad.hcl

refactor(elk): remove node constraint from kibana

refactor(elk): add spread attribute to kibana

refactor(elk): update port configuration in jobspec.nomad.hcl

refactor(dummy): migrate json jobspec to hcl

feat(dummy): update service provider to consul

fix(dummy): add port label to port definition

refactor(dummy): rename jobspec to match standard

feat(dummy): migrate to service mesh
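
In practice this means registering the group service with Consul and letting Nomad inject an Envoy sidecar; a minimal sketch of the pattern (service and port names are illustrative):

    service {
      name     = "web"
      port     = "www"
      provider = "consul"

      connect {
        # An empty sidecar_service block injects the default Envoy sidecar.
        sidecar_service {}
      }
    }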

chore(dummy): update Nomad provider version to 2.4.0

chore(dummy): update Nomad provider version to 2.4.0

feat(dummy): configure traefik

refactor(dummy): update provider to use consul instead of nomad

feat(renovate): create module for automated dependency updates

Add renovate.json

fix(renovate): increase memory allocation

feat(renovate): add GITHUB_COM_TOKEN variable

refactor(renovate): pin version

feat(renovate): enable dependency dashboard

refactor(matrix): use bridge networking for server group

refactor(matrix): update URLs to use allocated addresses

refactor(matrix): remove host.docker.internal host

fix(matrix): update SYNCV3_BINDADDR

fix(matrix): update SYNCV3_BINDADDR port to 8009

fix(elk): increase memory allocation

feat(elk): disable co-located kibana allocations

refactor(jobspec): update provider to consul for elk and media-centre services

feat(media-centre): reduce memory allocation from 4096 to 1024

fix(jobspec): replace constraints with new neto client id

feat(elk): update data volume path to use unique node name

feat(elk): migrate elastic config to nfs

feat(elk): add Nyx

refactor(workflows): reformat (#17)

Reviewed-on: #17

fix(elk): increase memory allocation to 2048 MB

refactor(matrix): remove specific node constraint from job specification

feat(matrix): implement consul service mesh

feat(elk): use allocation index for node state location
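
A sketch of the idea, with a hypothetical host path; interpolating the allocation index keeps each Elasticsearch allocation's state in its own directory:

    volumes = [
      "/mnt/docker/elastic/node-${NOMAD_ALLOC_INDEX}/data:/usr/share/elasticsearch/data",
    ]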

refactor(media-centre): remove deprecated NVIDIA_DRIVER_CAPABILITIES

fix(media-centre): plex transcode dir not writable

fix(media-centre): set transcode dir to world writable

fix(media-centre): set transcode dir to world writable

feat(media-centre): replace plex transcode dir with a persistent volume

feat(media-centre): increase plex memory limit

For caching

chore(elk): promote elastic version

feat(elk): remove force_pull option from Elasticsearch and Kibana configurations

style(jobspec): improve formatting in HCL files

feat(elk): add health check

feat(media-centre): add NVIDIA visible devices for Jellyfin and Plex

fix(media-centre): increase max memory for tautulli

feat(plugin-csi): add NFS CSI driver jobspec and main configuration

feat(main.tf): add plugin-csi module to main configuration

fix(plugin-csi): refactor NFS job specifications into separate files for controller and node

fix(plugin-csi): add NFS path variable for controller and node resources

fix(plugin-csi): add NFS path variable to controller and node job specifications

fix(plugin-csi): add provisioner name to NFS job specifications for controller and node

fix(plugin-csi): update NFS job specifications

feat(seedbox): restructure job specifications and add NFS volume registrations for media and qbittorrent config

feat(workflows): add lint workflow for Terraform and Nomad formatting

fix(seedbox): add attachment and access modes for media and qbittorrent_config volumes

feat(seedbox): remove node constraint

Update modules/seedbox/main.tf

fix(seedbox): add mount options with nolock flag for media and qbittorrent_config volumes

fix(seedbox): update share paths to use lowercase in media and qbittorrent_config volumes

fix(seedbox): remove unused device configuration from jobspec

feat(matrix): add health check configuration

feat(matrix): add health check ports for synapse, mas, and nginx

fix(matrix): remove health check configuration for synapse, mas, and nginx

feat(main.tf): remove unused and broken seedbox module

feat(renovate): use JSON log format

chore(elk): upgrade version to latest

feat(elk): use 2 kibana replicas

feat(elk): add on_update ignore option to ready check configuration

fix(elk): update volume paths to use node unique name for configuration and data

feat(matrix): add envoy_metrics port and update service metadata for Consul integration

feat(matrix): add health check configuration to synapse job

feat(matrix): add /metrics endpoint exposure for envoy_metrics

fix(matrix): update service port configurations to use static port numbers

feat(matrix): restructure ingress groups and enhance service configurations for improved routing

fix(matrix): update whatsapp bridge tokens and change push to receive ephemeral

feat(media-centre): remove node constraint from tautulli task configuration

feat(elk): onboard hestia node to nomad

feat(elk): enhance job specification with Envoy metrics and update service configurations

feat(renovate): onboard nomad docker image updates

chore(deps): update ghcr.io/renovatebot/renovate docker tag to v38.142.7

chore(jobspec): use explicit image version tags where possible

fix(jobspec): formatting

chore(deps): update busybox docker tag to v1.37.0

chore(deps): update docker.io/library/nginx docker tag to v1.27.3

chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39

chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39.59.0

chore(deps): update ghcr.io/renovatebot/renovate docker tag to v39.60.0

chore(matrix): format multiline string in jobspec.nomad.hcl for improved readability

chore(secrets): refactor jobspecs to use templates for sensitive environment variables
Ben Martin, 2024-09-26 18:17:46 +00:00
parent ea6c8893b6, commit 9af9846216
Signed by: ben (GPG key ID: 859A655FCD290E4A)
28 changed files with 1428 additions and 296 deletions

@@ -0,0 +1,30 @@
name: Lint

on:
  pull_request:
    branches:
      - main
  push:
    branches:
      - main

env:
  TF_IN_AUTOMATION: true

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v3
      - name: Setup Nomad
        uses: hashicorp/setup-nomad@main
      - name: Terraform fmt
        run: terraform fmt -recursive -check
      - name: Nomad fmt
        run: nomad fmt -recursive -check

@@ -8,9 +8,6 @@ on:
     branches:
       - main
 
-permissions:
-  pull-requests: write
-
 env:
   TF_PLUGIN_CACHE_DIR: ${{ gitea.workspace }}/.terraform.d/plugin-cache
   TF_IN_AUTOMATION: true
@@ -24,79 +21,31 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v4
-      - uses: hashicorp/setup-terraform@v3
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v3
      - name: Create Terraform Plugin Cache Dir
        run: mkdir -v -p $TF_PLUGIN_CACHE_DIR
-      - uses: actions/cache@v4
+      - name: Cache Terraform Plugins
+        uses: actions/cache@v4
        with:
          path: ${{ env.TF_PLUGIN_CACHE_DIR }}
          key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }}
-      - name: Terraform fmt
-        id: fmt
-        run: terraform fmt -recursive -check
-        continue-on-error: true
      - name: Terraform Init
        id: init
        run: terraform init -input=false
      - name: Terraform Validate
        id: validate
        run: terraform validate
      - name: Terraform Plan
        id: plan
        run: terraform plan -out=tfplan
-        continue-on-error: true
-      - uses: actions/github-script@v7
-        if: github.event_name == 'pull_request'
-        env:
-          PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            const { data: comments } = await github.rest.issues.listComments({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              issue_number: context.issue.number,
-            })
-            const botComment = comments.find(comment => {
-              return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style')
-            })
-            const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
-            #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
-            #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
-            <details><summary>Validation Output</summary>
-            \`\`\`\n
-            ${{ steps.validate.outputs.stdout }}
-            \`\`\`
-            </details>
-            #### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
-            <details><summary>Show Plan</summary>
-            \`\`\`\n
-            ${process.env.PLAN}
-            \`\`\`
-            </details>
-            *Pusher: @${{ github.actor }}, Action: \`${{ github.event_name }}\`, Working Directory: \`${{ env.tf_actions_working_dir }}\`, Workflow: \`${{ github.workflow }}\`*`;
-            if (botComment) {
-              github.rest.issues.updateComment({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                comment_id: botComment.id,
-                body: output
-              })
-            } else {
-              github.rest.issues.createComment({
-                issue_number: context.issue.number,
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                body: output
-              })
-            }
-      - name: Terraform apply
+      - name: Terraform Apply
        if: github.ref == 'refs/heads/main' && steps.plan.outcome == 'success'
        run: terraform apply -auto-approve tfplan

.gitignore
@@ -13,3 +13,4 @@ override.tf.json
 .terraform.tfstate.lock.info
 .terraformrc
 terraform.rc
+.env

.terraform.lock.hcl
@@ -2,21 +2,21 @@
 # Manual edits may be lost in future updates.
 
 provider "registry.terraform.io/hashicorp/nomad" {
-  version     = "2.2.0"
-  constraints = "2.2.0"
+  version     = "2.4.0"
+  constraints = "2.4.0"
   hashes = [
-    "h1:BAjqzVkuXxHtRKG+l9unaZJPk2kWZpSTCEcQPRcl2so=",
-    "zh:052f909d25121e93dc799290216292fca67943ccde12ba515068b838a6ff8c66",
-    "zh:20e29aeb9989f7a1e04bb4093817c7acc4e1e737bb21a3066f3ea46f2001feff",
-    "zh:2326d101ef427599b72cce30c0e0c1d18ae783f1a897c20f2319fbf54bab0a61",
-    "zh:3420cbe4fd19cdc96d715d0ae8e79c272608023a76033bbf582c30637f6d570f",
-    "zh:41ec570f87f578f1c57655e2e4fbdb9932d94cf92dc9cd11828cccedf36dd4a4",
-    "zh:5f90dcc58e3356ffead82ea211ecb4a2d7094d3c2fbd14ff85527c3652a595a2",
-    "zh:64aaa48609d2db868fcfd347490df0e12c6c3fcb8e4f12908c5d52b1a0adf73f",
+    "h1:MnNLz6rQIqiLk6EKf5XjJlE5S/wmqh+z0A4ndDHWEZA=",
+    "zh:0825c5d2e6cb6a92aa247366f10c74275fdf9027bdb874d374aa3a9c3983ec68",
+    "zh:0c939ce35dce82da62c4cc8642903b43292c9915ac1a13099885e7c89edb87ae",
+    "zh:23dd5d8300e7d6b42a1a55a541296227b3c054ad19dc8a6eb411ef8b2d689f5e",
+    "zh:26b76c1d2c25f1b9730d5b6fe0fce355a0a5f666c0818f7284d9663ee556adec",
+    "zh:4915e1f176c4aa910504113629dbe25120a3915e703cb8f8b637dd2d20a4ad6f",
+    "zh:4f9d3bb2e97c9a4a135aa9d8d65f37902a7f838655e21cc22fffc8ebab8d2d66",
+    "zh:51dad4566c56b9bbe0a59c25287f3e14c35b5fbfde167fdab6ae98dfc23a6ae1",
+    "zh:56a7f7939bc41dbcdadf1fbbc7096090a26aa38552060cef472c82181af26cc8",
+    "zh:68bc2c7d28e1a7de2655e194423e911433ea8f3b87ab0a54ed1833f44ef63bb5",
+    "zh:75f9d9c4c031c3ac83b2c2cf37163edf3b8eea9f58a379d1b83d096f0b3d98cc",
     "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:86b4923e10e6ba407d1d2aab83740b702058e8b01460af4f5f0e4008f40e492c",
-    "zh:ae89dcba33097af33a306344d20e4e25181f15dcc1a860b42db5b7199a97c6a6",
-    "zh:ce56d68cdfba60891765e94f9c0bf69eddb985d44d97db9f91874bea027f08e2",
-    "zh:e993bcde5dbddaedf3331e3014ffab904f98ab0f5e8b5d6082b7ca5083e0a2f1",
+    "zh:fea4d2a0df951ab8fad4d2f8da0e2c2198e93220cf9269d5c51d80db1988ae52",
   ]
 }

main.tf
@@ -10,6 +10,22 @@ module "media-centre" {
   source = "./modules/media-centre"
 }
 
-module "uptime" {
-  source = "./modules/uptime"
+module "plextraktsync" {
+  source = "./modules/plextraktsync"
+}
+
+module "matrix" {
+  source = "./modules/matrix"
+}
+
+module "elk" {
+  source = "./modules/elk"
+}
+
+module "renovate" {
+  source = "./modules/renovate"
+}
+
+module "plugin-csi" {
+  source = "./modules/plugin-csi"
 }

@@ -1,62 +0,0 @@
{
"ID": "hello-world",
"Name": "hello-world",
"TaskGroups": [
{
"Name": "servers",
"Tasks": [
{
"Name": "web",
"Driver": "docker",
"Config": {
"image": "busybox:1",
"command": "httpd",
"args": [
"-v",
"-f",
"-p",
"${NOMAD_PORT_www}",
"-h",
"/local"
],
"ports": [
"www"
]
},
"Templates": [
{
"DestPath": "local/index.html",
"EmbeddedTmpl": "<h1>Hello, Ben!</h1>\n"
}
],
"Resources": {
"CPU": 50,
"MemoryMB": 64
}
}
],
"Networks": [
{
"DynamicPorts": [
{
"Label": "www",
"To": 8001
}
]
}
],
"Services": [
{
"PortLabel": "www",
"Provider": "nomad",
"Name": "web",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.web.entrypoints=websecure",
"traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)"
]
}
]
}
]
}

@@ -0,0 +1,45 @@
job "hello-world" {
datacenters = ["dc1"]
group "servers" {
count = 1
task "web" {
driver = "docker"
config {
image = "busybox:1.37.0"
command = "httpd"
args = ["-v", "-f", "-p", "${NOMAD_PORT_www}", "-h", "/local"]
ports = ["www"]
}
template {
destination = "local/index.html"
data = "<h1>Hello, Ben!</h1>\n"
}
resources {
cpu = 50
memory = 64
}
}
network {
mode = "bridge"
port "www" {
to = 8001
}
}
service {
port = "www"
provider = "consul"
tags = [
"traefik.enable=true",
"traefik.http.routers.web.rule=Host(`hello-world.brmartin.co.uk`)",
]
}
}
}

@@ -1,4 +1,3 @@
 resource "nomad_job" "dummy" {
-  jobspec = file("${path.module}/jobspec.json")
-  json    = true
+  jobspec = file("${path.module}/jobspec.nomad.hcl")
 }

@@ -0,0 +1,140 @@
variable "elastic_version" {
type = string
}
job "elk" {
group "node" {
count = 3
constraint {
distinct_hosts = true
}
network {
mode = "bridge"
port "http" {
static = 9200
}
port "transport" {
static = 9300
}
port "envoy_metrics" {
to = 9102
}
}
service {
provider = "consul"
port = "9200"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
connect {
sidecar_service {
proxy {
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {
exclude_inbound_ports = ["9200", "9300"]
exclude_outbound_ports = [9200, 9300]
}
}
}
}
}
service {
provider = "consul"
port = "transport"
}
task "elasticsearch" {
driver = "docker"
config {
image = "docker.elastic.co/elasticsearch/elasticsearch:${var.elastic_version}"
ports = ["9200", "9300"]
volumes = [
"/mnt/docker/elastic-${node.unique.name}/config:/usr/share/elasticsearch/config",
"/mnt/docker/elastic-${node.unique.name}/data:/usr/share/elasticsearch/data",
]
ulimit {
memlock = "-1:-1"
}
}
resources {
cpu = 2000
memory = 2048
}
}
}
group "kibana" {
count = 2
constraint {
distinct_hosts = true
}
network {
mode = "bridge"
port "web" {
static = 5601
}
}
task "kibana" {
driver = "docker"
config {
image = "docker.elastic.co/kibana/kibana:${var.elastic_version}"
ports = ["web"]
volumes = [
"/mnt/docker/elastic/kibana/config:/usr/share/kibana/config",
]
}
resources {
cpu = 1500
memory = 1024
}
service {
tags = [
"traefik.enable=true",
"traefik.http.routers.kibana.rule=Host(`kibana.brmartin.co.uk`)",
"traefik.http.routers.kibana.entrypoints=websecure",
]
port = "web"
address_mode = "host"
provider = "consul"
check {
type = "http"
path = "/api/status"
interval = "10s"
timeout = "2s"
on_update = "ignore"
}
}
}
}
}

modules/elk/main.tf
@@ -0,0 +1,9 @@
resource "nomad_job" "elk" {
jobspec = file("${path.module}/jobspec.nomad.hcl")
hcl2 {
vars = {
"elastic_version" = "8.16.1",
}
}
}

@@ -0,0 +1,618 @@
job "matrix" {
meta = {
"service.type" = "matrix"
}
group "synapse" {
network {
mode = "bridge"
port "envoy_metrics" {
to = 9102
}
}
service {
provider = "consul"
port = "8008"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
check {
type = "http"
path = "/health"
interval = "20s"
timeout = "5s"
expose = true
}
connect {
sidecar_service {
proxy {
config {
protocol = "http"
local_idle_timeout_ms = 120000
}
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {}
}
}
}
}
task "synapse" {
driver = "docker"
config {
image = "ghcr.io/element-hq/synapse:v1.120.2"
ports = ["8008"]
volumes = [
"/mnt/docker/matrix/synapse:/data",
"/mnt/docker/matrix/media_store:/media_store",
]
}
env = {
SYNAPSE_WORKER = "synapse.app.homeserver"
}
template {
data = <<-EOF
id: whatsapp
url: http://matrix-whatsapp-bridge.virtual.consul
{{with nomadVar "nomad/jobs/matrix/synapse/synapse"}}
as_token="{{.as_token}}"
hs_token="{{.hs_token}}"
{{end}}
sender_localpart: ctvppZV8epjY9iUtTt0nR29e92V4nIJb
rate_limited: false
namespaces:
users:
- regex: ^@whatsappbot:brmartin\.co\.uk$
exclusive: true
- regex: ^@whatsapp_.*:brmartin\.co\.uk$
exclusive: true
de.sorunome.msc2409.push_ephemeral: true
receive_ephemeral: true
EOF
destination = "local/matrix-whatsapp-registration.yaml"
}
resources {
cpu = 500
memory = 128
memory_max = 256
}
meta = {
"service.name" = "synapse"
}
}
}
group "whatsapp-bridge" {
network {
mode = "bridge"
port "envoy_metrics" {
to = 9102
}
}
service {
provider = "consul"
port = "8082"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {}
}
}
}
}
task "whatsapp-bridge" {
driver = "docker"
config {
image = "dock.mau.dev/mautrix/whatsapp:v0.11.1"
ports = ["8082"]
volumes = [
"/mnt/docker/matrix/whatsapp-data:/data"
]
}
resources {
cpu = 50
memory = 16
memory_max = 32
}
meta = {
"service.name" = "whatsapp"
}
}
}
group "mas" {
network {
mode = "bridge"
port "envoy_metrics" {
to = 9102
}
}
service {
port = "8081"
provider = "consul"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {}
}
}
}
}
task "mas" {
driver = "docker"
config {
image = "ghcr.io/matrix-org/matrix-authentication-service:main"
force_pull = true
ports = ["8081"]
volumes = [
"/mnt/docker/matrix/synapse-mas/config.yaml:/config.yaml:ro"
]
}
env {
MAS_CONFIG = "/config.yaml"
}
resources {
cpu = 100
memory = 32
memory_max = 64
}
meta = {
"service.name" = "mas"
}
}
}
group "syncv3" {
network {
mode = "bridge"
port "envoy_metrics" {
to = 9102
}
}
service {
provider = "consul"
port = "8008"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
connect {
sidecar_service {
proxy {
config {
protocol = "http"
}
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {}
}
}
}
}
task "syncv3" {
driver = "docker"
config {
image = "ghcr.io/matrix-org/sliding-sync:v0.99.19"
ports = ["8008"]
}
env = {
SYNCV3_SERVER = "http://synapse.service.consul"
}
template {
data = <<-EOH
{{with nomadVar "nomad/jobs/matrix/syncv3/syncv3"}}
SYNCV3_SECRET="{{.SYNCV3_SECRET}}"
SYNCV3_DB="{{.SYNCV3_DB}}"
{{end}}
EOH
destination = "secrets/file.env"
env = true
}
resources {
cpu = 50
memory = 16
memory_max = 32
}
meta = {
"service.name" = "syncv3"
}
}
}
group "nginx" {
network {
mode = "bridge"
port "nginx" {
to = 80
}
port "envoy_metrics" {
to = 9102
}
}
service {
provider = "consul"
port = "80"
meta {
envoy_metrics_port = "${NOMAD_HOST_PORT_envoy_metrics}"
}
connect {
sidecar_service {
proxy {
config {
protocol = "http"
local_idle_timeout_ms = 120000
}
expose {
path {
path = "/metrics"
protocol = "http"
local_path_port = 9102
listener_port = "envoy_metrics"
}
}
transparent_proxy {}
}
}
}
}
task "nginx" {
driver = "docker"
config {
image = "docker.io/library/nginx:1.27.3-alpine"
ports = ["80"]
volumes = [
"/mnt/docker/matrix/nginx/templates:/etc/nginx/templates:ro",
"/mnt/docker/matrix/nginx/html:/usr/share/nginx/html:ro",
]
}
env = {
NGINX_PORT = "80"
}
resources {
cpu = 50
memory = 16
}
meta = {
"service.name" = "nginx"
}
}
}
group "synapse-ingress-group" {
network {
mode = "bridge"
port "inbound" {
to = 8080
}
}
service {
port = "inbound"
tags = [
"traefik.enable=true",
"traefik.http.routers.synapse.rule=Host(`matrix.brmartin.co.uk`)",
"traefik.http.routers.synapse.entrypoints=websecure",
"traefik.http.routers.synapse.middlewares=synapseHeaders,synapseBuffering",
"traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowmethods=GET,POST,PUT,DELETE,OPTIONS",
"traefik.http.middlewares.synapseHeaders.headers.accesscontrolallowheaders=Origin,X-Requested-With,Content-Type,Accept,Authorization",
"traefik.http.middlewares.synapseHeaders.headers.accesscontrolalloworiginlist=*",
"traefik.http.middlewares.synapseBuffering.buffering.maxRequestBodyBytes=1000000000",
]
connect {
gateway {
proxy {
config {
local_idle_timeout_ms = 120000
}
}
ingress {
listener {
port = 8080
protocol = "http"
service {
name = "matrix-synapse"
hosts = ["*"]
}
}
}
}
}
}
}
group "mas-ingress-group" {
network {
mode = "bridge"
port "inbound" {
to = 8080
}
}
service {
port = "inbound"
tags = [
"traefik.enable=true",
"traefik.http.routers.mas.rule=Host(`mas.brmartin.co.uk`) || (Host(`matrix.brmartin.co.uk`) && PathRegexp(`^/_matrix/client/(.*)/(login|logout|refresh)`))",
"traefik.http.routers.mas.entrypoints=websecure",
]
connect {
gateway {
ingress {
listener {
port = 8080
protocol = "http"
service {
name = "matrix-mas"
hosts = ["*"]
}
}
}
}
}
}
}
group "wellknown-ingress-group" {
network {
mode = "bridge"
port "inbound" {
to = 8080
}
}
service {
port = "inbound"
tags = [
"traefik.enable=true",
"traefik.http.routers.matrixWellKnown.rule=PathPrefix(`/.well-known/matrix`)",
"traefik.http.routers.matrixWellKnown.entrypoints=websecure",
"traefik.http.routers.matrixWellKnown.middlewares=matrixWellKnown",
"traefik.http.middlewares.matrixWellKnown.headers.accesscontrolalloworiginlist=*",
]
connect {
gateway {
ingress {
listener {
port = 8080
protocol = "http"
service {
name = "matrix-nginx"
hosts = ["*"]
}
}
}
}
}
}
}
group "syncv3-ingress-group" {
network {
mode = "bridge"
port "inbound" {
to = 8080
}
}
service {
port = "inbound"
tags = [
"traefik.enable=true",
"traefik.http.routers.matrixsyncv3.rule=Host(`matrix.brmartin.co.uk`) && (PathPrefix(`/client`) || PathPrefix(`/_matrix/client/unstable/org.matrix.msc3575/sync`))",
"traefik.http.routers.matrixsyncv3.entrypoints=websecure",
]
connect {
gateway {
ingress {
listener {
port = 8080
protocol = "http"
service {
name = "matrix-syncv3"
hosts = ["*"]
}
}
}
}
}
}
}
group "element" {
network {
port "element" {
to = 80
}
}
task "element" {
driver = "docker"
config {
image = "docker.io/vectorim/element-web:v1.11.87"
ports = ["element"]
volumes = [
"/mnt/docker/matrix/element/config.json:/app/config.json:ro"
]
}
resources {
cpu = 100
memory = 16
}
service {
tags = [
"traefik.enable=true",
"traefik.http.routers.element.rule=Host(`element.brmartin.co.uk`)",
"traefik.http.routers.element.entrypoints=websecure",
]
port = "element"
address_mode = "host"
provider = "consul"
}
meta = {
"service.name" = "element"
}
}
}
group "cinny" {
network {
port "cinny" {
to = 80
}
}
task "cinny" {
driver = "docker"
config {
image = "ghcr.io/cinnyapp/cinny:v4.2.3"
ports = ["cinny"]
volumes = [
"/mnt/docker/matrix/cinny/config.json:/app/config.json:ro"
]
}
resources {
cpu = 50
memory = 16
}
service {
tags = [
"traefik.enable=true",
"traefik.http.routers.cinny.rule=Host(`cinny.brmartin.co.uk`)",
"traefik.http.routers.cinny.entrypoints=websecure",
]
port = "cinny"
address_mode = "host"
provider = "consul"
}
meta = {
"service.name" = "cinny"
}
}
}
}

modules/matrix/main.tf
@@ -0,0 +1,3 @@
resource "nomad_job" "matrix" {
jobspec = file("${path.module}/jobspec.nomad.hcl")
}

@@ -1,84 +0,0 @@
{
"ID": "media-centre",
"Name": "Media Centre",
"TaskGroups": [
{
"Name": "Media Servers",
"Tasks": [
{
"Name": "Jellyfin",
"User": "985",
"Driver": "docker",
"Config": {
"image": "jellyfin/jellyfin",
"runtime": "nvidia",
"group_add": [
"997"
],
"ports": [
"jellyfin"
],
"mounts": [
{
"type": "volume",
"target": "/media",
"volume_options": {
"driver_config": {
"name": "local",
"options": [
{
"type": "nfs",
"o": "addr=martinibar.lan,nolock,soft,rw",
"device": ":/volume1/docker"
}
]
}
}
},
{
"type": "volume",
"target": "/config",
"source": "jellyfin-config"
}
]
},
"Env": {
"JELLYFIN_PublishedServerUrl": "192.168.1.5"
},
"Resources": {
"CPU": 1200,
"MemoryMB": 4096,
"Devices": [
{
"Name": "nvidia/gpu",
"Count": 1
}
]
}
}
],
"Services": [
{
"Name": "Jellyfin",
"Provider": "nomad",
"PortLabel": "jellyfin",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.jellyfin.entrypoints=websecure",
"traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)"
]
}
],
"Networks": [
{
"DynamicPorts": [
{
"Label": "jellyfin",
"To": 8096
}
]
}
]
}
]
}

@@ -0,0 +1,203 @@
job "media-centre" {
group "jellyfin" {
task "jellyfin" {
user = "985"
driver = "docker"
constraint {
attribute = "${node.unique.id}"
value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e"
}
config {
image = "ghcr.io/jellyfin/jellyfin:10.10.3"
runtime = "nvidia"
group_add = ["997"]
ports = ["jellyfin"]
mount {
type = "volume"
target = "/media"
volume_options {
driver_config {
name = "local"
options {
type = "nfs"
o = "addr=martinibar.lan,nolock,soft,rw"
device = ":/volume1/docker"
}
}
}
}
mount {
type = "volume"
target = "/config"
source = "jellyfin-config"
}
}
env {
JELLYFIN_PublishedServerUrl = "https://jellyfin.brmartin.co.uk"
NVIDIA_DRIVER_CAPABILITIES = "all"
NVIDIA_VISIBLE_DEVICES = "all"
}
resources {
cpu = 1200
memory = 4096
}
}
service {
name = "Jellyfin"
provider = "consul"
port = "jellyfin"
tags = [
"traefik.enable=true",
"traefik.http.routers.jellyfin.entrypoints=websecure",
"traefik.http.routers.jellyfin.rule=Host(`jellyfin.brmartin.co.uk`)"
]
}
network {
port "jellyfin" {
to = 8096
}
}
}
group "plex" {
task "plex" {
driver = "docker"
constraint {
attribute = "${node.unique.id}"
value = "3f6d897a-f755-5677-27c3-e3f0af1dfb7e"
}
config {
image = "plexinc/pms-docker:latest"
runtime = "nvidia"
ports = ["plex"]
network_mode = "host"
mount {
type = "volume"
target = "/data"
volume_options {
driver_config {
name = "local"
options {
type = "nfs"
o = "addr=martinibar.lan,nolock,soft,rw"
device = ":/volume1/docker"
}
}
}
}
mount {
type = "volume"
target = "/share"
volume_options {
driver_config {
name = "local"
options {
type = "nfs"
o = "addr=martinibar.lan,nolock,soft,rw"
device = ":/volume1/Share"
}
}
}
}
mount {
type = "volume"
target = "/config"
source = "plex-config"
}
mount {
type = "volume"
target = "/transcode"
source = "plex-transcode"
}
}
env {
TZ = "Europe/London"
CHANGE_CONFIG_DIR_OWNERSHIP = "false"
PLEX_UID = "990"
PLEX_GID = "997"
NVIDIA_DRIVER_CAPABILITIES = "all"
NVIDIA_VISIBLE_DEVICES = "all"
}
resources {
cpu = 1200
memory = 4096
}
}
service {
name = "Plex"
provider = "consul"
port = "plex"
tags = [
"traefik.enable=true",
"traefik.http.routers.plex.entrypoints=websecure",
"traefik.http.routers.plex.rule=Host(`plex.brmartin.co.uk`)"
]
}
network {
port "plex" {
static = 32400
}
}
}
group "tautulli" {
task "tautulli" {
driver = "docker"
config {
image = "ghcr.io/tautulli/tautulli:v2.15.0"
ports = ["tautulli"]
volumes = [
"/mnt/docker/downloads/config/tautulli:/config",
]
}
env {
PUID = "994"
PGID = "997"
TZ = "Europe/London"
}
resources {
cpu = 100
memory = 128
memory_max = 256
}
}
service {
provider = "consul"
port = "tautulli"
tags = [
"traefik.enable=true",
"traefik.http.routers.tautulli.entrypoints=websecure",
"traefik.http.routers.tautulli.rule=Host(`tautulli.brmartin.co.uk`)"
]
}
network {
port "tautulli" {
to = 8181
}
}
}
}

@@ -1,4 +1,3 @@
 resource "nomad_job" "media-centre" {
-  jobspec = file("${path.module}/jobspec.json")
-  json    = true
+  jobspec = file("${path.module}/jobspec.nomad.hcl")
 }

@@ -0,0 +1,28 @@
job "plextraktsync" {
type = "batch"
periodic {
crons = ["0 0/2 * * *"]
prohibit_overlap = true
}
group "plextraktsync" {
task "plextraktsync" {
driver = "docker"
config {
image = "ghcr.io/taxel/plextraktsync:0.32.2"
volumes = [
"/mnt/docker/downloads/config/plextraktsync:/app/config"
]
command = "sync"
}
resources {
cpu = 2000
memory = 128
}
}
}
}

@@ -0,0 +1,3 @@
resource "nomad_job" "plextraktsync" {
jobspec = file("${path.module}/jobspec.nomad.hcl")
}

@@ -0,0 +1,28 @@
job "plugin-nfs-controller" {
group "controller" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=${attr.unique.hostname}",
"--v=5",
]
}
csi_plugin {
id = "nfs"
type = "controller"
mount_dir = "/csi"
}
resources {
cpu = 100
memory = 128
}
}
}
}

@@ -0,0 +1,31 @@
job "plugin-nfs-nodes" {
type = "system"
group "nodes" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
privileged = true
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=${attr.unique.hostname}",
"--v=5",
]
}
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
cpu = 100
memory = 128
}
}
}
}

@@ -0,0 +1,7 @@
resource "nomad_job" "nfs-controller" {
jobspec = file("${path.module}/jobspec-controller.nomad.hcl")
}
resource "nomad_job" "nfs-nodes" {
jobspec = file("${path.module}/jobspec-nodes.nomad.hcl")
}

@@ -0,0 +1,44 @@
job "renovate" {
type = "batch"
periodic {
crons = ["0 * * * *"]
prohibit_overlap = true
}
group "renovate" {
task "renovate" {
driver = "docker"
config {
image = "ghcr.io/renovatebot/renovate:39.60.0"
}
resources {
cpu = 2000
memory = 512
}
env {
RENOVATE_PLATFORM = "gitea"
RENOVATE_AUTODISCOVER = "true"
RENOVATE_ENDPOINT = "https://git.brmartin.co.uk"
RENOVATE_GIT_AUTHOR = "Renovate Bot <renovate@brmartin.co.uk>"
LOG_FORMAT = "json"
RENOVATE_DEPENDENCY_DASHBOARD = "true"
}
template {
data = <<-EOH
{{with nomadVar "nomad/jobs/renovate/renovate/renovate" }}
RENOVATE_TOKEN = "{{.RENOVATE_TOKEN}}"
GITHUB_COM_TOKEN = "{{.GITHUB_COM_TOKEN}}"
{{end}}
EOH
destination = "secrets/file.env"
env = true
}
}
}
}

modules/renovate/main.tf
@@ -0,0 +1,3 @@
resource "nomad_job" "renovate" {
jobspec = file("${path.module}/jobspec.nomad.hcl")
}

@@ -0,0 +1,117 @@
job "seedbox" {
group "proxy" {
task "proxy" {
driver = "docker"
config {
image = "docker.io/qmcgaw/gluetun:v3.39.1"
force_pull = true
cap_add = ["NET_ADMIN"]
sysctl = {
"net.ipv6.conf.all.disable_ipv6" = "1"
}
}
resources {
cpu = 100
memory = 128
memory_max = 512
}
env {
VPN_SERVICE_PROVIDER = "ipvanish"
SERVER_COUNTRIES = "Switzerland"
HTTPPROXY = "on"
}
template {
data = <<-EOH
{{with nomadVar "nomad/jobs/seedbox/proxy/proxy" }}
OPENVPN_USER = "{{.OPENVPN_USER}}"
OPENVPN_PASSWORD = "{{.OPENVPN_PASSWORD}}"
{{end}}
EOH
destination = "secrets/file.env"
env = true
}
}
}
group "client" {
network {
port "qbittorrent" {}
}
service {
port = "qbittorrent"
provider = "consul"
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
volume "media" {
type = "csi"
source = "media"
attachment_mode = "file-system"
access_mode = "single-node-writer"
mount_options {
mount_flags = ["nolock"]
}
}
volume "qbittorrent_config" {
type = "csi"
source = "qbittorrent_config"
attachment_mode = "file-system"
access_mode = "single-node-writer"
mount_options {
mount_flags = ["nolock"]
}
}
task "qbittorrent" {
driver = "docker"
config {
image = "ghcr.io/linuxserver/qbittorrent:5.0.2"
}
resources {
cpu = 500
memory = 128
}
env {
PUID = "991"
PGID = "997"
WEBUI_PORT = "${NOMAD_PORT_qbittorrent}"
TZ = "Europe/London"
DOCKER_MODS = "ghcr.io/vuetorrent/vuetorrent-lsio-mod:latest"
}
volume_mount {
volume = "media"
destination = "/media"
}
volume_mount {
volume = "qbittorrent_config"
destination = "/config"
}
}
}
}

modules/seedbox/main.tf
@@ -0,0 +1,59 @@
resource "nomad_job" "seedbox" {
depends_on = [
nomad_csi_volume_registration.nfs_volume_media,
nomad_csi_volume_registration.nfs_volume_qbittorrent_config,
]
jobspec = file("${path.module}/jobspec.nomad.hcl")
}
data "nomad_plugin" "nfs" {
plugin_id = "nfs"
wait_for_healthy = true
}
resource "nomad_csi_volume_registration" "nfs_volume_media" {
depends_on = [data.nomad_plugin.nfs]
lifecycle {
prevent_destroy = true
}
plugin_id = "nfs"
name = "media"
volume_id = "media"
external_id = "media"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
"server" = "martinibar.lan",
"share" = "/volume1/csi/media",
}
}
resource "nomad_csi_volume_registration" "nfs_volume_qbittorrent_config" {
depends_on = [data.nomad_plugin.nfs]
lifecycle {
prevent_destroy = true
}
plugin_id = "nfs"
name = "qbittorrent_config"
volume_id = "qbittorrent_config"
external_id = "qbittorrent_config"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
"server" = "martinibar.lan",
"share" = "/volume1/csi/qbittorrent_config",
}
}

@@ -1,63 +0,0 @@
{
"ID": "uptime",
"Name": "Uptime",
"TaskGroups": [
{
"Name": "Uptime Servers",
"Tasks": [
{
"Name": "kuma",
"Driver": "docker",
"Config": {
"image": "louislam/uptime-kuma:latest",
"ports": [
"web"
],
"mounts": [
{
"type": "volume",
"target": "/app/data",
"source": "kuma-data"
},
{
"type": "bind",
"target": "/var/run/docker.sock",
"source": "/var/run/docker.sock"
}
],
"extra_hosts": [
"host.docker.internal:host-gateway"
]
},
"Resources": {
"CPU": 500,
"MemoryMB": 512
}
}
],
"Services": [
{
"Name": "Kuma",
"Provider": "nomad",
"PortLabel": "web",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.kuma.entrypoints=websecure",
"traefik.http.routers.kuma.rule=Host(`status.brmartin.co.uk`)"
]
}
],
"Networks": [
{
"DynamicPorts": [
{
"Label": "web",
"To": 3001
}
]
}
]
}
]
}

@@ -1,4 +0,0 @@
resource "nomad_job" "uptime" {
jobspec = file("${path.module}/jobspec.json")
json = true
}

@@ -2,7 +2,7 @@ terraform {
   required_providers {
     nomad = {
       source  = "hashicorp/nomad"
-      version = "2.2.0"
+      version = "2.4.0"
     }
   }
 }

renovate.json
@@ -0,0 +1,13 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"customManagers": [
{
"customType": "regex",
"datasourceTemplate": "docker",
"fileMatch": ["\\.hcl$"],
"matchStrings": [
"\\s*image\\s*=\\s*\\\"(?<depName>.*?):(?<currentValue>.*?)\\\""
]
}
]
}