From 72c4a448ff3a8dce35ff944dff9d55aa97b66aa0 Mon Sep 17 00:00:00 2001 From: Alex Clarke Date: Wed, 19 Feb 2025 15:08:58 -0700 Subject: [PATCH] Created the initial k3s configuration for personal use --- README.md | 93 ++++++- inventories/k3s/group_vars/all.yml | 4 + inventories/k3s/hosts.yml | 10 + requirements.yml | 6 + .../bazarr-claim0-persistentvolumeclaim.yaml | 12 + .../bazarr-claim1-persistentvolumeclaim.yaml | 12 + .../bazarr-claim2-persistentvolumeclaim.yaml | 12 + .../apps/files/bazarr/bazarr-deployment.yaml | 55 ++++ roles/apps/files/bazarr/bazarr-service.yaml | 16 ++ roles/apps/files/bazarr/docker-compose.yml | 16 ++ roles/apps/files/jellyfin/docker-compose.yml | 19 ++ ...jellyfin-claim0-persistentvolumeclaim.yaml | 12 + ...jellyfin-claim1-persistentvolumeclaim.yaml | 12 + ...jellyfin-claim2-persistentvolumeclaim.yaml | 12 + .../files/jellyfin/jellyfin-deployment.yaml | 70 ++++++ .../apps/files/jellyfin/jellyfin-service.yaml | 27 ++ roles/apps/files/lidarr/docker-compose.yml | 16 ++ .../lidarr-claim0-persistentvolumeclaim.yaml | 12 + .../lidarr-claim1-persistentvolumeclaim.yaml | 12 + .../lidarr-claim2-persistentvolumeclaim.yaml | 12 + .../apps/files/lidarr/lidarr-deployment.yaml | 55 ++++ roles/apps/files/lidarr/lidarr-service.yaml | 16 ++ roles/apps/files/nzbget/docker-compose.yml | 17 ++ .../nzbget-claim0-persistentvolumeclaim.yaml | 12 + .../nzbget-claim1-persistentvolumeclaim.yaml | 12 + .../apps/files/nzbget/nzbget-deployment.yaml | 54 ++++ roles/apps/files/nzbget/nzbget-service.yaml | 16 ++ roles/apps/files/overseerr/docker-compose.yml | 14 ++ ...verseerr-claim0-persistentvolumeclaim.yaml | 12 + .../files/overseerr/overseerr-deployment.yaml | 45 ++++ .../files/overseerr/overseerr-service.yaml | 16 ++ roles/apps/files/plex/docker-compose.yml | 16 ++ .../plex-claim0-persistentvolumeclaim.yaml | 12 + .../plex-claim1-persistentvolumeclaim.yaml | 12 + .../plex-claim2-persistentvolumeclaim.yaml | 12 + roles/apps/files/plex/plex-deployment.yaml | 63 
+++++ roles/apps/files/prowlarr/docker-compose.yml | 14 ++ ...prowlarr-claim0-persistentvolumeclaim.yaml | 12 + .../files/prowlarr/prowlarr-deployment.yaml | 45 ++++ .../apps/files/prowlarr/prowlarr-service.yaml | 16 ++ roles/apps/files/radarr/docker-compose.yml | 16 ++ .../radarr-claim0-persistentvolumeclaim.yaml | 12 + .../apps/files/radarr/radarr-deployment.yaml | 45 ++++ roles/apps/files/radarr/radarr-service.yaml | 16 ++ roles/apps/files/readarr/docker-compose.yml | 17 ++ .../readarr-claim0-persistentvolumeclaim.yaml | 12 + .../readarr-claim1-persistentvolumeclaim.yaml | 12 + .../readarr-claim2-persistentvolumeclaim.yaml | 12 + .../files/readarr/readarr-deployment.yaml | 64 +++++ roles/apps/files/readarr/readarr-service.yaml | 16 ++ roles/apps/files/sonarr/docker-compose.yml | 16 ++ .../sonarr-claim0-persistentvolumeclaim.yaml | 12 + .../sonarr-claim1-persistentvolumeclaim.yaml | 12 + .../sonarr-claim2-persistentvolumeclaim.yaml | 12 + .../apps/files/sonarr/sonarr-deployment.yaml | 55 ++++ roles/apps/files/sonarr/sonarr-service.yaml | 16 ++ roles/apps/files/tautulli/docker-compose.yml | 14 ++ ...tautulli-claim0-persistentvolumeclaim.yaml | 12 + .../files/tautulli/tautulli-deployment.yaml | 45 ++++ .../apps/files/tautulli/tautulli-service.yaml | 16 ++ .../files/transmission/docker-compose.yml | 23 ++ ...smission-claim0-persistentvolumeclaim.yaml | 12 + ...smission-claim1-persistentvolumeclaim.yaml | 12 + .../transmission/transmission-deployment.yaml | 60 +++++ .../transmission/transmission-service.yaml | 23 ++ roles/apps/tasks/bazarr.yml | 34 +++ roles/apps/tasks/jellyfin.yml | 34 +++ roles/apps/tasks/lidarr.yml | 34 +++ roles/apps/tasks/main.yml | 24 ++ roles/apps/tasks/nzbget.yml | 27 ++ roles/apps/tasks/overseerr.yml | 20 ++ roles/apps/tasks/plex.yml | 27 ++ roles/apps/tasks/prowlarr.yml | 20 ++ roles/apps/tasks/radarr.yml | 20 ++ roles/apps/tasks/readarr.yml | 34 +++ roles/apps/tasks/sonarr.yml | 34 +++ roles/apps/tasks/tautulli.yml | 20 ++ 
roles/apps/tasks/transmission.yml | 27 ++ roles/k3s/files/10-flannel.conf | 10 + .../ingress-controller-load-balancer.yaml | 21 ++ roles/k3s/files/value.yaml | 4 + roles/k3s/tasks/main.yml | 234 ++++++++++++++++++ setup-k3s.yml | 13 + 83 files changed, 2109 insertions(+), 1 deletion(-) create mode 100644 inventories/k3s/group_vars/all.yml create mode 100644 inventories/k3s/hosts.yml create mode 100644 requirements.yml create mode 100644 roles/apps/files/bazarr/bazarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/bazarr/bazarr-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/bazarr/bazarr-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/bazarr/bazarr-deployment.yaml create mode 100644 roles/apps/files/bazarr/bazarr-service.yaml create mode 100644 roles/apps/files/bazarr/docker-compose.yml create mode 100644 roles/apps/files/jellyfin/docker-compose.yml create mode 100644 roles/apps/files/jellyfin/jellyfin-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/jellyfin/jellyfin-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/jellyfin/jellyfin-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/jellyfin/jellyfin-deployment.yaml create mode 100644 roles/apps/files/jellyfin/jellyfin-service.yaml create mode 100644 roles/apps/files/lidarr/docker-compose.yml create mode 100644 roles/apps/files/lidarr/lidarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/lidarr/lidarr-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/lidarr/lidarr-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/lidarr/lidarr-deployment.yaml create mode 100644 roles/apps/files/lidarr/lidarr-service.yaml create mode 100644 roles/apps/files/nzbget/docker-compose.yml create mode 100644 roles/apps/files/nzbget/nzbget-claim0-persistentvolumeclaim.yaml create mode 100644 
roles/apps/files/nzbget/nzbget-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/nzbget/nzbget-deployment.yaml create mode 100644 roles/apps/files/nzbget/nzbget-service.yaml create mode 100644 roles/apps/files/overseerr/docker-compose.yml create mode 100644 roles/apps/files/overseerr/overseerr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/overseerr/overseerr-deployment.yaml create mode 100644 roles/apps/files/overseerr/overseerr-service.yaml create mode 100644 roles/apps/files/plex/docker-compose.yml create mode 100644 roles/apps/files/plex/plex-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/plex/plex-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/plex/plex-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/plex/plex-deployment.yaml create mode 100644 roles/apps/files/prowlarr/docker-compose.yml create mode 100644 roles/apps/files/prowlarr/prowlarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/prowlarr/prowlarr-deployment.yaml create mode 100644 roles/apps/files/prowlarr/prowlarr-service.yaml create mode 100644 roles/apps/files/radarr/docker-compose.yml create mode 100644 roles/apps/files/radarr/radarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/radarr/radarr-deployment.yaml create mode 100644 roles/apps/files/radarr/radarr-service.yaml create mode 100644 roles/apps/files/readarr/docker-compose.yml create mode 100644 roles/apps/files/readarr/readarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/readarr/readarr-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/readarr/readarr-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/readarr/readarr-deployment.yaml create mode 100644 roles/apps/files/readarr/readarr-service.yaml create mode 100644 roles/apps/files/sonarr/docker-compose.yml create mode 100644 
roles/apps/files/sonarr/sonarr-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/sonarr/sonarr-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/sonarr/sonarr-claim2-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/sonarr/sonarr-deployment.yaml create mode 100644 roles/apps/files/sonarr/sonarr-service.yaml create mode 100644 roles/apps/files/tautulli/docker-compose.yml create mode 100644 roles/apps/files/tautulli/tautulli-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/tautulli/tautulli-deployment.yaml create mode 100644 roles/apps/files/tautulli/tautulli-service.yaml create mode 100644 roles/apps/files/transmission/docker-compose.yml create mode 100644 roles/apps/files/transmission/transmission-claim0-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/transmission/transmission-claim1-persistentvolumeclaim.yaml create mode 100644 roles/apps/files/transmission/transmission-deployment.yaml create mode 100644 roles/apps/files/transmission/transmission-service.yaml create mode 100644 roles/apps/tasks/bazarr.yml create mode 100644 roles/apps/tasks/jellyfin.yml create mode 100644 roles/apps/tasks/lidarr.yml create mode 100644 roles/apps/tasks/main.yml create mode 100644 roles/apps/tasks/nzbget.yml create mode 100644 roles/apps/tasks/overseerr.yml create mode 100644 roles/apps/tasks/plex.yml create mode 100644 roles/apps/tasks/prowlarr.yml create mode 100644 roles/apps/tasks/radarr.yml create mode 100644 roles/apps/tasks/readarr.yml create mode 100644 roles/apps/tasks/sonarr.yml create mode 100644 roles/apps/tasks/tautulli.yml create mode 100644 roles/apps/tasks/transmission.yml create mode 100644 roles/k3s/files/10-flannel.conf create mode 100644 roles/k3s/files/ingress-controller-load-balancer.yaml create mode 100644 roles/k3s/files/value.yaml create mode 100644 roles/k3s/tasks/main.yml create mode 100644 setup-k3s.yml diff --git a/README.md b/README.md index 9dcafd0..6afcf84 100644 
--- a/README.md +++ b/README.md @@ -1,2 +1,93 @@ # demo-k3s-cluster -A demonstration k3s cluster with a full *arr suite +A demonstration k3s cluster with a full *arr suite. + +## Prerequisites +**Note:** All steps in this guide are performed on Ubuntu 22.04. User experience may differ. + +* Install pip (Assuming python3 is already installed): `sudo apt-get install python3-pip` +* Install the most recent version of Ansible from pip: `pip3 install --user ansible` +* Export the local bin path: `export PATH=~/.local/bin:$PATH` +* Install the required Ansible dependencies using Ansible Galaxy (`ansible-galaxy install -r requirements.yml`) +* 1 or more Raspberry Pis with Raspbian or some other Debian-based OS installed + +Once you have the IP of your nodes, update the [hosts](./inventories/k3s/hosts.yml) file with the IP addresses of your nodes accordingly. + +## Initialize the Cluster +To create the master node for the cluster, run the following command: + +```shell +ansible-playbook -i inventories/k3s --user $USER --ask-pass --ask-vault-pass --tags k3s-cluster setup-k3s.yml +``` + +This will prompt you for +* Your SSH password for the master node +* A vault password (any password you like to encrypt the secrets in the vault) + +You need the vault password because once the master node is created, the cluster token will be fetched from the master node +and encrypted and placed inside the [group_vars](./inventories/k3s/group_vars/all.yml) file. This will allow Ansible +to then automate the creation and configuration of the worker nodes. + +**Note:** Ansible detects whether or not to initialize a master node solely based on the presence of the `k3s_cluster_token` in your +group vars. If you need to run the command again to create a new master node, you must first remove the token from the group vars. 
+ +This command will do the following: +* Install kubectl and helm locally so you can control your cluster +* Install docker on the pi +* Install kubectl on the pi +* Edit your boot options to enable virtualization and enable the required features +* Disable swap memory +* Reboot +* Setup the K3s master node +* Pull down the config to your local machine and make it so you can directly control the cluster via kubectl +* It places this config in `/home//.kube/config` +* It will install NGINX as the ingress controller and LB +* Encrypt the cluster token and add it to your ansible group_vars so you can later create children nodes +* Install the Kubernetes dashboard so you have a nice UI to use + +## Adding a Child Node to the Cluster +To add a child node to the cluster, simply run the same command again once you've created the master node: + +```shell +ansible-playbook -i inventories/k3s --user $USER --ask-pass --ask-vault-pass --tags k3s-cluster setup-k3s.yml +``` + +## Deploy Some Services +This repo comes prepared with some services that can deploy to your cluster to start playing with K3s. These services are all the traditional [Servarrs](https://wiki.servarr.com/). +To deploy all the included services, run the following command: + +```shell +ansible-playbook -i inventories/k3s --user $USER --ask-pass --tags apps setup-k3s.yml +``` + +You can control which services are deployed using the following tags: + +* bazarr +* jellyfin +* lidarr +* nzbget +* overseerr +* plex +* prowlarr +* radarr +* readarr +* sonarr +* tautulli +* transmission + +**Example:** To deploy Plex and Overseerr, you'd use the following command: + +```shell +ansible-playbook -i inventories/k3s --user $USER --ask-pass --tags plex,overseerr setup-k3s.yml +``` + +All the resource definitions are created via [kompose](https://kompose.io/) based on the included `docker-compose.yml` files. They are created simply with `kompose convert`. 
+ +## Additional Features to Deploy (Optional) +If you look at the [cluster configuration role](./roles/k3s/tasks/main.yml), you'll see that I commented out a few things like setting up an nfs persistent volume provisioner and the Longhorn block storage system. You can try those out if you like but there's no guarantee that those will work because it's been a hot minute since I've run the Longhorn setup specifically. The NFS one should work just fine though. + +## Additional Notes +* Some images are only available on ARM64 nodes, and thus some services, such as the [Plex](./roles/plex/files/plex-deployment.yml) service use node + affinity to ensure they're only deployed on ARM64 nodes. This is done to avoid deployment errors when the image isn't available for a specific node's platform. + +## Creator +* [Alex Clarke](https://github.com/Dark-Alex-17) diff --git a/inventories/k3s/group_vars/all.yml b/inventories/k3s/group_vars/all.yml new file mode 100644 index 0000000..3e19bae --- /dev/null +++ b/inventories/k3s/group_vars/all.yml @@ -0,0 +1,4 @@ +k3s_primary_node_ip: 192.168.0.188 +longhorn_version: 1.7.2 +nfs_server_ip: 192.168.0.62 +user_name: "{{ lookup('env', 'USER') }}" diff --git a/inventories/k3s/hosts.yml b/inventories/k3s/hosts.yml new file mode 100644 index 0000000..d5caba8 --- /dev/null +++ b/inventories/k3s/hosts.yml @@ -0,0 +1,10 @@ +all: + children: + kube-primary: + hosts: + 192.168.0.188: # Change me + kube: + hosts: + 192.168.0.59: # Change me too once you have initialized the primary node + children: + kube-primary: diff --git a/requirements.yml b/requirements.yml new file mode 100644 index 0000000..2b76518 --- /dev/null +++ b/requirements.yml @@ -0,0 +1,6 @@ +--- +collections: + - name: community.general + - name: ansible.posix + - name: kubernetes.core + - name: community.kubernetes diff --git a/roles/apps/files/bazarr/bazarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/bazarr/bazarr-claim0-persistentvolumeclaim.yaml new file mode 100644 
index 0000000..addbf9c --- /dev/null +++ b/roles/apps/files/bazarr/bazarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: bazarr-claim0 + name: bazarr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/bazarr/bazarr-claim1-persistentvolumeclaim.yaml b/roles/apps/files/bazarr/bazarr-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..efd048b --- /dev/null +++ b/roles/apps/files/bazarr/bazarr-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: bazarr-claim1 + name: bazarr-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/bazarr/bazarr-claim2-persistentvolumeclaim.yaml b/roles/apps/files/bazarr/bazarr-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..a08c82a --- /dev/null +++ b/roles/apps/files/bazarr/bazarr-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: bazarr-claim2 + name: bazarr-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/bazarr/bazarr-deployment.yaml b/roles/apps/files/bazarr/bazarr-deployment.yaml new file mode 100644 index 0000000..975bdce --- /dev/null +++ b/roles/apps/files/bazarr/bazarr-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: bazarr + name: bazarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: bazarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: 
bazarr + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/bazarr:latest + name: bazarr + ports: + - containerPort: 6767 + protocol: TCP + volumeMounts: + - mountPath: /config + name: bazarr-claim0 + - mountPath: /movies + name: bazarr-claim1 + - mountPath: /tv + name: bazarr-claim2 + restartPolicy: Always + volumes: + - name: bazarr-claim0 + persistentVolumeClaim: + claimName: bazarr-claim0 + - name: bazarr-claim1 + persistentVolumeClaim: + claimName: bazarr-claim1 + - name: bazarr-claim2 + persistentVolumeClaim: + claimName: bazarr-claim2 diff --git a/roles/apps/files/bazarr/bazarr-service.yaml b/roles/apps/files/bazarr/bazarr-service.yaml new file mode 100644 index 0000000..453cd1a --- /dev/null +++ b/roles/apps/files/bazarr/bazarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: bazarr + name: bazarr +spec: + ports: + - name: "6767" + port: 6767 + targetPort: 6767 + selector: + io.kompose.service: bazarr diff --git a/roles/apps/files/bazarr/docker-compose.yml b/roles/apps/files/bazarr/docker-compose.yml new file mode 100644 index 0000000..dfa3b61 --- /dev/null +++ b/roles/apps/files/bazarr/docker-compose.yml @@ -0,0 +1,16 @@ +--- +services: + bazarr: + image: lscr.io/linuxserver/bazarr:latest + container_name: bazarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/bazarr:/config + - /opt/radarr/movies:/movies #optional + - /opt/sonarr/tv:/tv #optional + ports: + - 6767:6767 + restart: always diff --git a/roles/apps/files/jellyfin/docker-compose.yml b/roles/apps/files/jellyfin/docker-compose.yml new file mode 100644 index 0000000..359d5b3 --- /dev/null +++ b/roles/apps/files/jellyfin/docker-compose.yml @@ -0,0 +1,19 @@ +--- +services: + jellyfin: + image: lscr.io/linuxserver/jellyfin:latest + 
container_name: jellyfin + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/jellyfin:/config + - /opt/sonarr/tv:/data/tvshows + - /opt/radarr/movies:/data/movies + ports: + - 8096:8096 + - 8920:8920 #optional + - 7359:7359/udp #optional + - 1900:1900/udp #optional + restart: always diff --git a/roles/apps/files/jellyfin/jellyfin-claim0-persistentvolumeclaim.yaml b/roles/apps/files/jellyfin/jellyfin-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..c3087a5 --- /dev/null +++ b/roles/apps/files/jellyfin/jellyfin-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: jellyfin-claim0 + name: jellyfin-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/jellyfin/jellyfin-claim1-persistentvolumeclaim.yaml b/roles/apps/files/jellyfin/jellyfin-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..b4ccd45 --- /dev/null +++ b/roles/apps/files/jellyfin/jellyfin-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: jellyfin-claim1 + name: jellyfin-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/jellyfin/jellyfin-claim2-persistentvolumeclaim.yaml b/roles/apps/files/jellyfin/jellyfin-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..195a03a --- /dev/null +++ b/roles/apps/files/jellyfin/jellyfin-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: jellyfin-claim2 + name: jellyfin-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/jellyfin/jellyfin-deployment.yaml b/roles/apps/files/jellyfin/jellyfin-deployment.yaml new file mode 100644 index 
0000000..092867d --- /dev/null +++ b/roles/apps/files/jellyfin/jellyfin-deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: jellyfin + name: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: jellyfin + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: jellyfin + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - arm64 + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/jellyfin:latest + name: jellyfin + ports: + - containerPort: 8096 + protocol: TCP + - containerPort: 8920 + protocol: TCP + - containerPort: 7359 + protocol: UDP + - containerPort: 1900 + protocol: UDP + volumeMounts: + - mountPath: /config + name: jellyfin-claim0 + - mountPath: /data/tvshows + name: jellyfin-claim1 + - mountPath: /data/movies + name: jellyfin-claim2 + restartPolicy: Always + volumes: + - name: jellyfin-claim0 + persistentVolumeClaim: + claimName: jellyfin-claim0 + - name: jellyfin-claim1 + persistentVolumeClaim: + claimName: jellyfin-claim1 + - name: jellyfin-claim2 + persistentVolumeClaim: + claimName: jellyfin-claim2 diff --git a/roles/apps/files/jellyfin/jellyfin-service.yaml b/roles/apps/files/jellyfin/jellyfin-service.yaml new file mode 100644 index 0000000..da6ebbb --- /dev/null +++ b/roles/apps/files/jellyfin/jellyfin-service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: jellyfin + name: jellyfin +spec: + ports: + - name: "8096" + port: 
8096 + targetPort: 8096 + - name: "8920" + port: 8920 + targetPort: 8920 + - name: "7359" + port: 7359 + protocol: UDP + targetPort: 7359 + - name: "1900" + port: 1900 + protocol: UDP + targetPort: 1900 + selector: + io.kompose.service: jellyfin diff --git a/roles/apps/files/lidarr/docker-compose.yml b/roles/apps/files/lidarr/docker-compose.yml new file mode 100644 index 0000000..83e13a6 --- /dev/null +++ b/roles/apps/files/lidarr/docker-compose.yml @@ -0,0 +1,16 @@ +--- +services: + lidarr: + image: lscr.io/linuxserver/lidarr:latest + container_name: lidarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/lidarr:/config + - /opt/lidarr/music:/music #optional + - /opt/downloads:/downloads #optional + ports: + - 8686:8686 + restart: always diff --git a/roles/apps/files/lidarr/lidarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/lidarr/lidarr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..138a6bc --- /dev/null +++ b/roles/apps/files/lidarr/lidarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: lidarr-claim0 + name: lidarr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/lidarr/lidarr-claim1-persistentvolumeclaim.yaml b/roles/apps/files/lidarr/lidarr-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..271b382 --- /dev/null +++ b/roles/apps/files/lidarr/lidarr-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: lidarr-claim1 + name: lidarr-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/lidarr/lidarr-claim2-persistentvolumeclaim.yaml b/roles/apps/files/lidarr/lidarr-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..9178d81 --- /dev/null +++ 
b/roles/apps/files/lidarr/lidarr-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: lidarr-claim2 + name: lidarr-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/lidarr/lidarr-deployment.yaml b/roles/apps/files/lidarr/lidarr-deployment.yaml new file mode 100644 index 0000000..2d2c63e --- /dev/null +++ b/roles/apps/files/lidarr/lidarr-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: lidarr + name: lidarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: lidarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: lidarr + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/lidarr:latest + name: lidarr + ports: + - containerPort: 8686 + protocol: TCP + volumeMounts: + - mountPath: /config + name: lidarr-claim0 + - mountPath: /music + name: lidarr-claim1 + - mountPath: /downloads + name: lidarr-claim2 + restartPolicy: Always + volumes: + - name: lidarr-claim0 + persistentVolumeClaim: + claimName: lidarr-claim0 + - name: lidarr-claim1 + persistentVolumeClaim: + claimName: lidarr-claim1 + - name: lidarr-claim2 + persistentVolumeClaim: + claimName: lidarr-claim2 diff --git a/roles/apps/files/lidarr/lidarr-service.yaml b/roles/apps/files/lidarr/lidarr-service.yaml new file mode 100644 index 0000000..eea3857 --- /dev/null +++ b/roles/apps/files/lidarr/lidarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: 
lidarr + name: lidarr +spec: + ports: + - name: "8686" + port: 8686 + targetPort: 8686 + selector: + io.kompose.service: lidarr diff --git a/roles/apps/files/nzbget/docker-compose.yml b/roles/apps/files/nzbget/docker-compose.yml new file mode 100644 index 0000000..c6f6875 --- /dev/null +++ b/roles/apps/files/nzbget/docker-compose.yml @@ -0,0 +1,17 @@ +--- +services: + nzbget: + image: lscr.io/linuxserver/nzbget:latest + container_name: nzbget + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - NZBGET_USER=nzbget #optional + - NZBGET_PASS=tegbzn6789 #optional + volumes: + - /opt/nzbget:/config + - /opt/downloads:/downloads #optional + ports: + - 6789:6789 + restart: always diff --git a/roles/apps/files/nzbget/nzbget-claim0-persistentvolumeclaim.yaml b/roles/apps/files/nzbget/nzbget-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..1067439 --- /dev/null +++ b/roles/apps/files/nzbget/nzbget-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: nzbget-claim0 + name: nzbget-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/nzbget/nzbget-claim1-persistentvolumeclaim.yaml b/roles/apps/files/nzbget/nzbget-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..1bb7e58 --- /dev/null +++ b/roles/apps/files/nzbget/nzbget-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: nzbget-claim1 + name: nzbget-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/nzbget/nzbget-deployment.yaml b/roles/apps/files/nzbget/nzbget-deployment.yaml new file mode 100644 index 0000000..c64855a --- /dev/null +++ b/roles/apps/files/nzbget/nzbget-deployment.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + 
kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: nzbget + name: nzbget +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: nzbget + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: nzbget + spec: + containers: + - env: + - name: NZBGET_PASS + value: tegbzn6789 + - name: NZBGET_USER + value: nzbget + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/nzbget:latest + name: nzbget + ports: + - containerPort: 6789 + protocol: TCP + volumeMounts: + - mountPath: /config + name: nzbget-claim0 + - mountPath: /downloads + name: nzbget-claim1 + restartPolicy: Always + volumes: + - name: nzbget-claim0 + persistentVolumeClaim: + claimName: nzbget-claim0 + - name: nzbget-claim1 + persistentVolumeClaim: + claimName: nzbget-claim1 diff --git a/roles/apps/files/nzbget/nzbget-service.yaml b/roles/apps/files/nzbget/nzbget-service.yaml new file mode 100644 index 0000000..38512a1 --- /dev/null +++ b/roles/apps/files/nzbget/nzbget-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: nzbget + name: nzbget +spec: + ports: + - name: "6789" + port: 6789 + targetPort: 6789 + selector: + io.kompose.service: nzbget diff --git a/roles/apps/files/overseerr/docker-compose.yml b/roles/apps/files/overseerr/docker-compose.yml new file mode 100644 index 0000000..e7e2b10 --- /dev/null +++ b/roles/apps/files/overseerr/docker-compose.yml @@ -0,0 +1,14 @@ +--- +services: + overseerr: + image: lscr.io/linuxserver/overseerr:latest + container_name: overseerr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/overseerr:/config + ports: + - 5055:5055 + restart: always diff --git 
a/roles/apps/files/overseerr/overseerr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/overseerr/overseerr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..d0dcdfa --- /dev/null +++ b/roles/apps/files/overseerr/overseerr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: overseerr-claim0 + name: overseerr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/overseerr/overseerr-deployment.yaml b/roles/apps/files/overseerr/overseerr-deployment.yaml new file mode 100644 index 0000000..94b1046 --- /dev/null +++ b/roles/apps/files/overseerr/overseerr-deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: overseerr + name: overseerr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: overseerr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: overseerr + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/overseerr:latest + name: overseerr + ports: + - containerPort: 5055 + protocol: TCP + volumeMounts: + - mountPath: /config + name: overseerr-claim0 + restartPolicy: Always + volumes: + - name: overseerr-claim0 + persistentVolumeClaim: + claimName: overseerr-claim0 diff --git a/roles/apps/files/overseerr/overseerr-service.yaml b/roles/apps/files/overseerr/overseerr-service.yaml new file mode 100644 index 0000000..e1f22ee --- /dev/null +++ b/roles/apps/files/overseerr/overseerr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 
(cbf2835db) + labels: + io.kompose.service: overseerr + name: overseerr +spec: + ports: + - name: "5055" + port: 5055 + targetPort: 5055 + selector: + io.kompose.service: overseerr diff --git a/roles/apps/files/plex/docker-compose.yml b/roles/apps/files/plex/docker-compose.yml new file mode 100644 index 0000000..fe2dc02 --- /dev/null +++ b/roles/apps/files/plex/docker-compose.yml @@ -0,0 +1,16 @@ +--- +services: + plex: + image: lscr.io/linuxserver/plex:latest + container_name: plex + network_mode: host + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - VERSION=docker + volumes: + - /opt/plex:/config + - /opt/sonarr/tv:/tv + - /opt/radarr/movies:/movies + restart: always diff --git a/roles/apps/files/plex/plex-claim0-persistentvolumeclaim.yaml b/roles/apps/files/plex/plex-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..2fadbda --- /dev/null +++ b/roles/apps/files/plex/plex-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: plex-claim0 + name: plex-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/plex/plex-claim1-persistentvolumeclaim.yaml b/roles/apps/files/plex/plex-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..3372b6e --- /dev/null +++ b/roles/apps/files/plex/plex-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: plex-claim1 + name: plex-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/plex/plex-claim2-persistentvolumeclaim.yaml b/roles/apps/files/plex/plex-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..1eeb25f --- /dev/null +++ b/roles/apps/files/plex/plex-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + 
io.kompose.service: plex-claim2 + name: plex-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/plex/plex-deployment.yaml b/roles/apps/files/plex/plex-deployment.yaml new file mode 100644 index 0000000..78b93d0 --- /dev/null +++ b/roles/apps/files/plex/plex-deployment.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: plex + name: plex +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: plex + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: plex + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - arm64 + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + - name: VERSION + value: docker + image: lscr.io/linuxserver/plex + name: plex + volumeMounts: + - mountPath: /config + name: plex-claim0 + - mountPath: /tv + name: plex-claim1 + - mountPath: /movies + name: plex-claim2 + restartPolicy: Always + volumes: + - name: plex-claim0 + persistentVolumeClaim: + claimName: plex-claim0 + - name: plex-claim1 + persistentVolumeClaim: + claimName: plex-claim1 + - name: plex-claim2 + persistentVolumeClaim: + claimName: plex-claim2 diff --git a/roles/apps/files/prowlarr/docker-compose.yml b/roles/apps/files/prowlarr/docker-compose.yml new file mode 100644 index 0000000..fcc4a64 --- /dev/null +++ b/roles/apps/files/prowlarr/docker-compose.yml @@ -0,0 +1,14 @@ +--- +services: + prowlarr: + image: lscr.io/linuxserver/prowlarr:latest + container_name: prowlarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/prowlarr:/config + 
ports: + - 9696:9696 + restart: always diff --git a/roles/apps/files/prowlarr/prowlarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/prowlarr/prowlarr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..f21750e --- /dev/null +++ b/roles/apps/files/prowlarr/prowlarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: prowlarr-claim0 + name: prowlarr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/prowlarr/prowlarr-deployment.yaml b/roles/apps/files/prowlarr/prowlarr-deployment.yaml new file mode 100644 index 0000000..38c8a59 --- /dev/null +++ b/roles/apps/files/prowlarr/prowlarr-deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: prowlarr + name: prowlarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: prowlarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: prowlarr + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/prowlarr:latest + name: prowlarr + ports: + - containerPort: 9696 + protocol: TCP + volumeMounts: + - mountPath: /config + name: prowlarr-claim0 + restartPolicy: Always + volumes: + - name: prowlarr-claim0 + persistentVolumeClaim: + claimName: prowlarr-claim0 diff --git a/roles/apps/files/prowlarr/prowlarr-service.yaml b/roles/apps/files/prowlarr/prowlarr-service.yaml new file mode 100644 index 0000000..0e716f7 --- /dev/null +++ b/roles/apps/files/prowlarr/prowlarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + 
kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: prowlarr + name: prowlarr +spec: + ports: + - name: "9696" + port: 9696 + targetPort: 9696 + selector: + io.kompose.service: prowlarr diff --git a/roles/apps/files/radarr/docker-compose.yml b/roles/apps/files/radarr/docker-compose.yml new file mode 100644 index 0000000..0add8f5 --- /dev/null +++ b/roles/apps/files/radarr/docker-compose.yml @@ -0,0 +1,16 @@ +--- +services: + radarr: + image: lscr.io/linuxserver/radarr:latest + container_name: radarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /home/atusa/testing/k3s-test/radarr:/config + # - /path/to/movies:/movies #optional + # - /path/to/download-client-downloads:/downloads #optional + ports: + - 7878:7878 + restart: unless-stopped diff --git a/roles/apps/files/radarr/radarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/radarr/radarr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..9883edc --- /dev/null +++ b/roles/apps/files/radarr/radarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: radarr-claim0 + name: radarr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/radarr/radarr-deployment.yaml b/roles/apps/files/radarr/radarr-deployment.yaml new file mode 100644 index 0000000..665ec1e --- /dev/null +++ b/roles/apps/files/radarr/radarr-deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: radarr + name: radarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: radarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: radarr + spec: + containers: + - env: + - 
name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/radarr:latest + name: radarr + ports: + - containerPort: 7878 + protocol: TCP + volumeMounts: + - mountPath: /config + name: radarr-claim0 + restartPolicy: Always + volumes: + - name: radarr-claim0 + persistentVolumeClaim: + claimName: radarr-claim0 diff --git a/roles/apps/files/radarr/radarr-service.yaml b/roles/apps/files/radarr/radarr-service.yaml new file mode 100644 index 0000000..e162980 --- /dev/null +++ b/roles/apps/files/radarr/radarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: radarr + name: radarr +spec: + ports: + - name: "7878" + port: 7878 + targetPort: 7878 + selector: + io.kompose.service: radarr diff --git a/roles/apps/files/readarr/docker-compose.yml b/roles/apps/files/readarr/docker-compose.yml new file mode 100644 index 0000000..d0d4ebf --- /dev/null +++ b/roles/apps/files/readarr/docker-compose.yml @@ -0,0 +1,17 @@ +--- +version: "2.1" +services: + readarr: + image: lscr.io/linuxserver/readarr:develop + container_name: readarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Europe/London + volumes: + - /opt/readarr:/config + - /opt/readarr/books:/books #optional + - /opt/downloads:/downloads #optional + ports: + - 8787:8787 + restart: always diff --git a/roles/apps/files/readarr/readarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/readarr/readarr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..2e96c99 --- /dev/null +++ b/roles/apps/files/readarr/readarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: readarr-claim0 + name: readarr-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git 
a/roles/apps/files/readarr/readarr-claim1-persistentvolumeclaim.yaml b/roles/apps/files/readarr/readarr-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..9fbc6af --- /dev/null +++ b/roles/apps/files/readarr/readarr-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: readarr-claim1 + name: readarr-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/readarr/readarr-claim2-persistentvolumeclaim.yaml b/roles/apps/files/readarr/readarr-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..16c9641 --- /dev/null +++ b/roles/apps/files/readarr/readarr-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: readarr-claim2 + name: readarr-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/readarr/readarr-deployment.yaml b/roles/apps/files/readarr/readarr-deployment.yaml new file mode 100644 index 0000000..38cd6f8 --- /dev/null +++ b/roles/apps/files/readarr/readarr-deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: readarr + name: readarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: readarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: readarr + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - arm64 + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Europe/London 
+ image: lscr.io/linuxserver/readarr:nightly + name: readarr + ports: + - containerPort: 8787 + protocol: TCP + volumeMounts: + - mountPath: /config + name: readarr-claim0 + - mountPath: /books + name: readarr-claim1 + - mountPath: /downloads + name: readarr-claim2 + restartPolicy: Always + volumes: + - name: readarr-claim0 + persistentVolumeClaim: + claimName: readarr-claim0 + - name: readarr-claim1 + persistentVolumeClaim: + claimName: readarr-claim1 + - name: readarr-claim2 + persistentVolumeClaim: + claimName: readarr-claim2 diff --git a/roles/apps/files/readarr/readarr-service.yaml b/roles/apps/files/readarr/readarr-service.yaml new file mode 100644 index 0000000..492bf71 --- /dev/null +++ b/roles/apps/files/readarr/readarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: readarr + name: readarr +spec: + ports: + - name: "8787" + port: 8787 + targetPort: 8787 + selector: + io.kompose.service: readarr diff --git a/roles/apps/files/sonarr/docker-compose.yml b/roles/apps/files/sonarr/docker-compose.yml new file mode 100644 index 0000000..93edf93 --- /dev/null +++ b/roles/apps/files/sonarr/docker-compose.yml @@ -0,0 +1,16 @@ +--- +services: + sonarr: + image: lscr.io/linuxserver/sonarr:latest + container_name: sonarr + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/sonarr:/config + - /opt/sonarr/tv:/tv #optional + - /opt/downloads:/downloads #optional + ports: + - 8989:8989 + restart: always diff --git a/roles/apps/files/sonarr/sonarr-claim0-persistentvolumeclaim.yaml b/roles/apps/files/sonarr/sonarr-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..be2f37e --- /dev/null +++ b/roles/apps/files/sonarr/sonarr-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: sonarr-claim0 + name: sonarr-claim0 
+spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/sonarr/sonarr-claim1-persistentvolumeclaim.yaml b/roles/apps/files/sonarr/sonarr-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..ae385de --- /dev/null +++ b/roles/apps/files/sonarr/sonarr-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: sonarr-claim1 + name: sonarr-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/sonarr/sonarr-claim2-persistentvolumeclaim.yaml b/roles/apps/files/sonarr/sonarr-claim2-persistentvolumeclaim.yaml new file mode 100644 index 0000000..3500e5b --- /dev/null +++ b/roles/apps/files/sonarr/sonarr-claim2-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: sonarr-claim2 + name: sonarr-claim2 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/sonarr/sonarr-deployment.yaml b/roles/apps/files/sonarr/sonarr-deployment.yaml new file mode 100644 index 0000000..1e65faa --- /dev/null +++ b/roles/apps/files/sonarr/sonarr-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: sonarr + name: sonarr +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: sonarr + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: sonarr + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/sonarr:latest + name: sonarr + ports: + - containerPort: 8989 + protocol: TCP + volumeMounts: 
+ - mountPath: /config + name: sonarr-claim0 + - mountPath: /tv + name: sonarr-claim1 + - mountPath: /downloads + name: sonarr-claim2 + restartPolicy: Always + volumes: + - name: sonarr-claim0 + persistentVolumeClaim: + claimName: sonarr-claim0 + - name: sonarr-claim1 + persistentVolumeClaim: + claimName: sonarr-claim1 + - name: sonarr-claim2 + persistentVolumeClaim: + claimName: sonarr-claim2 diff --git a/roles/apps/files/sonarr/sonarr-service.yaml b/roles/apps/files/sonarr/sonarr-service.yaml new file mode 100644 index 0000000..bbb3554 --- /dev/null +++ b/roles/apps/files/sonarr/sonarr-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: sonarr + name: sonarr +spec: + ports: + - name: "8989" + port: 8989 + targetPort: 8989 + selector: + io.kompose.service: sonarr diff --git a/roles/apps/files/tautulli/docker-compose.yml b/roles/apps/files/tautulli/docker-compose.yml new file mode 100644 index 0000000..e0b1a82 --- /dev/null +++ b/roles/apps/files/tautulli/docker-compose.yml @@ -0,0 +1,14 @@ +--- +services: + tautulli: + image: lscr.io/linuxserver/tautulli:latest + container_name: tautulli + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + volumes: + - /opt/tautulli:/config + ports: + - 8181:8181 + restart: always diff --git a/roles/apps/files/tautulli/tautulli-claim0-persistentvolumeclaim.yaml b/roles/apps/files/tautulli/tautulli-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..d378429 --- /dev/null +++ b/roles/apps/files/tautulli/tautulli-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: tautulli-claim0 + name: tautulli-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/tautulli/tautulli-deployment.yaml 
b/roles/apps/files/tautulli/tautulli-deployment.yaml new file mode 100644 index 0000000..d6fbe8c --- /dev/null +++ b/roles/apps/files/tautulli/tautulli-deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: tautulli + name: tautulli +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: tautulli + strategy: + type: Recreate + template: + metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: tautulli + spec: + containers: + - env: + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TZ + value: Etc/UTC + image: lscr.io/linuxserver/tautulli:latest + name: tautulli + ports: + - containerPort: 8181 + protocol: TCP + volumeMounts: + - mountPath: /config + name: tautulli-claim0 + restartPolicy: Always + volumes: + - name: tautulli-claim0 + persistentVolumeClaim: + claimName: tautulli-claim0 diff --git a/roles/apps/files/tautulli/tautulli-service.yaml b/roles/apps/files/tautulli/tautulli-service.yaml new file mode 100644 index 0000000..9607ba0 --- /dev/null +++ b/roles/apps/files/tautulli/tautulli-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: tautulli + name: tautulli +spec: + ports: + - name: "8181" + port: 8181 + targetPort: 8181 + selector: + io.kompose.service: tautulli diff --git a/roles/apps/files/transmission/docker-compose.yml b/roles/apps/files/transmission/docker-compose.yml new file mode 100644 index 0000000..90fc380 --- /dev/null +++ b/roles/apps/files/transmission/docker-compose.yml @@ -0,0 +1,23 @@ +--- +services: + transmission: + image: lscr.io/linuxserver/transmission:latest + container_name: transmission + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - 
TRANSMISSION_WEB_HOME= #optional + - USER= #optional + - PASS= #optional + - WHITELIST= #optional + - PEERPORT= #optional + - HOST_WHITELIST= #optional + volumes: + - /opt/transmission:/config + - /opt/downloads:/downloads #optional + ports: + - 9091:9091 + - 51413:51413 + - 51413:51413/udp + restart: always diff --git a/roles/apps/files/transmission/transmission-claim0-persistentvolumeclaim.yaml b/roles/apps/files/transmission/transmission-claim0-persistentvolumeclaim.yaml new file mode 100644 index 0000000..2a137ce --- /dev/null +++ b/roles/apps/files/transmission/transmission-claim0-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: transmission-claim0 + name: transmission-claim0 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/transmission/transmission-claim1-persistentvolumeclaim.yaml b/roles/apps/files/transmission/transmission-claim1-persistentvolumeclaim.yaml new file mode 100644 index 0000000..575adbd --- /dev/null +++ b/roles/apps/files/transmission/transmission-claim1-persistentvolumeclaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + io.kompose.service: transmission-claim1 + name: transmission-claim1 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi diff --git a/roles/apps/files/transmission/transmission-deployment.yaml b/roles/apps/files/transmission/transmission-deployment.yaml new file mode 100644 index 0000000..1fe6134 --- /dev/null +++ b/roles/apps/files/transmission/transmission-deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: transmission + name: transmission +spec: + replicas: 1 + selector: + matchLabels: + io.kompose.service: transmission + strategy: + type: Recreate + template: + 
metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: transmission + spec: + containers: + - env: + - name: HOST_WHITELIST + - name: PASS + - name: PEERPORT + - name: PGID + value: "1000" + - name: PUID + value: "1000" + - name: TRANSMISSION_WEB_HOME + - name: TZ + value: Etc/UTC + - name: USER + - name: WHITELIST + image: lscr.io/linuxserver/transmission:latest + name: transmission + ports: + - containerPort: 9091 + protocol: TCP + - containerPort: 51413 + protocol: TCP + - containerPort: 51413 + protocol: UDP + volumeMounts: + - mountPath: /config + name: transmission-claim0 + - mountPath: /downloads + name: transmission-claim1 + restartPolicy: Always + volumes: + - name: transmission-claim0 + persistentVolumeClaim: + claimName: transmission-claim0 + - name: transmission-claim1 + persistentVolumeClaim: + claimName: transmission-claim1 diff --git a/roles/apps/files/transmission/transmission-service.yaml b/roles/apps/files/transmission/transmission-service.yaml new file mode 100644 index 0000000..ae2a467 --- /dev/null +++ b/roles/apps/files/transmission/transmission-service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + kompose.cmd: kompose convert + kompose.version: 1.34.0 (cbf2835db) + labels: + io.kompose.service: transmission + name: transmission +spec: + ports: + - name: "9091" + port: 9091 + targetPort: 9091 + - name: "51413" + port: 51413 + targetPort: 51413 + - name: 51413-udp + port: 51413 + protocol: UDP + targetPort: 51413 + selector: + io.kompose.service: transmission diff --git a/roles/apps/tasks/bazarr.yml b/roles/apps/tasks/bazarr.yml new file mode 100644 index 0000000..3b61db7 --- /dev/null +++ b/roles/apps/tasks/bazarr.yml @@ -0,0 +1,34 @@ +- name: Create Bazarr PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 
'bazarr/bazarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Bazarr PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'bazarr/bazarr-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Bazarr PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'bazarr/bazarr-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Bazarr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'bazarr/bazarr-deployment.yaml') | from_yaml }}" + +- name: Create Bazarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'bazarr/bazarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/jellyfin.yml b/roles/apps/tasks/jellyfin.yml new file mode 100644 index 0000000..064be68 --- /dev/null +++ b/roles/apps/tasks/jellyfin.yml @@ -0,0 +1,34 @@ +- name: Create Jellyfin PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'jellyfin/jellyfin-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Jellyfin PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'jellyfin/jellyfin-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Jellyfin PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'jellyfin/jellyfin-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Jellyfin deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default
+ definition: "{{ lookup('file', 'jellyfin/jellyfin-deployment.yaml') | from_yaml }}" + +- name: Create Jellyfin service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'jellyfin/jellyfin-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/lidarr.yml b/roles/apps/tasks/lidarr.yml new file mode 100644 index 0000000..277d84f --- /dev/null +++ b/roles/apps/tasks/lidarr.yml @@ -0,0 +1,34 @@ +- name: Create Lidarr PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'lidarr/lidarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Lidarr PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'lidarr/lidarr-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Lidarr PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'lidarr/lidarr-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Lidarr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'lidarr/lidarr-deployment.yaml') | from_yaml }}" + +- name: Create Lidarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'lidarr/lidarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/main.yml b/roles/apps/tasks/main.yml new file mode 100644 index 0000000..e97ff63 --- /dev/null +++ b/roles/apps/tasks/main.yml @@ -0,0 +1,24 @@ +- import_tasks: bazarr.yml + tags: bazarr +- import_tasks: jellyfin.yml + tags: jellyfin +- import_tasks: lidarr.yml + tags: lidarr +- import_tasks: nzbget.yml + tags: nzbget +- import_tasks: overseerr.yml + tags:
overseerr +- import_tasks: plex.yml + tags: plex +- import_tasks: prowlarr.yml + tags: prowlarr +- import_tasks: radarr.yml + tags: radarr +- import_tasks: readarr.yml + tags: readarr +- import_tasks: sonarr.yml + tags: sonarr +- import_tasks: tautulli.yml + tags: tautulli +- import_tasks: transmission.yml + tags: transmission diff --git a/roles/apps/tasks/nzbget.yml b/roles/apps/tasks/nzbget.yml new file mode 100644 index 0000000..531f451 --- /dev/null +++ b/roles/apps/tasks/nzbget.yml @@ -0,0 +1,27 @@ +- name: Create NZBGet PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'nzbget/nzbget-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create NZBGet PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'nzbget/nzbget-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create NZBGet deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'nzbget/nzbget-deployment.yaml') | from_yaml }}" + +- name: Create NZBGet service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'nzbget/nzbget-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/overseerr.yml b/roles/apps/tasks/overseerr.yml new file mode 100644 index 0000000..f3eadfe --- /dev/null +++ b/roles/apps/tasks/overseerr.yml @@ -0,0 +1,20 @@ +- name: Create Overseerr PVC + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'overseerr/overseerr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Overseerr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ 
lookup('file', 'overseerr/overseerr-deployment.yaml') | from_yaml }}" + +- name: Create Overseerr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'overseerr/overseerr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/plex.yml b/roles/apps/tasks/plex.yml new file mode 100644 index 0000000..e9a81b5 --- /dev/null +++ b/roles/apps/tasks/plex.yml @@ -0,0 +1,27 @@ +- name: Create Plex PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'plex/plex-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Plex PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'plex/plex-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Plex PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'plex/plex-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Plex deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'plex/plex-deployment.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/prowlarr.yml b/roles/apps/tasks/prowlarr.yml new file mode 100644 index 0000000..306b62e --- /dev/null +++ b/roles/apps/tasks/prowlarr.yml @@ -0,0 +1,20 @@ +- name: Create Prowlarr PVC + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'prowlarr/prowlarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Prowlarr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'prowlarr/prowlarr-deployment.yaml') | from_yaml }}" 
+ +- name: Create Prowlarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'prowlarr/prowlarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/radarr.yml b/roles/apps/tasks/radarr.yml new file mode 100644 index 0000000..e0b2fe1 --- /dev/null +++ b/roles/apps/tasks/radarr.yml @@ -0,0 +1,20 @@ +- name: Create Radarr PVC + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'radarr/radarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Radarr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'radarr/radarr-deployment.yaml') | from_yaml }}" + +- name: Create Radarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'radarr/radarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/readarr.yml b/roles/apps/tasks/readarr.yml new file mode 100644 index 0000000..3e5a6a7 --- /dev/null +++ b/roles/apps/tasks/readarr.yml @@ -0,0 +1,34 @@ +- name: Create Readarr PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'readarr/readarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Readarr PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'readarr/readarr-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Readarr PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'readarr/readarr-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Readarr deployment + 
kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'readarr/readarr-deployment.yaml') | from_yaml }}" + +- name: Create Readarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'readarr/readarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/sonarr.yml b/roles/apps/tasks/sonarr.yml new file mode 100644 index 0000000..2026510 --- /dev/null +++ b/roles/apps/tasks/sonarr.yml @@ -0,0 +1,34 @@ +- name: Create Sonarr PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'sonarr/sonarr-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Sonarr PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'sonarr/sonarr-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Sonarr PVC 3 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'sonarr/sonarr-claim2-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Sonarr deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'sonarr/sonarr-deployment.yaml') | from_yaml }}" + +- name: Create Sonarr service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'sonarr/sonarr-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/tautulli.yml b/roles/apps/tasks/tautulli.yml new file mode 100644 index 0000000..8986f2e --- /dev/null +++ b/roles/apps/tasks/tautulli.yml @@ -0,0 +1,20 @@ +- name: Create Tautulli PVC + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present 
+ namespace: default + definition: "{{ lookup('file', 'tautulli/tautulli-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Tautulli deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'tautulli/tautulli-deployment.yaml') | from_yaml }}" + +- name: Create Tautulli service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'tautulli/tautulli-service.yaml') | from_yaml }}" diff --git a/roles/apps/tasks/transmission.yml b/roles/apps/tasks/transmission.yml new file mode 100644 index 0000000..63a4f21 --- /dev/null +++ b/roles/apps/tasks/transmission.yml @@ -0,0 +1,27 @@ +- name: Create Transmission PVC 1 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'transmission/transmission-claim0-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Transmission PVC 2 + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'transmission/transmission-claim1-persistentvolumeclaim.yaml') | from_yaml }}" + +- name: Create Transmission deployment + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'transmission/transmission-deployment.yaml') | from_yaml }}" + +- name: Create Transmission service + kubernetes.core.k8s: + kubeconfig: /etc/rancher/k3s/k3s.yaml + state: present + namespace: default + definition: "{{ lookup('file', 'transmission/transmission-service.yaml') | from_yaml }}" diff --git a/roles/k3s/files/10-flannel.conf b/roles/k3s/files/10-flannel.conf new file mode 100644 index 0000000..35c9166 --- /dev/null +++ b/roles/k3s/files/10-flannel.conf @@ -0,0 +1,10 @@ +{ + "name": "k8s-flannel.network", + "type": "flannel", + "delegate": { + 
"hairpin-mode": true, + "force-address": "10.42.0.0/16", + "master-plugin": true + }, + "isDefaultGateway": true +} diff --git a/roles/k3s/files/ingress-controller-load-balancer.yaml b/roles/k3s/files/ingress-controller-load-balancer.yaml new file mode 100644 index 0000000..b93bb01 --- /dev/null +++ b/roles/k3s/files/ingress-controller-load-balancer.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx-controller-loadbalancer + namespace: ingress-nginx +spec: + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + type: LoadBalancer diff --git a/roles/k3s/files/value.yaml b/roles/k3s/files/value.yaml new file mode 100644 index 0000000..cfb80b8 --- /dev/null +++ b/roles/k3s/files/value.yaml @@ -0,0 +1,4 @@ +csi: + kubeletRootDir: "/var/lib/kubelet" +defaultSettings: + diskType: "filesystem" diff --git a/roles/k3s/tasks/main.yml b/roles/k3s/tasks/main.yml new file mode 100644 index 0000000..7bf8b7c --- /dev/null +++ b/roles/k3s/tasks/main.yml @@ -0,0 +1,234 @@ +- name: Configure local prerequisites + block: + - name: Add Kubernetes' public signing key + apt_key: + url: https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key + keyring: /usr/share/keyrings/kubernetes-apt-keyring.gpg + delegate_to: 127.0.0.1 + become: true + + - name: Set up apt repositories + apt_repository: + repo: "deb [signed-by=/usr/share/keyrings/kubernetes-apt-keyring.gpg trusted=yes] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /" + delegate_to: 127.0.0.1 + become: true + + - name: Install the local CLI tools for Kubernetes + apt: + update_cache: yes + name: + - kubectl + - helm + - python3-kubernetes + delegate_to: 127.0.0.1 + become: true + +- name: Install Docker + apt: + update_cache: yes + name: docker.io + +- name: Install btop for process 
monitoring + apt: + update_cache: yes + name: btop + +- name: Install python3-kubernetes + apt: + update_cache: yes + name: python3-kubernetes + +- name: Check if cmdline has been updated + shell: + cmd: cat /boot/firmware/cmdline.txt | grep -q cgroup_enable + changed_when: no + ignore_errors: true + register: cmdline_status + +- name: Enable the required container features + shell: + cmd: sudo sed -i '$ s/$/ cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1/' /boot/firmware/cmdline.txt + when: cmdline_status.rc != 0 + +- name: Disable swap + shell: + cmd: swapoff -a + +- name: Permanently disable swap memory + replace: + path: /etc/dphys-swapfile + regexp: '^CONF_SWAPSIZE=\d+' + replace: "CONF_SWAPSIZE=0" + +- name: Reboot system + reboot: + reboot_timeout: 3600 + when: cmdline_status.rc != 0 + +- name: Setup K3s on the primary node + block: + - name: Create the /opt/k3s directory + file: + path: /opt/k3s + state: directory + + - name: Download and install K3s + shell: + cmd: curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable=traefik --flannel-backend=host-gw --tls-san={{ k3s_primary_node_ip }} --bind-address={{ k3s_primary_node_ip }} --advertise-address={{ k3s_primary_node_ip }} --node-ip={{ k3s_primary_node_ip }} --cluster-init" sh -s - + + - name: Fetch the access token + shell: + cmd: cat /var/lib/rancher/k3s/server/node-token + register: cluster_token + + - name: Install the nginx Ingress controller + shell: + cmd: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.11.3/deploy/static/provider/baremetal/deploy.yaml + + - name: Copy over the ingress-controller-load-balancer service + copy: + src: ingress-controller-load-balancer.yaml + dest: /opt/k3s/ingress-controller-load-balancer.yaml + + - name: Create the load balancer + shell: + cmd: kubectl apply -f /opt/k3s/ingress-controller-load-balancer.yaml + + - name: Fetch the kubeconfig for the user + shell: + cmd: cat /etc/rancher/k3s/k3s.yaml + 
register: kube_config + + - name: Encrypt this cluster token and add it to the group vars for kube + debug: + msg: "{{ cluster_token.stdout }}" + + - name: Downloading the k3s config file + fetch: + src: /etc/rancher/k3s/k3s.yaml + dest: "/home/{{ user_name }}/.kube/config" + flat: true + group: "{{ user_name }}" + owner: "{{ user_name }}" + + - name: Updating the k3s config file + replace: + path: /etc/rancher/k3s/k3s.yaml + regexp: "127.0.0.1" + replace: "{{ k3s_primary_node_ip }}" + + - name: Add Helm repository for the kubernetes-dashboard + kubernetes.core.helm_repository: + name: kubernetes-dashboard + repo_url: https://kubernetes.github.io/dashboard/ + delegate_to: 127.0.0.1 + + - name: Install the Kubernetes Dashboard + kubernetes.core.helm: + name: kubernetes-dashboard + chart_ref: kubernetes-dashboard/kubernetes-dashboard + release_namespace: kubernetes-dashboard + create_namespace: true + kubeconfig: "/home/{{ user_name }}/.kube/config" + delegate_to: 127.0.0.1 + + # - name: Add Helm repository for NFS provisioner + # kubernetes.core.helm_repository: + # name: nfs-subdir-external-provisioner + # repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ + # delegate_to: 127.0.0.1 + + # - name: Install NFS subdir external provisioner + # kubernetes.core.helm: + # name: nfs + # chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner + # release_namespace: kube-system + # create_namespace: true + # values: + # nfs: + # server: "{{ nfs_server_ip }}" + # path: "/volume1/nas" + # kubeconfig: "/home/{{ user_name }}/.kube/config" + # delegate_to: 127.0.0.1 + + # - name: Install the longhorn block storage system iscsi + # shell: + # cmd: "kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v{{ longhorn_version }}/deploy/prerequisite/longhorn-iscsi-installation.yaml" + + # - name: Install the longhorn block storage system NFSv4 + # shell: + # cmd: "kubectl apply -f 
https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/deploy/prerequisite/longhorn-nfs-installation.yaml" + + # - name: Add Helm repository for longhorn + # kubernetes.core.helm_repository: + # name: longhorn + # repo_url: https://charts.longhorn.io + # delegate_to: 127.0.0.1 + + # - name: Install Longhorn using Helm + # community.kubernetes.helm: + # name: longhorn + # chart_ref: longhorn/longhorn + # values_file: "{{ role_path }}/files/value.yaml" + # namespace: longhorn-system + # create_namespace: true + # state: present + + # - name: Create PersistentVolume for full NFS access + # kubernetes.core.k8s: + # state: present + # kubeconfig: /etc/rancher/k3s/k3s.yaml + # definition: + # apiVersion: v1 + # kind: PersistentVolume + # metadata: + # name: nfs-pv + # namespace: default + # spec: + # capacity: + # storage: 100Gi + # accessModes: + # - ReadWriteMany + # storageClassName: nfs + # nfs: + # path: /volume1/nas + # server: "{{ nfs_server_ip }}" + # persistentVolumeReclaimPolicy: Retain + + # - name: Create PersistentVolumeClaim for NFS + # kubernetes.core.k8s: + # state: present + # kubeconfig: /etc/rancher/k3s/k3s.yaml + # definition: + # apiVersion: v1 + # kind: PersistentVolumeClaim + # metadata: + # name: nfs-pvc + # namespace: default + # spec: + # accessModes: + # - ReadWriteMany + # storageClassName: nfs + # resources: + # requests: + # storage: 100Gi + # volumeName: nfs-pv + + when: "'kube-primary' in group_names and k3s_cluster_token is not defined" + +- name: Setup K3s on the follower node + shell: + cmd: curl -sfL https://get.k3s.io | K3S_URL=https://"{{ k3s_primary_node_ip }}":6443 K3S_TOKEN="{{ k3s_cluster_token }}" sh - + when: "'kube-primary' not in group_names and k3s_cluster_token is defined" + +- name: Check to see if Flannel is properly configured + stat: + path: /etc/cni/net.d/10-flannel.conf + changed_when: no + register: flannel_config_file + +- name: Copy over the flannel config if it is not properly configured + copy: + src: 
10-flannel.conf + dest: /etc/cni/net.d/10-flannel.conf + when: not flannel_config_file.stat.exists diff --git a/setup-k3s.yml b/setup-k3s.yml new file mode 100644 index 0000000..4da5811 --- /dev/null +++ b/setup-k3s.yml @@ -0,0 +1,13 @@ +- name: Setup a new K3s cluster + hosts: all + gather_facts: yes + become: yes + roles: + - { role: k3s, tags: k3s-cluster } + +- name: Deploy applications into Kubernetes cluster + hosts: kube-primary + gather_facts: yes + become: yes + roles: + - { role: apps, tags: apps }