move the rest and replace

Nicholas St. Germain 2020-09-05 21:58:48 -05:00
parent 3070528d2f
commit b94814d3d7
No known key found for this signature in database
GPG Key ID: 7221152119DAB1E6
59 changed files with 90 additions and 2476 deletions


@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS


@ -1,17 +1,20 @@
 apiVersion: v2
-appVersion: 0.7.1.1784
-description: Looks and smells like Sonarr but made for music.
 name: lidarr
-version: 3.0.1
+description: Lidarr Chart
+type: application
+version: 4.0.0
+appVersion: 0.7.1.1785
 keywords:
 - lidarr
-- usenet
-- bittorrent
 home: https://github.com/k8s-at-home/charts/tree/master/charts/lidarr
-icon: https://lidarr.audio/img/logo.png
 sources:
-- https://hub.docker.com/r/linuxserver/lidarr/
-- https://github.com/lidarr/Lidarr/
+- https://github.com/Lidarr/Lidarr
+- https://hub.docker.com/r/linuxserver/lidarr
 maintainers:
-- name: billimek
-  email: jeff@billimek.com
+- name: DirtyCajunRice
+  email: nick@cajun.pro
+dependencies:
+- name: media-common
+  repository: https://k8s-at-home.com/charts/
+  version: ~1.0.0
+  alias: lidarr


@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek


@ -1,115 +0,0 @@
# lidarr music download client
This is a Helm chart for [lidarr](https://github.com/lidarr/Lidarr), leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/lidarr
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release k8s-at-home/lidarr
```
## Upgrading
Chart versions 1.0.1 and earlier used separate PVCs for Downloads and Music. This presented an issue where Lidarr would be unable to hard-link files between the /downloads and /music directories when importing media, because each PVC is exposed to the pod as a separate filesystem. This resulted in Lidarr copying files rather than linking them, consuming additional storage without the user's knowledge.
This chart now uses a single PVC for Downloads and Music. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following (a sample values layout is sketched after the steps):
1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, e.g. media/music, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Lidarr's `Mass Editor` under the `Library` tab. Simply select all artists in your library, and use the editor to change the `Root Folder` and hit save.
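For illustration, the single-PVC layout might be expressed in `values.yaml` along these lines (a sketch only; `media-ssd` is a placeholder for your pre-created claim holding both the music and downloads directories):
```yaml
persistence:
  config:
    enabled: true
    size: 1Gi
  media:
    enabled: true
    existingClaim: media-ssd  # placeholder: single PVC containing e.g. media/music and media/downloads
    # subPath: media          # optional: mount a sub-directory of the claim instead of its root
```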
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the lidarr chart and their default values.
| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image.repository` | Image repository | `linuxserver/lidarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/lidarr/tags/).| `0.7.1.1381-ls7`|
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the lidarr instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the lidarr instance should run as | `1001` |
| `pgid` | process groupID the lidarr instance should run as | `1001` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the lidarr GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the lidarr GUI is exposed | `8686` |
| `service.annotations` | Service annotations for the lidarr GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Load balancer IP for the lidarr GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}`
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.media.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
| `persistence.media.existingClaim`| Use an existing PVC to persist data | `nil` |
| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install --name my-release \
--set timezone="America/New_York" \
k8s-at-home/lidarr
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/lidarr
```
---
**NOTE**
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled. In that case, manually delete the PVC or adopt it with `existingClaim`.
---
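For instance, if the config PVC was kept because `skipuninstall` was enabled, a re-install can adopt it instead of provisioning a new one (a sketch; the claim name below assumes the previous release was called `my-release`):
```yaml
persistence:
  config:
    enabled: true
    existingClaim: my-release-lidarr-config  # assumed name of the PVC left behind by the previous release
```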
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/lidarr/values.yaml) file. It has several commented out suggested values.


@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "lidarr.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "lidarr.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "lidarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "lidarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}


@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "lidarr.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "lidarr.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "lidarr.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "lidarr.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.config.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@ -1,110 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "lidarr.fullname" . }}
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8686
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
env:
- name: TZ
value: "{{ .Values.timezone }}"
- name: PUID
value: "{{ .Values.puid }}"
- name: PGID
value: "{{ .Values.pgid }}"
volumeMounts:
- mountPath: /config
name: config
- mountPath: /media
name: media
{{- if .Values.persistence.media.subPath }}
subPath: {{ .Values.persistence.media.subPath }}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: config
{{- if .Values.persistence.config.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-config{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
- name: media
{{- if .Values.persistence.media.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-media{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}


@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "lidarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.labels -}}
{{ toYaml . | nindent 4 }}
{{- end -}}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}


@ -1,29 +0,0 @@
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "lidarr.fullname" . }}-media
{{- if .Values.persistence.media.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.media.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.media.size | quote }}
{{- if .Values.persistence.media.storageClass }}
{{- if (eq "-" .Values.persistence.media.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.media.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "lidarr.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
helm.sh/chart: {{ include "lidarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "lidarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}


@ -1,132 +1,10 @@
 # Default values for lidarr.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-image:
-  repository: linuxserver/lidarr
-  tag: 0.7.1.1784-ls18
-  pullPolicy: IfNotPresent
+lidarr:
+  image:
+    organization: linuxserver
+    repository: lidarr
+    pullPolicy: IfNotPresent
+    tag: ""
+  service:
+    port: 8686
-# upgrade strategy type (e.g. Recreate or RollingUpdate)
-strategyType: Recreate
-# Probes configuration
-probes:
-  liveness:
-    initialDelaySeconds: 60
-    failureThreshold: 5
-    timeoutSeconds: 10
-  readiness:
-    initialDelaySeconds: 60
-    failureThreshold: 5
-    timeoutSeconds: 10
-nameOverride: ""
-fullnameOverride: ""
-timezone: UTC
-puid: 1001
-pgid: 1001
-service:
-  type: ClusterIP
-  port: 8686
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
config:
enabled: true
## lidarr configuration data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
media:
enabled: true
## lidarr media volume configuration
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
# subPath: some-subpath
accessMode: ReadWriteOnce
size: 10Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts: []
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}


@ -1,20 +0,0 @@
apiVersion: v2
name: lidarr
description: Lidarr Chart
type: application
version: 1.0.0
appVersion: 0.7.1.1785
keywords:
- lidarr
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/lidarr
sources:
- https://github.com/Lidarr/Lidarr
- https://hub.docker.com/r/itscontained/lidarr
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
dependencies:
- name: media-common
repository: https://k8s-at-home.com/charts/
version: 1.0.0
alias: lidarr


@ -1,10 +0,0 @@
# Default values for lidarr.
lidarr:
image:
organization: linuxserver
repository: lidarr
pullPolicy: IfNotPresent
tag: ""
service:
port: 8686


@ -1,20 +0,0 @@
apiVersion: v2
name: ombi
description: Ombi Chart
type: application
version: 1.0.0
appVersion: 4.0.471
keywords:
- ombi
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/ombi
sources:
- https://github.com/tidusjar/Ombi
- https://hub.docker.com/r/itscontained/ombi
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
dependencies:
- name: media-common
repository: https://k8s-at-home.com/charts/
version: 1.0.0
alias: ombi


@ -1,11 +0,0 @@
# Default values for ombi.
ombi:
image:
organization: itscontained
repository: ombi
pullPolicy: IfNotPresent
tag: ""
service:
port: 5000
configPath: /var/lib/ombi


@ -1,20 +0,0 @@
apiVersion: v2
name: sonarr
description: Sonarr Chart
type: application
version: 1.0.0
appVersion: 3.0.3.913
keywords:
- sonarr
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/sonarr
sources:
- https://github.com/Sonarr/Sonarr
- https://hub.docker.com/r/itscontained/sonarr
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
dependencies:
- name: media-common
repository: https://k8s-at-home.com/charts/
version: 1.0.0
alias: sonarr


@ -1,11 +0,0 @@
# Default values for sonarr.
sonarr:
image:
organization: itscontained
repository: sonarr
pullPolicy: IfNotPresent
tag: ""
service:
port: 8989
configPath: /var/lib/radarr


@ -1,20 +0,0 @@
apiVersion: v2
name: tautulli
description: Tautulli Chart
type: application
version: 1.0.0
appVersion: v2.5.4
keywords:
- tautulli
home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/tautulli
sources:
- https://github.com/Tautulli/Tautulli
- https://hub.docker.com/r/tautulli/tautulli
maintainers:
- name: DirtyCajunRice
email: nick@cajun.pro
dependencies:
- name: media-common
repository: https://k8s-at-home.com/charts/
version: 1.0.0
alias: tautulli


@ -1,10 +0,0 @@
# Default values for tautulli.
tautulli:
image:
organization: tautulli
repository: tautulli
pullPolicy: IfNotPresent
tag: ""
service:
port: 8181


@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS


@ -1,16 +1,20 @@
 apiVersion: v2
-appVersion: v4.0.464
-description: Want a Movie or TV Show on Plex or Emby? Use Ombi!
 name: ombi
-version: 3.0.1
+description: Ombi Chart
+type: application
+version: 4.0.0
+appVersion: 4.0.471
 keywords:
 - ombi
-- plex
 home: https://github.com/k8s-at-home/charts/tree/master/charts/ombi
-icon: https://ombi.io/img/logo-orange-small.png
 sources:
-- https://hub.docker.com/r/linuxserver/ombi/
-- https://ombi.io/
+- https://github.com/tidusjar/Ombi
+- https://hub.docker.com/r/itscontained/ombi
 maintainers:
-- name: billimek
-  email: jeff@billimek.com
+- name: DirtyCajunRice
+  email: nick@cajun.pro
+dependencies:
+- name: media-common
+  repository: https://k8s-at-home.com/charts/
+  version: ~1.0.0
+  alias: ombi


@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek


@ -1,97 +0,0 @@
# Ombi
This is a Helm chart for [Ombi](https://ombi.io/), leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/ombi/).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/ombi
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release k8s-at-home/ombi
```
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Ombi chart and their default values.
| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image.repository` | Image repository | `linuxserver/ombi` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/ombi/tags/).| `3.0.4914-ls72`|
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the Ombi instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the Ombi instance should run as | `1001` |
| `pgid` | process groupID the Ombi instance should run as | `1001` |
| `baseUrl` | Adjust the base URL when running behind a reverse proxy | `nil` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the Ombi GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the Ombi GUI is exposed | `3579` |
| `service.annotations` | Service annotations for the Ombi GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Load balancer IP for the Ombi GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}`
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install --name my-release \
--set timezone="America/New_York" \
k8s-at-home/ombi
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/ombi
```
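For example, to run Ombi under a sub-path behind a reverse proxy, such a values file could set `baseUrl` (a sketch; `/ombi` is only an illustrative path):
```yaml
timezone: America/New_York
baseUrl: /ombi  # exposed to the container as BASE_URL; the in-app base URL setting is then ignored
```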
---
**NOTE**
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled. In that case, manually delete the PVC or reuse it via `existingClaim`.
---
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/ombi/values.yaml) file. It has several commented out suggested values.


@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ombi.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "ombi.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ombi.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ombi.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:3579 to use your application"
kubectl port-forward $POD_NAME 3579:80
{{- end }}


@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "ombi.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ombi.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "ombi.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "ombi.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
helm.sh/chart: {{ include "ombi.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.config.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@ -1,95 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "ombi.fullname" . }}
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
helm.sh/chart: {{ include "ombi.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 3579
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
env:
- name: TZ
value: "{{ .Values.timezone }}"
- name: PUID
value: "{{ .Values.puid }}"
- name: PGID
value: "{{ .Values.pgid }}"
{{- if .Values.baseUrl }}
- name: BASE_URL
value: "{{ .Values.baseUrl }}"
{{ end }}
volumeMounts:
- mountPath: /config
name: config
{{- if .Values.persistence.config.subPath }}
subPath: "{{ .Values.persistence.config.subPath }}"
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: config
{{- if .Values.persistence.config.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "ombi.fullname" . }}-config{{- end }}
{{- else }}
emptyDir: {}
{{ end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}


@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "ombi.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
helm.sh/chart: {{ include "ombi.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.labels -}}
{{ toYaml . | nindent 4 }}
{{- end -}}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}


@ -1,53 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "ombi.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "ombi.name" . }}
helm.sh/chart: {{ include "ombi.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "ombi.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}


@ -1,117 +1,11 @@
-# Default values for Ombi.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-image:
-  repository: linuxserver/ombi
-  tag: v4.0.464-ls10
-  pullPolicy: IfNotPresent
+# Default values for ombi.
+ombi:
+  image:
+    organization: itscontained
+    repository: ombi
+    pullPolicy: IfNotPresent
+    tag: ""
+  service:
+    port: 5000
+  configPath: /var/lib/ombi
-# upgrade strategy type (e.g. Recreate or RollingUpdate)
-strategyType: Recreate
# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
nameOverride: ""
fullnameOverride: ""
timezone: UTC
puid: 1001
pgid: 1001
# Subfolder can optionally be defined as an env variable for reverse proxies. Keep
# in mind that once this value is defined, the gui setting for base url no longer
# works. To use the gui setting, remove this env variable.
#
# baseUrl: /ombi
service:
type: ClusterIP
port: 3579
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
config:
enabled: true
## Ombi configuration data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}


@ -6,15 +6,15 @@ version: 1.0.0
 appVersion: latest
 keywords:
 - organizr
-home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/organizr
+home: https://github.com/k8s-at-home/charts/tree/master/charts/organizr
 sources:
 - https://github.com/causefx/Organizr
-- https://hub.docker.com/r/itscontained/ombi
+- https://hub.docker.com/r/organizr/organizr
 maintainers:
 - name: DirtyCajunRice
   email: nick@cajun.pro
 dependencies:
 - name: media-common
   repository: https://k8s-at-home.com/charts/
-  version: 1.0.0
+  version: ~1.0.0
   alias: organizr


@ -16,5 +16,5 @@ maintainers:
 dependencies:
 - name: media-common
   repository: https://k8s-at-home.com/charts/
-  version: 1.0.0
+  version: ~1.0.0
   alias: radarr


@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS


@ -1,17 +1,20 @@
 apiVersion: v2
-appVersion: 3.0.3.911
-description: Sonarr is a television show downloading client
 name: sonarr
-version: 5.0.1
+description: Sonarr Chart
+type: application
+version: 6.0.0
+appVersion: 3.0.3.913
 keywords:
 - sonarr
-- usenet
-- bittorrent
-home: https://github.com/k8s-at-home/charts/tree/master/charts/sonarr
-icon: https://avatars1.githubusercontent.com/u/1082903?s=400&v=4
+home: https://github.com/k8s-at-home/charts/tree/master/charts/media-common/sonarr
 sources:
-- https://hub.docker.com/r/linuxserver/sonarr/
-- https://sonarr.tv/
+- https://github.com/Sonarr/Sonarr
+- https://hub.docker.com/r/itscontained/sonarr
 maintainers:
-- name: billimek
-  email: jeff@billimek.com
+- name: DirtyCajunRice
+  email: nick@cajun.pro
+dependencies:
+- name: media-common
+  repository: https://k8s-at-home.com/charts/
+  version: ~1.0.0
+  alias: sonarr


@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek


@ -1,132 +0,0 @@
# sonarr television show download client
This is a Helm chart for [sonarr](https://github.com/sonarr/sonarr/), leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/sonarr/).
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/sonarr
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release k8s-at-home/sonarr
```
## Upgrading
Chart versions 3.2.0 and earlier used separate PVCs for Downloads and TV. This presented an issue where Sonarr would be unable to hard-link files between the /downloads and /tv directories when importing media, because each PVC is exposed to the pod as a separate filesystem. This resulted in Sonarr copying files rather than linking them, consuming additional storage without the user's knowledge.
This chart now uses a single PVC for Downloads and TV. This means all of your media (and downloads) must be in, or be subdirectories of, a single directory. If upgrading from an earlier version of the chart, do the following:
1. [Uninstall](#uninstalling-the-chart) your current release
2. On your backing store, organize your media, e.g. media/tv, media/downloads
3. If using a pre-existing PVC, create a single new PVC for all of your media
4. Refer to the [configuration](#configuration) for updates to the chart values
5. Re-install the chart
6. Update your settings in the app to point to the new PVC, which is mounted at /media. This can be done using Sonarr's `Series Editor` under the `Series` tab. Simply select all series in your library, and use the editor to change the `Root Folder` and hit save.
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the sonarr chart and their default values.
| Parameter | Description | Default |
| ------------------------------------------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------- |
| `image.repository` | Image repository | `linuxserver/sonarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/sonarr/tags/). | `2.0.0.5344-ls60` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the instance should run as | `1001` |
| `pgid` | process groupID the instance should run as | `1001` |
| `exportarr.enabled` | Enable Prometheus monitoring with [Exportarr](https://github.com/onedr0p/exportarr) | `false` |
| `exportarr.image.repository` | Exportarr image repository | `onedr0p/exportarr` |
| `exportarr.image.tag` | Exportarr image tag | `v0.3.0` |
| `exportarr.image.pullPolicy` | Exportarr image pullPolicy | `IfNotPresent` |
| `exportarr.port` | Prometheus scrape port | `9707` |
| `exportarr.url` | Sonarr's URL | `http://sonarr.default.svc.cluster.local:8989` |
| `exportarr.apikey` | Sonarr's API Key | |
| `exportarr.enableEpisodeQualityMetrics` | Enable episode quality metrics gathering | `false` |
| `exportarr.serviceMonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` |
| `exportarr.serviceMonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) |
| `exportarr.serviceMonitor.path` | Prometheus scrape path | `/metrics` |
| `exportarr.serviceMonitor.interval` | Prometheus scrape interval | `4m` |
| `exportarr.serviceMonitor.scrapeTimeout` | Prometheus scrape timeout | `90s` |
| `exportarr.serviceMonitor.additionalLabels` | Add custom labels to ServiceMonitor | {} |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the GUI is exposed | `8989` |
| `service.annotations` | Service annotations for the GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Loadbalancer IP for the GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.subPath` | Mount a sub directory if set | `nil ` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.media.enabled` | Use persistent volume for media | `true` |
| `persistence.media.size` | Size of persistent volume claim | `10Gi` |
| `persistence.media.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.media.storageClass` | Type of persistent volume claim | `-` |
| `persistence.media.subPath` | Mount a sub directory if set | `nil ` |
| `persistence.media.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.media.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install --name my-release \
--set timezone="America/New_York" \
k8s-at-home/sonarr
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/sonarr
```
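As a further example, the Exportarr sidecar parameters listed above could be enabled from a values file along these lines (a sketch; the URL and API key are placeholders for your environment):
```yaml
exportarr:
  enabled: true
  url: http://sonarr.default.svc.cluster.local:8989  # placeholder: URL where the sidecar reaches Sonarr
  apikey: "<sonarr-api-key>"                         # placeholder: Sonarr API key
  serviceMonitor:
    enabled: true  # assumes the Prometheus Operator CRDs are installed
```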
---
**NOTE**
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...` it may be because you uninstalled the chart with `skipuninstall` enabled. In that case, manually delete the PVC or reuse it via `existingClaim`.
---
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/sonarr/values.yaml) file. It has several commented out suggested values.


@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sonarr.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "sonarr.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sonarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sonarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}


@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "sonarr.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "sonarr.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "sonarr.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "sonarr.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.config.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@ -1,151 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "sonarr.fullname" . }}
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8989
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
env:
- name: TZ
value: "{{ .Values.timezone }}"
- name: PUID
value: "{{ .Values.puid }}"
- name: PGID
value: "{{ .Values.pgid }}"
volumeMounts:
- mountPath: /config
name: config
{{- if .Values.persistence.config.subPath }}
subPath: "{{ .Values.persistence.config.subPath }}"
{{- end }}
- mountPath: /media
name: media
{{- if .Values.persistence.media.subPath }}
subPath: {{ .Values.persistence.media.subPath }}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- if .Values.exportarr.enabled }}
- name: sonarr-exporter
image: "{{ .Values.exportarr.image.repository }}:{{ .Values.exportarr.image.tag }}"
imagePullPolicy: {{ .Values.exportarr.image.pullPolicy }}
command: ["exportarr"]
args: ["sonarr"]
env:
- name: PORT
value: "{{ .Values.exportarr.port }}"
- name: URL
value: "{{ .Values.exportarr.url }}"
- name: APIKEY
value: "{{ .Values.exportarr.apikey }}"
- name: ENABLE_EPISODE_QUALITY_METRICS
value: "{{ .Values.exportarr.enableEpisodeQualityMetrics }}"
ports:
- name: monitoring
containerPort: {{ .Values.exportarr.port }}
livenessProbe:
httpGet:
path: /healthz
port: monitoring
failureThreshold: 5
periodSeconds: 10
readinessProbe:
httpGet:
path: /healthz
port: monitoring
failureThreshold: 5
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 64Mi
limits:
cpu: 500m
memory: 256Mi
{{- end }}
volumes:
- name: config
{{- if .Values.persistence.config.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-config{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
- name: media
{{- if .Values.persistence.media.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.media.existingClaim }}{{ .Values.persistence.media.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-media{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
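The deployment wires `persistence.extraExistingClaimMounts` entries into both the container's `volumeMounts` and the pod's `volumes`. A hedged example, assuming a pre-created claim called `nas-media` (the claim name and mount path are placeholders):

```yaml
persistence:
  extraExistingClaimMounts:
    - name: external-archive
      existingClaim: nas-media          # PVC must already exist; the chart will not create it
      mountPath: /srv/external-archive
      readOnly: true
```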

View File

@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "sonarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.labels -}}
{{ toYaml . | nindent 4 }}
{{- end -}}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
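A sketch of the ingress values this template renders; the hostname, TLS secret, and nginx annotations are examples rather than defaults:

```yaml
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
    kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - sonarr.example.com
  tls:
    - secretName: sonarr-example-tls
      hosts:
        - sonarr.example.com
```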

View File

@ -1,29 +0,0 @@
{{- if and .Values.persistence.media.enabled (not .Values.persistence.media.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "sonarr.fullname" . }}-media
{{- if .Values.persistence.media.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.media.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.media.size | quote }}
{{- if .Values.persistence.media.storageClass }}
{{- if (eq "-" .Values.persistence.media.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.media.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

View File

@ -1,20 +0,0 @@
{{- if .Values.exportarr.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "sonarr.fullname" . }}-exporter
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
clusterIP: None
ports:
- name: monitoring
port: {{ .Values.exportarr.port }}
targetPort: monitoring
selector:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "sonarr.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
helm.sh/chart: {{ include "sonarr.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
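The service template switches on `service.type`, so exposing the UI outside the cluster is a values change only. A hedged example for a LoadBalancer service (the IP and CIDR are placeholders):

```yaml
service:
  type: LoadBalancer
  port: 8989
  loadBalancerIP: 192.168.1.50        # placeholder static IP
  loadBalancerSourceRanges:
    - 192.168.1.0/24                  # placeholder CIDR
  externalTrafficPolicy: Local
  # nodePort: 30989                   # only honoured when type is NodePort
```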

View File

@ -1,24 +0,0 @@
{{- if .Values.exportarr.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "sonarr.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "sonarr.chart" . }}
{{- with .Values.exportarr.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sonarr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
endpoints:
- port: monitoring
interval: {{ .Values.exportarr.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.exportarr.serviceMonitor.scrapeTimeout }}
path: {{ .Values.exportarr.serviceMonitor.path }}
{{- end }}
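Enabling the exportarr sidecar together with this ServiceMonitor hooks the chart into a Prometheus Operator install. A hedged values sketch; the API key and the `release: prometheus` label are placeholders that must match your environment:

```yaml
exportarr:
  enabled: true
  url: "http://sonarr.default.svc.cluster.local:8989"
  apikey: "<sonarr-api-key>"          # placeholder
  port: 9707
  serviceMonitor:
    enabled: true
    interval: 4m
    scrapeTimeout: 90s
    path: /metrics
    additionalLabels:
      release: prometheus             # must match your Prometheus serviceMonitorSelector
```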

View File

@ -1,153 +1,11 @@
# Default values for sonarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
+sonarr:
repository: linuxserver/sonarr
tag: 3.0.3.911-ls39
pullPolicy: IfNotPresent
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
# Prometheus Metrics
exportarr:
enabled: false
+  image:
+    organization: itscontained
+    repository: sonarr
+    pullPolicy: IfNotPresent
+    tag: ""
+  service:
image:
repository: onedr0p/exportarr
tag: v0.3.0
pullPolicy: IfNotPresent
url: "http://sonarr.default.svc.cluster.local:8989"
apikey:
port: 9707
# Enable to gather episode quality metrics, if enabled slows down scrape timing due to more API calls
enableEpisodeQualityMetrics: false
serviceMonitor:
enabled: false
namespace: default
path: /metrics
interval: 4m
scrapeTimeout: 90s
additionalLabels: {}
nameOverride: ""
fullnameOverride: ""
timezone: UTC
puid: 1001
pgid: 1001
service:
type: ClusterIP
port: 8989
+    port: 8989
+  configPath: /var/lib/radarr
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
config:
enabled: true
## sonarr configuration data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
# subPath: some-subpath
accessMode: ReadWriteOnce
size: 1Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
media:
enabled: true
## sonarr media volume configuration
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
# subPath: some-subpath
accessMode: ReadWriteOnce
size: 10Gi
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts: []
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}

View File

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

View File

@ -1,16 +1,20 @@
apiVersion: v2
appVersion: v2.5.4
description: A Python based monitoring and tracking tool for Plex Media Server.
name: tautulli
version: 3.0.1
keywords:
- tautulli
- plex
home: https://github.com/k8s-at-home/charts/tree/master/charts/tautulli
icon: https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/tautulli-icon.png
sources:
- https://hub.docker.com/r/linuxserver/tautulli/
- https://tautulli.com/
maintainers:
- name: billimek
  email: jeff@billimek.com
+apiVersion: v2
+name: tautulli
+description: Tautulli Chart
+type: application
+version: 4.0.0
+appVersion: v2.5.4
+keywords:
+- tautulli
+home: https://github.com/k8s-at-home/charts/tree/master/charts/tautulli
+sources:
+- https://github.com/Tautulli/Tautulli
+- https://hub.docker.com/r/tautulli/tautulli
+maintainers:
+- name: DirtyCajunRice
+  email: nick@cajun.pro
+dependencies:
+- name: media-common
+  repository: https://k8s-at-home.com/charts/
+  version: ~1.0.0
+  alias: tautulli
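Because media-common is pulled in under the alias `tautulli`, user values for the subchart have to be nested beneath that key, as the trimmed values.yaml further down illustrates. A hedged example override (the tag is illustrative; the chart default is empty):

```yaml
tautulli:
  image:
    organization: tautulli
    repository: tautulli
    tag: "v2.5.4"
  service:
    port: 8181
```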

View File

@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek

View File

@ -1,96 +0,0 @@
# Tautulli
This is a helm chart for [Tautulli](https://tautulli.com/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/tautulli/)
## TL;DR;
```shell
$ helm repo add k8s-at-home https://k8s-at-home.com/charts/
$ helm install k8s-at-home/tautulli
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release k8s-at-home/tautulli
```
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Tautulli chart and their default values.
| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image.repository` | Image repository | `linuxserver/tautulli` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/tautulli/tags/). | `v2.5.4-ls12` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the Tautulli instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the Tautulli instance should run as | `1001` |
| `pgid` | process groupID the Tautulli instance should run as | `1001` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the Tautulli GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the Tautulli GUI is exposed | `8181` |
| `service.annotations` | Service annotations for the Tautulli GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | LoadBalancer IP for the Tautulli GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to the load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
| `persistence.config.subPath` | Mount a sub directory of the persistent volume if set | `""` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the pvc upon helm uninstall | `false` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install --name my-release \
--set timezone="America/New_York" \
k8s-at-home/tautulli
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
helm install --name my-release -f values.yaml k8s-at-home/tautulli
```
---
**NOTE**
If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...`, it may be because you uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or reuse it via `existingClaim`.
---
Read through the [values.yaml](https://github.com/k8s-at-home/charts/blob/master/charts/tautulli/values.yaml) file. It has several commented-out suggested values.
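Tying the parameters above together, a hedged example values file; the timezone, IDs, and claim name are illustrative:

```yaml
timezone: America/Chicago
puid: 1001
pgid: 1001
persistence:
  config:
    enabled: true
    existingClaim: tautulli-config   # reuse a pre-created PVC instead of provisioning a new one
    skipuninstall: true              # keep the claim when the release is uninstalled
```

Install it with `helm install --name my-release -f values.yaml k8s-at-home/tautulli` as shown above.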

View File

@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tautulli.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ include "tautulli.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tautulli.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "tautulli.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8181 to use your application"
kubectl port-forward $POD_NAME 8181:8181
{{- end }}

View File

@ -1,32 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "tautulli.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "tautulli.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "tautulli.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

View File

@ -1,29 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "tautulli.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
helm.sh/chart: {{ include "tautulli.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.config.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

View File

@ -1,101 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "tautulli.fullname" . }}
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
helm.sh/chart: {{ include "tautulli.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8181
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
env:
- name: TZ
value: "{{ .Values.timezone }}"
- name: PUID
value: "{{ .Values.puid }}"
- name: PGID
value: "{{ .Values.pgid }}"
volumeMounts:
- mountPath: /config
name: config
{{- if .Values.persistence.config.subPath }}
subPath: "{{ .Values.persistence.config.subPath }}"
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: config
{{- if .Values.persistence.config.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "tautulli.fullname" . }}-config{{- end }}
{{- else }}
emptyDir: {}
{{ end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}

View File

@ -1,41 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "tautulli.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
helm.sh/chart: {{ include "tautulli.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.labels -}}
{{ toYaml . | nindent 4 }}
{{- end -}}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}

View File

@ -1,53 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "tautulli.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
helm.sh/chart: {{ include "tautulli.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "tautulli.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}

View File

@ -1,117 +1,10 @@
# Default values for tautulli.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: linuxserver/tautulli
tag: v2.5.4-ls12
pullPolicy: IfNotPresent
+tautulli:
+  image:
+    organization: tautulli
+    repository: tautulli
+    pullPolicy: IfNotPresent
+    tag: ""
+  service:
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
nameOverride: ""
fullnameOverride: ""
timezone: UTC
puid: 1001
pgid: 1001
service:
type: ClusterIP
port: 8181
+    port: 8181
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
config:
enabled: true
## tautulli configuration data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts: []
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}