Mirror of https://github.com/k8s-at-home/charts.git (synced 2025-01-24 07:59:02 +00:00)

[lidarr] add new chart (#190)

* [lidarr] add new chart
* [lidarr] fix port
* [lidarr] clean up some movie references

parent 6b39213862
commit 57b0a62907
charts/lidarr/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
charts/lidarr/Chart.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: v1
appVersion: 0.7.1.1381-ls7
description: Looks and smells like Sonarr but made for music.
name: lidarr
version: 1.0.0
keywords:
- lidarr
- usenet
- bittorrent
home: https://github.com/billimek/billimek-charts/tree/master/charts/lidarr
icon: https://lidarr.audio/img/logo.png
sources:
- https://hub.docker.com/r/linuxserver/lidarr/
- https://github.com/lidarr/Lidarr/
maintainers:
- name: billimek
  email: jeff@billimek.com
charts/lidarr/OWNERS (new file, 4 lines)
@@ -0,0 +1,4 @@
approvers:
- billimek
reviewers:
- billimek
charts/lidarr/README.md (new file, 108 lines)
@@ -0,0 +1,108 @@
# lidarr music download client

This is a helm chart for [lidarr](https://github.com/lidarr/Lidarr) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/lidarr/).

## TL;DR;

```shell
$ helm repo add billimek https://billimek.com/billimek-charts/
$ helm install billimek/lidarr
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release billimek/lidarr
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the lidarr chart and their default values.

| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | Image repository | `linuxserver/lidarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/lidarr/tags/). | `0.7.1.1381-ls7` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods with new ones | `Recreate` |
| `timezone` | Timezone the lidarr instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | Process userID the lidarr instance should run as | `1001` |
| `pgid` | Process groupID the lidarr instance should run as | `1001` |
| `probes.liveness.initialDelaySeconds` | Specify liveness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.liveness.failureThreshold` | Specify liveness `failureThreshold` parameter for the deployment | `5` |
| `probes.liveness.timeoutSeconds` | Specify liveness `timeoutSeconds` parameter for the deployment | `10` |
| `probes.readiness.initialDelaySeconds` | Specify readiness `initialDelaySeconds` parameter for the deployment | `60` |
| `probes.readiness.failureThreshold` | Specify readiness `failureThreshold` parameter for the deployment | `5` |
| `probes.readiness.timeoutSeconds` | Specify readiness `timeoutSeconds` parameter for the deployment | `10` |
| `service.type` | Kubernetes service type for the lidarr GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the lidarr GUI is exposed | `8686` |
| `service.annotations` | Service annotations for the lidarr GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | Load balancer IP for the lidarr GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to the load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.config.skipuninstall` | Do not delete the PVC upon helm uninstall | `false` |
| `persistence.downloads.enabled` | Use persistent volume to store downloads data | `true` |
| `persistence.downloads.size` | Size of persistent volume claim | `10Gi` |
| `persistence.downloads.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.downloads.storageClass` | Type of persistent volume claim | `-` |
| `persistence.downloads.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.downloads.skipuninstall` | Do not delete the PVC upon helm uninstall | `false` |
| `persistence.music.enabled` | Use persistent volume to store music data | `true` |
| `persistence.music.size` | Size of persistent volume claim | `10Gi` |
| `persistence.music.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.music.storageClass` | Type of persistent volume claim | `-` |
| `persistence.music.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.music.skipuninstall` | Do not delete the PVC upon helm uninstall | `false` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |
| `podAnnotations` | Key-value pairs to add as pod annotations | `{}` |
| `deploymentAnnotations` | Key-value pairs to add as deployment annotations | `{}` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
  billimek/lidarr
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml billimek/lidarr
```
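
A minimal custom values file might look like the following sketch; the specific overrides shown here are illustrative, not chart defaults.

```yaml
# my-values.yaml -- illustrative overrides only
timezone: America/New_York
puid: 1000
pgid: 1000
service:
  type: LoadBalancer
persistence:
  music:
    size: 50Gi
```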

---
**NOTE**

If you get `Error: rendered manifests contain a resource that already exists. Unable to continue with install: existing resource conflict: ...`, it may be because you previously uninstalled the chart with `skipuninstall` enabled; you need to manually delete the PVC or use `existingClaim`.

---
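For example, to point the config volume at a PVC you created yourself (the claim name below is a placeholder):

```yaml
persistence:
  config:
    enabled: true
    existingClaim: lidarr-config   # placeholder name of a pre-existing PVC
```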

Read through the [values.yaml](https://github.com/billimek/billimek-charts/blob/master/charts/lidarr/values.yaml) file. It has several commented-out suggested values.
charts/lidarr/templates/NOTES.txt (new file, 19 lines)
@@ -0,0 +1,19 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "lidarr.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get svc -w {{ include "lidarr.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "lidarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "lidarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8686 to use your application"
  kubectl port-forward $POD_NAME 8686:8686
{{- end }}
charts/lidarr/templates/_helpers.tpl (new file, 32 lines)
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "lidarr.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "lidarr.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "lidarr.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
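For orientation, and assuming the release name `my-release` from the README (not something defined in the chart itself), these helpers resolve roughly as follows:

```yaml
# lidarr.name     -> lidarr
# lidarr.fullname -> my-release-lidarr   (just the release name if it already contains "lidarr")
# lidarr.chart    -> lidarr-1.0.0
```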
charts/lidarr/templates/config-pvc.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@

{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "lidarr.fullname" . }}-config
{{- if .Values.persistence.config.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
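The `storageClass` handling above follows the usual Helm chart convention; for instance (the class name is a placeholder):

```yaml
persistence:
  config:
    storageClass: "-"           # renders storageClassName: "" (disables dynamic provisioning)
    # storageClass: "fast-ssd"  # renders storageClassName: "fast-ssd"
    # omitting storageClass leaves storageClassName unset, so the cluster default provisioner is used
```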
charts/lidarr/templates/deployment.yaml (new file, 122 lines)
@@ -0,0 +1,122 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "lidarr.fullname" . }}
{{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "lidarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "lidarr.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
    {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8686
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /downloads
              name: downloads
            {{- if .Values.persistence.downloads.subPath }}
              subPath: {{ .Values.persistence.downloads.subPath }}
            {{- end }}
            - mountPath: /music
              name: music
            {{- if .Values.persistence.music.subPath }}
              subPath: {{ .Values.persistence.music.subPath }}
            {{- end }}
          {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
          {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config
        {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-config{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
        - name: downloads
        {{- if .Values.persistence.downloads.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.downloads.existingClaim }}{{ .Values.persistence.downloads.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-downloads{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
        - name: music
        {{- if .Values.persistence.music.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.music.existingClaim }}{{ .Values.persistence.music.existingClaim }}{{- else }}{{ template "lidarr.fullname" . }}-music{{- end }}
        {{- else }}
          emptyDir: {}
        {{- end }}
      {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
      {{- end }}
    {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
    {{- end }}
    {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
    {{- end }}
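As a sketch, values along these lines (the claim name and mount path are placeholders echoing the commented example in values.yaml) would exercise the `extraExistingClaimMounts` branches in this deployment:

```yaml
persistence:
  extraExistingClaimMounts:
    - name: external-mount
      mountPath: /srv/external-mount
      existingClaim: pre-created-pvc   # PVC must already exist in the namespace
      readOnly: true
```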
charts/lidarr/templates/downloads-pvc.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@

{{- if and .Values.persistence.downloads.enabled (not .Values.persistence.downloads.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "lidarr.fullname" . }}-downloads
{{- if .Values.persistence.downloads.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.downloads.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.downloads.size | quote }}
{{- if .Values.persistence.downloads.storageClass }}
{{- if (eq "-" .Values.persistence.downloads.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.downloads.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
charts/lidarr/templates/ingress.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "lidarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
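For reference, ingress values shaped like the following (hostname and secret name are placeholders) would render both the TLS and rules sections of this template:

```yaml
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
  path: /
  hosts:
    - lidarr.example.com               # placeholder hostname
  tls:
    - secretName: lidarr-example-tls   # placeholder TLS secret
      hosts:
        - lidarr.example.com
```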
charts/lidarr/templates/music-pvc.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@

{{- if and .Values.persistence.music.enabled (not .Values.persistence.music.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "lidarr.fullname" . }}-music
{{- if .Values.persistence.music.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
{{- end }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.music.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.music.size | quote }}
{{- if .Values.persistence.music.storageClass }}
{{- if (eq "-" .Values.persistence.music.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.music.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
charts/lidarr/templates/service.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "lidarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    helm.sh/chart: {{ include "lidarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
{{ end }}
  selector:
    app.kubernetes.io/name: {{ include "lidarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
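As an illustrative sketch (the IP address and CIDR are placeholders), these service overrides would take the LoadBalancer branch of this template:

```yaml
service:
  type: LoadBalancer
  port: 8686
  loadBalancerIP: 192.168.1.50     # placeholder static IP
  loadBalancerSourceRanges:
    - 192.168.1.0/24               # placeholder CIDR allowed to reach the GUI
  externalTrafficPolicy: Local
```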
charts/lidarr/values.yaml (new file, 150 lines)
@@ -0,0 +1,150 @@
# Default values for lidarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/lidarr
  tag: 0.7.1.1381-ls7
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 8686
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## lidarr configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  downloads:
    enabled: true
    ## lidarr downloads volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  music:
    enabled: true
    ## Directory where music is persisted
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}