mirror of
https://github.com/k8s-at-home/charts.git
synced 2025-01-24 07:59:02 +00:00
cleanup of gh-pages branch
This commit is contained in:
parent
23433a4311
commit
9ea409d8c8
3
.gitmodules
vendored
3
.gitmodules
vendored
@ -1,3 +0,0 @@
|
|||||||
[submodule "kube-plex"]
|
|
||||||
path = kube-plex
|
|
||||||
url = https://github.com/billimek/kube-plex.git
|
|
@ -1,4 +1,4 @@
|
|||||||
# personal helm charts
|
# billimek helm charts
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
|
@ -1,23 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
||||||
# OWNERS file for Kubernetes
|
|
||||||
OWNERS
|
|
@ -1,16 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
name: comcast
|
|
||||||
version: 1.0.0
|
|
||||||
appVersion: 1.0.0
|
|
||||||
description: periodic comcast data usage checks and save the results to InfluxDB
|
|
||||||
keywords:
|
|
||||||
- comcast
|
|
||||||
- influxdb
|
|
||||||
- xfinity
|
|
||||||
home: https://github.com/billimek/billimek-charts/tree/master/comcast
|
|
||||||
sources:
|
|
||||||
- https://github.com/billimek/comcastUsage-for-influxdb
|
|
||||||
- https://github.com/billimek/billimek-charts
|
|
||||||
maintainers:
|
|
||||||
- name: billimek
|
|
||||||
email: jeff@billimek.com
|
|
@ -1,4 +0,0 @@
|
|||||||
approvers:
|
|
||||||
- billimek
|
|
||||||
reviewers:
|
|
||||||
- billimek
|
|
@ -1,83 +0,0 @@
|
|||||||
# Comcast Data Cap Usage Collector For InfluxDB and Grafana
|
|
||||||
|
|
||||||
![Screenshot](https://github.com/billimek/comcastUsage-for-influxdb/raw/master/images/comcast_grafana_example.png)
|
|
||||||
|
|
||||||
This tool allows you to run periodic comcast data usage checks and save the results to Influxdb
|
|
||||||
|
|
||||||
## TL;DR;
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm repo add billimek https://billimek.github.io/helm-repo
|
|
||||||
$ helm install billimek/comcast
|
|
||||||
```
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This code is adopted from the work done by [barrycarey](https://github.com/barrycarey) in the [similar thing for capturing speedtest data](https://github.com/barrycarey/Speedtest-for-InfluxDB-and-Grafana) as well as [jantman's](https://github.com/jantman) [xfinity-usage python example](https://github.com/jantman/xfinity-usage)
|
|
||||||
|
|
||||||
## Installing the Chart
|
|
||||||
|
|
||||||
To install the chart with the release name `my-release`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release billimek/comcast
|
|
||||||
```
|
|
||||||
## Uninstalling the Chart
|
|
||||||
|
|
||||||
To uninstall/delete the `my-release` deployment:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm delete my-release --purge
|
|
||||||
```
|
|
||||||
|
|
||||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The configuration is set as a block of text through a configmap and mouted as a file in /src/config.ini Any value in this text block should match the defined Comcast configuration. There are several values here that will have to match our kubernetes configuration.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The following tables lists the configurable parameters of the Sentry chart and their default values.
|
|
||||||
|
|
||||||
| Parameter | Description | Default |
|
|
||||||
| ------------------------------- | ------------------------------- | ---------------------------------------------------------- |
|
|
||||||
| `image.repository` | Comcast image | `billimek/comcastusage-for-influxdb` |
|
|
||||||
| `image.tag` | Comcast image tag | `latest` |
|
|
||||||
| `image.pullPolicy` | Comcast image pull policy | `IfNotPresent` |
|
|
||||||
| `debug` | Display debugging output | `false` |
|
|
||||||
| `config.delay` | how many seconds to wait between checks | `3600` |
|
|
||||||
| `config.influxdb.host` | InfluxDB hostname | `influxdb-influxdb` |
|
|
||||||
| `config.influxdb.port` | InfluxDB port | `8086` |
|
|
||||||
| `config.influxdb.database` | InfluxDB database | `comcast` |
|
|
||||||
| `config.influxdb.username` | InfluxDB username | `` |
|
|
||||||
| `config.influxdb.password` | InfluxDB password | `` |
|
|
||||||
| `config.influxdb.ssl` | InfluxDB connection using SSL | `false` |
|
|
||||||
| `config.comcast.username` | Comcast website login usernma | `someuser` |
|
|
||||||
| `config.comcast.password` | Comcast website login password | `somepassword` |
|
|
||||||
|
|
||||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release \
|
|
||||||
--set config.comcast.username=tonystark,config.comcast.password=mypassword \
|
|
||||||
billimek/comcast
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release -f values.yaml billimek/comcast
|
|
||||||
```
|
|
||||||
|
|
||||||
Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
|
|
||||||
|
|
||||||
## InfluxDB metrics
|
|
||||||
```
|
|
||||||
'measurement': 'comcast_data_usage',
|
|
||||||
'fields': {
|
|
||||||
'used',
|
|
||||||
'total',
|
|
||||||
'unit'
|
|
||||||
}
|
|
||||||
```
|
|
@ -1,7 +0,0 @@
|
|||||||
You can connect to the container running comcast. To open a shell session in the pod run the following:
|
|
||||||
|
|
||||||
- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "comcast.fullname" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh
|
|
||||||
|
|
||||||
To trail the logs for the comcast pod run the following:
|
|
||||||
|
|
||||||
- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "comcast.fullname" . }} -o jsonpath='{ .items[0].metadata.name }')
|
|
@ -1,27 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "comcast.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "comcast.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
@ -1,32 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ template "comcast.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "comcast.name" . }}
|
|
||||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
|
||||||
release: "{{ .Release.Name }}"
|
|
||||||
heritage: "{{ .Release.Service }}"
|
|
||||||
data:
|
|
||||||
config.ini: |
|
|
||||||
[GENERAL]
|
|
||||||
Delay = {{ .Values.config.delay }}
|
|
||||||
{{- if .Values.debug }}
|
|
||||||
Output = True
|
|
||||||
{{- else }}
|
|
||||||
Output = False
|
|
||||||
{{- end }}
|
|
||||||
[INFLUXDB]
|
|
||||||
Address = {{ .Values.config.influxdb.host }}
|
|
||||||
Port = {{ .Values.config.influxdb.port }}
|
|
||||||
Database = {{ .Values.config.influxdb.database }}
|
|
||||||
Username = {{ .Values.config.influxdb.username }}
|
|
||||||
Password = {{ .Values.config.influxdb.password }}
|
|
||||||
{{- if .Values.config.influxdb.ssl }}
|
|
||||||
Verify_SSL = True
|
|
||||||
{{- else }}
|
|
||||||
Verify_SSL = False
|
|
||||||
{{- end }}
|
|
||||||
[COMCAST]
|
|
||||||
Username = {{ .Values.config.comcast.username }}
|
|
||||||
Password = {{ .Values.config.comcast.password }}
|
|
@ -1,42 +0,0 @@
|
|||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ template "comcast.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "comcast.name" . }}
|
|
||||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
heritage: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: {{ template "comcast.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
replicas: {{ default 1 .Values.replicas }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: {{ template "comcast.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: {{ template "comcast.name" . }}
|
|
||||||
mountPath: /src/config.ini
|
|
||||||
subPath: config.ini
|
|
||||||
{{- if .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml .Values.nodeSelector | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
volumes:
|
|
||||||
- name: {{ template "comcast.name" . }}
|
|
||||||
configMap:
|
|
||||||
name: {{ template "comcast.fullname" . }}
|
|
||||||
items:
|
|
||||||
- key: config.ini
|
|
||||||
path: config.ini
|
|
@ -1,36 +0,0 @@
|
|||||||
# Default values for comcast.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
# Declare variables to be passed into your templates.
|
|
||||||
replicaCount: 1
|
|
||||||
image:
|
|
||||||
repository: billimek/comcastusage-for-influxdb
|
|
||||||
tag: latest
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
resources: {}
|
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
|
||||||
# choice for the user. This also increases chances charts run on environments with little
|
|
||||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
|
||||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
nodeSelector: {}
|
|
||||||
debug: false
|
|
||||||
config:
|
|
||||||
# how many seconds to wait between checks
|
|
||||||
delay: 3600
|
|
||||||
influxdb:
|
|
||||||
# host/port/database are mandatory - change as needed
|
|
||||||
host: influxdb-influxdb
|
|
||||||
port: 8086
|
|
||||||
database: comcast
|
|
||||||
# username:
|
|
||||||
# password:
|
|
||||||
ssl: false
|
|
||||||
comcast:
|
|
||||||
# username/password are mandatory and must be populated
|
|
||||||
username: someuser
|
|
||||||
password: somepassword
|
|
@ -1,21 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
@ -1,15 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
appVersion: "1.0"
|
|
||||||
description: Dynamic DNS using DigitalOcean's DNS Services
|
|
||||||
name: digitalocean-dyndns
|
|
||||||
version: 0.1.0
|
|
||||||
keywords:
|
|
||||||
- digitalocean
|
|
||||||
- dynamicdns
|
|
||||||
home: https://github.com/billimek/billimek-charts/tree/master/digitalocean-dyndns
|
|
||||||
sources:
|
|
||||||
- https://github.com/tunix/digitalocean-dyndns
|
|
||||||
- https://github.com/billimek/billimek-charts
|
|
||||||
maintainers:
|
|
||||||
- name: billimek
|
|
||||||
email: jeff@billimek.com
|
|
@ -1,62 +0,0 @@
|
|||||||
# Dynamic DNS using DigitalOcean's DNS Services
|
|
||||||
|
|
||||||
A script that pushes the public IP address of the running machine to DigitalOcean's DNS API's. It requires an existing A record to update. The resulting container image is roughly around 7 MB (thanks to Alpine Linux).
|
|
||||||
|
|
||||||
## TL;DR;
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm repo add billimek https://billimek.github.io/helm-repo
|
|
||||||
$ helm install billimek/digitalocean-dyndns
|
|
||||||
```
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This code is adopted from [this original repo](https://github.com/tunix/digitalocean-dyndns)
|
|
||||||
|
|
||||||
## Installing the Chart
|
|
||||||
|
|
||||||
To install the chart with the release name `my-release`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release billimek/digitalocean-dyndns
|
|
||||||
```
|
|
||||||
## Uninstalling the Chart
|
|
||||||
|
|
||||||
To uninstall/delete the `my-release` deployment:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm delete my-release --purge
|
|
||||||
```
|
|
||||||
|
|
||||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The following tables lists the configurable parameters of the Sentry chart and their default values.
|
|
||||||
|
|
||||||
| Parameter | Description | Default |
|
|
||||||
| ------------------------------- | ------------------------------- | ---------------------------------------------------------- |
|
|
||||||
| `image.repository` | digitalocean-dyndns image | `tunix/digitalocean-dyndns` |
|
|
||||||
| `image.tag` | digitalocean-dyndns image tag | `latest` |
|
|
||||||
| `image.pullPolicy` | digitalocean-dyndns image pull policy | `Always` |
|
|
||||||
| `digitialocean.token` | The token you generate in DigitalOcean's API settings. | `` |
|
|
||||||
| `digitialocean.domain` | The domain your subdomain is registered at. (i.e. foo.com for home.foo.com) | `` |
|
|
||||||
| `digitialocean.name` | Subdomain to use. (name in A record) (i.e. home for home.foo.com or @ for no subdomain) | `@` |
|
|
||||||
| `digitialocean.sleep_interval` | Polling time in seconds | `300` |
|
|
||||||
|
|
||||||
|
|
||||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install --name my-release \
|
|
||||||
--set config.digitalocean.token=thisismyapikey \
|
|
||||||
billimek/digitalocean-dyndns
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install --name my-release -f values.yaml billimek/digitalocean-dyndns
|
|
||||||
```
|
|
||||||
|
|
||||||
Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
|
|
@ -1,7 +0,0 @@
|
|||||||
You can connect to the container running digitalocean-dyndns. To open a shell session in the pod run the following:
|
|
||||||
|
|
||||||
- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "digitalocean-dyndns.fullname" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh
|
|
||||||
|
|
||||||
To trail the logs for the digitalocean-dyndns pod run the following:
|
|
||||||
|
|
||||||
- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "digitalocean-dyndns.fullname" . }} -o jsonpath='{ .items[0].metadata.name }')
|
|
@ -1,32 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "digitalocean-dyndns.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "digitalocean-dyndns.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "digitalocean-dyndns.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
@ -1,51 +0,0 @@
|
|||||||
apiVersion: apps/v1beta2
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ template "digitalocean-dyndns.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "digitalocean-dyndns.name" . }}
|
|
||||||
chart: {{ template "digitalocean-dyndns.chart" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
heritage: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
replicas: {{ .Values.replicaCount }}
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: {{ template "digitalocean-dyndns.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: {{ template "digitalocean-dyndns.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
env:
|
|
||||||
- name: DIGITALOCEAN_TOKEN
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: {{ template "digitalocean-dyndns.fullname" . }}
|
|
||||||
key: digitalocean-dyndns-apikey
|
|
||||||
- name: DOMAIN
|
|
||||||
value: "{{ .Values.digitialocean.domain }}"
|
|
||||||
- name: NAME
|
|
||||||
value: "{{ .Values.digitialocean.name }}"
|
|
||||||
- name: SLEEP_INTERVAL
|
|
||||||
value: "{{ .Values.digitialocean.sleep_interval }}"
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
{{- with .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.affinity }}
|
|
||||||
affinity:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.tolerations }}
|
|
||||||
tolerations:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
@ -1,12 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ template "digitalocean-dyndns.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "digitalocean-dyndns.name" . }}
|
|
||||||
chart: {{ template "digitalocean-dyndns.chart" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
heritage: {{ .Release.Service }}
|
|
||||||
type: Opaque
|
|
||||||
data:
|
|
||||||
digitalocean-dyndns-apikey: {{ .Values.digitialocean.token | b64enc | quote }}
|
|
@ -1,34 +0,0 @@
|
|||||||
# Default values for digitalocean-dyndns.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
# Declare variables to be passed into your templates.
|
|
||||||
|
|
||||||
replicaCount: 1
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: tunix/digitalocean-dyndns
|
|
||||||
tag: latest
|
|
||||||
pullPolicy: Always
|
|
||||||
|
|
||||||
digitialocean:
|
|
||||||
token: sometoken # REQUIRED: The token you generate in DigitalOcean's API settings.
|
|
||||||
domain: somedomain # REQUIRED: The domain your subdomain is registered at. (i.e. foo.com for home.foo.com)
|
|
||||||
name: "@" # REQUIRED: Subdomain to use. (name in A record) (i.e. home for home.foo.com or @ for no subdomain)
|
|
||||||
sleep_interval: 300 # Polling time in seconds
|
|
||||||
|
|
||||||
resources: {}
|
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
|
||||||
# choice for the user. This also increases chances charts run on environments with little
|
|
||||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
|
||||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
|
|
||||||
nodeSelector: {}
|
|
||||||
|
|
||||||
tolerations: []
|
|
||||||
|
|
||||||
affinity: {}
|
|
@ -1 +0,0 @@
|
|||||||
The Home Assistant chart is now an official helm chart: https://github.com/helm/charts/tree/master/stable/home-assistant
|
|
10
index.html
Normal file
10
index.html
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>billimek chart repo</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>Helm Charts</h1>
|
||||||
|
<p>In order to add this repo, please run:</p>
|
||||||
|
<pre> helm repo add billimek https://billimek.com/billimek-charts/</pre>
|
||||||
|
</body>
|
||||||
|
</html>
|
@ -1 +0,0 @@
|
|||||||
Subproject commit df3daeafd728c4f0d5c567b2d9bb0565c5cf5e52
|
|
@ -1,23 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
||||||
# OWNERS file for Kubernetes
|
|
||||||
OWNERS
|
|
@ -1,15 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
name: modem-stats
|
|
||||||
version: 1.0.0
|
|
||||||
appVersion: 1.0.0
|
|
||||||
description: periodic cable modem data collection and save the results to InfluxDB
|
|
||||||
keywords:
|
|
||||||
- sb6183
|
|
||||||
- influxdb
|
|
||||||
home: https://github.com/billimek/billimek-charts/tree/master/modem-stats
|
|
||||||
sources:
|
|
||||||
- https://github.com/billimek/SB6183-stats-for-influxdb
|
|
||||||
- https://github.com/billimek/billimek-charts
|
|
||||||
maintainers:
|
|
||||||
- name: billimek
|
|
||||||
email: jeff@billimek.com
|
|
@ -1,4 +0,0 @@
|
|||||||
approvers:
|
|
||||||
- billimek
|
|
||||||
reviewers:
|
|
||||||
- billimek
|
|
@ -1,68 +0,0 @@
|
|||||||
# cable modem (sb6183) signal and stats collection agent for influxdb
|
|
||||||
|
|
||||||
![Screenshot](https://camo.githubusercontent.com/939e044c0491abf790d91bd1d7f909b187e4098c/68747470733a2f2f692e696d6775722e636f6d2f70705a6a6e6b502e706e67)
|
|
||||||
|
|
||||||
This tool allows you to run periodic scanning of the sb6183 cable modem and save the results to Influxdb
|
|
||||||
|
|
||||||
## TL;DR;
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm repo add billimek https://billimek.github.io/helm-repo
|
|
||||||
$ helm install billimek/modem-stats
|
|
||||||
```
|
|
||||||
|
|
||||||
## Installing the Chart
|
|
||||||
|
|
||||||
To install the chart with the release name `my-release`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release billimek/modem-stats
|
|
||||||
```
|
|
||||||
## Uninstalling the Chart
|
|
||||||
|
|
||||||
To uninstall/delete the `my-release` deployment:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm delete my-release --purge
|
|
||||||
```
|
|
||||||
|
|
||||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The configuration is set as a block of text through a configmap and mouted as a file in /src/config.ini Any value in this text block should match the defined sb6183 configuration. There are several values here that will have to match our kubernetes configuration.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The following tables lists the configurable parameters of the Sentry chart and their default values.
|
|
||||||
|
|
||||||
| Parameter | Description | Default |
|
|
||||||
| ------------------------------- | ------------------------------- | ---------------------------------------------------------- |
|
|
||||||
| `image.repository` | modem-stats image | `billimek/sb6183-for-influxdb` |
|
|
||||||
| `image.tag` | modem-stats image tag | `latest` |
|
|
||||||
| `image.pullPolicy` | modem-stats image pull policy | `IfNotPresent` |
|
|
||||||
| `debug` | Display debugging output | `false` |
|
|
||||||
| `config.delay` | how many seconds to wait between checks | `3600` |
|
|
||||||
| `config.influxdb.host` | InfluxDB hostname | `influxdb-influxdb` |
|
|
||||||
| `config.influxdb.port` | InfluxDB port | `8086` |
|
|
||||||
| `config.influxdb.database` | InfluxDB database | `sb6183` |
|
|
||||||
| `config.influxdb.username` | InfluxDB username | `` |
|
|
||||||
| `config.influxdb.password` | InfluxDB password | `` |
|
|
||||||
| `config.influxdb.ssl` | InfluxDB connection using SSL | `false` |
|
|
||||||
| `config.modem.url` | sb6183 stats URL page | `http://192.168.100.1/RgConnect.asp` |
|
|
||||||
|
|
||||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release \
|
|
||||||
--set onfig.influxdb.host=some-influx-host \
|
|
||||||
billimek/modem-stats
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ helm install --name my-release -f values.yaml billimek/modem-stats
|
|
||||||
```
|
|
||||||
|
|
||||||
Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
|
|
@ -1,7 +0,0 @@
|
|||||||
You can connect to the container running modem-stats. To open a shell session in the pod run the following:
|
|
||||||
|
|
||||||
- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "modem-stats.fullname" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh
|
|
||||||
|
|
||||||
To trail the logs for the modem-stats pod run the following:
|
|
||||||
|
|
||||||
- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "modem-stats.fullname" . }} -o jsonpath='{ .items[0].metadata.name }')
|
|
@ -1,27 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "modem-stats.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "modem-stats.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
@ -1,31 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ template "modem-stats.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "modem-stats.name" . }}
|
|
||||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
|
||||||
release: "{{ .Release.Name }}"
|
|
||||||
heritage: "{{ .Release.Service }}"
|
|
||||||
data:
|
|
||||||
config.ini: |
|
|
||||||
[GENERAL]
|
|
||||||
Delay = {{ .Values.config.delay }}
|
|
||||||
{{- if .Values.debug }}
|
|
||||||
Output = True
|
|
||||||
{{- else }}
|
|
||||||
Output = False
|
|
||||||
{{- end }}
|
|
||||||
[INFLUXDB]
|
|
||||||
Address = {{ .Values.config.influxdb.host }}
|
|
||||||
Port = {{ .Values.config.influxdb.port }}
|
|
||||||
Database = {{ .Values.config.influxdb.database }}
|
|
||||||
Username = {{ .Values.config.influxdb.username }}
|
|
||||||
Password = {{ .Values.config.influxdb.password }}
|
|
||||||
{{- if .Values.config.influxdb.ssl }}
|
|
||||||
Verify_SSL = True
|
|
||||||
{{- else }}
|
|
||||||
Verify_SSL = False
|
|
||||||
{{- end }}
|
|
||||||
[MODEM]
|
|
||||||
URL = {{ .Values.config.modem.url }}
|
|
@ -1,42 +0,0 @@
|
|||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ template "modem-stats.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app: {{ template "modem-stats.name" . }}
|
|
||||||
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
heritage: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: {{ template "modem-stats.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
replicas: {{ default 1 .Values.replicas }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: {{ template "modem-stats.name" . }}
|
|
||||||
release: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: {{ template "modem-stats.name" . }}
|
|
||||||
mountPath: /src/config.ini
|
|
||||||
subPath: config.ini
|
|
||||||
{{- if .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml .Values.nodeSelector | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
volumes:
|
|
||||||
- name: {{ template "modem-stats.name" . }}
|
|
||||||
configMap:
|
|
||||||
name: {{ template "modem-stats.fullname" . }}
|
|
||||||
items:
|
|
||||||
- key: config.ini
|
|
||||||
path: config.ini
|
|
@ -1,34 +0,0 @@
|
|||||||
# Default values for modem-stats.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
# Declare variables to be passed into your templates.
|
|
||||||
replicaCount: 1
|
|
||||||
image:
|
|
||||||
repository: billimek/sb6183-for-influxdb
|
|
||||||
tag: latest
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
resources: {}
|
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
|
||||||
# choice for the user. This also increases chances charts run on environments with little
|
|
||||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
|
||||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
nodeSelector: {}
|
|
||||||
debug: false
|
|
||||||
config:
|
|
||||||
# how many seconds to wait between checks
|
|
||||||
delay: 3600
|
|
||||||
influxdb:
|
|
||||||
# host/port/database are mandatory - change as needed
|
|
||||||
host: influxdb-influxdb
|
|
||||||
port: 8086
|
|
||||||
database: cable_modem_stats
|
|
||||||
# username:
|
|
||||||
# password:
|
|
||||||
ssl: false
|
|
||||||
modem:
|
|
||||||
url: http://192.168.100.1/RgConnect.asp
|
|
@ -1 +0,0 @@
|
|||||||
The node-red chart is now an official helm chart: https://github.com/helm/charts/tree/master/stable/node-red
|
|
@ -1,23 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
||||||
# OWNERS file for Kubernetes
|
|
||||||
OWNERS
|
|
@ -1,16 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
appVersion: amd64-v21.0-r2302-ls8
|
|
||||||
description: NZBGet is a Usenet downloader client
|
|
||||||
name: nzbget
|
|
||||||
version: 2.0.2
|
|
||||||
keywords:
|
|
||||||
- nzbget
|
|
||||||
- usenet
|
|
||||||
home: https://github.com/billimek/billimek-charts/tree/master/nzbget
|
|
||||||
icon: https://avatars1.githubusercontent.com/u/3368377?s=400&v=4
|
|
||||||
sources:
|
|
||||||
- https://hub.docker.com/r/linuxserver/nzbget/
|
|
||||||
- https://nzbget.net/
|
|
||||||
maintainers:
|
|
||||||
- name: billimek
|
|
||||||
email: jeff@billimek.com
|
|
@ -1,4 +0,0 @@
|
|||||||
approvers:
|
|
||||||
- billimek
|
|
||||||
reviewers:
|
|
||||||
- billimek
|
|
@ -1,89 +0,0 @@
|
|||||||
# NZBGet Usenet client
|
|
||||||
|
|
||||||
This is a helm chart for [NZBGet](https://nzbget.net/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/nzbget/)
|
|
||||||
|
|
||||||
## TL;DR;
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ helm repo add billimek https://billimek.github.io/helm-repo
|
|
||||||
$ helm install billimek/nzbget
|
|
||||||
```
|
|
||||||
|
|
||||||
## Installing the Chart
|
|
||||||
|
|
||||||
To install the chart with the release name `my-release`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install --name my-release billimek/nzbget
|
|
||||||
```
|
|
||||||
|
|
||||||
The default login details (change ASAP) are:
|
|
||||||
|
|
||||||
* login:nzbget
|
|
||||||
* password:tegbzn6789
|
|
||||||
|
|
||||||
## Uninstalling the Chart
|
|
||||||
|
|
||||||
To uninstall/delete the `my-release` deployment:
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm delete my-release --purge
|
|
||||||
```
|
|
||||||
|
|
||||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The following tables lists the configurable parameters of the Sentry chart and their default values.
|
|
||||||
|
|
||||||
| Parameter | Description | Default |
|
|
||||||
|----------------------------|-------------------------------------|---------------------------------------------------------|
|
|
||||||
| `image.repository` | Image repository | `linuxserver/nzbget` |
|
|
||||||
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/nzbget/tags/).| `amd64-v21.0-r2302-ls8 `|
|
|
||||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
|
||||||
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
|
|
||||||
| `timezone` | Timezone the nzbget instance should run as, e.g. 'America/New_York' | `UTC` |
|
|
||||||
| `puid` | process userID the nzbget instance should run as | `1001` |
|
|
||||||
| `pgid` | process groupID the nzbget instance should run as | `1001` |
|
|
||||||
| `Service.type` | Kubernetes service type for the nzbget GUI | `ClusterIP` |
|
|
||||||
| `Service.port` | Kubernetes port where the nzbget GUI is exposed| `6789` |
|
|
||||||
| `Service.annotations` | Service annotations for the nzbget GUI | `{}` |
|
|
||||||
| `Service.labels` | Custom labels | `{}` |
|
|
||||||
| `Service.loadBalancerIP` | Loadbalance IP for the nzbget GUI | `{}` |
|
|
||||||
| `Service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None
|
|
||||||
| `ingress.enabled` | Enables Ingress | `false` |
|
|
||||||
| `ingress.annotations` | Ingress annotations | `{}` |
|
|
||||||
| `ingress.labels` | Custom labels | `{}`
|
|
||||||
| `ingress.path` | Ingress path | `/` |
|
|
||||||
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
|
|
||||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
|
||||||
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
|
|
||||||
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
|
|
||||||
| `persistence.config.existingClaim`| Use an existing PVC to persist data | `nil` |
|
|
||||||
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
|
|
||||||
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
|
||||||
| `persistence.downloads.enabled` | Use persistent volume to store configuration data | `true` |
|
|
||||||
| `persistence.downloads.size` | Size of persistent volume claim | `10Gi` |
|
|
||||||
| `persistence.downloads.existingClaim`| Use an existing PVC to persist data | `nil` |
|
|
||||||
| `persistence.downloads.storageClass` | Type of persistent volume claim | `-` |
|
|
||||||
| `persistence.downloads.accessMode` | Persistence access mode | `ReadWriteOnce` |
|
|
||||||
| `resources` | CPU/Memory resource requests/limits | `{}` |
|
|
||||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
|
||||||
| `tolerations` | Toleration labels for pod assignment | `[]` |
|
|
||||||
| `affinity` | Affinity settings for pod assignment | `{}` |
|
|
||||||
|
|
||||||
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install --name my-release \
|
|
||||||
--set timezone="America/New York" \
|
|
||||||
billimek/nzbget
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install --name my-release -f values.yaml stable/nzbget
|
|
||||||
```
|
|
||||||
|
|
||||||
Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
|
|
@ -1,21 +0,0 @@
|
|||||||
1. Get the application URL by running these commands:
|
|
||||||
{{- if .Values.ingress.enabled }}
|
|
||||||
{{- range .Values.ingress.hosts }}
|
|
||||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
|
|
||||||
{{- end }}
|
|
||||||
{{- else if contains "NodePort" .Values.service.type }}
|
|
||||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nzbget.fullname" . }})
|
|
||||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
|
||||||
echo http://$NODE_IP:$NODE_PORT
|
|
||||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
|
||||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
|
||||||
You can watch the status of by running 'kubectl get svc -w {{ include "nzbget.fullname" . }}'
|
|
||||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nzbget.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
|
||||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
|
||||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
|
||||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nzbget.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
|
||||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
|
||||||
kubectl port-forward $POD_NAME 8080:80
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
The default login to the GUI is login:nzbget, password:tegbzn6789
|
|
@ -1,32 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "nzbget.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "nzbget.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "nzbget.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "nzbget.fullname" . }}-config
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
helm.sh/chart: {{ include "nzbget.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.config.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.config.size | quote }}
|
|
||||||
{{- if .Values.persistence.config.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.config.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.config.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,81 +0,0 @@
|
|||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ include "nzbget.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
helm.sh/chart: {{ include "nzbget.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
strategy:
|
|
||||||
type: {{ .Values.strategyType }}
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
containerPort: 6789
|
|
||||||
protocol: TCP
|
|
||||||
livenessProbe:
|
|
||||||
tcpSocket:
|
|
||||||
port: http
|
|
||||||
readinessProbe:
|
|
||||||
tcpSocket:
|
|
||||||
port: http
|
|
||||||
env:
|
|
||||||
- name: TZ
|
|
||||||
value: "{{ .Values.timezone }}"
|
|
||||||
- name: PUID
|
|
||||||
value: "{{ .Values.puid }}"
|
|
||||||
- name: PGID
|
|
||||||
value: "{{ .Values.pgid }}"
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /config
|
|
||||||
name: config
|
|
||||||
- mountPath: /downloads
|
|
||||||
name: downloads
|
|
||||||
{{- if .Values.persistence.downloads.subPath }}
|
|
||||||
subPath: {{ .Values.persistence.downloads.subPath }}
|
|
||||||
{{ end }}
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
volumes:
|
|
||||||
- name: config
|
|
||||||
{{- if .Values.persistence.config.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "nzbget.fullname" . }}-config{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{ end }}
|
|
||||||
- name: downloads
|
|
||||||
{{- if .Values.persistence.downloads.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.downloads.existingClaim }}{{ .Values.persistence.downloads.existingClaim }}{{- else }}{{ template "nzbget.fullname" . }}-downloads{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{ end }}
|
|
||||||
{{- with .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.affinity }}
|
|
||||||
affinity:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.tolerations }}
|
|
||||||
tolerations:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.downloads.enabled (not .Values.persistence.downloads.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "nzbget.fullname" . }}-downloads
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
helm.sh/chart: {{ include "nzbget.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.downloads.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.downloads.size | quote }}
|
|
||||||
{{- if .Values.persistence.downloads.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.downloads.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.downloads.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,38 +0,0 @@
|
|||||||
{{- if .Values.ingress.enabled -}}
|
|
||||||
{{- $fullName := include "nzbget.fullname" . -}}
|
|
||||||
{{- $ingressPath := .Values.ingress.path -}}
|
|
||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Ingress
|
|
||||||
metadata:
|
|
||||||
name: {{ $fullName }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
helm.sh/chart: {{ include "nzbget.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- with .Values.ingress.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if .Values.ingress.tls }}
|
|
||||||
tls:
|
|
||||||
{{- range .Values.ingress.tls }}
|
|
||||||
- hosts:
|
|
||||||
{{- range .hosts }}
|
|
||||||
- {{ . | quote }}
|
|
||||||
{{- end }}
|
|
||||||
secretName: {{ .secretName }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
rules:
|
|
||||||
{{- range .Values.ingress.hosts }}
|
|
||||||
- host: {{ . | quote }}
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: {{ $ingressPath }}
|
|
||||||
backend:
|
|
||||||
serviceName: {{ $fullName }}
|
|
||||||
servicePort: http
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
@ -1,53 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: {{ template "nzbget.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
helm.sh/chart: {{ include "nzbget.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- if .Values.service.labels }}
|
|
||||||
{{ toYaml .Values.service.labels | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.service.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
|
|
||||||
type: ClusterIP
|
|
||||||
{{- if .Values.service.clusterIP }}
|
|
||||||
clusterIP: {{ .Values.service.clusterIP }}
|
|
||||||
{{end}}
|
|
||||||
{{- else if eq .Values.service.type "LoadBalancer" }}
|
|
||||||
type: {{ .Values.service.type }}
|
|
||||||
{{- if .Values.service.loadBalancerIP }}
|
|
||||||
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.loadBalancerSourceRanges }}
|
|
||||||
loadBalancerSourceRanges:
|
|
||||||
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else }}
|
|
||||||
type: {{ .Values.service.type }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.externalIPs }}
|
|
||||||
externalIPs:
|
|
||||||
{{ toYaml .Values.service.externalIPs | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.externalTrafficPolicy }}
|
|
||||||
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
|
|
||||||
{{- end }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
port: {{ .Values.service.port }}
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: http
|
|
||||||
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
|
||||||
nodePort: {{.Values.service.nodePort}}
|
|
||||||
{{ end }}
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: {{ include "nzbget.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
|
|
@ -1,106 +0,0 @@
|
|||||||
# Default values for nzbget.
|
|
||||||
# This is a YAML-formatted file.
|
|
||||||
# Declare variables to be passed into your templates.
|
|
||||||
|
|
||||||
image:
|
|
||||||
repository: linuxserver/nzbget
|
|
||||||
tag: amd64-v21.0-r2302-ls8
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
|
|
||||||
# upgrade strategy type (e.g. Recreate or RollingUpdate)
|
|
||||||
strategyType: Recreate
|
|
||||||
|
|
||||||
nameOverride: ""
|
|
||||||
fullnameOverride: ""
|
|
||||||
|
|
||||||
timezone: UTC
|
|
||||||
puid: 1001
|
|
||||||
pgid: 1001
|
|
||||||
|
|
||||||
service:
|
|
||||||
type: ClusterIP
|
|
||||||
port: 6789
|
|
||||||
## Specify the nodePort value for the LoadBalancer and NodePort service types.
|
|
||||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
|
||||||
##
|
|
||||||
# nodePort:
|
|
||||||
## Provide any additional annotations which may be required. This can be used to
|
|
||||||
## set the LoadBalancer service type to internal only.
|
|
||||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
|
|
||||||
##
|
|
||||||
annotations: {}
|
|
||||||
labels: {}
|
|
||||||
## Use loadBalancerIP to request a specific static IP,
|
|
||||||
## otherwise leave blank
|
|
||||||
##
|
|
||||||
loadBalancerIP:
|
|
||||||
# loadBalancerSourceRanges: []
|
|
||||||
## Set the externalTrafficPolicy in the Service to either Cluster or Local
|
|
||||||
# externalTrafficPolicy: Cluster
|
|
||||||
|
|
||||||
ingress:
|
|
||||||
enabled: false
|
|
||||||
annotations: {}
|
|
||||||
# kubernetes.io/ingress.class: nginx
|
|
||||||
# kubernetes.io/tls-acme: "true"
|
|
||||||
path: /
|
|
||||||
hosts:
|
|
||||||
- chart-example.local
|
|
||||||
tls: []
|
|
||||||
# - secretName: chart-example-tls
|
|
||||||
# hosts:
|
|
||||||
# - chart-example.local
|
|
||||||
|
|
||||||
persistence:
|
|
||||||
config:
|
|
||||||
enabled: true
|
|
||||||
## nzbget configuration data Persistent Volume Storage Class
|
|
||||||
## If defined, storageClassName: <storageClass>
|
|
||||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
|
||||||
## If undefined (the default) or set to null, no storageClassName spec is
|
|
||||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
|
||||||
## GKE, AWS & OpenStack)
|
|
||||||
##
|
|
||||||
# storageClass: "-"
|
|
||||||
##
|
|
||||||
## If you want to reuse an existing claim, you can pass the name of the PVC using
|
|
||||||
## the existingClaim variable
|
|
||||||
# existingClaim: your-claim
|
|
||||||
accessMode: ReadWriteOnce
|
|
||||||
size: 1Gi
|
|
||||||
downloads:
|
|
||||||
enabled: true
|
|
||||||
## nzbget torrents downloads volume configuration
|
|
||||||
## If defined, storageClassName: <storageClass>
|
|
||||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
|
||||||
## If undefined (the default) or set to null, no storageClassName spec is
|
|
||||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
|
||||||
## GKE, AWS & OpenStack)
|
|
||||||
##
|
|
||||||
# storageClass: "-"
|
|
||||||
##
|
|
||||||
## If you want to reuse an existing claim, you can pass the name of the PVC using
|
|
||||||
## the existingClaim variable
|
|
||||||
# existingClaim: your-claim
|
|
||||||
# subPath: some-subpath
|
|
||||||
accessMode: ReadWriteOnce
|
|
||||||
size: 10Gi
|
|
||||||
|
|
||||||
|
|
||||||
resources: {}
|
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
|
||||||
# choice for the user. This also increases chances charts run on environments with little
|
|
||||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
|
||||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
|
|
||||||
nodeSelector: {}
|
|
||||||
|
|
||||||
tolerations: []
|
|
||||||
|
|
||||||
affinity: {}
|
|
@ -1,23 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
||||||
# OWNERS file for Kubernetes
|
|
||||||
OWNERS
|
|
@ -1,17 +0,0 @@
apiVersion: v1
appVersion: amd64-v0.2.0.1293-ls9
description: Radarr is a movie downloading client
name: radarr
version: 2.0.2
keywords:
- radarr
- usenet
- bittorrent
home: https://github.com/billimek/billimek-charts/tree/master/radarr
icon: https://avatars3.githubusercontent.com/u/25025331?s=400&v=4
sources:
- https://hub.docker.com/r/linuxserver/radarr/
- https://github.com/Radarr/Radarr/
maintainers:
- name: billimek
  email: jeff@billimek.com
@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek
@ -1,90 +0,0 @@
# radarr movie download client

This is a helm chart for [radarr](https://github.com/Radarr/Radarr/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/radarr/)

## TL;DR;

```shell
$ helm repo add billimek https://billimek.github.io/helm-repo
$ helm install billimek/radarr
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release billimek/radarr
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the radarr chart and their default values.

| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image.repository` | Image repository | `linuxserver/radarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/radarr/tags/). | `amd64-v0.2.0.1293-ls9` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the radarr instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the radarr instance should run as | `1001` |
| `pgid` | process groupID the radarr instance should run as | `1001` |
| `service.type` | Kubernetes service type for the radarr GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the radarr GUI is exposed | `7878` |
| `service.annotations` | Service annotations for the radarr GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | LoadBalancer IP for the radarr GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.downloads.enabled` | Use persistent volume to store downloads | `true` |
| `persistence.downloads.size` | Size of persistent volume claim | `10Gi` |
| `persistence.downloads.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.downloads.storageClass` | Type of persistent volume claim | `-` |
| `persistence.downloads.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.movies.enabled` | Use persistent volume to store movies | `true` |
| `persistence.movies.size` | Size of persistent volume claim | `10Gi` |
| `persistence.movies.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.movies.storageClass` | Type of persistent volume claim | `-` |
| `persistence.movies.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
  billimek/radarr
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml billimek/radarr
```

Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
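
As a rough illustration, a small override file along these lines can be passed with `-f` (the hostname and volume size below are placeholders, not chart defaults):

```yaml
# my-values.yaml -- illustrative overrides for the radarr chart
timezone: America/New_York

ingress:
  enabled: true
  hosts:
    - radarr.example.com   # placeholder hostname

persistence:
  movies:
    size: 50Gi             # example size; default is 10Gi
```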
@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "radarr.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of it by running 'kubectl get svc -w {{ include "radarr.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "radarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "radarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@ -1,32 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "radarr.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "radarr.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "radarr.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "radarr.fullname" . }}-config
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.config.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.config.size | quote }}
|
|
||||||
{{- if .Values.persistence.config.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.config.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.config.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,103 +0,0 @@
|
|||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ include "radarr.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
strategy:
|
|
||||||
type: {{ .Values.strategyType }}
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
containerPort: 7878
|
|
||||||
protocol: TCP
|
|
||||||
livenessProbe:
|
|
||||||
tcpSocket:
|
|
||||||
port: http
|
|
||||||
readinessProbe:
|
|
||||||
tcpSocket:
|
|
||||||
port: http
|
|
||||||
env:
|
|
||||||
- name: TZ
|
|
||||||
value: "{{ .Values.timezone }}"
|
|
||||||
- name: PUID
|
|
||||||
value: "{{ .Values.puid }}"
|
|
||||||
- name: PGID
|
|
||||||
value: "{{ .Values.pgid }}"
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /config
|
|
||||||
name: config
|
|
||||||
- mountPath: /downloads
|
|
||||||
name: downloads
|
|
||||||
{{- if .Values.persistence.downloads.subPath }}
|
|
||||||
subPath: {{ .Values.persistence.downloads.subPath }}
|
|
||||||
{{- end }}
|
|
||||||
- mountPath: /movies
|
|
||||||
name: movies
|
|
||||||
{{- if .Values.persistence.movies.subPath }}
|
|
||||||
subPath: {{ .Values.persistence.movies.subPath }}
|
|
||||||
{{- end }}
|
|
||||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
|
||||||
- name: {{ .name }}
|
|
||||||
mountPath: {{ .mountPath }}
|
|
||||||
readOnly: {{ .readOnly }}
|
|
||||||
{{- end }}
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
volumes:
|
|
||||||
- name: config
|
|
||||||
{{- if .Values.persistence.config.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "radarr.fullname" . }}-config{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{- end }}
|
|
||||||
- name: downloads
|
|
||||||
{{- if .Values.persistence.downloads.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.downloads.existingClaim }}{{ .Values.persistence.downloads.existingClaim }}{{- else }}{{ template "radarr.fullname" . }}-downloads{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{- end }}
|
|
||||||
- name: movies
|
|
||||||
{{- if .Values.persistence.movies.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.movies.existingClaim }}{{ .Values.persistence.movies.existingClaim }}{{- else }}{{ template "radarr.fullname" . }}-movies{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{- end }}
|
|
||||||
{{- range .Values.persistence.extraExistingClaimMounts }}
|
|
||||||
- name: {{ .name }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ .existingClaim }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.affinity }}
|
|
||||||
affinity:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.tolerations }}
|
|
||||||
tolerations:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.downloads.enabled (not .Values.persistence.downloads.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "radarr.fullname" . }}-downloads
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.downloads.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.downloads.size | quote }}
|
|
||||||
{{- if .Values.persistence.downloads.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.downloads.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.downloads.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,38 +0,0 @@
|
|||||||
{{- if .Values.ingress.enabled -}}
|
|
||||||
{{- $fullName := include "radarr.fullname" . -}}
|
|
||||||
{{- $ingressPath := .Values.ingress.path -}}
|
|
||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Ingress
|
|
||||||
metadata:
|
|
||||||
name: {{ $fullName }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- with .Values.ingress.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if .Values.ingress.tls }}
|
|
||||||
tls:
|
|
||||||
{{- range .Values.ingress.tls }}
|
|
||||||
- hosts:
|
|
||||||
{{- range .hosts }}
|
|
||||||
- {{ . | quote }}
|
|
||||||
{{- end }}
|
|
||||||
secretName: {{ .secretName }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
rules:
|
|
||||||
{{- range .Values.ingress.hosts }}
|
|
||||||
- host: {{ . | quote }}
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: {{ $ingressPath }}
|
|
||||||
backend:
|
|
||||||
serviceName: {{ $fullName }}
|
|
||||||
servicePort: http
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.movies.enabled (not .Values.persistence.movies.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "radarr.fullname" . }}-movies
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.movies.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.movies.size | quote }}
|
|
||||||
{{- if .Values.persistence.movies.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.movies.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.movies.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,52 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: {{ template "radarr.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
helm.sh/chart: {{ include "radarr.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- if .Values.service.labels }}
|
|
||||||
{{ toYaml .Values.service.labels | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.service.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
|
|
||||||
type: ClusterIP
|
|
||||||
{{- if .Values.service.clusterIP }}
|
|
||||||
clusterIP: {{ .Values.service.clusterIP }}
|
|
||||||
{{end}}
|
|
||||||
{{- else if eq .Values.service.type "LoadBalancer" }}
|
|
||||||
type: {{ .Values.service.type }}
|
|
||||||
{{- if .Values.service.loadBalancerIP }}
|
|
||||||
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.loadBalancerSourceRanges }}
|
|
||||||
loadBalancerSourceRanges:
|
|
||||||
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else }}
|
|
||||||
type: {{ .Values.service.type }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.externalIPs }}
|
|
||||||
externalIPs:
|
|
||||||
{{ toYaml .Values.service.externalIPs | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.service.externalTrafficPolicy }}
|
|
||||||
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
|
|
||||||
{{- end }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
port: {{ .Values.service.port }}
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: http
|
|
||||||
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
|
||||||
nodePort: {{.Values.service.nodePort}}
|
|
||||||
{{ end }}
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: {{ include "radarr.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
@ -1,129 +0,0 @@
# Default values for radarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/radarr
  tag: amd64-v0.2.0.1293-ls9
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 7878
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## radarr configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
  downloads:
    enabled: true
    ## radarr downloads volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
  movies:
    enabled: true
    ## Directory where movies are persisted
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    #   ## A manually managed Persistent Volume and Claim
    #   ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #  cpu: 100m
  #  memory: 128Mi
  # requests:
  #  cpu: 100m
  #  memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
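
The commented `extraExistingClaimMounts` block above hints at how a manually managed PVC can be surfaced inside the pod; a minimal sketch of such an override (the claim name `nas-media` and mount path are purely hypothetical) might look like:

```yaml
# override snippet -- mount a pre-existing PVC read-only into the radarr pod
persistence:
  extraExistingClaimMounts:
    - name: external-mount
      mountPath: /srv/external-mount
      existingClaim: nas-media   # placeholder; the PVC must already exist
      readOnly: true
```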
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@ -1,17 +0,0 @@
apiVersion: v1
appVersion: 0.9.7-0.13.7
description: rTorrent BitTorrent client with the flood web UI
name: rtorrent-flood
version: 2.0.7
keywords:
- rtorrent
- flood
- torrent
home: https://github.com/billimek/billimek-charts/tree/master/rtorrent-flood
icon: https://github.com/jfurrow/flood/blob/master/flood.png?raw=true
sources:
- https://hub.docker.com/r/wonderfall/rtorrent-flood
- https://github.com/Wonderfall/docker-rtorrent-flood
maintainers:
- name: billimek
  email: jeff@billimek.com
@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek
@ -1,3 +0,0 @@
# rTorrent/flood BitTorrent client

TBD
@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.guiService.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "rtorrent-flood.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.guiService.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of it by running 'kubectl get svc -w {{ include "rtorrent-flood.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "rtorrent-flood.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.guiService.port }}
{{- else if contains "ClusterIP" .Values.guiService.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "rtorrent-flood.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@ -1,32 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "rtorrent-flood.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "rtorrent-flood.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "rtorrent-flood.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
@ -1,52 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: {{ template "rtorrent-flood.fullname" . }}-bt
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- if .Values.btService.labels }}
|
|
||||||
{{ toYaml .Values.btService.labels | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.btService.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if (or (eq .Values.btService.type "ClusterIP") (empty .Values.btService.type)) }}
|
|
||||||
type: ClusterIP
|
|
||||||
{{- if .Values.btService.clusterIP }}
|
|
||||||
clusterIP: {{ .Values.btService.clusterIP }}
|
|
||||||
{{end}}
|
|
||||||
{{- else if eq .Values.btService.type "LoadBalancer" }}
|
|
||||||
type: {{ .Values.btService.type }}
|
|
||||||
{{- if .Values.btService.loadBalancerIP }}
|
|
||||||
loadBalancerIP: {{ .Values.btService.loadBalancerIP }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.btService.loadBalancerSourceRanges }}
|
|
||||||
loadBalancerSourceRanges:
|
|
||||||
{{ toYaml .Values.btService.loadBalancerSourceRanges | indent 4 }}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else }}
|
|
||||||
type: {{ .Values.btService.type }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.btService.externalIPs }}
|
|
||||||
externalIPs:
|
|
||||||
{{ toYaml .Values.btService.externalIPs | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.btService.externalTrafficPolicy }}
|
|
||||||
externalTrafficPolicy: {{ .Values.btService.externalTrafficPolicy }}
|
|
||||||
{{- end }}
|
|
||||||
ports:
|
|
||||||
- name: bt
|
|
||||||
port: {{ .Values.btService.port }}
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: bt
|
|
||||||
{{ if (and (eq .Values.btService.type "NodePort") (not (empty .Values.btService.nodePort))) }}
|
|
||||||
nodePort: {{.Values.btService.nodePort}}
|
|
||||||
{{ end }}
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
@ -1,24 +0,0 @@
|
|||||||
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "rtorrent-flood.fullname" . }}-config
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.config.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.config.size | quote }}
|
|
||||||
{{- if .Values.persistence.config.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.config.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.config.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,25 +0,0 @@
|
|||||||
|
|
||||||
{{- if and .Values.persistence.data.enabled (not .Values.persistence.data.existingClaim) }}
|
|
||||||
kind: PersistentVolumeClaim
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ template "rtorrent-flood.fullname" . }}-data
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- {{ .Values.persistence.data.accessMode | quote }}
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: {{ .Values.persistence.data.size | quote }}
|
|
||||||
{{- if .Values.persistence.data.storageClass }}
|
|
||||||
{{- if (eq "-" .Values.persistence.data.storageClass) }}
|
|
||||||
storageClassName: ""
|
|
||||||
{{- else }}
|
|
||||||
storageClassName: "{{ .Values.persistence.data.storageClass }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end -}}
|
|
@ -1,93 +0,0 @@
|
|||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: {{ template "rtorrent-flood.fullname" . }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
strategy:
|
|
||||||
type: {{ .Values.strategyType }}
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
containerPort: 3000
|
|
||||||
protocol: TCP
|
|
||||||
- name: bt
|
|
||||||
containerPort: 49184
|
|
||||||
protocol: TCP
|
|
||||||
livenessProbe:
|
|
||||||
httpGet:
|
|
||||||
port: http
|
|
||||||
initialDelaySeconds: 30
|
|
||||||
failureThreshold: 5
|
|
||||||
timeoutSeconds: 10
|
|
||||||
readinessProbe:
|
|
||||||
httpGet:
|
|
||||||
port: http
|
|
||||||
initialDelaySeconds: 30
|
|
||||||
failureThreshold: 5
|
|
||||||
timeoutSeconds: 10
|
|
||||||
tty: true
|
|
||||||
env:
|
|
||||||
- name: TZ
|
|
||||||
value: "{{ .Values.timezone }}"
|
|
||||||
- name: FLOOD_SECRET
|
|
||||||
value: "{{ .Values.floodSecret }}"
|
|
||||||
- name: UID
|
|
||||||
value: "{{ .Values.uid }}"
|
|
||||||
- name: GID
|
|
||||||
value: "{{ .Values.gid }}"
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /flood-db
|
|
||||||
name: flood-db
|
|
||||||
- mountPath: /data
|
|
||||||
name: data
|
|
||||||
{{- if .Values.persistence.data.subPath }}
|
|
||||||
subPath: {{ .Values.persistence.data.subPath }}
|
|
||||||
{{ end }}
|
|
||||||
resources:
|
|
||||||
{{ toYaml .Values.resources | indent 12 }}
|
|
||||||
volumes:
|
|
||||||
- name: flood-db
|
|
||||||
{{- if .Values.persistence.config.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "rtorrent-flood.fullname" . }}-config{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{ end }}
|
|
||||||
- name: data
|
|
||||||
{{- if .Values.persistence.data.enabled }}
|
|
||||||
persistentVolumeClaim:
|
|
||||||
claimName: {{ if .Values.persistence.data.existingClaim }}{{ .Values.persistence.data.existingClaim }}{{- else }}{{ template "rtorrent-flood.fullname" . }}-data{{- end }}
|
|
||||||
{{- else }}
|
|
||||||
emptyDir: {}
|
|
||||||
{{ end }}
|
|
||||||
{{- with .Values.nodeSelector }}
|
|
||||||
nodeSelector:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.affinity }}
|
|
||||||
affinity:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.tolerations }}
|
|
||||||
tolerations:
|
|
||||||
{{ toYaml . | indent 8 }}
|
|
||||||
{{- end }}
|
|
@ -1,52 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: {{ template "rtorrent-flood.fullname" . }}-gui
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- if .Values.guiService.labels }}
|
|
||||||
{{ toYaml .Values.guiService.labels | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.guiService.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if (or (eq .Values.guiService.type "ClusterIP") (empty .Values.guiService.type)) }}
|
|
||||||
type: ClusterIP
|
|
||||||
{{- if .Values.guiService.clusterIP }}
|
|
||||||
clusterIP: {{ .Values.guiService.clusterIP }}
|
|
||||||
{{end}}
|
|
||||||
{{- else if eq .Values.guiService.type "LoadBalancer" }}
|
|
||||||
type: {{ .Values.guiService.type }}
|
|
||||||
{{- if .Values.guiService.loadBalancerIP }}
|
|
||||||
loadBalancerIP: {{ .Values.guiService.loadBalancerIP }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.guiService.loadBalancerSourceRanges }}
|
|
||||||
loadBalancerSourceRanges:
|
|
||||||
{{ toYaml .Values.guiService.loadBalancerSourceRanges | indent 4 }}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else }}
|
|
||||||
type: {{ .Values.guiService.type }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.guiService.externalIPs }}
|
|
||||||
externalIPs:
|
|
||||||
{{ toYaml .Values.guiService.externalIPs | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.guiService.externalTrafficPolicy }}
|
|
||||||
externalTrafficPolicy: {{ .Values.guiService.externalTrafficPolicy }}
|
|
||||||
{{- end }}
|
|
||||||
ports:
|
|
||||||
- name: http
|
|
||||||
port: {{ .Values.guiService.port }}
|
|
||||||
protocol: TCP
|
|
||||||
targetPort: http
|
|
||||||
{{ if (and (eq .Values.guiService.type "NodePort") (not (empty .Values.guiService.nodePort))) }}
|
|
||||||
nodePort: {{.Values.guiService.nodePort}}
|
|
||||||
{{ end }}
|
|
||||||
selector:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
@ -1,38 +0,0 @@
|
|||||||
{{- if .Values.ingress.enabled -}}
|
|
||||||
{{- $fullName := include "rtorrent-flood.fullname" . -}}
|
|
||||||
{{- $ingressPath := .Values.ingress.path -}}
|
|
||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Ingress
|
|
||||||
metadata:
|
|
||||||
name: {{ $fullName }}
|
|
||||||
labels:
|
|
||||||
app.kubernetes.io/name: {{ include "rtorrent-flood.name" . }}
|
|
||||||
helm.sh/chart: {{ include "rtorrent-flood.chart" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- with .Values.ingress.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml . | indent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
{{- if .Values.ingress.tls }}
|
|
||||||
tls:
|
|
||||||
{{- range .Values.ingress.tls }}
|
|
||||||
- hosts:
|
|
||||||
{{- range .hosts }}
|
|
||||||
- {{ . }}
|
|
||||||
{{- end }}
|
|
||||||
secretName: {{ .secretName }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
rules:
|
|
||||||
{{- range .Values.ingress.hosts }}
|
|
||||||
- host: {{ . }}
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: {{ $ingressPath }}
|
|
||||||
backend:
|
|
||||||
serviceName: {{ $fullName }}-gui
|
|
||||||
servicePort: http
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
@ -1,128 +0,0 @@
# Default values for rtorrent-flood.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: wonderfall/rtorrent-flood
  tag: 0.9.7-0.13.7
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

nameOverride: ""
fullnameOverride: ""

#timezone: UTC
floodSecret: "supersecret"
uid: 1001
gid: 1001

guiService:
  type: ClusterIP
  port: 3000
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

btService:
  type: NodePort
  port: 49184
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster


ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
  data:
    enabled: true
    ## torrents data volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #  cpu: 100m
  #  memory: 128Mi
  # requests:
  #  cpu: 100m
  #  memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@ -1,17 +0,0 @@
apiVersion: v1
appVersion: amd64-3.0.1.418-ls45
description: Sonarr is a television show downloading client
name: sonarr
version: 2.0.2
keywords:
- sonarr
- usenet
- bittorrent
home: https://github.com/billimek/billimek-charts/tree/master/sonarr
icon: https://avatars1.githubusercontent.com/u/1082903?s=400&v=4
sources:
- https://hub.docker.com/r/linuxserver/sonarr/
- https://sonarr.tv/
maintainers:
- name: billimek
  email: jeff@billimek.com
@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek
@ -1,90 +0,0 @@
# sonarr television show download client

This is a helm chart for [sonarr](https://github.com/sonarr/sonarr/) leveraging the [Linuxserver.io image](https://hub.docker.com/r/linuxserver/sonarr/)

## TL;DR;

```shell
$ helm repo add billimek https://billimek.github.io/helm-repo
$ helm install billimek/sonarr
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release billimek/sonarr
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the sonarr chart and their default values.

| Parameter | Description | Default |
|----------------------------|-------------------------------------|---------------------------------------------------------|
| `image.repository` | Image repository | `linuxserver/sonarr` |
| `image.tag` | Image tag. Possible values listed [here](https://hub.docker.com/r/linuxserver/sonarr/tags/). | `amd64-3.0.1.418-ls45` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `timezone` | Timezone the sonarr instance should run as, e.g. 'America/New_York' | `UTC` |
| `puid` | process userID the sonarr instance should run as | `1001` |
| `pgid` | process groupID the sonarr instance should run as | `1001` |
| `service.type` | Kubernetes service type for the sonarr GUI | `ClusterIP` |
| `service.port` | Kubernetes port where the sonarr GUI is exposed | `8989` |
| `service.annotations` | Service annotations for the sonarr GUI | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.loadBalancerIP` | LoadBalancer IP for the sonarr GUI | `{}` |
| `service.loadBalancerSourceRanges` | List of IP CIDRs allowed access to load balancer (if supported) | None |
| `ingress.enabled` | Enables Ingress | `false` |
| `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.labels` | Custom labels | `{}` |
| `ingress.path` | Ingress path | `/` |
| `ingress.hosts` | Ingress accepted hostnames | `chart-example.local` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `persistence.config.enabled` | Use persistent volume to store configuration data | `true` |
| `persistence.config.size` | Size of persistent volume claim | `1Gi` |
| `persistence.config.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.config.storageClass` | Type of persistent volume claim | `-` |
| `persistence.config.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.downloads.enabled` | Use persistent volume for downloads | `true` |
| `persistence.downloads.size` | Size of persistent volume claim | `10Gi` |
| `persistence.downloads.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.downloads.storageClass` | Type of persistent volume claim | `-` |
| `persistence.downloads.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.tv.enabled` | Use persistent volume for tv show persistence | `true` |
| `persistence.tv.size` | Size of persistent volume claim | `10Gi` |
| `persistence.tv.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.tv.storageClass` | Type of persistent volume claim | `-` |
| `persistence.tv.accessMode` | Persistence access mode | `ReadWriteOnce` |
| `persistence.extraExistingClaimMounts` | Optionally add multiple existing claims | `[]` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `affinity` | Affinity settings for pod assignment | `{}` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
helm install --name my-release \
  --set timezone="America/New_York" \
  billimek/sonarr
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
helm install --name my-release -f values.yaml billimek/sonarr
```

Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
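
As a rough sketch, an override file that reuses pre-existing claims instead of provisioning new ones (the claim names below are placeholders, not defaults) might look like:

```yaml
# my-values.yaml -- illustrative overrides for the sonarr chart
persistence:
  downloads:
    existingClaim: downloads-pvc   # placeholder; PVC must already exist
  tv:
    existingClaim: tv-library-pvc  # placeholder; PVC must already exist
```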
@ -1,19 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sonarr.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of it by running 'kubectl get svc -w {{ include "sonarr.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sonarr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sonarr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
@ -1,32 +0,0 @@
|
|||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "sonarr.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "sonarr.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "sonarr.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
@ -1,25 +0,0 @@
{{- if and .Values.persistence.config.enabled (not .Values.persistence.config.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "sonarr.fullname" . }}-config
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.config.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.config.size | quote }}
{{- if .Values.persistence.config.storageClass }}
{{- if (eq "-" .Values.persistence.config.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.config.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
@ -1,105 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "sonarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "sonarr.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "sonarr.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8989
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: PUID
              value: "{{ .Values.puid }}"
            - name: PGID
              value: "{{ .Values.pgid }}"
          volumeMounts:
            - mountPath: /config
              name: config
            - mountPath: /downloads
              name: downloads
              {{- if .Values.persistence.downloads.subPath }}
              subPath: {{ .Values.persistence.downloads.subPath }}
              {{- end }}
            - mountPath: /tv
              name: tv
              {{- if .Values.persistence.tv.subPath }}
              subPath: {{ .Values.persistence.tv.subPath }}
              {{- end }}
            {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config
          {{- if .Values.persistence.config.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.config.existingClaim }}{{ .Values.persistence.config.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-config{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: downloads
          {{- if .Values.persistence.downloads.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.downloads.existingClaim }}{{ .Values.persistence.downloads.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-downloads{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: tv
          {{- if .Values.persistence.tv.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.tv.existingClaim }}{{ .Values.persistence.tv.existingClaim }}{{- else }}{{ template "sonarr.fullname" . }}-tv{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
@ -1,25 +0,0 @@
{{- if and .Values.persistence.downloads.enabled (not .Values.persistence.downloads.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "sonarr.fullname" . }}-downloads
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.downloads.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.downloads.size | quote }}
{{- if .Values.persistence.downloads.storageClass }}
{{- if (eq "-" .Values.persistence.downloads.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.downloads.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
@ -1,38 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "sonarr.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
@ -1,52 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "sonarr.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{ end }}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
      {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{ .Values.service.nodePort }}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
@ -1,25 +0,0 @@
{{- if and .Values.persistence.tv.enabled (not .Values.persistence.tv.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "sonarr.fullname" . }}-tv
  labels:
    app.kubernetes.io/name: {{ include "sonarr.name" . }}
    helm.sh/chart: {{ include "sonarr.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.tv.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.tv.size | quote }}
{{- if .Values.persistence.tv.storageClass }}
{{- if (eq "-" .Values.persistence.tv.storageClass) }}
  storageClassName: ""
{{- else }}
  storageClassName: "{{ .Values.persistence.tv.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}
@ -1,129 +0,0 @@
# Default values for sonarr.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: linuxserver/sonarr
  tag: amd64-3.0.1.418-ls45
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1001
pgid: 1001

service:
  type: ClusterIP
  port: 8989
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  config:
    enabled: true
    ## sonarr configuration data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
  downloads:
    enabled: true
    ## sonarr downloads volume configuration
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
  tv:
    enabled: true
    ## Directory where television shows are persisted
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    # subPath: some-subpath
    accessMode: ReadWriteOnce
    size: 10Gi
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@ -1,15 +0,0 @@
apiVersion: v1
name: speedtest
version: 1.1.0
appVersion: 1.0.0
description: periodic speedtest and save the results to InfluxDB
keywords:
- speedtest
- influxdb
home: https://github.com/billimek/billimek-charts/tree/master/speedtest
sources:
- https://github.com/billimek/Speedtest-for-InfluxDB-and-Grafana
- https://github.com/billimek/billimek-charts
maintainers:
- name: billimek
  email: jeff@billimek.com
@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek
@ -1,69 +0,0 @@
# Speedtest.net Collector For InfluxDB and Grafana

![Screenshot](https://camo.githubusercontent.com/c652a6685bcb5a8cec6a47c92e57d159b28e47e7/68747470733a2f2f7075752e73682f746d664f412f623535373665383864652e706e67)

This tool is a wrapper for speedtest-cli which allows you to run periodic speedtests and save the results to InfluxDB.

## TL;DR;

```console
$ helm repo add billimek https://billimek.github.io/helm-repo
$ helm install billimek/speedtest
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
$ helm install --name my-release billimek/speedtest
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
$ helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The configuration is set as a block of text through a ConfigMap and mounted as a file at /src/config.ini. Any value in this text block should match the defined speedtest configuration, and several of these values will have to match your Kubernetes configuration.
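
For illustration, a minimal values override for this block might look like the following sketch (the keys mirror this chart's `values.yaml`; the host name and delay are placeholder values):

```yaml
# example-values.yaml -- illustrative override, adjust to your environment
config:
  delay: 1800                 # seconds between speedtest runs
  influxdb:
    host: influxdb-influxdb   # hostname of your InfluxDB service
    port: 8086
    database: speedtests
    ssl: false
  speedtest:
    server: ""                # leave blank to auto-pick a server
```

The chart's ConfigMap template renders these values into the `config.ini` consumed by the container.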

The following table lists the configurable parameters of the speedtest chart and their default values.

| Parameter | Description | Default |
| ------------------------------- | ------------------------------- | ---------------------------------------------------------- |
| `image.repository` | speedtest image | `atribe/speedtest-for-influxdb-and-grafana` |
| `image.tag` | speedtest image tag | `latest` |
| `image.pullPolicy` | speedtest image pull policy | `IfNotPresent` |
| `debug` | Display debugging output | `false` |
| `config.delay` | how many seconds to wait between checks | `3600` |
| `config.influxdb.host` | InfluxDB hostname | `influxdb-influxdb` |
| `config.influxdb.port` | InfluxDB port | `8086` |
| `config.influxdb.database` | InfluxDB database | `speedtests` |
| `config.influxdb.username` | InfluxDB username | `` |
| `config.influxdb.password` | InfluxDB password | `` |
| `config.influxdb.ssl` | InfluxDB connection using SSL | `false` |
| `config.speedtest.server` | server to use for speedtest - leave blank to auto-pick | `` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
$ helm install --name my-release \
    --set config.influxdb.host=some-influx-host \
    billimek/speedtest
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install --name my-release -f values.yaml billimek/speedtest
```

Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
@ -1,7 +0,0 @@
You can connect to the container running speedtest. To open a shell session in the pod run the following:

- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "speedtest.fullname" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh

To tail the logs for the speedtest pod run the following:

- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "speedtest.fullname" . }} -o jsonpath='{ .items[0].metadata.name }')
@ -1,27 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "speedtest.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}


{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "speedtest.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
@ -1,32 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "speedtest.fullname" . }}
  labels:
    app: {{ template "speedtest.name" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
data:
  config.ini: |
    [GENERAL]
    Delay = {{ .Values.config.delay }}
    [INFLUXDB]
    Address = {{ .Values.config.influxdb.host }}
    Port = {{ .Values.config.influxdb.port }}
    Database = {{ .Values.config.influxdb.database }}
    Username = {{ .Values.config.influxdb.username }}
    Password = {{ .Values.config.influxdb.password }}
    {{- if .Values.config.influxdb.ssl }}
    Verify_SSL = True
    {{- else }}
    Verify_SSL = False
    {{- end }}
    [SPEEDTEST]
    Server = {{ .Values.config.speedtest.server }}
    [LOGGING]
    {{- if .Values.debug }}
    Level = debug
    {{- else }}
    Level = info
    {{- end }}
@ -1,42 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: {{ template "speedtest.fullname" . }}
  labels:
    app: {{ template "speedtest.name" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ template "speedtest.name" . }}
      release: {{ .Release.Name }}
  replicas: {{ default 1 .Values.replicas }}
  template:
    metadata:
      labels:
        app: {{ template "speedtest.name" . }}
        release: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
          volumeMounts:
            - name: {{ template "speedtest.name" . }}
              mountPath: /src/config.ini
              subPath: config.ini
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      volumes:
        - name: {{ template "speedtest.name" . }}
          configMap:
            name: {{ template "speedtest.fullname" . }}
            items:
              - key: config.ini
                path: config.ini
@ -1,35 +0,0 @@
# Default values for speedtest.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
  repository: atribe/speedtest-for-influxdb-and-grafana
  tag: latest
  pullPolicy: IfNotPresent
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
nodeSelector: {}
debug: false
config:
  # how many seconds to wait between checks
  delay: 3600
  influxdb:
    # host/port/database are mandatory - change as needed
    host: influxdb-influxdb
    port: 8086
    database: speedtests
    # username:
    # password:
    ssl: false
  speedtest:
    # Leave blank to auto pick server
    server:
@ -1 +0,0 @@
The Unifi chart is now an official helm chart: https://github.com/helm/charts/tree/master/stable/unifi
@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
@ -1,15 +0,0 @@
apiVersion: v1
name: uptimerobot
version: 1.1.2
appVersion: 1.1.0
description: A tool to get statistics from Uptime Robot and log it into InfluxDB
keywords:
- uptimerobot
- influxdb
home: https://github.com/billimek/billimek-charts/tree/master/uptimerobot
sources:
- https://github.com/trojanc/node-influx-uptimerobot
- https://github.com/billimek/billimek-charts
maintainers:
- name: billimek
  email: jeff@billimek.com
@ -1,4 +0,0 @@
approvers:
- billimek
reviewers:
- billimek
@ -1,71 +0,0 @@
# A tool to get statistics from Uptime Robot and log it into InfluxDB

![Screenshot](https://github.com/billimek/node-influx-uptimerobot/blob/master/docs/dashboard.png)

This tool allows you to run periodic collections of Uptime Robot statistics and save the results to InfluxDB.

## TL;DR;

```console
$ helm repo add billimek https://billimek.github.io/helm-repo
$ helm install billimek/uptimerobot
```

## Introduction

This code is adopted from [this original repo](https://github.com/trojanc/node-influx-uptimerobot)

## Installing the Chart

To install the chart with the release name `my-release`:

```console
$ helm install --name my-release billimek/uptimerobot
```
## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
$ helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The configuration is supplied as chart values that the deployment renders into environment variables on the pod; the Uptime Robot API key is stored in a secret. Several of these values will have to match your Kubernetes and InfluxDB setup.
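
For illustration, a minimal values override might look like the following sketch (keys taken from the parameter table below; the API key shown is a placeholder):

```yaml
# example-values.yaml -- illustrative override, adjust to your environment
delay: 300                      # seconds between collections
config:
  uptimerobot:
    apikey: someapikey          # placeholder -- substitute your real Uptime Robot API key
  influxdb:
    host: influxdb-influxdb     # hostname of your InfluxDB service
    port: 8086
    protocol: http
    database: uptimerobot
```

Any value omitted here falls back to the chart default listed in the table below.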

The following table lists the configurable parameters of the uptimerobot chart and their default values.

| Parameter | Description | Default |
| ------------------------------- | ------------------------------- | ---------------------------------------------------------- |
| `image.repository` | uptimerobot image | `billimek/uptimerobotusage-for-influxdb` |
| `image.tag` | uptimerobot image tag | `latest` |
| `image.pullPolicy` | uptimerobot image pull policy | `IfNotPresent` |
| `delay` | number of seconds to wait between collections | `300` |
| `config.influxdb.host` | InfluxDB hostname | `influxdb-influxdb` |
| `config.influxdb.port` | InfluxDB port | `8086` |
| `config.influxdb.database` | InfluxDB database | `uptimerobot` |
| `config.influxdb.protocol` | InfluxDB protocol | `http` |
| `config.influxdb.username` | InfluxDB username | `` |
| `config.influxdb.password` | InfluxDB password | `` |
| `config.uptimerobot.apikey` | uptimerobot API key (REQUIRED) | `someapikey` |

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

```console
$ helm install --name my-release \
    --set config.uptimerobot.apikey=thisismyapikey \
    billimek/uptimerobot
```

Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

```console
$ helm install --name my-release -f values.yaml billimek/uptimerobot
```

Read through the [values.yaml](values.yaml) file. It has several commented out suggested values.
@ -1,7 +0,0 @@
You can connect to the container running uptimerobot. To open a shell session in the pod run the following:

- kubectl exec -i -t --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "uptimerobot.fullname" . }} -o jsonpath='{.items[0].metadata.name}') /bin/sh

To tail the logs for the uptimerobot pod run the following:

- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "uptimerobot.fullname" . }} -o jsonpath='{ .items[0].metadata.name }')
@ -1,34 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "uptimerobot.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}


{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "uptimerobot.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "uptimerobot.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@ -1,69 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: {{ template "uptimerobot.fullname" . }}
  labels:
    app: {{ template "uptimerobot.name" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  selector:
    matchLabels:
      app: {{ template "uptimerobot.name" . }}
      release: {{ .Release.Name }}
  replicas: {{ default 1 .Values.replicas }}
  template:
    metadata:
      labels:
        app: {{ template "uptimerobot.name" . }}
        release: {{ .Release.Name }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: UPTIMEROBOT_API_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ template "uptimerobot.fullname" . }}
                  key: uptimerobot-apikey
            {{- if .Values.config.influxdb.username }}
            - name: INFLUX_USERNAME
              valueFrom:
                configMapKeyRef:
                  name: {{ template "uptimerobot.fullname" . }}
                  key: influxdb-username
            {{- end }}
            {{- if .Values.config.influxdb.password }}
            - name: INFLUX_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: {{ template "uptimerobot.fullname" . }}
                  key: influxdb-password
            {{- end }}
            - name: APPLICATION_INTERVAL
              value: "{{ .Values.delay }}"
            {{- if .Values.config.uptimerobot.logs_limit }}
            - name: UPTIMEROBOT_LOGS_LIMIT
              value: "{{ .Values.config.uptimerobot.logs_limit }}"
            {{- end }}
            {{- if .Values.config.uptimerobot.response_times_limit }}
            - name: UPTIMEROBOT_RESPONSE_TIMES_LIMIT
              value: "{{ .Values.config.uptimerobot.response_times_limit }}"
            {{- end }}
            - name: INFLUX_HOST
              value: "{{ .Values.config.influxdb.host }}"
            - name: INFLUX_PORT
              value: "{{ .Values.config.influxdb.port }}"
            - name: INFLUX_PROTOCOL
              value: "{{ .Values.config.influxdb.protocol }}"
            - name: INFLUX_DATABASE
              value: "{{ .Values.config.influxdb.database }}"
          resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
Some files were not shown because too many files have changed in this diff.