From 3145c2a953bcf4e8cb65fc005b99d5030f7f0e8a Mon Sep 17 00:00:00 2001 From: Bogdan Alov Date: Wed, 15 May 2019 17:20:23 +0300 Subject: [PATCH 1/4] updated efk stack --- efk/elasticsearch-curator/.helmignore | 21 ++ efk/elasticsearch-curator/Chart.yaml | 18 ++ efk/elasticsearch-curator/OWNERS | 6 + efk/elasticsearch-curator/README.md | 59 ++++ .../ci/initcontainer-values.yaml | 9 + efk/elasticsearch-curator/templates/NOTES.txt | 6 + .../templates/_helpers.tpl | 44 +++ .../templates/configmap.yaml | 12 + .../templates/cronjob.yaml | 115 ++++++++ .../templates/hooks/job.install.yaml | 69 +++++ efk/elasticsearch-curator/values.yaml | 140 ++++++++++ efk/elasticsearch-exporter/.helmignore | 24 ++ efk/{ => elasticsearch-exporter}/Chart.yaml | 12 +- efk/elasticsearch-exporter/OWNERS | 6 + efk/elasticsearch-exporter/README.md | 87 ++++++ .../templates/NOTES.txt | 15 ++ .../templates/_helpers.tpl | 15 +- .../templates/cert-secret.yaml} | 10 +- .../templates/deployment.yaml} | 48 ++-- .../templates/service.yaml} | 10 +- .../templates/servicemonitor.yaml | 30 +++ efk/elasticsearch-exporter/values.yaml | 91 +++++++ efk/elasticsearch/.helmignore | 3 + efk/elasticsearch/Chart.yaml | 23 ++ efk/elasticsearch/OWNERS | 10 + efk/elasticsearch/README.md | 252 ++++++++++++++++++ .../ci/extrainitcontainers-values.yaml | 9 + .../ci/plugin-initcontainer-values.yaml | 7 + efk/elasticsearch/templates/NOTES.txt | 31 +++ efk/elasticsearch/templates/_helpers.tpl | 108 ++++++++ efk/elasticsearch/templates/client-auth.yaml | 11 + .../templates/client-deployment.yaml | 171 ++++++++++++ .../templates/client-ingress.yaml | 44 +++ efk/elasticsearch/templates/client-pdb.yaml | 24 ++ .../templates/client-serviceaccount.yaml | 12 + efk/elasticsearch/templates/client-svc.yaml | 34 +++ efk/elasticsearch/templates/configmap.yaml | 169 ++++++++++++ efk/elasticsearch/templates/data-pdb.yaml | 24 ++ .../templates/data-serviceaccount.yaml | 12 + .../templates/data-statefulset.yaml | 226 ++++++++++++++++ 
efk/elasticsearch/templates/job.yaml | 34 +++ efk/elasticsearch/templates/master-pdb.yaml | 24 ++ .../templates/master-serviceaccount.yaml | 12 + .../templates/master-statefulset.yaml | 214 +++++++++++++++ efk/elasticsearch/templates/master-svc.yaml | 19 ++ .../templates/podsecuritypolicy.yaml | 43 +++ efk/elasticsearch/templates/role.yaml | 17 ++ efk/elasticsearch/templates/rolebinding.yaml | 26 ++ .../templates/tests/test-configmap.yaml | 15 ++ efk/elasticsearch/templates/tests/test.yaml | 42 +++ efk/elasticsearch/values.yaml | 230 ++++++++++++++++ efk/fluent-bit/Chart.yaml | 18 ++ efk/fluent-bit/OWNERS | 6 + efk/fluent-bit/README.md | 152 +++++++++++ efk/fluent-bit/templates/NOTES.txt | 15 ++ efk/fluent-bit/templates/_helpers.tpl | 57 ++++ efk/fluent-bit/templates/cluster-role.yaml | 18 ++ .../templates/cluster-rolebinding.yaml | 19 ++ efk/fluent-bit/templates/config.yaml | 194 ++++++++++++++ efk/fluent-bit/templates/daemonset.yaml | 154 +++++++++++ efk/fluent-bit/templates/secret.yaml | 12 + efk/fluent-bit/templates/service.yaml | 25 ++ efk/fluent-bit/templates/serviceaccount.yaml | 11 + .../templates/tests/test-configmap.yaml | 43 +++ efk/fluent-bit/templates/tests/test.yaml | 53 ++++ efk/fluent-bit/values.yaml | 244 +++++++++++++++++ efk/kibana/.helmignore | 21 ++ efk/kibana/Chart.yaml | 17 ++ efk/kibana/OWNERS | 6 + efk/kibana/README.md | 140 ++++++++++ efk/kibana/ci/authproxy-enabled.yaml | 3 + efk/kibana/ci/dashboard-values.yaml | 21 ++ efk/kibana/ci/extra-configmap-mounts.yaml | 6 + efk/kibana/ci/ingress-hosts-paths.yaml | 3 + efk/kibana/ci/ingress-hosts.yaml | 3 + efk/kibana/ci/initcontainers-all-values.yaml | 23 ++ efk/kibana/ci/initcontainers-values.yaml | 18 ++ efk/kibana/ci/plugin-install.yaml | 9 + efk/kibana/ci/pvc.yaml | 11 + efk/kibana/ci/security-context.yaml | 6 + efk/kibana/ci/service-values.yaml | 4 + efk/kibana/ci/url_dashboard-values.yaml | 7 + efk/kibana/templates/NOTES.txt | 18 ++ efk/kibana/templates/_helpers.tpl | 40 +++ 
.../templates/configmap-dashboardimport.yaml | 67 +++++ efk/kibana/templates/configmap.yaml | 14 + efk/kibana/templates/deployment.yaml | 238 +++++++++++++++++ efk/kibana/templates/ingress.yaml | 33 +++ efk/kibana/templates/service.yaml | 56 ++++ efk/kibana/templates/serviceaccount.yaml | 11 + .../templates/tests/test-configmap.yaml | 35 +++ efk/kibana/templates/tests/test.yaml | 42 +++ efk/kibana/templates/volume-claim.yaml | 31 +++ efk/kibana/values.yaml | 228 ++++++++++++++++ efk/templates/NOTES.txt | 1 - efk/templates/es-curator-config.yaml | 57 ---- efk/templates/es-curator.yaml | 29 -- efk/templates/es-data-stateful.yaml | 98 ------- efk/templates/es-data-svc.yaml | 15 -- efk/templates/es-discovery-svc.yaml | 16 -- efk/templates/es-ingest-svc.yaml | 15 -- efk/templates/es-ingest.yaml | 91 ------- efk/templates/es-master-stateful.yaml | 94 ------- efk/templates/es-master-svc.yaml | 15 -- efk/templates/es-svc.yaml | 15 -- efk/templates/fluentbit-configmap.yaml | 61 ----- efk/templates/fluentbit-ds.yaml | 61 ----- efk/templates/fluentbit-roles.yaml | 29 -- efk/templates/kibana-svc.yaml | 13 - efk/templates/kibana.yaml | 37 --- efk/values.yaml | 168 ------------ 111 files changed, 4773 insertions(+), 867 deletions(-) create mode 100644 efk/elasticsearch-curator/.helmignore create mode 100644 efk/elasticsearch-curator/Chart.yaml create mode 100644 efk/elasticsearch-curator/OWNERS create mode 100644 efk/elasticsearch-curator/README.md create mode 100644 efk/elasticsearch-curator/ci/initcontainer-values.yaml create mode 100644 efk/elasticsearch-curator/templates/NOTES.txt create mode 100644 efk/elasticsearch-curator/templates/_helpers.tpl create mode 100644 efk/elasticsearch-curator/templates/configmap.yaml create mode 100644 efk/elasticsearch-curator/templates/cronjob.yaml create mode 100644 efk/elasticsearch-curator/templates/hooks/job.install.yaml create mode 100644 efk/elasticsearch-curator/values.yaml create mode 100644 efk/elasticsearch-exporter/.helmignore 
rename efk/{ => elasticsearch-exporter}/Chart.yaml (53%) create mode 100644 efk/elasticsearch-exporter/OWNERS create mode 100644 efk/elasticsearch-exporter/README.md create mode 100644 efk/elasticsearch-exporter/templates/NOTES.txt rename efk/{ => elasticsearch-exporter}/templates/_helpers.tpl (64%) rename efk/{templates/es-exporter-cert-secret.yaml => elasticsearch-exporter/templates/cert-secret.yaml} (52%) rename efk/{templates/es-exporter-deployment.yaml => elasticsearch-exporter/templates/deployment.yaml} (57%) rename efk/{templates/es-exporter-service.yaml => elasticsearch-exporter/templates/service.yaml} (60%) create mode 100644 efk/elasticsearch-exporter/templates/servicemonitor.yaml create mode 100644 efk/elasticsearch-exporter/values.yaml create mode 100644 efk/elasticsearch/.helmignore create mode 100644 efk/elasticsearch/Chart.yaml create mode 100644 efk/elasticsearch/OWNERS create mode 100644 efk/elasticsearch/README.md create mode 100644 efk/elasticsearch/ci/extrainitcontainers-values.yaml create mode 100644 efk/elasticsearch/ci/plugin-initcontainer-values.yaml create mode 100644 efk/elasticsearch/templates/NOTES.txt create mode 100644 efk/elasticsearch/templates/_helpers.tpl create mode 100644 efk/elasticsearch/templates/client-auth.yaml create mode 100644 efk/elasticsearch/templates/client-deployment.yaml create mode 100644 efk/elasticsearch/templates/client-ingress.yaml create mode 100644 efk/elasticsearch/templates/client-pdb.yaml create mode 100644 efk/elasticsearch/templates/client-serviceaccount.yaml create mode 100644 efk/elasticsearch/templates/client-svc.yaml create mode 100644 efk/elasticsearch/templates/configmap.yaml create mode 100644 efk/elasticsearch/templates/data-pdb.yaml create mode 100644 efk/elasticsearch/templates/data-serviceaccount.yaml create mode 100644 efk/elasticsearch/templates/data-statefulset.yaml create mode 100644 efk/elasticsearch/templates/job.yaml create mode 100644 efk/elasticsearch/templates/master-pdb.yaml create 
mode 100644 efk/elasticsearch/templates/master-serviceaccount.yaml create mode 100644 efk/elasticsearch/templates/master-statefulset.yaml create mode 100644 efk/elasticsearch/templates/master-svc.yaml create mode 100644 efk/elasticsearch/templates/podsecuritypolicy.yaml create mode 100644 efk/elasticsearch/templates/role.yaml create mode 100644 efk/elasticsearch/templates/rolebinding.yaml create mode 100644 efk/elasticsearch/templates/tests/test-configmap.yaml create mode 100644 efk/elasticsearch/templates/tests/test.yaml create mode 100644 efk/elasticsearch/values.yaml create mode 100644 efk/fluent-bit/Chart.yaml create mode 100644 efk/fluent-bit/OWNERS create mode 100644 efk/fluent-bit/README.md create mode 100644 efk/fluent-bit/templates/NOTES.txt create mode 100644 efk/fluent-bit/templates/_helpers.tpl create mode 100644 efk/fluent-bit/templates/cluster-role.yaml create mode 100644 efk/fluent-bit/templates/cluster-rolebinding.yaml create mode 100644 efk/fluent-bit/templates/config.yaml create mode 100644 efk/fluent-bit/templates/daemonset.yaml create mode 100644 efk/fluent-bit/templates/secret.yaml create mode 100644 efk/fluent-bit/templates/service.yaml create mode 100644 efk/fluent-bit/templates/serviceaccount.yaml create mode 100644 efk/fluent-bit/templates/tests/test-configmap.yaml create mode 100644 efk/fluent-bit/templates/tests/test.yaml create mode 100644 efk/fluent-bit/values.yaml create mode 100644 efk/kibana/.helmignore create mode 100644 efk/kibana/Chart.yaml create mode 100644 efk/kibana/OWNERS create mode 100644 efk/kibana/README.md create mode 100644 efk/kibana/ci/authproxy-enabled.yaml create mode 100644 efk/kibana/ci/dashboard-values.yaml create mode 100644 efk/kibana/ci/extra-configmap-mounts.yaml create mode 100644 efk/kibana/ci/ingress-hosts-paths.yaml create mode 100644 efk/kibana/ci/ingress-hosts.yaml create mode 100644 efk/kibana/ci/initcontainers-all-values.yaml create mode 100644 efk/kibana/ci/initcontainers-values.yaml create mode 
100644 efk/kibana/ci/plugin-install.yaml create mode 100644 efk/kibana/ci/pvc.yaml create mode 100644 efk/kibana/ci/security-context.yaml create mode 100644 efk/kibana/ci/service-values.yaml create mode 100644 efk/kibana/ci/url_dashboard-values.yaml create mode 100644 efk/kibana/templates/NOTES.txt create mode 100644 efk/kibana/templates/_helpers.tpl create mode 100644 efk/kibana/templates/configmap-dashboardimport.yaml create mode 100644 efk/kibana/templates/configmap.yaml create mode 100644 efk/kibana/templates/deployment.yaml create mode 100644 efk/kibana/templates/ingress.yaml create mode 100644 efk/kibana/templates/service.yaml create mode 100644 efk/kibana/templates/serviceaccount.yaml create mode 100644 efk/kibana/templates/tests/test-configmap.yaml create mode 100644 efk/kibana/templates/tests/test.yaml create mode 100644 efk/kibana/templates/volume-claim.yaml create mode 100644 efk/kibana/values.yaml delete mode 100644 efk/templates/NOTES.txt delete mode 100644 efk/templates/es-curator-config.yaml delete mode 100644 efk/templates/es-curator.yaml delete mode 100644 efk/templates/es-data-stateful.yaml delete mode 100644 efk/templates/es-data-svc.yaml delete mode 100644 efk/templates/es-discovery-svc.yaml delete mode 100644 efk/templates/es-ingest-svc.yaml delete mode 100644 efk/templates/es-ingest.yaml delete mode 100644 efk/templates/es-master-stateful.yaml delete mode 100644 efk/templates/es-master-svc.yaml delete mode 100644 efk/templates/es-svc.yaml delete mode 100644 efk/templates/fluentbit-configmap.yaml delete mode 100644 efk/templates/fluentbit-ds.yaml delete mode 100644 efk/templates/fluentbit-roles.yaml delete mode 100644 efk/templates/kibana-svc.yaml delete mode 100644 efk/templates/kibana.yaml delete mode 100644 efk/values.yaml diff --git a/efk/elasticsearch-curator/.helmignore b/efk/elasticsearch-curator/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/efk/elasticsearch-curator/.helmignore @@ -0,0 +1,21 @@ +# Patterns 
to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/efk/elasticsearch-curator/Chart.yaml b/efk/elasticsearch-curator/Chart.yaml new file mode 100644 index 0000000..371e702 --- /dev/null +++ b/efk/elasticsearch-curator/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +appVersion: "5.5.4" +description: A Helm chart for Elasticsearch Curator +name: elasticsearch-curator +version: 1.4.0 +home: https://github.com/elastic/curator +keywords: +- curator +- elasticsearch +- elasticsearch-curator +sources: +- https://github.com/kubernetes/charts/elasticsearch-curator +- https://github.com/pires/docker-elasticsearch-curator +maintainers: + - name: tmestdagh + email: mestdagh.tom@gmail.com + - name: gianrubio + email: gianrubio@gmail.com diff --git a/efk/elasticsearch-curator/OWNERS b/efk/elasticsearch-curator/OWNERS new file mode 100644 index 0000000..d8c0ba0 --- /dev/null +++ b/efk/elasticsearch-curator/OWNERS @@ -0,0 +1,6 @@ +approvers: + - tmestdagh + - gianrubio +reviewers: + - tmestdagh + - gianrubio \ No newline at end of file diff --git a/efk/elasticsearch-curator/README.md b/efk/elasticsearch-curator/README.md new file mode 100644 index 0000000..cfae298 --- /dev/null +++ b/efk/elasticsearch-curator/README.md @@ -0,0 +1,59 @@ +# Elasticsearch Curator Helm Chart + +This directory contains a Kubernetes chart to deploy the [Elasticsearch Curator](https://github.com/elastic/curator). + +## Prerequisites Details + +* Elasticsearch + +* The `elasticsearch-curator` cron job requires [K8s CronJob](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) support: + > You need a working Kubernetes cluster at version >= 1.8 (for CronJob). 
For previous versions of cluster (< 1.8) you need to explicitly enable `batch/v2alpha1` API by passing `--runtime-config=batch/v2alpha1=true` to the API server ([see Turn on or off an API version for your cluster for more](https://kubernetes.io/docs/admin/cluster-management/#turn-on-or-off-an-api-version-for-your-cluster)). + +## Chart Details + +This chart will do the following: + +* Create a CronJob which runs the Curator + +## Installing the Chart + +To install the chart, use the following: + +```console +$ helm install stable/elasticsearch-curator +``` + +## Configuration + +The following table lists the configurable parameters of the docker-registry chart and +their default values. + +| Parameter | Description | Default | +| :----------------------------------- | :---------------------------------------------------------- | :------------------------------------------- | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `image.repository` | Container image to use | `quay.io/pires/docker-elasticsearch-curator` | +| `image.tag` | Container image tag to deploy | `5.5.4` | +| `hooks` | Whether to run job on selected hooks | `{ "install": false, "upgrade": false }` | +| `cronjob.schedule` | Schedule for the CronJob | `0 1 * * *` | +| `cronjob.annotations` | Annotations to add to the cronjob | {} | +| `cronjob.concurrencyPolicy` | `Allow|Forbid|Replace` concurrent jobs | `nil` | +| `cronjob.failedJobsHistoryLimit` | Specify the number of failed Jobs to keep | `nil` | +| `cronjob.successfulJobsHistoryLimit` | Specify the number of completed Jobs to keep | `nil` | +| `pod.annotations` | Annotations to add to the pod | {} | +| `dryrun` | Run Curator in dry-run mode | `false` | +| `env` | Environment variables to add to the cronjob container | {} | +| `envFromSecrets` | Environment variables from secrets to the cronjob container | {} | +| `envFromSecrets.*.from.secret` | - `secretKeyRef.name` used for environment variable | | +| 
`envFromSecrets.*.from.key` | - `secretKeyRef.key` used for environment variable | | +| `command` | Command to execute | ["curator"] | +| `configMaps.action_file_yml` | Contents of the Curator action_file.yml | See values.yaml | +| `configMaps.config_yml` | Contents of the Curator config.yml (overrides config) | See values.yaml | +| `resources` | Resource requests and limits | {} | +| `priorityClassName` | priorityClassName | `nil` | +| `extraVolumeMounts` | Mount extra volume(s), | | +| `extraVolumes` | Extra volumes | | +| `extraInitContainers` | Init containers to add to the cronjob container | {} | +| `securityContext` | Configure PodSecurityContext | | + +Specify each parameter using the `--set key=value[,key=value]` argument to +`helm install`. diff --git a/efk/elasticsearch-curator/ci/initcontainer-values.yaml b/efk/elasticsearch-curator/ci/initcontainer-values.yaml new file mode 100644 index 0000000..578becf --- /dev/null +++ b/efk/elasticsearch-curator/ci/initcontainer-values.yaml @@ -0,0 +1,9 @@ +extraInitContainers: + test: + image: alpine:latest + command: + - "/bin/sh" + - "-c" + args: + - | + true diff --git a/efk/elasticsearch-curator/templates/NOTES.txt b/efk/elasticsearch-curator/templates/NOTES.txt new file mode 100644 index 0000000..0b4fe02 --- /dev/null +++ b/efk/elasticsearch-curator/templates/NOTES.txt @@ -0,0 +1,6 @@ +A CronJob will run with schedule {{ .Values.cronjob.schedule }}. + +The Jobs will not be removed automagically when deleting this Helm chart. +To remove these jobs, run the following : + + kubectl -n {{ .Release.Namespace }} delete job -l app={{ template "elasticsearch-curator.name" . 
}},release={{ .Release.Name }} \ No newline at end of file diff --git a/efk/elasticsearch-curator/templates/_helpers.tpl b/efk/elasticsearch-curator/templates/_helpers.tpl new file mode 100644 index 0000000..c786fb5 --- /dev/null +++ b/efk/elasticsearch-curator/templates/_helpers.tpl @@ -0,0 +1,44 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the appropriate apiVersion for cronjob APIs. +*/}} +{{- define "cronjob.apiVersion" -}} +{{- if semverCompare "< 1.8-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v2alpha1" }} +{{- else if semverCompare ">=1.8-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "batch/v1beta1" }} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "elasticsearch-curator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "elasticsearch-curator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "elasticsearch-curator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/efk/elasticsearch-curator/templates/configmap.yaml b/efk/elasticsearch-curator/templates/configmap.yaml new file mode 100644 index 0000000..3bfc7fc --- /dev/null +++ b/efk/elasticsearch-curator/templates/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch-curator.fullname" . }}-config + labels: + app: {{ template "elasticsearch-curator.name" . }} + chart: {{ template "elasticsearch-curator.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + action_file.yml: {{ required "A valid .Values.configMaps.action_file_yml entry is required!" (toYaml .Values.configMaps.action_file_yml | indent 2) }} + config.yml: {{ required "A valid .Values.configMaps.config_yml entry is required!" (toYaml .Values.configMaps.config_yml | indent 2) }} diff --git a/efk/elasticsearch-curator/templates/cronjob.yaml b/efk/elasticsearch-curator/templates/cronjob.yaml new file mode 100644 index 0000000..6b5e16c --- /dev/null +++ b/efk/elasticsearch-curator/templates/cronjob.yaml @@ -0,0 +1,115 @@ +apiVersion: {{ template "cronjob.apiVersion" . }} +kind: CronJob +metadata: + name: {{ template "elasticsearch-curator.fullname" . }} + labels: + app: {{ template "elasticsearch-curator.name" . }} + chart: {{ template "elasticsearch-curator.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- if .Values.cronjob.annotations }} + annotations: +{{ toYaml .Values.cronjob.annotations | indent 4 }} +{{- end }} +spec: + schedule: "{{ .Values.cronjob.schedule }}" + {{- with .Values.cronjob.concurrencyPolicy }} + concurrencyPolicy: {{ . }} + {{- end }} + {{- with .Values.cronjob.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ . 
}} + {{- end }} + {{- with .Values.cronjob.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ . }} + {{- end }} + jobTemplate: + metadata: + labels: + app: {{ template "elasticsearch-curator.name" . }} + release: {{ .Release.Name }} + spec: + template: + metadata: + labels: + app: {{ template "elasticsearch-curator.name" . }} + release: {{ .Release.Name }} +{{- if .Values.pod.annotations }} + annotations: +{{ toYaml .Values.pod.annotations | indent 12 }} +{{- end }} + spec: + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch-curator.fullname" . }}-config +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 12 }} +{{- end }} + restartPolicy: Never +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" +{{- end }} +{{- if .Values.image.pullSecret }} + imagePullSecrets: + - name: {{ .Values.image.pullSecret }} +{{- end }} +{{- if .Values.extraInitContainers }} + initContainers: +{{- range $key, $value := .Values.extraInitContainers }} + - name: "{{ $key }}" +{{ toYaml $value | indent 12 }} +{{- end }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 16 }} +{{ end }} +{{ if .Values.command }} + command: +{{ toYaml .Values.command | indent 16 }} +{{- end }} +{{- if .Values.dryrun }} + args: [ "--dry-run", "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] +{{- else }} + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] +{{- end }} + env: +{{- if .Values.env }} +{{- range $key,$value := .Values.env }} + - name: {{ $key | upper | quote}} + value: {{ $value | quote}} +{{- end }} +{{- end }} +{{- if .Values.envFromSecrets }} +{{- range 
$key,$value := .Values.envFromSecrets }} + - name: {{ $key | upper | quote}} + valueFrom: + secretKeyRef: + name: {{ $value.from.secret | quote}} + key: {{ $value.from.key | quote}} +{{- end }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 16 }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: +{{ toYaml . | indent 12 }} + {{- end }} diff --git a/efk/elasticsearch-curator/templates/hooks/job.install.yaml b/efk/elasticsearch-curator/templates/hooks/job.install.yaml new file mode 100644 index 0000000..d2056d0 --- /dev/null +++ b/efk/elasticsearch-curator/templates/hooks/job.install.yaml @@ -0,0 +1,69 @@ +{{- range $kind, $enabled := .Values.hooks }} +{{ if $enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "elasticsearch-curator.fullname" $ }}-on-{{ $kind }} + labels: + app: {{ template "elasticsearch-curator.name" $ }} + chart: {{ template "elasticsearch-curator.chart" $ }} + release: {{ $.Release.Name }} + heritage: {{ $.Release.Service }} + annotations: + "helm.sh/hook": post-{{ $kind }} + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation +{{- if $.Values.cronjob.annotations }} +{{ toYaml $.Values.cronjob.annotations | indent 4 }} +{{- end }} +spec: + template: + metadata: + labels: + app: {{ template "elasticsearch-curator.name" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.pod.annotations }} + annotations: +{{ toYaml $.Values.pod.annotations | indent 8 }} +{{- end }} + spec: + volumes: + - name: config-volume + configMap: + name: {{ template "elasticsearch-curator.fullname" $ }}-config +{{- if $.Values.extraVolumes }} +{{ toYaml $.Values.extraVolumes | indent 8 }} +{{- end }} + restartPolicy: Never +{{- if 
$.Values.priorityClassName }} + priorityClassName: "{{ $.Values.priorityClassName }}" +{{- end }} + containers: + - name: {{ $.Chart.Name }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + volumeMounts: + - name: config-volume + mountPath: /etc/es-curator + {{- if $.Values.extraVolumeMounts }} +{{ toYaml $.Values.extraVolumeMounts | indent 12 }} + {{- end }} + command: [ "curator" ] + args: [ "--config", "/etc/es-curator/config.yml", "/etc/es-curator/action_file.yml" ] + resources: +{{ toYaml $.Values.resources | indent 12 }} + {{- with $.Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end -}} +{{ end }} diff --git a/efk/elasticsearch-curator/values.yaml b/efk/elasticsearch-curator/values.yaml new file mode 100644 index 0000000..2474fbc --- /dev/null +++ b/efk/elasticsearch-curator/values.yaml @@ -0,0 +1,140 @@ +# Default values for elasticsearch-curator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +cronjob: + # At 01:00 every day + schedule: "0 1 * * *" + annotations: {} + concurrencyPolicy: "" + failedJobsHistoryLimit: "" + successfulJobsHistoryLimit: "" + +pod: + annotations: {} + +image: + repository: quay.io/pires/docker-elasticsearch-curator + tag: 5.5.4 + pullPolicy: IfNotPresent + +hooks: + install: false + upgrade: false + +# run curator in dry-run mode +dryrun: false + +command: ["curator"] +env: {} + +configMaps: + # Delete indices older than 7 days + action_file_yml: |- + --- + actions: + 1: + action: delete_indices + description: "Clean up ES by deleting old indices" + options: + timeout_override: + continue_if_exception: False + disable_action: False + ignore_empty_list: True + filters: + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: 7 + field: + stats_result: + epoch: + exclude: False + # Having config_yaml WILL override the other config + config_yml: |- + --- + client: + hosts: + - elasticsearch-client + port: 9200 + # url_prefix: + # use_ssl: True + # certificate: + # client_cert: + # client_key: + # ssl_no_validate: True + # http_auth: + # timeout: 30 + # master_only: False + # logging: + # loglevel: INFO + # logfile: + # logformat: default + # blacklist: ['elasticsearch', 'urllib3'] + + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +priorityClassName: "" + +# extraVolumes and extraVolumeMounts allows you to mount other volumes +# Example Use Case: mount ssl certificates when elasticsearch has tls enabled +# extraVolumes: +# - name: es-certs +# secret: +# defaultMode: 420 +# secretName: es-certs +# extraVolumeMounts: +# - name: es-certs +# mountPath: /certs +# readOnly: true + +# Add your own init container or uncomment and modify the given example. +extraInitContainers: {} + ## Don't configure S3 repository till Elasticsearch is reachable. + ## Ensure that it is available at http://elasticsearch:9200 + ## + # elasticsearch-s3-repository: + # image: jwilder/dockerize:latest + # imagePullPolicy: "IfNotPresent" + # command: + # - "/bin/sh" + # - "-c" + # args: + # - | + # ES_HOST=elasticsearch + # ES_PORT=9200 + # ES_REPOSITORY=backup + # S3_REGION=us-east-1 + # S3_BUCKET=bucket + # S3_BASE_PATH=backup + # S3_COMPRESS=true + # S3_STORAGE_CLASS=standard + # apk add curl --no-cache && \ + # dockerize -wait http://${ES_HOST}:${ES_PORT} --timeout 120s && \ + # cat < **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/efk/elasticsearch-exporter/templates/NOTES.txt b/efk/elasticsearch-exporter/templates/NOTES.txt new file mode 100644 index 0000000..4311f10 --- /dev/null +++ b/efk/elasticsearch-exporter/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch-exporter.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT{{ .Values.web.path }} +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "elasticsearch-exporter.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "elasticsearch-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.httpPort }}{{ .Values.web.path }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "elasticsearch-exporter.fullname" . }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:{{ .Values.service.httpPort }}{{ .Values.web.path }} to use your application" + kubectl port-forward $POD_NAME {{ .Values.service.httpPort }}:{{ .Values.service.httpPort }} --namespace {{ .Release.Namespace }} +{{- end }} diff --git a/efk/templates/_helpers.tpl b/efk/elasticsearch-exporter/templates/_helpers.tpl similarity index 64% rename from efk/templates/_helpers.tpl rename to efk/elasticsearch-exporter/templates/_helpers.tpl index 84d7a7e..1b098d1 100644 --- a/efk/templates/_helpers.tpl +++ b/efk/elasticsearch-exporter/templates/_helpers.tpl @@ -2,23 +2,15 @@ {{/* Expand the name of the chart. */}} -{{- define "name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 53 | trimSuffix "-" -}} -{{- end -}} - {{- define "elasticsearch-exporter.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Create a default fully qualified app name. -We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fields are limited to 63 (by the DNS naming spec). 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. */}} -{{- define "fullname" -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- printf "%s-%s" .Release.Name $name | trunc 53 | trimSuffix "-" -}} -{{- end -}} - {{- define "elasticsearch-exporter.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} @@ -27,12 +19,11 @@ We truncate at 53 chars (63 - len("-discovery")) because some Kubernetes name fi {{- if contains $name .Release.Name -}} {{- .Release.Name | trunc 63 | trimSuffix "-" -}} {{- else -}} -{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} {{- end -}} - {{/* Create chart name and version as used by the chart label. */}} diff --git a/efk/templates/es-exporter-cert-secret.yaml b/efk/elasticsearch-exporter/templates/cert-secret.yaml similarity index 52% rename from efk/templates/es-exporter-cert-secret.yaml rename to efk/elasticsearch-exporter/templates/cert-secret.yaml index 06f6333..6e4ac25 100644 --- a/efk/templates/es-exporter-cert-secret.yaml +++ b/efk/elasticsearch-exporter/templates/cert-secret.yaml @@ -1,4 +1,4 @@ -{{- if .Values.elasticsearchexporter.es.ssl.enabled }} +{{- if .Values.es.ssl.enabled }} apiVersion: v1 kind: Secret metadata: @@ -10,7 +10,7 @@ metadata: heritage: "{{ .Release.Service }}" type: Opaque data: - ca.pem: {{ .Values.elasticsearchexporter.es.ssl.ca.pem | b64enc }} - client.pem: {{ .Values.elasticsearchexporter.es.ssl.client.pem | b64enc }} - client.key: {{ .Values.elasticsearchexporter.es.ssl.client.key | b64enc }} -{{- end }} \ No newline at end of file + ca.pem: {{ .Values.es.ssl.ca.pem | b64enc }} + client.pem: {{ .Values.es.ssl.client.pem | b64enc }} + client.key: {{ .Values.es.ssl.client.key | b64enc }} +{{- end 
}} diff --git a/efk/templates/es-exporter-deployment.yaml b/efk/elasticsearch-exporter/templates/deployment.yaml similarity index 57% rename from efk/templates/es-exporter-deployment.yaml rename to efk/elasticsearch-exporter/templates/deployment.yaml index 51bb754..c57bc32 100644 --- a/efk/templates/es-exporter-deployment.yaml +++ b/efk/elasticsearch-exporter/templates/deployment.yaml @@ -8,7 +8,7 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" spec: - replicas: {{ .Values.elasticsearchexporter.replicaCount }} + replicas: {{ .Values.replicaCount }} selector: matchLabels: app: {{ template "elasticsearch-exporter.name" . }} @@ -23,34 +23,34 @@ spec: labels: app: {{ template "elasticsearch-exporter.name" . }} release: "{{ .Release.Name }}" - {{- if .Values.elasticsearchexporter.podAnnotations }} + {{- if .Values.podAnnotations }} annotations: -{{ toYaml .Values.elasticsearchexporter.podAnnotations | indent 8 }} +{{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} spec: -{{- if .Values.elasticsearchexporter.priorityClassName }} - priorityClassName: "{{ .Values.elasticsearchexporter.priorityClassName }}" +{{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" {{- end }} - restartPolicy: {{ .Values.elasticsearchexporter.restartPolicy }} + restartPolicy: {{ .Values.restartPolicy }} securityContext: runAsNonRoot: true runAsUser: 1000 containers: - name: {{ .Chart.Name }} - image: "{{ .Values.elasticsearchexporter.image.repository }}:{{ .Values.elasticsearchexporter.image.tag }}" - imagePullPolicy: {{ .Values.elasticsearchexporter.image.pullPolicy }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["elasticsearch_exporter", - "-es.uri={{ .Values.elasticsearchexporter.es.uri }}", - "-es.all={{ .Values.elasticsearchexporter.es.all }}", - "-es.indices={{ .Values.elasticsearchexporter.es.indices }}", - "-es.timeout={{ 
.Values.elasticsearchexporter.es.timeout }}", - {{- if .Values.elasticsearchexporter.es.ssl.enabled }} + "-es.uri={{ .Values.es.uri }}", + "-es.all={{ .Values.es.all }}", + "-es.indices={{ .Values.es.indices }}", + "-es.timeout={{ .Values.es.timeout }}", + {{- if .Values.es.ssl.enabled }} "-es.ca=/ssl/ca.pem", "-es.client-cert=/ssl/client.pem", "-es.client-private-key=/ssl/client.key", {{- end }} - "-web.listen-address=:{{ .Values.elasticsearchexporter.service.httpPort }}", - "-web.telemetry-path={{ .Values.elasticsearchexporter.web.path }}"] + "-web.listen-address=:{{ .Values.service.httpPort }}", + "-web.telemetry-path={{ .Values.web.path }}"] securityContext: capabilities: drop: @@ -70,9 +70,9 @@ spec: - SETFCAP readOnlyRootFilesystem: true resources: -{{ toYaml .Values.elasticsearchexporter.resources | indent 12 }} +{{ toYaml .Values.resources | indent 12 }} ports: - - containerPort: {{ .Values.elasticsearchexporter.service.httpPort }} + - containerPort: {{ .Values.service.httpPort }} name: http livenessProbe: httpGet: @@ -87,21 +87,21 @@ spec: initialDelaySeconds: 10 timeoutSeconds: 10 volumeMounts: - {{- if .Values.elasticsearchexporter.es.ssl.enabled }} + {{- if .Values.es.ssl.enabled }} - mountPath: /ssl name: ssl {{- end }} -{{- if .Values.elasticsearchexporter.nodeSelector }} +{{- if .Values.nodeSelector }} nodeSelector: -{{ toYaml .Values.elasticsearchexporter.nodeSelector | indent 8 }} +{{ toYaml .Values.nodeSelector | indent 8 }} {{- end }} -{{- if .Values.elasticsearchexporter.tolerations }} +{{- if .Values.tolerations }} tolerations: -{{ toYaml .Values.elasticsearchexporter.tolerations | indent 8 }} +{{ toYaml .Values.tolerations | indent 8 }} {{- end }} volumes: - {{- if .Values.elasticsearchexporter.es.ssl.enabled }} + {{- if .Values.es.ssl.enabled }} - name: ssl secret: secretName: {{ template "elasticsearch-exporter.fullname" . 
}}-cert - {{- end }} \ No newline at end of file + {{- end }} diff --git a/efk/templates/es-exporter-service.yaml b/efk/elasticsearch-exporter/templates/service.yaml similarity index 60% rename from efk/templates/es-exporter-service.yaml rename to efk/elasticsearch-exporter/templates/service.yaml index f1b9543..bf7df3a 100644 --- a/efk/templates/es-exporter-service.yaml +++ b/efk/elasticsearch-exporter/templates/service.yaml @@ -7,16 +7,16 @@ metadata: app: {{ template "elasticsearch-exporter.name" . }} release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" -{{- if .Values.elasticsearchexporter.service.annotations }} +{{- if .Values.service.annotations }} annotations: -{{ toYaml .Values.elasticsearchexporter.service.annotations | indent 4 }} +{{ toYaml .Values.service.annotations | indent 4 }} {{- end }} spec: - type: {{ .Values.elasticsearchexporter.service.type }} + type: {{ .Values.service.type }} ports: - name: http - port: {{ .Values.elasticsearchexporter.service.httpPort }} + port: {{ .Values.service.httpPort }} protocol: TCP selector: app: {{ template "elasticsearch-exporter.name" . }} - release: "{{ .Release.Name }}" \ No newline at end of file + release: "{{ .Release.Name }}" diff --git a/efk/elasticsearch-exporter/templates/servicemonitor.yaml b/efk/elasticsearch-exporter/templates/servicemonitor.yaml new file mode 100644 index 0000000..5043316 --- /dev/null +++ b/efk/elasticsearch-exporter/templates/servicemonitor.yaml @@ -0,0 +1,30 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "elasticsearch-exporter.fullname" . }} + labels: + chart: {{ template "elasticsearch-exporter.chart" . }} + app: {{ template "elasticsearch-exporter.name" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: 10s + honorLabels: true + port: http + path: {{ .Values.web.path }} + scheme: http + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + app: {{ template "elasticsearch-exporter.name" . }} + release: "{{ .Release.Name }}" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/efk/elasticsearch-exporter/values.yaml b/efk/elasticsearch-exporter/values.yaml new file mode 100644 index 0000000..3b796ec --- /dev/null +++ b/efk/elasticsearch-exporter/values.yaml @@ -0,0 +1,91 @@ +## number of exporter instances +## +replicaCount: 1 + +## restart policy for all containers +## +restartPolicy: Always + +image: + repository: justwatch/elasticsearch_exporter + tag: 1.0.2 + pullPolicy: IfNotPresent + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + +priorityClassName: "" + +nodeSelector: {} + +tolerations: {} + +podAnnotations: {} + +service: + type: ClusterIP + httpPort: 9108 + annotations: + nynja.biz/scrape: "true" + nynja.biz/scrape_port: "9108" + nynja.biz/env: "dev" + nynja.biz/probe: "efkexporter" + +es: + ## Address (host and port) of the Elasticsearch node we should connect to. + ## This could be a local node (localhost:9200, for instance), or the address + ## of a remote Elasticsearch server. When basic auth is needed, + ## specify as: ://:@:. e.g., http://admin:pass@localhost:9200. + ## + uri: http://elasticsearch-client:9200 + + ## If true, query stats for all nodes in the cluster, rather than just the + ## node we connect to. + ## + all: true + + ## If true, query stats for all indices in the cluster. + ## + indices: true + + ## Timeout for trying to get stats from Elasticsearch. 
(ex: 20s) + ## + timeout: 30s + + ssl: + ## If true, a secure connection to ES cluster is used (requires SSL certs below) + ## + enabled: false + + ca: + + ## PEM that contains trusted CAs used for setting up secure Elasticsearch connection + ## + # pem: + + client: + + ## PEM that contains the client cert to connect to Elasticsearch. + ## + # pem: + + ## Private key for client auth when connecting to Elasticsearch + ## + # key: + +web: + ## Path under which to expose metrics. + ## + path: /metrics + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + labels: {} diff --git a/efk/elasticsearch/.helmignore b/efk/elasticsearch/.helmignore new file mode 100644 index 0000000..f225651 --- /dev/null +++ b/efk/elasticsearch/.helmignore @@ -0,0 +1,3 @@ +.git +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/efk/elasticsearch/Chart.yaml b/efk/elasticsearch/Chart.yaml new file mode 100644 index 0000000..d360df2 --- /dev/null +++ b/efk/elasticsearch/Chart.yaml @@ -0,0 +1,23 @@ +name: elasticsearch +home: https://www.elastic.co/products/elasticsearch +version: 1.24.0 +appVersion: 6.7.0 +description: Flexible and powerful open source, distributed real-time search and analytics + engine. 
+icon: https://static-www.elastic.co/assets/blteb1c97719574938d/logo-elastic-elasticsearch-lt.svg +sources: +- https://www.elastic.co/products/elasticsearch +- https://github.com/jetstack/elasticsearch-pet +- https://github.com/giantswarm/kubernetes-elastic-stack +- https://github.com/GoogleCloudPlatform/elasticsearch-docker +- https://github.com/clockworksoul/helm-elasticsearch +- https://github.com/pires/kubernetes-elasticsearch-cluster +maintainers: +- name: simonswine + email: christian@jetstack.io +- name: icereval + email: michael.haselton@gmail.com +- name: rendhalver + email: pete.brown@powerhrg.com +- name: desaintmartin + email: cedric@desaintmartin.fr diff --git a/efk/elasticsearch/OWNERS b/efk/elasticsearch/OWNERS new file mode 100644 index 0000000..3c4068f --- /dev/null +++ b/efk/elasticsearch/OWNERS @@ -0,0 +1,10 @@ +approvers: +- simonswine +- icereval +- rendhalver +- desaintmartin +reviewers: +- simonswine +- icereval +- rendhalver +- desaintmartin diff --git a/efk/elasticsearch/README.md b/efk/elasticsearch/README.md new file mode 100644 index 0000000..f9f34cd --- /dev/null +++ b/efk/elasticsearch/README.md @@ -0,0 +1,252 @@ +# Elasticsearch Helm Chart + +This chart uses a standard Docker image of Elasticsearch (docker.elastic.co/elasticsearch/elasticsearch-oss) and uses a service pointing to the master's transport port for service discovery. +Elasticsearch does not communicate with the Kubernetes API, hence no need for RBAC permissions. + +## Warning for previous users +If you are currently using an earlier version of this Chart you will need to redeploy your Elasticsearch clusters. The discovery method used here is incompatible with using RBAC. +If you are upgrading to Elasticsearch 6 from the 5.5 version used in this chart before, please note that your cluster needs to do a full cluster restart. +The simplest way to do that is to delete the installation (keep the PVs) and install this chart again with the new version. 
+If you want to avoid doing that upgrade to Elasticsearch 5.6 first before moving on to Elasticsearch 6.0. + +## Prerequisites Details + +* Kubernetes 1.6+ +* PV dynamic provisioning support on the underlying infrastructure + +## StatefulSets Details +* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + +## StatefulSets Caveats +* https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations + +## Todo + +* Implement TLS/Auth/Security +* Smarter upscaling/downscaling +* Solution for memory locking + +## Chart Details +This chart will do the following: + +* Implemented a dynamically scalable elasticsearch cluster using Kubernetes StatefulSets/Deployments +* Multi-role deployment: master, client (coordinating) and data nodes +* Statefulset Supports scaling down without degrading the cluster + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/elasticsearch +``` + +## Deleting the Charts + +Delete the Helm deployment as normal + +``` +$ helm delete my-release +``` + +Deletion of the StatefulSet doesn't cascade to deleting associated PVCs. To delete them: + +``` +$ kubectl delete pvc -l release=my-release,component=data +``` + +## Configuration + +The following table lists the configurable parameters of the elasticsearch chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | ------------------------------------------------------------------- | --------------------------------------------------- | +| `appVersion` | Application Version (Elasticsearch) | `6.7.0` | +| `image.repository` | Container image name | `docker.elastic.co/elasticsearch/elasticsearch-oss` | +| `image.tag` | Container image tag | `6.7.0` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `initImage.repository` | Init container image name | `busybox` | +| `initImage.tag` | Init container image tag | `latest` | +| `initImage.pullPolicy` | Init container pull policy | `Always` | +| `cluster.name` | Cluster name | `elasticsearch` | +| `cluster.xpackEnable` | Writes the X-Pack configuration options to the configuration file | `false` | +| `cluster.config` | Additional cluster config appended | `{}` | +| `cluster.keystoreSecret` | Name of secret holding secure config options in an es keystore | `nil` | +| `cluster.env` | Cluster environment variables | `{MINIMUM_MASTER_NODES: "2"}` | +| `cluster.bootstrapShellCommand` | Post-init command to run in separate Job | `""` | +| `cluster.additionalJavaOpts` | Cluster parameters to be added to `ES_JAVA_OPTS` environment variable | `""` | +| `cluster.plugins` | List of Elasticsearch plugins to install | `[]` | +| `client.name` | Client component name | `client` | +| `client.replicas` | Client node replicas (deployment) | `2` | +| `client.resources` | Client node resources requests & limits | `{} - cpu limit must be an integer` | +| `client.priorityClassName` | Client priorityClass | `nil` | +| `client.heapSize` | Client node heap size | `512m` | +| `client.podAnnotations` | Client Deployment annotations | `{}` | +| `client.nodeSelector` | Node labels for client pod assignment | `{}` | +| `client.tolerations` | Client tolerations | `[]` | +| `client.serviceAnnotations` | Client Service annotations | `{}` | +| `client.serviceType` | 
Client service type | `ClusterIP` | +| `client.loadBalancerIP` | Client loadBalancerIP | `{}` | +| `client.loadBalancerSourceRanges` | Client loadBalancerSourceRanges | `{}` | +| `client.antiAffinity` | Client anti-affinity policy | `soft` | +| `client.nodeAffinity` | Client node affinity policy | `{}` | +| `client.initResources` | Client initContainer resources requests & limits | `{}` | +| `client.additionalJavaOpts` | Parameters to be added to `ES_JAVA_OPTS` environment variable for client | `""` | +| `client.ingress.enabled` | Enable Client Ingress | `false` | +| `client.ingress.user` | If this & password are set, enable basic-auth on ingress | `nil` | +| `client.ingress.password` | If this & user are set, enable basic-auth on ingress | `nil` | +| `client.ingress.annotations` | Client Ingress annotations | `{}` | +| `client.ingress.hosts` | Client Ingress Hostnames | `[]` | +| `client.ingress.tls` | Client Ingress TLS configuration | `[]` | +| `master.initResources` | Master initContainer resources requests & limits | `{}` | +| `master.additionalJavaOpts` | Parameters to be added to `ES_JAVA_OPTS` environment variable for master | `""` | +| `master.exposeHttp` | Expose http port 9200 on master Pods for monitoring, etc | `false` | +| `master.name` | Master component name | `master` | +| `master.replicas` | Master node replicas (deployment) | `2` | +| `master.resources` | Master node resources requests & limits | `{} - cpu limit must be an integer` | +| `master.priorityClassName` | Master priorityClass | `nil` | +| `master.podAnnotations` | Master Deployment annotations | `{}` | +| `master.nodeSelector` | Node labels for master pod assignment | `{}` | +| `master.tolerations` | Master tolerations | `[]` | +| `master.heapSize` | Master node heap size | `512m` | +| `master.name` | Master component name | `master` | +| `master.persistence.enabled` | Master persistent enabled/disabled | `true` | +| `master.persistence.name` | Master statefulset PVC template name | 
`data` | +| `master.persistence.size` | Master persistent volume size | `4Gi` | +| `master.persistence.storageClass` | Master persistent volume Class | `nil` | +| `master.persistence.accessMode` | Master persistent Access Mode | `ReadWriteOnce` | +| `master.readinessProbe` | Master container readiness probes | see `values.yaml` for defaults | +| `master.antiAffinity` | Master anti-affinity policy | `soft` | +| `master.nodeAffinity` | Master node affinity policy | `{}` | +| `master.podManagementPolicy` | Master pod creation strategy | `OrderedReady` | +| `master.updateStrategy` | Master node update strategy policy | `{type: "onDelete"}` | +| `data.initResources` | Data initContainer resources requests & limits | `{}` | +| `data.additionalJavaOpts` | Parameters to be added to `ES_JAVA_OPTS` environment variable for data | `""` | +| `data.exposeHttp` | Expose http port 9200 on data Pods for monitoring, etc | `false` | +| `data.replicas` | Data node replicas (statefulset) | `2` | +| `data.resources` | Data node resources requests & limits | `{} - cpu limit must be an integer` | +| `data.priorityClassName` | Data priorityClass | `nil` | +| `data.heapSize` | Data node heap size | `1536m` | +| `data.hooks.drain.enabled | Data nodes: Enable drain pre-stop and post-start hook | `true` | +| `data.persistence.enabled` | Data persistent enabled/disabled | `true` | +| `data.persistence.name` | Data statefulset PVC template name | `data` | +| `data.persistence.size` | Data persistent volume size | `30Gi` | +| `data.persistence.storageClass` | Data persistent volume Class | `nil` | +| `data.persistence.accessMode` | Data persistent Access Mode | `ReadWriteOnce` | +| `data.readinessProbe` | Readiness probes for data-containers | see `values.yaml` for defaults | +| `data.podAnnotations` | Data StatefulSet annotations | `{}` | +| `data.nodeSelector` | Node labels for data pod assignment | `{}` | +| `data.tolerations` | Data tolerations | `[]` | +| 
`data.terminationGracePeriodSeconds` | Data termination grace period (seconds) | `3600` | +| `data.antiAffinity` | Data anti-affinity policy | `soft` | +| `data.nodeAffinity` | Data node affinity policy | `{}` | +| `data.podManagementPolicy` | Data pod creation strategy | `OrderedReady` | +| `data.updateStrategy` | Data node update strategy policy | `{type: "onDelete"}` | +| `sysctlInitContainer.enabled` | If true, the sysctl init container is enabled (does not stop extraInitContainers from running) | `true` | +| `extraInitContainers` | Additional init container passed through the tpl | `` | +| `podSecurityPolicy.annotations` | Specify pod annotations in the pod security policy | `{}` | +| `podSecurityPolicy.enabled` | Specify if a pod security policy must be created | `false` | +| `serviceAccounts.client.create` | If true, create the client service account | `true` | +| `serviceAccounts.client.name` | Name of the client service account to use or create | `{{ elasticsearch.client.fullname }}` | +| `serviceAccounts.master.create` | If true, create the master service account | `true` | +| `serviceAccounts.master.name` | Name of the master service account to use or create | `{{ elasticsearch.master.fullname }}` | +| `serviceAccounts.data.create` | If true, create the data service account | `true` | +| `serviceAccounts.data.name` | Name of the data service account to use or create | `{{ elasticsearch.data.fullname }}` | +| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | +| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
+ +In terms of Memory resources you should make sure that you follow that equation: + +- `${role}HeapSize < ${role}MemoryRequests < ${role}MemoryLimits` + +The YAML value of cluster.config is appended to elasticsearch.yml file for additional customization ("script.inline: on" for example to allow inline scripting) + +# Deep dive + +## Application Version + +This chart aims to support Elasticsearch v2 to v6 deployments by specifying the `values.yaml` parameter `appVersion`. + +### Version Specific Features + +* Memory Locking *(variable renamed)* +* Ingest Node *(v5)* +* X-Pack Plugin *(v5)* + +Upgrade paths & more info: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html + +## Mlocking + +This is a limitation in kubernetes right now. There is no way to raise the +limits of lockable memory, so that these memory areas won't be swapped. This +would degrade performance heavily. The issue is tracked in +[kubernetes/#3595](https://github.com/kubernetes/kubernetes/issues/3595). + +``` +[WARN ][bootstrap] Unable to lock JVM Memory: error=12,reason=Cannot allocate memory +[WARN ][bootstrap] This can result in part of the JVM being swapped out. +[WARN ][bootstrap] Increase RLIMIT_MEMLOCK, soft limit: 65536, hard limit: 65536 +``` + +## Minimum Master Nodes +> The minimum_master_nodes setting is extremely important to the stability of your cluster. This setting helps prevent split brains, the existence of two masters in a single cluster. + +>When you have a split brain, your cluster is at danger of losing data. Because the master is considered the supreme ruler of the cluster, it decides when new indices can be created, how shards are moved, and so forth. If you have two masters, data integrity becomes perilous, since you have two nodes that think they are in charge. + +>This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes available. Only then will an election take place. 
+ +>This setting should always be configured to a quorum (majority) of your master-eligible nodes. A quorum is (number of master-eligible nodes / 2) + 1 + +More info: https://www.elastic.co/guide/en/elasticsearch/guide/1.x/_important_configuration_changes.html#_minimum_master_nodes + +# Client and Coordinating Nodes + +Elasticsearch v5 terminology has updated, and now refers to a `Client Node` as a `Coordinating Node`. + +More info: https://www.elastic.co/guide/en/elasticsearch/reference/5.5/modules-node.html#coordinating-node + +## Enabling elasticsearch internal monitoring +Requires version 6.3+ and standard non `oss` repository defined. Starting with 6.3 Xpack is partially free and enabled by default. You need to set a new config to enable the collection of these internal metrics. (https://www.elastic.co/guide/en/elasticsearch/reference/6.3/monitoring-settings.html) + +To do this through this helm chart override with the three following changes: +``` +image.repository: docker.elastic.co/elasticsearch/elasticsearch +cluster.xpackEnable: true +cluster.env.XPACK_MONITORING_ENABLED: true +``` + +Note: to see these changes you will need to update your kibana repo to `image.repository: docker.elastic.co/kibana/kibana` instead of the `oss` version + + +## Select right storage class for SSD volumes + +### GCE + Kubernetes 1.5 + +Create StorageClass for SSD-PD + +``` +$ kubectl create -f - < >(tee -a "/var/log/elasticsearch-hooks.log") + NODE_NAME=${HOSTNAME} + echo "Prepare to migrate data of the node ${NODE_NAME}" + echo "Move all data from node ${NODE_NAME}" + curl -s -XPUT -H 'Content-Type: application/json' '{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings' -d "{ + \"transient\" :{ + \"cluster.routing.allocation.exclude._name\" : \"${NODE_NAME}\" + } + }" + echo "" + + while true ; do + echo -e "Wait for node ${NODE_NAME} to become empty" + SHARDS_ALLOCATION=$(curl -s -XGET 'http://{{ template "elasticsearch.client.fullname" . 
}}:9200/_cat/shards') + if ! echo "${SHARDS_ALLOCATION}" | grep -E "${NODE_NAME}"; then + break + fi + sleep 1 + done + echo "Node ${NODE_NAME} is ready to shutdown" + post-start-hook.sh: |- + #!/bin/bash + exec &> >(tee -a "/var/log/elasticsearch-hooks.log") + NODE_NAME=${HOSTNAME} + CLUSTER_SETTINGS=$(curl -s -XGET "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings") + if echo "${CLUSTER_SETTINGS}" | grep -E "${NODE_NAME}"; then + echo "Activate node ${NODE_NAME}" + curl -s -XPUT -H 'Content-Type: application/json' "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings" -d "{ + \"transient\" :{ + \"cluster.routing.allocation.exclude._name\" : null + } + }" + fi + echo "Node ${NODE_NAME} is ready to be used" +{{- end }} diff --git a/efk/elasticsearch/templates/data-pdb.yaml b/efk/elasticsearch/templates/data-pdb.yaml new file mode 100644 index 0000000..54e91c7 --- /dev/null +++ b/efk/elasticsearch/templates/data-pdb.yaml @@ -0,0 +1,24 @@ +{{- if .Values.data.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.data.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.data.fullname" . }} +spec: +{{- if .Values.data.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.data.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.data.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.data.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: {{ template "elasticsearch.name" . 
}} + component: "{{ .Values.data.name }}" + release: {{ .Release.Name }} +{{- end }} diff --git a/efk/elasticsearch/templates/data-serviceaccount.yaml b/efk/elasticsearch/templates/data-serviceaccount.yaml new file mode 100644 index 0000000..2a9b4fd --- /dev/null +++ b/efk/elasticsearch/templates/data-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccounts.data.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.data.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.data.fullname" . }} +{{- end }} diff --git a/efk/elasticsearch/templates/data-statefulset.yaml b/efk/elasticsearch/templates/data-statefulset.yaml new file mode 100644 index 0000000..a407803 --- /dev/null +++ b/efk/elasticsearch/templates/data-statefulset.yaml @@ -0,0 +1,226 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.data.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.data.fullname" . }} +spec: + serviceName: {{ template "elasticsearch.data.fullname" . }} + replicas: {{ .Values.data.replicas }} + template: + metadata: + labels: + app: {{ template "elasticsearch.name" . }} + component: "{{ .Values.data.name }}" + release: {{ .Release.Name }} + role: data + {{- if .Values.data.podAnnotations }} + annotations: +{{ toYaml .Values.data.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.serviceAccountName.data" . 
}} +{{- if .Values.data.priorityClassName }} + priorityClassName: "{{ .Values.data.priorityClassName }}" +{{- end }} + securityContext: + fsGroup: 1000 + {{- if or .Values.data.antiAffinity .Values.data.nodeAffinity }} + affinity: + {{- end }} + {{- if eq .Values.data.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "elasticsearch.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.data.name }}" + {{- else if eq .Values.data.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "elasticsearch.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.data.name }}" + {{- end }} + {{- with .Values.data.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} +{{- if .Values.data.nodeSelector }} + nodeSelector: +{{ toYaml .Values.data.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.data.tolerations }} + tolerations: +{{ toYaml .Values.data.tolerations | indent 8 }} +{{- end }} + initContainers: +{{- if .Values.sysctlInitContainer.enabled }} + # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall + - name: "sysctl" + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.data.initResources | indent 12 }} + command: ["sysctl", "-w", "vm.max_map_count=262144"] + securityContext: + privileged: true +{{- end }} + - name: "chown" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + resources: +{{ 
toYaml .Values.data.initResources | indent 12 }} + command: + - /bin/bash + - -c + - > + set -e; + set -x; + chown elasticsearch:elasticsearch /usr/share/elasticsearch/data; + for datadir in $(find /usr/share/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot"); do + chown -R elasticsearch:elasticsearch $datadir; + done; + chown elasticsearch:elasticsearch /usr/share/elasticsearch/logs; + for logfile in $(find /usr/share/elasticsearch/logs -mindepth 1 -maxdepth 1 -not -name ".snapshot"); do + chown -R elasticsearch:elasticsearch $logfile; + done + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: data +{{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6 }} +{{- end }} +{{- if .Values.cluster.plugins }} +{{ include "plugin-installer" . | indent 6 }} +{{- end }} + containers: + - name: elasticsearch + env: + - name: DISCOVERY_SERVICE + value: {{ template "elasticsearch.fullname" . }}-discovery + - name: NODE_MASTER + value: "false" + - name: PROCESSORS + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: ES_JAVA_OPTS + value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.data.heapSize }} -Xmx{{ .Values.data.heapSize }} {{ .Values.cluster.additionalJavaOpts }} {{ .Values.data.additionalJavaOpts }}" + {{- range $key, $value := .Values.cluster.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 9300 + name: transport +{{ if .Values.data.exposeHttp }} + - containerPort: 9200 + name: http +{{ end }} + resources: +{{ toYaml .Values.data.resources | indent 12 }} + readinessProbe: +{{ toYaml .Values.data.readinessProbe | indent 10 }} + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: data + - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + name: config + 
subPath: elasticsearch.yml +{{- if .Values.cluster.plugins }} + - mountPath: /usr/share/elasticsearch/plugins/ + name: plugindir +{{- end }} +{{- if hasPrefix "2." .Values.appVersion }} + - mountPath: /usr/share/elasticsearch/config/logging.yml + name: config + subPath: logging.yml +{{- end }} +{{- if hasPrefix "5." .Values.appVersion }} + - mountPath: /usr/share/elasticsearch/config/log4j2.properties + name: config + subPath: log4j2.properties +{{- end }} +{{- if .Values.cluster.keystoreSecret }} + - name: keystore + mountPath: "/usr/share/elasticsearch/config/elasticsearch.keystore" + subPath: elasticsearch.keystore + readOnly: true +{{- end }} +{{- if .Values.data.hooks.drain.enabled }} + - name: config + mountPath: /pre-stop-hook.sh + subPath: pre-stop-hook.sh + - name: config + mountPath: /post-start-hook.sh + subPath: post-start-hook.sh + lifecycle: + preStop: + exec: + command: ["/bin/bash","/pre-stop-hook.sh"] + postStart: + exec: + command: ["/bin/bash","/post-start-hook.sh"] +{{- end }} + terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range $pullSecret := .Values.image.pullSecrets }} + - name: {{ $pullSecret }} + {{- end }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "elasticsearch.fullname" . 
}} +{{- if .Values.cluster.plugins }} + - name: plugindir + emptyDir: {} +{{- end }} +{{- if .Values.cluster.keystoreSecret }} + - name: keystore + secret: + secretName: {{ .Values.cluster.keystoreSecret }} +{{- end }} + {{- if not .Values.data.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + podManagementPolicy: {{ .Values.data.podManagementPolicy }} + updateStrategy: + type: {{ .Values.data.updateStrategy.type }} + {{- if .Values.data.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ .Values.data.persistence.name }} + spec: + accessModes: + - {{ .Values.data.persistence.accessMode | quote }} + {{- if .Values.data.persistence.storageClass }} + {{- if (eq "-" .Values.data.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.data.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.data.persistence.size }}" + {{- end }} diff --git a/efk/elasticsearch/templates/job.yaml b/efk/elasticsearch/templates/job.yaml new file mode 100644 index 0000000..f375497 --- /dev/null +++ b/efk/elasticsearch/templates/job.yaml @@ -0,0 +1,34 @@ +{{- if .Values.cluster.bootstrapShellCommand }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "elasticsearch.fullname" . }}-bootstrap + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "10" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ template "elasticsearch.fullname" . }}-bootstrap + labels: + app: {{ template "elasticsearch.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + spec: + containers: + - name: bootstrap-elasticsearch + image: byrnedo/alpine-curl + command: + - "sh" + - "-c" + - {{ .Values.cluster.bootstrapShellCommand | quote }} + restartPolicy: Never + backoffLimit: 20 +{{- end }} diff --git a/efk/elasticsearch/templates/master-pdb.yaml b/efk/elasticsearch/templates/master-pdb.yaml new file mode 100644 index 0000000..c3efe83 --- /dev/null +++ b/efk/elasticsearch/templates/master-pdb.yaml @@ -0,0 +1,24 @@ +{{- if .Values.master.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.master.fullname" . }} +spec: +{{- if .Values.master.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.master.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + app: {{ template "elasticsearch.name" . }} + component: "{{ .Values.master.name }}" + release: {{ .Release.Name }} +{{- end }} diff --git a/efk/elasticsearch/templates/master-serviceaccount.yaml b/efk/elasticsearch/templates/master-serviceaccount.yaml new file mode 100644 index 0000000..0f7dfbd --- /dev/null +++ b/efk/elasticsearch/templates/master-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccounts.master.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "elasticsearch.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.master.fullname" . }} +{{- end }} diff --git a/efk/elasticsearch/templates/master-statefulset.yaml b/efk/elasticsearch/templates/master-statefulset.yaml new file mode 100644 index 0000000..048535c --- /dev/null +++ b/efk/elasticsearch/templates/master-statefulset.yaml @@ -0,0 +1,214 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.master.fullname" . }} +spec: + serviceName: {{ template "elasticsearch.master.fullname" . }} + replicas: {{ .Values.master.replicas }} + template: + metadata: + labels: + app: {{ template "elasticsearch.name" . }} + component: "{{ .Values.master.name }}" + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podAnnotations }} + annotations: +{{ toYaml .Values.master.podAnnotations | indent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.serviceAccountName.master" . }} +{{- if .Values.master.priorityClassName }} + priorityClassName: "{{ .Values.master.priorityClassName }}" +{{- end }} + securityContext: + fsGroup: 1000 + {{- if or .Values.master.antiAffinity .Values.master.nodeAffinity }} + affinity: + {{- end }} + {{- if eq .Values.master.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchLabels: + app: "{{ template "elasticsearch.name" . 
}}" + release: "{{ .Release.Name }}" + component: "{{ .Values.master.name }}" + {{- else if eq .Values.master.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + app: "{{ template "elasticsearch.name" . }}" + release: "{{ .Release.Name }}" + component: "{{ .Values.master.name }}" + {{- end }} + {{- with .Values.master.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} +{{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} +{{- end }} +{{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} +{{- end }} + initContainers: +{{- if .Values.sysctlInitContainer.enabled }} + # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall + - name: "sysctl" + image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }} + resources: +{{ toYaml .Values.master.initResources | indent 12 }} + command: ["sysctl", "-w", "vm.max_map_count=262144"] + securityContext: + privileged: true +{{- end }} + - name: "chown" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + resources: +{{ toYaml .Values.master.initResources | indent 12 }} + command: + - /bin/bash + - -c + - > + set -e; + set -x; + chown elasticsearch:elasticsearch /usr/share/elasticsearch/data; + for datadir in $(find /usr/share/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot"); do + chown -R elasticsearch:elasticsearch $datadir; + done; + chown elasticsearch:elasticsearch /usr/share/elasticsearch/logs; + for logfile in $(find /usr/share/elasticsearch/logs 
-mindepth 1 -maxdepth 1 -not -name ".snapshot"); do + chown -R elasticsearch:elasticsearch $logfile; + done + securityContext: + runAsUser: 0 + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: data +{{- if .Values.extraInitContainers }} +{{ tpl .Values.extraInitContainers . | indent 6 }} +{{- end }} +{{- if .Values.cluster.plugins }} +{{ include "plugin-installer" . | indent 6 }} +{{- end }} + containers: + - name: elasticsearch + env: + - name: NODE_DATA + value: "false" +{{- if hasPrefix "5." .Values.appVersion }} + - name: NODE_INGEST + value: "false" +{{- end }} + - name: DISCOVERY_SERVICE + value: {{ template "elasticsearch.fullname" . }}-discovery + - name: PROCESSORS + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: ES_JAVA_OPTS + value: "-Djava.net.preferIPv4Stack=true -Xms{{ .Values.master.heapSize }} -Xmx{{ .Values.master.heapSize }} {{ .Values.cluster.additionalJavaOpts }} {{ .Values.master.additionalJavaOpts }}" + {{- range $key, $value := .Values.cluster.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + resources: +{{ toYaml .Values.master.resources | indent 12 }} + readinessProbe: +{{ toYaml .Values.master.readinessProbe | indent 10 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 9300 + name: transport +{{ if .Values.master.exposeHttp }} + - containerPort: 9200 + name: http +{{ end }} + volumeMounts: + - mountPath: /usr/share/elasticsearch/data + name: data + - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml +{{- if .Values.cluster.plugins }} + - mountPath: /usr/share/elasticsearch/plugins/ + name: plugindir +{{- end }} +{{- if hasPrefix "2." .Values.appVersion }} + - mountPath: /usr/share/elasticsearch/config/logging.yml + name: config + subPath: logging.yml +{{- end }} +{{- if hasPrefix "5." 
.Values.appVersion }} + - mountPath: /usr/share/elasticsearch/config/log4j2.properties + name: config + subPath: log4j2.properties +{{- end }} +{{- if .Values.cluster.keystoreSecret }} + - name: keystore + mountPath: "/usr/share/elasticsearch/config/elasticsearch.keystore" + subPath: elasticsearch.keystore + readOnly: true +{{- end }} +{{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range $pullSecret := .Values.image.pullSecrets }} + - name: {{ $pullSecret }} + {{- end }} +{{- end }} + volumes: + - name: config + configMap: + name: {{ template "elasticsearch.fullname" . }} +{{- if .Values.cluster.plugins }} + - name: plugindir + emptyDir: {} +{{- end }} +{{- if .Values.cluster.keystoreSecret }} + - name: keystore + secret: + secretName: {{ .Values.cluster.keystoreSecret }} +{{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + podManagementPolicy: {{ .Values.master.podManagementPolicy }} + updateStrategy: + type: {{ .Values.master.updateStrategy.type }} + {{- if .Values.master.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ .Values.master.persistence.name }} + spec: + accessModes: + - {{ .Values.master.persistence.accessMode | quote }} + {{- if .Values.master.persistence.storageClass }} + {{- if (eq "-" .Values.master.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.master.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.master.persistence.size }}" + {{ end }} diff --git a/efk/elasticsearch/templates/master-svc.yaml b/efk/elasticsearch/templates/master-svc.yaml new file mode 100644 index 0000000..5db28b7 --- /dev/null +++ b/efk/elasticsearch/templates/master-svc.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "elasticsearch.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.master.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "elasticsearch.fullname" . }}-discovery +spec: + clusterIP: None + ports: + - port: 9300 + targetPort: transport + selector: + app: {{ template "elasticsearch.name" . }} + component: "{{ .Values.master.name }}" + release: {{ .Release.Name }} diff --git a/efk/elasticsearch/templates/podsecuritypolicy.yaml b/efk/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..ee38e35 --- /dev/null +++ b/efk/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: extensions/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "elasticsearch.fullname" . }} + labels: + app: {{ template "elasticsearch.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: +{{- if .Values.podSecurityPolicy.annotations }} +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: true + allowPrivilegeEscalation: true + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + runAsGroup: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1000 + max: 1000 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} diff --git a/efk/elasticsearch/templates/role.yaml b/efk/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..1e329c5 --- /dev/null +++ b/efk/elasticsearch/templates/role.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: {{ template "elasticsearch.fullname" . 
}} + labels: + app: {{ template "elasticsearch.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "elasticsearch.fullname" . }} +{{- end }} diff --git a/efk/elasticsearch/templates/rolebinding.yaml b/efk/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..3606960 --- /dev/null +++ b/efk/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,26 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: {{ template "elasticsearch.fullname" . }} + labels: + app: {{ template "elasticsearch.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +roleRef: + kind: Role + name: {{ template "elasticsearch.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "elasticsearch.serviceAccountName.client" . }} + namespace: {{ .Release.Namespace }} +- kind: ServiceAccount + name: {{ template "elasticsearch.serviceAccountName.data" . }} + namespace: {{ .Release.Namespace }} +- kind: ServiceAccount + name: {{ template "elasticsearch.serviceAccountName.master" . }} + namespace: {{ .Release.Namespace }} +{{- end }} + diff --git a/efk/elasticsearch/templates/tests/test-configmap.yaml b/efk/elasticsearch/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..f9a30c1 --- /dev/null +++ b/efk/elasticsearch/templates/tests/test-configmap.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.fullname" . }}-test + labels: + app: {{ template "elasticsearch.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +data: + run.sh: |- + @test "Test Access and Health" { + curl -D - http://{{ template "elasticsearch.client.fullname" . }}:9200 + curl -D - http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/health?wait_for_status=green + } diff --git a/efk/elasticsearch/templates/tests/test.yaml b/efk/elasticsearch/templates/tests/test.yaml new file mode 100644 index 0000000..b30d841 --- /dev/null +++ b/efk/elasticsearch/templates/tests/test.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "elasticsearch.fullname" . }}-test + labels: + app: {{ template "elasticsearch.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" + annotations: + "helm.sh/hook": test-success +spec: + initContainers: + - name: test-framework + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: + - "bash" + - "-c" + - | + set -ex + # copy bats to tools dir + cp -R /usr/local/libexec/ /tools/bats/ + volumeMounts: + - mountPath: /tools + name: tools + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: ["/tools/bats/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + - mountPath: /tools + name: tools + volumes: + - name: tests + configMap: + name: {{ template "elasticsearch.fullname" . }}-test + - name: tools + emptyDir: {} + restartPolicy: Never diff --git a/efk/elasticsearch/values.yaml b/efk/elasticsearch/values.yaml new file mode 100644 index 0000000..2f55978 --- /dev/null +++ b/efk/elasticsearch/values.yaml @@ -0,0 +1,230 @@ +# Default values for elasticsearch. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+appVersion: "6.7.0" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + client: + create: true + name: + master: + create: true + name: + data: + create: true + name: + +## Specify if a Pod Security Policy for elasticsearch must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + +image: + repository: "docker.elastic.co/elasticsearch/elasticsearch-oss" + tag: "6.7.0" + pullPolicy: "IfNotPresent" + # If specified, use these secrets to access the image + # pullSecrets: + # - registry-secret + +testFramework: + image: "dduportal/bats" + tag: "0.4.0" + +initImage: + repository: "busybox" + tag: "latest" + pullPolicy: "Always" + +cluster: + name: "elasticsearch" + # If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want + # enabled in the environment variables outlined in the README + xpackEnable: false + # Some settings must be placed in a keystore, so they need to be mounted in from a secret. 
+ # Use this setting to specify the name of the secret + # keystoreSecret: eskeystore + config: {} + # Custom parameters, as string, to be added to ES_JAVA_OPTS environment variable + additionalJavaOpts: "" + # Command to run at the end of deployment + bootstrapShellCommand: "" + env: + # IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes + # To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible + # node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster. + MINIMUM_MASTER_NODES: "2" + # List of plugins to install via dedicated init container + plugins: [] + # - ingest-attachment + # - mapper-size + +client: + name: client + replicas: 3 + serviceType: ClusterIP + loadBalancerIP: {} + loadBalancerSourceRanges: {} +## (dict) If specified, apply these annotations to the client service +# serviceAnnotations: +# example: client-svc-foo + heapSize: "512m" + # additionalJavaOpts: "-XX:MaxRAM=512m" + antiAffinity: "soft" + nodeAffinity: {} + nodeSelector: {} + tolerations: [] + initResources: {} + # limits: + # cpu: "25m" + # # memory: "128Mi" + # requests: + # cpu: "25m" + # memory: "128Mi" + resources: + limits: + cpu: "1" + memory: "1024Mi" + requests: + cpu: "25m" + memory: "512Mi" + priorityClassName: "" + ## (dict) If specified, apply these annotations to each client Pod + # podAnnotations: + # example: client-foo + podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 + ingress: + enabled: false + # user: NAME + # password: PASSWORD + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + path: / + hosts: + - chart-example.local + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +master: + name: master + exposeHttp: false + replicas: 4 + heapSize: "512m" + # additionalJavaOpts: 
"-XX:MaxRAM=512m" + persistence: + enabled: true + accessMode: ReadWriteOnce + name: data + size: "4Gi" + # storageClass: "ssd" + readinessProbe: + httpGet: + path: /_cluster/health?local=true + port: 9200 + initialDelaySeconds: 5 + antiAffinity: "soft" + nodeAffinity: {} + nodeSelector: {} + tolerations: [] + initResources: + limits: + cpu: "25m" + memory: "128Mi" + requests: + cpu: "25m" + memory: "128Mi" + resources: + limits: + cpu: "1" + memory: "1024Mi" + requests: + cpu: "25m" + memory: "512Mi" + priorityClassName: "" + ## (dict) If specified, apply these annotations to each master Pod + # podAnnotations: + # example: master-foo + podManagementPolicy: OrderedReady + podDisruptionBudget: + enabled: false + minAvailable: 2 # Same as `cluster.env.MINIMUM_MASTER_NODES` + # maxUnavailable: 1 + updateStrategy: + type: OnDelete + +data: + name: data + exposeHttp: false + replicas: 4 + heapSize: "1536m" + # additionalJavaOpts: "-XX:MaxRAM=1536m" + persistence: + enabled: true + accessMode: ReadWriteOnce + name: data + size: "30Gi" + # storageClass: "ssd" + readinessProbe: + httpGet: + path: /_cluster/health?local=true + port: 9200 + initialDelaySeconds: 5 + terminationGracePeriodSeconds: 3600 + antiAffinity: "soft" + nodeAffinity: {} + nodeSelector: {} + tolerations: [] + initResources: {} + # limits: + # cpu: "25m" + # # memory: "128Mi" + # requests: + # cpu: "25m" + # memory: "128Mi" + resources: + limits: + cpu: "1" + # memory: "2048Mi" + requests: + cpu: "25m" + memory: "1536Mi" + priorityClassName: "" + ## (dict) If specified, apply these annotations to each data Pod + # podAnnotations: + # example: data-foo + podDisruptionBudget: + enabled: false + # minAvailable: 1 + maxUnavailable: 1 + podManagementPolicy: OrderedReady + updateStrategy: + type: OnDelete + hooks: # post-start and pre-stop hooks + drain: # drain the node before stopping it and re-integrate it into the cluster after start + enabled: true + +## Sysctl init container to setup vm.max_map_count +# 
see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html +# and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall +sysctlInitContainer: + enabled: true +## Additional init containers +extraInitContainers: | diff --git a/efk/fluent-bit/Chart.yaml b/efk/fluent-bit/Chart.yaml new file mode 100644 index 0000000..a0bca37 --- /dev/null +++ b/efk/fluent-bit/Chart.yaml @@ -0,0 +1,18 @@ +name: fluent-bit +version: 1.9.2 +appVersion: 1.0.6 +description: Fast and Lightweight Log/Data Forwarder for Linux, BSD and OSX +keywords: +- logging +- monitoring +- fluent +- fluentd +sources: +- http://fluentbit.io +icon: http://fluentbit.io/assets/img/logo1-default.png +home: http://fluentbit.io +maintainers: +- name: kfox1111 + email: Kevin.Fox@pnnl.gov +- name: edsiper + email: eduardo@treasure-data.com diff --git a/efk/fluent-bit/OWNERS b/efk/fluent-bit/OWNERS new file mode 100644 index 0000000..3a9219e --- /dev/null +++ b/efk/fluent-bit/OWNERS @@ -0,0 +1,6 @@ +approvers: +- kfox1111 +- edsiper +reviewers: +- kfox1111 +- edsiper diff --git a/efk/fluent-bit/README.md b/efk/fluent-bit/README.md new file mode 100644 index 0000000..5608ffd --- /dev/null +++ b/efk/fluent-bit/README.md @@ -0,0 +1,152 @@ +# Fluent-Bit Chart + +[Fluent Bit](http://fluentbit.io/) is an open source and multi-platform Log Forwarder. 
+ +## Chart Details + +This chart will do the following: + +* Install a configmap for Fluent Bit +* Install a daemonset that provisions Fluent Bit [per-host architecture] + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release stable/fluent-bit +``` + +When installing this chart on [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/), it's required to specify that so the DaemonSet will be able to mount the log files properly, make sure to append the _--set on\_minikube=true_ option at the end of the _helm_ command, e.g: + +```bash +$ helm install --name my-release stable/fluent-bit --set on_minikube=true +``` + +## Configuration + +The following table lists the configurable parameters of the Fluent-Bit chart and the default values. + +| Parameter | Description | Default | +| ----------------------- | ---------------------------------- | ----------------------- | +| **Backend Selection** | +| `backend.type` | Set the backend to which Fluent-Bit should flush the information it gathers | `forward` | +| **Forward Backend** | +| `backend.forward.host` | Target host where Fluent-Bit or Fluentd are listening for Forward messages | `fluentd` | +| `backend.forward.port` | TCP Port of the target service | `24284` | +| `backend.forward.shared_key` | A key string known by the remote Fluentd used for authorization. | `` | +| `backend.forward.tls` | Enable or disable TLS support | `off` | +| `backend.forward.tls_verify` | Force certificate validation | `on` | +| `backend.forward.tls_debug` | Set TLS debug verbosity level. It accept the following values: 0-4 | `1` | +| **ElasticSearch Backend** | +| `backend.es.host` | IP address or hostname of the target Elasticsearch instance | `elasticsearch` | +| `backend.es.port` | TCP port of the target Elasticsearch instance. 
| `9200` | +| `backend.es.index` | Elastic Index name | `kubernetes_cluster` | +| `backend.es.type` | Elastic Type name | `flb_type` | +| `backend.es.time_key` | Elastic Time Key | `@timestamp` | +| `backend.es.logstash_prefix` | Index Prefix. If Logstash_Prefix is equals to 'mydata' your index will become 'mydata-YYYY.MM.DD'. | `kubernetes_cluster` | +| `backend.es.replace_dots` | Enable/Disable Replace_Dots option. | `On` | +| `backend.es.http_user` | Optional username credential for Elastic X-Pack access. | `` | +| `backend.es.http_passwd:` | Password for user defined in HTTP_User. | `` | +| `backend.es.tls` | Enable or disable TLS support | `off` | +| `backend.es.tls_verify` | Force certificate validation | `on` | +| `backend.es.tls_ca` | TLS CA certificate for the Elastic instance (in PEM format). Specify if tls: on. | `` | +| `backend.es.tls_debug` | Set TLS debug verbosity level. It accept the following values: 0-4 | `1` | +| **HTTP Backend** | +| `backend.http.host` | IP address or hostname of the target HTTP Server | `127.0.0.1` | +| `backend.http.port` | TCP port of the target HTTP Server | `80` | +| `backend.http.uri` | Specify an optional HTTP URI for the target web server, e.g: /something | `"/"` +| `backend.http.http_user` | Optional username credential for Basic Authentication. | `` | +| `backend.http.http_passwd:` | Password for user defined in HTTP_User. | `` | +| `backend.http.format` | Specify the data format to be used in the HTTP request body, by default it uses msgpack, optionally it can be set to json. | `msgpack` | +| `backend.http.tls` | Enable or disable TLS support | `off` | +| `backend.http.tls_verify` | Force certificate validation | `on` | +| `backend.http.tls_debug` | Set TLS debug verbosity level. 
It accepts the following values: 0-4 | `1` | +| **Splunk Backend** | +| `backend.splunk.host` | IP address or hostname of the target Splunk Server | `127.0.0.1` | +| `backend.splunk.port` | TCP port of the target Splunk Server | `8088` | +| `backend.splunk.token` | Specify the Authentication Token for the HTTP Event Collector interface. | `` | +| `backend.splunk.send_raw` | If enabled, record keys and values are set in the main map. | `off` | +| `backend.splunk.tls` | Enable or disable TLS support | `on` | +| `backend.splunk.tls_verify` | Force TLS certificate validation | `off` | +| `backend.splunk.tls_debug` | Set TLS debug verbosity level. It accepts the following values: 0-4 | `1` | +| `backend.splunk.message_key` | Tag applied to all incoming logs | `kubernetes` | +| **Parsers** | +| `parsers.enabled` | Enable custom parsers | `false` | +| `parsers.regex` | List of regex parsers | `NULL` | +| `parsers.json` | List of json parsers | `NULL` | +| **General** | +| `annotations` | Optional daemonset annotations | `NULL` | +| `podAnnotations` | Optional pod annotations | `NULL` | +| `podLabels` | Optional pod labels | `NULL` | +| `fullConfigMap` | User has provided entire config (parsers + system) | `false` | +| `existingConfigMap` | ConfigMap override | `` | +| `extraEntries.input` | Extra entries for existing [INPUT] section | `` | +| `extraEntries.filter` | Extra entries for existing [FILTER] section | `` | +| `extraEntries.output` | Extra entries for existing [OUTPUT] section | `` | +| `extraPorts` | List of extra ports | | +| `extraVolumeMounts` | Mount an extra volume, required to mount ssl certificates when elasticsearch has tls enabled | | +| `extraVolume` | Extra volume | | +| `service.flush` | Interval to flush output (seconds) | `1` | +| `service.logLevel` | Diagnostic level (error/warning/info/debug/trace) | `info` | +| `filter.enableExclude` | Enable the use of monitoring for a pod annotation of `fluentbit.io/exclude: true`. 
If present, discard logs from that pod. | `true` | +| `filter.enableParser` | Enable the use of monitoring for a pod annotation of `fluentbit.io/parser: parser_name`. parser_name must be the name of a parser contained within parsers.conf | `true` | +| `filter.kubeURL` | Optional custom configmaps | `https://kubernetes.default.svc:443` | +| `filter.kubeCAFile` | Optional custom configmaps | `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` | +| `filter.kubeTokenFile` | Optional custom configmaps | `/var/run/secrets/kubernetes.io/serviceaccount/token` | +| `filter.kubeTag` | Optional top-level tag for matching in filter | `kube` | +| `filter.mergeJSONLog` | If the log field content is a JSON string map, append the map fields as part of the log structure | `true` | +| `image.fluent_bit.repository` | Image | `fluent/fluent-bit` | +| `image.fluent_bit.tag` | Image tag | `1.0.6` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `nameOverride` | Override name of app | `nil` | +| `fullnameOverride` | Override full name of app | `nil` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `input.tail.memBufLimit` | Specify Mem_Buf_Limit in tail input | `5MB` | +| `input.tail.parser` | Specify Parser in tail input. | `docker` | +| `input.tail.path` | Specify log file(s) through the use of common wildcards. 
| `/var/log/containers/*.log` | +| `input.systemd.enabled` | [Enable systemd input](https://docs.fluentbit.io/manual/input/systemd) | `false` | +| `input.systemd.filters.systemdUnit` | Please see https://docs.fluentbit.io/manual/input/systemd | `[docker.service, kubelet.service`, `node-problem-detector.service]` | +| `input.systemd.maxEntries` | Please see https://docs.fluentbit.io/manual/input/systemd | `1000` | +| `input.systemd.readFromTail` | Please see https://docs.fluentbit.io/manual/input/systemd | `true` | +| `input.systemd.tag` | Please see https://docs.fluentbit.io/manual/input/systemd | `host.*` | +| `rbac.create` | Specifies whether RBAC resources should be created. | `true` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created. | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `NULL` | +| `rawConfig` | Raw contents of fluent-bit.conf | `@INCLUDE fluent-bit-service.conf`
`@INCLUDE fluent-bit-input.conf`
`@INCLUDE fluent-bit-filter.conf`
`@INCLUDE fluent-bit-output.conf` | +| `resources` | Pod resource requests & limits | `{}` | +| `hostNetwork` | Use host's network | `false` | +| `dnsPolicy` | Specifies the dnsPolicy to use | `ClusterFirst` | +| `tolerations` | Optional daemonset tolerations | `NULL` | +| `nodeSelector` | Node labels for fluent-bit pod assignment | `NULL` | +| `affinity` | Expressions for affinity | `NULL` | +| `metrics.enabled` | Specifies whether a service for metrics should be exposed | `true` | +| `metrics.service.annotations` | Optional metrics service annotations | `NULL` | +| `metrics.service.port` | Port on where metrics should be exposed | `2020` | +| `metrics.service.type` | Service type for metrics | `ClusterIP` | +| `trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left after pod restarts but requires access to a `hostPath` | `false` | +| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | +| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install --name my-release -f values.yaml stable/fluent-bit +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Upgrading + +### From < 1.0.0 To >= 1.0.0 + +Values `extraInputs`, `extraFilters` and `extraOutputs` have been removed in version `1.0.0` of the fluent-bit chart. +To add additional entries to the existing sections, please use the `extraEntries.input`, `extraEntries.filter` and `extraEntries.output` values. +For entire sections, please use the `rawConfig` value, inserting blocks of text as desired. 
+ +### From < 1.8.0 to >= 1.8.0 + +Version `1.8.0` introduces the use of release name as full name if it contains the chart name(fluent-bit in this case). E.g. with a release name of `fluent-bit`, this renames the DaemonSet from `fluent-bit-fluent-bit` to `fluent-bit`. The suggested approach is to delete the release and reinstall it. diff --git a/efk/fluent-bit/templates/NOTES.txt b/efk/fluent-bit/templates/NOTES.txt new file mode 100644 index 0000000..bbfcc0b --- /dev/null +++ b/efk/fluent-bit/templates/NOTES.txt @@ -0,0 +1,15 @@ +fluent-bit is now running. + +{{- if eq .Values.backend.type "forward" }} + +It will forward all container logs to the svc named {{ .Values.backend.forward.host }} on port: {{ .Values.backend.forward.port }} +{{- else if eq .Values.backend.type "es" }} + +It will forward all container logs to the svc named {{ .Values.backend.es.host }} on port: {{ .Values.backend.es.port }} +{{- else if eq .Values.backend.type "http" }} + +It will forward all container logs to the svc named {{ .Values.backend.http.host }} on port: {{ .Values.backend.http.port }} +{{- else if eq .Values.backend.type "splunk" }} + +It will forward all container logs to the svc named {{ .Values.backend.splunk.host }} on port: {{ .Values.backend.splunk.port }} +{{- end }} diff --git a/efk/fluent-bit/templates/_helpers.tpl b/efk/fluent-bit/templates/_helpers.tpl new file mode 100644 index 0000000..42453da --- /dev/null +++ b/efk/fluent-bit/templates/_helpers.tpl @@ -0,0 +1,57 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "fluent-bit.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "fluent-bit.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fluent-bit.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC APIs. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" -}} +rbac.authorization.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1beta1" -}} +rbac.authorization.k8s.io/v1beta1 +{{- else -}} +rbac.authorization.k8s.io/v1alpha1 +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "fluent-bit.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + diff --git a/efk/fluent-bit/templates/cluster-role.yaml b/efk/fluent-bit/templates/cluster-role.yaml new file mode 100644 index 0000000..c2979c5 --- /dev/null +++ b/efk/fluent-bit/templates/cluster-role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get +{{- end -}} diff --git a/efk/fluent-bit/templates/cluster-rolebinding.yaml b/efk/fluent-bit/templates/cluster-rolebinding.yaml new file mode 100644 index 0000000..140e527 --- /dev/null +++ b/efk/fluent-bit/templates/cluster-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "fluent-bit.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "fluent-bit.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/efk/fluent-bit/templates/config.yaml b/efk/fluent-bit/templates/config.yaml new file mode 100644 index 0000000..ef69ac5 --- /dev/null +++ b/efk/fluent-bit/templates/config.yaml @@ -0,0 +1,194 @@ +{{- if (empty .Values.existingConfigMap) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fluent-bit.fullname" . }}-config + labels: + app: {{ template "fluent-bit.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + fluent-bit-service.conf: |- + [SERVICE] + Flush {{ .Values.service.flush }} + Daemon Off + Log_Level {{ .Values.service.logLevel }} + Parsers_File parsers.conf +{{- if .Values.parsers.enabled }} + Parsers_File parsers_custom.conf +{{- end }} +{{- if .Values.metrics.enabled }} + HTTP_Server On + HTTP_Listen 0.0.0.0 + HTTP_Port 2020 +{{- end }} + + fluent-bit-input.conf: |- + [INPUT] + Name tail + Path {{ .Values.input.tail.path }} + Parser {{ .Values.input.tail.parser }} + Tag {{ .Values.filter.kubeTag }}.* + Refresh_Interval 5 + Mem_Buf_Limit {{ .Values.input.tail.memBufLimit }} + Skip_Long_Lines On +{{- if .Values.trackOffsets }} + DB /tail-db/tail-containers-state.db + DB.Sync Normal +{{- end }} +{{- if .Values.input.systemd.enabled }} + [INPUT] + Name systemd + Tag {{ .Values.input.systemd.tag }} +{{- range $value := .Values.input.systemd.filters.systemdUnit }} + Systemd_Filter _SYSTEMD_UNIT={{ $value }} +{{- end }} + Max_Entries {{ .Values.input.systemd.maxEntries }} + Read_From_Tail {{ .Values.input.systemd.readFromTail }} +{{- end }} +{{ .Values.extraEntries.input | indent 8 }} + + fluent-bit-filter.conf: |- + [FILTER] + Name kubernetes + Match {{ .Values.filter.kubeTag }}.* + Kube_URL {{ .Values.filter.kubeURL }} + Kube_CA_File {{ .Values.filter.kubeCAFile }} + Kube_Token_File {{ .Values.filter.kubeTokenFile }} +{{- if .Values.filter.mergeJSONLog }} + Merge_Log On +{{- end }} +{{- if .Values.filter.enableParser }} + K8S-Logging.Parser On +{{- end }} +{{- if .Values.filter.enableExclude }} + K8S-Logging.Exclude On +{{- end }} +{{ .Values.extraEntries.filter | indent 8 }} + + fluent-bit-output.conf: |- +{{ if eq .Values.backend.type "test" }} + [OUTPUT] + Name file + Match * + Path /tmp/fluent-bit.log +{{ else if eq .Values.backend.type "forward" }} + [OUTPUT] + Name forward + Match * + Host {{ .Values.backend.forward.host }} + Port {{ 
.Values.backend.forward.port }} + Retry_Limit False +{{- if .Values.backend.forward.shared_key }} + Shared_Key {{ .Values.backend.forward.shared_key }} +{{- end }} +{{ else if eq .Values.backend.type "es" }} + [OUTPUT] + Name es + Match * + Host {{ .Values.backend.es.host }} + Port {{ .Values.backend.es.port }} + Logstash_Format On + Retry_Limit False + Type {{ .Values.backend.es.type }} +{{- if .Values.backend.es.time_key }} + Time_Key {{ .Values.backend.es.time_key }} +{{- end }} +{{- if .Values.backend.es.replace_dots }} + Replace_Dots {{ .Values.backend.es.replace_dots }} +{{- end }} +{{- if .Values.backend.es.logstash_prefix }} + Logstash_Prefix {{ .Values.backend.es.logstash_prefix }} +{{ else if .Values.backend.es.index }} + Index {{ .Values.backend.es.index }} +{{- end }} +{{- if .Values.backend.es.http_user }} + HTTP_User {{ .Values.backend.es.http_user }} + HTTP_Passwd {{ .Values.backend.es.http_passwd }} +{{- end }} +{{if eq .Values.backend.es.tls "on" }} + tls {{ .Values.backend.es.tls }} + tls.verify {{ .Values.backend.es.tls_verify }} + tls.debug {{ .Values.backend.es.tls_debug }} +{{- if .Values.backend.es.tls_ca }} + tls.ca_file /secure/es-tls-ca.crt +{{- end }} +{{- end }} +{{ else if eq .Values.backend.type "splunk" }} + [OUTPUT] + Name splunk + Match * + Host {{ .Values.backend.splunk.host }} + Port {{ .Values.backend.splunk.port }} + Splunk_Token {{ .Values.backend.splunk.token }} + Splunk_Send_Raw {{ .Values.backend.splunk.send_raw}} + TLS {{ .Values.backend.splunk.tls }} + TLS.Verify {{ .Values.backend.splunk.tls_verify }} + tls.debug {{ .Values.backend.splunk.tls_debug }} + Message_Key {{ .Values.backend.splunk.message_key }} +{{ else if eq .Values.backend.type "http" }} + [OUTPUT] + Name http + Match * + Host {{ .Values.backend.http.host }} + Port {{ .Values.backend.http.port }} + URI {{ .Values.backend.http.uri }} +{{- if .Values.backend.http.http_user }} + HTTP_User {{ .Values.backend.http.http_user }} + HTTP_Passwd {{ 
.Values.backend.http.http_passwd }} +{{- end }} + tls {{ .Values.backend.http.tls }} + tls.verify {{ .Values.backend.http.tls_verify }} + tls.debug {{ .Values.backend.http.tls_debug }} +{{- if .Values.backend.http.proxy }} + Proxy {{ .Values.backend.http.proxy }} +{{- end }} + Format {{ .Values.backend.http.format }} +{{- end }} +{{ .Values.extraEntries.output | indent 8 }} + + + fluent-bit.conf: |- +{{ .Values.rawConfig | indent 4 }} + + parsers.conf: |- +{{- if .Values.parsers.regex }} +{{- range .Values.parsers.regex }} + [PARSER] + Name {{ .name }} + Format regex + Regex {{ .regex }} +{{- if .timeKey }} + Time_Key {{ .timeKey }} +{{- end }} +{{- if .timeFormat }} + Time_Format {{ .timeFormat }} +{{- end }} +{{ end }} +{{- end }} +{{- if .Values.parsers.json }} +{{- range .Values.parsers.json }} + [PARSER] + Name {{ .name }} + Format json +{{- if .timeKeep }} + Time_Keep {{ .timeKeep }} +{{- end }} +{{- if .timeKey }} + Time_Key {{ .timeKey }} +{{- end }} +{{- if .timeFormat }} + Time_Format {{ .timeFormat }} +{{- end }} +{{- if .decodeFieldAs }} + Decode_Field_As {{ .decodeFieldAs }} {{ .decodeField | default "log" }} +{{- end}} +{{- if .extraEntries }} +{{ .extraEntries | indent 8 }} +{{- end }} +{{ end }} +{{- end }} + +{{- end -}} + diff --git a/efk/fluent-bit/templates/daemonset.yaml b/efk/fluent-bit/templates/daemonset.yaml new file mode 100644 index 0000000..d1bf032 --- /dev/null +++ b/efk/fluent-bit/templates/daemonset.yaml @@ -0,0 +1,154 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: {{ template "fluent-bit.fullname" . }} + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: {{ template "fluent-bit.name" . 
}} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: +{{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- end }} + hostNetwork: {{ .Values.hostNetwork }} + dnsPolicy: {{ .Values.dnsPolicy }} + serviceAccountName: {{ template "fluent-bit.serviceAccountName" . }} + containers: + - name: fluent-bit + image: "{{ .Values.image.fluent_bit.repository }}:{{ .Values.image.fluent_bit.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + env: +{{ toYaml .Values.env | indent 10 }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- if or .Values.metrics.enabled .Values.extraPorts }} + ports: +{{- if .Values.metrics.enabled }} + - name: metrics + containerPort: 2020 + protocol: TCP +{{- end -}} +{{- if .Values.extraPorts }} +{{ toYaml .Values.extraPorts | indent 8 }} +{{- end }} +{{- end }} + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + {{- if .Values.input.systemd.enabled }} + - name: etcmachineid + mountPath: /etc/machine-id + readOnly: true + {{- end }} +{{- if .Values.fullConfigMap }} + - name: config + mountPath: /fluent-bit/etc +{{- else }} + - name: config + mountPath: /fluent-bit/etc/fluent-bit.conf + subPath: fluent-bit.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-service.conf + subPath: fluent-bit-service.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-input.conf + subPath: fluent-bit-input.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-filter.conf + subPath: fluent-bit-filter.conf + - name: config + mountPath: /fluent-bit/etc/fluent-bit-output.conf + subPath: 
fluent-bit-output.conf + +{{- if .Values.parsers.enabled }} + - name: config + mountPath: /fluent-bit/etc/parsers_custom.conf + subPath: parsers.conf +{{- end }} +{{- end }} +{{- if .Values.backend.es.tls_ca }} + - name: es-tls-secret + mountPath: /secure/es-tls-ca.crt + subPath: es-tls-ca.crt +{{- end }} +{{- if .Values.trackOffsets }} + - name: tail-db + mountPath: /tail-db +{{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} +{{- end }} +{{ if .Values.on_minikube }} + - name: mnt + mountPath: /mnt + readOnly: true +{{ end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{ if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + {{- if .Values.input.systemd.enabled }} + - name: etcmachineid + hostPath: + path: /etc/machine-id + type: File + {{- end }} +{{- if .Values.backend.es.tls_ca }} + - name: es-tls-secret + secret: + secretName: "{{ template "fluent-bit.fullname" . }}-es-tls-secret" +{{- end }} +{{- if .Values.trackOffsets }} + - name: tail-db + hostPath: + path: {{ .Values.taildb.directory }} + type: DirectoryOrCreate +{{- end }} + - name: config + configMap: + name: {{ if .Values.existingConfigMap }}{{ .Values.existingConfigMap }}{{- else }}{{ template "fluent-bit.fullname" . 
}}-config{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 6 }} +{{- end }} +{{ if .Values.on_minikube }} + - name: mnt + hostPath: + path: /mnt +{{ end }} diff --git a/efk/fluent-bit/templates/secret.yaml b/efk/fluent-bit/templates/secret.yaml new file mode 100644 index 0000000..999c2da --- /dev/null +++ b/efk/fluent-bit/templates/secret.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: "{{ template "fluent-bit.fullname" . }}-es-tls-secret" + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + es-tls-ca.crt: {{ .Values.backend.es.tls_ca | b64enc | quote }} diff --git a/efk/fluent-bit/templates/service.yaml b/efk/fluent-bit/templates/service.yaml new file mode 100644 index 0000000..df06ed1 --- /dev/null +++ b/efk/fluent-bit/templates/service.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.metrics.service.annotations }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +{{- end }} + name: {{ template "fluent-bit.fullname" . }}-metrics + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.metrics.service.type}} + sessionAffinity: None + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + name: metrics + selector: + app: {{ template "fluent-bit.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/efk/fluent-bit/templates/serviceaccount.yaml b/efk/fluent-bit/templates/serviceaccount.yaml new file mode 100644 index 0000000..c7f4307 --- /dev/null +++ b/efk/fluent-bit/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "fluent-bit.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "fluent-bit.serviceAccountName" . }} +{{- end -}} diff --git a/efk/fluent-bit/templates/tests/test-configmap.yaml b/efk/fluent-bit/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..445d6ce --- /dev/null +++ b/efk/fluent-bit/templates/tests/test-configmap.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "fluent-bit.fullname" . }}-test + labels: + app: {{ template "fluent-bit.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +data: + run.sh: |- + {{- if eq .Values.backend.type "forward"}} + {{- if eq .Values.backend.forward.tls "on"}} + fluent-gem install fluent-plugin-secure-forward + {{- end }} + @test "Test Access" { + fluentd -c /tests/fluentd.conf --dry-run + } + {{- else if eq .Values.backend.type "es"}} + @test "Test Elasticssearch Indices" { + url="http://{{ .Values.backend.es.host }}:{{ .Values.backend.es.port }}/_cat/indices?format=json" + result=$(curl url | jq -cr '.[] | select((.index | contains("kubernetes_cluster")) and (.health != "green"))') + [ "$result" == "" ] + } + {{- end }} + + fluentd.conf: |- + + {{- if eq .Values.backend.forward.tls "off" }} + @type forward + bind 0.0.0.0 + port {{ .Values.backend.forward.port }} + {{- else }} + @type secure_forward + self_hostname myserver.local + secure no + {{- end }} + shared_key {{ .Values.backend.forward.shared_key }} + + + + @type stdout + diff --git a/efk/fluent-bit/templates/tests/test.yaml b/efk/fluent-bit/templates/tests/test.yaml new file mode 100644 index 0000000..c80e2e8 --- /dev/null +++ b/efk/fluent-bit/templates/tests/test.yaml @@ -0,0 +1,53 @@ +{{- if or (eq .Values.backend.type "forward") (and (eq .Values.backend.type "es") (eq .Values.backend.es.tls "off")) }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "fluent-bit.fullname" . }}-test + labels: + app: {{ template "fluent-bit.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" + annotations: + "helm.sh/hook": test-success +spec: + initContainers: + - name: test-framework + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: + - "bash" + - "-c" + - | + set -ex + # copy bats to tools dir + cp -R /usr/local/libexec/ /tools/bats/ + volumeMounts: + - mountPath: /tools + name: tools + containers: + - name: {{ .Release.Name }}-test + {{- if eq .Values.backend.type "forward"}} + image: "fluent/fluentd:v1.4-debian-1" + {{- else }} + image: "dwdraju/alpine-curl-jq" + {{- end }} + command: ["/tools/bats/bats", "-t", "/tests/run.sh"] + {{- if eq .Values.backend.forward.tls "on"}} + securityContext: + # run as root to install fluent gems + runAsUser: 0 + {{- end }} + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + - mountPath: /tools + name: tools + volumes: + - name: tests + configMap: + name: {{ template "fluent-bit.fullname" . }}-test + - name: tools + emptyDir: {} + restartPolicy: Never +{{- end }} diff --git a/efk/fluent-bit/values.yaml b/efk/fluent-bit/values.yaml new file mode 100644 index 0000000..da8f042 --- /dev/null +++ b/efk/fluent-bit/values.yaml @@ -0,0 +1,244 @@ +# Minikube stores its logs in a separate directory. +# enable if started in minikube. 
+on_minikube: false + +image: + fluent_bit: + repository: fluent/fluent-bit + tag: 1.0.6 + pullPolicy: IfNotPresent + +testFramework: + image: "dduportal/bats" + tag: "0.4.0" + +nameOverride: "" +fullnameOverride: "" + +# When enabled, exposes json and prometheus metrics on {{ .Release.Name }}-metrics service +metrics: + enabled: true + service: + annotations: {} + # In order for Prometheus to consume metrics automatically use the following annotations: + # prometheus.io/path: "/api/v1/metrics/prometheus" + # prometheus.io/port: "2020" + # prometheus.io/scrape: "true" + port: 2020 + type: ClusterIP + +# When enabled, fluent-bit will keep track of tailing offsets across pod restarts. +trackOffsets: false + +backend: + type: es + forward: + host: fluentd + port: 24284 + tls: "off" + tls_verify: "on" + tls_debug: 1 + shared_key: + es: + host: elasticsearch-client + port: 9200 + # Elastic Index Name + index: kubernetes_cluster + type: flb_type + logstash_prefix: kubernetes_cluster + replace_dots: "On" + time_key: "@es_timestamp" + # Optional username credential for Elastic X-Pack access + http_user: + # Password for user defined in HTTP_User + http_passwd: + # Optional TLS encryption to ElasticSearch instance + tls: "off" + tls_verify: "on" + # TLS certificate for the Elastic (in PEM format). Use if tls=on and tls_verify=on. 
+ tls_ca: "" + # TLS debugging levels = 1-4 + tls_debug: 1 + splunk: + host: 127.0.0.1 + port: 8088 + token: "" + send_raw: "on" + tls: "on" + tls_verify: "off" + tls_debug: 1 + message_key: "kubernetes" + + ## + ## Ref: http://fluentbit.io/documentation/current/output/http.html + ## + http: + host: 127.0.0.1 + port: 80 + uri: "/" + http_user: + http_passwd: + tls: "off" + tls_verify: "on" + tls_debug: 1 + ## Specify the data format to be used in the HTTP request body + ## Can be either 'msgpack' or 'json' + format: msgpack + +parsers: + enabled: true + ## List the respective parsers in key: value format per entry + ## Regex required fields are name and regex. JSON required field + ## is name. + regex: [] + ## json parser config can be defined by providing an extraEntries field. + ## The following entry: + ## json: + ## - extraEntries: | + ## Decode_Field_As escaped log do_next + ## Decode_Field_As json log + ## + ## translates into + ## + ## Command | Decoder | Field | Optional Action | + ## ==============|===========|=======|===================| + ## Decode_Field_As escaped log do_next + ## Decode_Field_As json log + ## + json: + - name: json + timeKey: time + - name: docker + timeKey: time + decodeFieldAs: "escaped" + decodeField: "log" + +env: [] + +## Annotations to add to the DaemonSet's Pods +podAnnotations: {} + +## By default there different 'files' provides in the config +## (fluent-bit.conf, custom_parsers.conf). This defeats +## changing a configmap (since it uses subPath). If this +## variable is set, the user is assumed to have provided, +## in 'existingConfigMap' the entire config (etc/*) of fluent-bit, +## parsers and system config. 
In this case, no subPath is +## used +fullConfigMap: false + +## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.existingConfigMap}} +## Defining existingConfigMap will cause templates/config.yaml +## to NOT generate a ConfigMap resource +## +existingConfigMap: "" + +rawConfig: |- + @INCLUDE fluent-bit-service.conf + @INCLUDE fluent-bit-input.conf + @INCLUDE fluent-bit-filter.conf + @INCLUDE fluent-bit-output.conf + +extraEntries: + input: |- +# # >=1 additional Key/Value entries for existing Input section + filter: |- +# # >=1 additional Key/Value entries for existing Filter section + output: |- +# # >=1 additional Key/Value entries for existing Output section + +## Extra ports to add to the daemonset ports section +extraPorts: [] + +## Extra volumes containing additional files required for fluent-bit to work +## (eg. CA certificates) +## Ref: https://kubernetes.io/docs/concepts/storage/volumes/ +## +extraVolumes: [] + +## Extra volume mounts for the fluent-bit pod. +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/ +## +extraVolumeMounts: [] + +resources: + limits: + cpu: 100m + memory: 500Mi + requests: + cpu: 10m + memory: 8Mi + +# When enabled, pods will bind to the node's network namespace. +hostNetwork: false + +# Which DNS policy to use for the pod. +# Consider switching to 'ClusterFirstWithHostNet' when 'hostNetwork' is enabled. 
+dnsPolicy: ClusterFirst + +## Node tolerations for fluent-bit scheduling to nodes with taints +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## +tolerations: [] +# - key: "key" +# operator: "Equal|Exists" +# value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +## Node labels for fluent-bit pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +affinity: {} + +service: + flush: 1 + logLevel: info + +input: + tail: + memBufLimit: 5MB + parser: docker + path: /var/log/containers/*.log + systemd: + enabled: false + filters: + systemdUnit: + - docker.service + - kubelet.service + - node-problem-detector.service + maxEntries: 1000 + readFromTail: true + tag: host.* + +filter: + kubeURL: https://kubernetes.default.svc:443 + kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kubeTag: kube +# If true, check to see if the log field content is a JSON string map, if so, +# it append the map fields as part of the log structure. + mergeJSONLog: false + +# If true, enable the use of monitoring for a pod annotation of +# fluentbit.io/parser: parser_name. parser_name must be the name +# of a parser contained within parsers.conf + enableParser: true + +# If true, enable the use of monitoring for a pod annotation of +# fluentbit.io/exclude: true. If present, discard logs from that pod. + enableExclude: true + +rbac: + # Specifies whether RBAC resources should be created + create: true + +taildb: + directory: /var/lib/fluent-bit + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: diff --git a/efk/kibana/.helmignore b/efk/kibana/.helmignore new file mode 100644 index 0000000..c13e3c8 --- /dev/null +++ b/efk/kibana/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj \ No newline at end of file diff --git a/efk/kibana/Chart.yaml b/efk/kibana/Chart.yaml new file mode 100644 index 0000000..96bb894 --- /dev/null +++ b/efk/kibana/Chart.yaml @@ -0,0 +1,17 @@ +name: kibana +version: 2.2.0 +appVersion: 6.7.0 +description: Kibana is an open source data visualization plugin for Elasticsearch +icon: https://raw.githubusercontent.com/elastic/kibana/master/src/ui/public/icons/kibana-color.svg +keywords: +- elasticsearch +- kibana +maintainers: +- name: compleatang + email: casey@monax.io +- name: monotek + email: monotek23@gmail.com +sources: +- https://github.com/elastic/kibana +engine: gotpl +home: https://www.elastic.co/products/kibana diff --git a/efk/kibana/OWNERS b/efk/kibana/OWNERS new file mode 100644 index 0000000..71578f0 --- /dev/null +++ b/efk/kibana/OWNERS @@ -0,0 +1,6 @@ +approvers: +- compleatang +- monotek +reviewers: +- compleatang +- monotek diff --git a/efk/kibana/README.md b/efk/kibana/README.md new file mode 100644 index 0000000..c900c88 --- /dev/null +++ b/efk/kibana/README.md @@ -0,0 +1,140 @@ +# kibana + +[kibana](https://github.com/elastic/kibana) is your window into the Elastic Stack. Specifically, it's an open source (Apache Licensed), browser-based analytics and search dashboard for Elasticsearch. 
+ +## TL;DR; + +```console +$ helm install stable/kibana +``` + +## Introduction + +This chart bootstraps a kibana deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm install stable/kibana --name my-release +``` + +The command deploys kibana on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following table lists the configurable parameters of the kibana chart and their default values. + +| Parameter | Description | Default | +| ------------------------------------------ | ---------------------------------------------------------------------- | ------------------------------------- | +| `affinity` | node/pod affinities | None | +| `env` | Environment variables to configure Kibana | `{}` | +| `files` | Kibana configuration files | None | +| `livenessProbe.enabled` | livenessProbe to be enabled? 
| `false` | +| `livenessProbe.path` | path for livenessProbe | `/status` | +| `livenessProbe.initialDelaySeconds` | number of seconds | 30 | +| `livenessProbe.timeoutSeconds` | number of seconds | 10 | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.repository` | Image repository | `docker.elastic.co/kibana/kibana-oss` | +| `image.tag` | Image tag | `6.7.0` | +| `image.pullSecrets` | Specify image pull secrets | `nil` | +| `commandline.args` | add additional commandline args | `nil` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations | None: | +| `ingress.hosts` | Ingress accepted hostnames | None: | +| `ingress.tls` | Ingress TLS configuration | None: | +| `nodeSelector` | node labels for pod assignment | `{}` | +| `podAnnotations` | annotations to add to each pod | `{}` | +| `podLabels` | labels to add to each pod | `{}` | +| `replicaCount` | desired number of pods | `1` | +| `revisionHistoryLimit` | revisionHistoryLimit | `3` | +| `serviceAccountName` | DEPRECATED: use serviceAccount.name | `nil` | +| `serviceAccount.create` | create a serviceAccount to run the pod | `false` | +| `serviceAccount.name` | name of the serviceAccount to create | `kibana.fullname` | +| `authProxyEnabled` | enables authproxy. 
Create container in extracontainers | `false` | +| `extraContainers` | Sidecar containers to add to the kibana pod | `{}` | +| `extraVolumeMounts` | additional volumemounts for the kibana pod | `[]` | +| `extraVolumes` | additional volumes to add to the kibana pod | `[]` | +| `resources` | pod resource requests & limits | `{}` | +| `priorityClassName` | priorityClassName | `nil` | +| `service.externalPort` | external port for the service | `443` | +| `service.internalPort` | internal port for the service | `4180` | +| `service.portName` | service port name | None: | +| `service.authProxyPort` | port to use when using sidecar authProxy | None: | +| `service.externalIPs` | external IP addresses | None: | +| `service.loadBalancerIP` | Load Balancer IP address | None: | +| `service.loadBalancerSourceRanges` | Limit load balancer source IPs to list of CIDRs (where available)) | `[]` | +| `service.nodePort` | NodePort value if service.type is NodePort | None: | +| `service.type` | type of service | `ClusterIP` | +| `service.clusterIP` | static clusterIP or None for headless services | None: | +| `service.annotations` | Kubernetes service annotations | None: | +| `service.labels` | Kubernetes service labels | None: | +| `service.selector` | Kubernetes service selector | `{}` | +| `tolerations` | List of node taints to tolerate | `[]` | +| `dashboardImport.enabled` | Enable dashboard import | `false` | +| `dashboardImport.timeout` | Time in seconds waiting for Kibana to be in green overall state | `60` | +| `dashboardImport.xpackauth.enabled` | Enable Xpack auth | `false` | +| `dashboardImport.xpackauth.username` | Optional Xpack username | `myuser` | +| `dashboardImport.xpackauth.password` | Optional Xpack password | `mypass` | +| `dashboardImport.dashboards` | Dashboards | `{}` | +| `plugins.enabled` | Enable installation of plugins. 
| `false` | +| `plugins.reset` | Optional : Remove all installed plugins before installing all new ones | `false` | +| `plugins.values` | List of plugins to install. Format | None: | +| `persistentVolumeClaim.enabled` | Enable PVC for plugins | `false` | +| `persistentVolumeClaim.existingClaim` | Use your own PVC for plugins | `false` | +| `persistentVolumeClaim.annotations` | Add your annotations for the PVC | `{}` | +| `persistentVolumeClaim.accessModes` | Acces mode to the PVC | `ReadWriteOnce` | +| `persistentVolumeClaim.size` | Size of the PVC | `5Gi` | +| `persistentVolumeClaim.storageClass` | Storage class of the PVC | None: | +| `readinessProbe.enabled` | readinessProbe to be enabled? | `false` | +| `readinessProbe.path` | path for readinessProbe | `/status` | +| `readinessProbe.initialDelaySeconds` | number of seconds | 30 | +| `readinessProbe.timeoutSeconds` | number of seconds | 10 | +| `readinessProbe.periodSeconds` | number of seconds | 10 | +| `readinessProbe.successThreshold` | number of successes | 5 | +| `securityContext.enabled` | Enable security context (should be true for PVC) | `false` | +| `securityContext.allowPrivilegeEscalation` | Allow privilege escalation | `false` | +| `securityContext.runAsUser` | User id to run in pods | `1000` | +| `securityContext.fsGroup` | fsGroup id to run in pods | `2000` | +| `extraConfigMapMounts` | Additional configmaps to be mounted | `[]` | +| `deployment.annotations` | Annotations for deployment | `{}` | +| `initContainers` | Init containers to add to the kibana deployment | `{}` | +| `testFramework.image` | `test-framework` image repository. | `dduportal/bats` | +| `testFramework.tag` | `test-framework` image tag. | `0.4.0` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +- The Kibana configuration files config properties can be set through the `env` parameter too. 
+- All the files listed under this variable will overwrite any existing files by the same name in the Kibana config directory. +- Files not mentioned under this variable will remain unaffected. + +```console +$ helm install stable/kibana --name my-release \ + --set=image.tag=v0.0.2,resources.limits.cpu=200m +``` + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example: + +```console +$ helm install stable/kibana --name my-release -f values.yaml +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Dashboard import + +- A dashboard for dashboardImport.dashboards can be a JSON document or a download URL to a JSON file. diff --git a/efk/kibana/ci/authproxy-enabled.yaml b/efk/kibana/ci/authproxy-enabled.yaml new file mode 100644 index 0000000..186724a --- /dev/null +++ b/efk/kibana/ci/authproxy-enabled.yaml @@ -0,0 +1,3 @@ +--- +# disable internal port by setting authProxyEnabled +authProxyEnabled: true diff --git a/efk/kibana/ci/dashboard-values.yaml b/efk/kibana/ci/dashboard-values.yaml new file mode 100644 index 0000000..e3516ce --- /dev/null +++ b/efk/kibana/ci/dashboard-values.yaml @@ -0,0 +1,21 @@ +--- +# enable the dashboard init container with dashboard embedded in configmap + +dashboardImport: + enabled: true + dashboards: + 1_create_index: |- + { + "version": "6.7.0", + "objects": [ + { + "id": "a88738e0-d3c1-11e8-b38e-a37c21cf8c95", + "version": 2, + "attributes": { + "title": "logstash-*", + "timeFieldName": "@timestamp", + "fields": "[{\"name\":\"@timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]" + } + } + ] + } diff --git a/efk/kibana/ci/extra-configmap-mounts.yaml b/efk/kibana/ci/extra-configmap-mounts.yaml new file mode 100644 index 0000000..c4ccd57 --- /dev/null +++ b/efk/kibana/ci/extra-configmap-mounts.yaml @@ -0,0 +1,6 @@ +--- +extraConfigMapMounts: + - name: logtrail-configs + 
configMap: kibana-logtrail + mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json + subPath: logtrail.json diff --git a/efk/kibana/ci/ingress-hosts-paths.yaml b/efk/kibana/ci/ingress-hosts-paths.yaml new file mode 100644 index 0000000..f9cca09 --- /dev/null +++ b/efk/kibana/ci/ingress-hosts-paths.yaml @@ -0,0 +1,3 @@ +ingress: + hosts: + - localhost.localdomain/kibana diff --git a/efk/kibana/ci/ingress-hosts.yaml b/efk/kibana/ci/ingress-hosts.yaml new file mode 100644 index 0000000..7d8de7a --- /dev/null +++ b/efk/kibana/ci/ingress-hosts.yaml @@ -0,0 +1,3 @@ +ingress: + hosts: + - kibana.localhost.localdomain diff --git a/efk/kibana/ci/initcontainers-all-values.yaml b/efk/kibana/ci/initcontainers-all-values.yaml new file mode 100644 index 0000000..297220b --- /dev/null +++ b/efk/kibana/ci/initcontainers-all-values.yaml @@ -0,0 +1,23 @@ +--- +# enable all init container types + +# A dashboard is defined by a name and a string with the json payload or the download url +dashboardImport: + enabled: true + dashboards: + k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json + +# Enable the plugin init container with plugins retrieved from an URL +plugins: + enabled: true + reset: false + # Use to add/upgrade plugin + values: + - analyze-api-ui-plugin,6.7.0,https://github.com/johtani/analyze-api-ui-plugin/releases/download/6.7.0/analyze-api-ui-plugin-6.7.0.zip + # - other_plugin + +# Add your own init container +initContainers: + echo-container: + image: "busybox" + command: ['sh', '-c', 'echo Hello from init container! 
&& sleep 3'] diff --git a/efk/kibana/ci/initcontainers-values.yaml b/efk/kibana/ci/initcontainers-values.yaml new file mode 100644 index 0000000..70d939c --- /dev/null +++ b/efk/kibana/ci/initcontainers-values.yaml @@ -0,0 +1,18 @@ +--- +# enable user-defined init containers + +initContainers: + numbers-container: + image: "busybox" + imagePullPolicy: "IfNotPresent" + command: + - "/bin/sh" + - "-c" + - | + for i in $(seq 1 10); do + echo $i + done + + echo-container: + image: "busybox" + command: ['sh', '-c', 'echo Hello from init container! && sleep 3'] diff --git a/efk/kibana/ci/plugin-install.yaml b/efk/kibana/ci/plugin-install.yaml new file mode 100644 index 0000000..6c9da5c --- /dev/null +++ b/efk/kibana/ci/plugin-install.yaml @@ -0,0 +1,9 @@ +--- +# enable the plugin init container with plugins retrieved from an URL +plugins: + enabled: true + reset: false + # Use to add/upgrade plugin + values: + - analyze-api-ui-plugin,6.7.0,https://github.com/johtani/analyze-api-ui-plugin/releases/download/6.7.0/analyze-api-ui-plugin-6.7.0.zip + # - other_plugin diff --git a/efk/kibana/ci/pvc.yaml b/efk/kibana/ci/pvc.yaml new file mode 100644 index 0000000..3f96fa0 --- /dev/null +++ b/efk/kibana/ci/pvc.yaml @@ -0,0 +1,11 @@ +--- +persistentVolumeClaim: + # set to true to use pvc + enabled: true + # set to true to use you own pvc + existingClaim: false + annotations: {} + + accessModes: + - ReadWriteOnce + size: "5Gi" \ No newline at end of file diff --git a/efk/kibana/ci/security-context.yaml b/efk/kibana/ci/security-context.yaml new file mode 100644 index 0000000..d12ea2b --- /dev/null +++ b/efk/kibana/ci/security-context.yaml @@ -0,0 +1,6 @@ +--- +securityContext: + enabled: true + allowPrivilegeEscalation: false + runAsUser: 1000 + fsGroup: 2000 \ No newline at end of file diff --git a/efk/kibana/ci/service-values.yaml b/efk/kibana/ci/service-values.yaml new file mode 100644 index 0000000..cebf52a --- /dev/null +++ b/efk/kibana/ci/service-values.yaml @@ -0,0 +1,4 @@ 
+--- +service: + selector: + foo: bar diff --git a/efk/kibana/ci/url_dashboard-values.yaml b/efk/kibana/ci/url_dashboard-values.yaml new file mode 100644 index 0000000..7149314 --- /dev/null +++ b/efk/kibana/ci/url_dashboard-values.yaml @@ -0,0 +1,7 @@ +--- +# enable the dashboard init container with dashboard retrieved from an URL + +dashboardImport: + enabled: true + dashboards: + k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json diff --git a/efk/kibana/templates/NOTES.txt b/efk/kibana/templates/NOTES.txt new file mode 100644 index 0000000..f515ecd --- /dev/null +++ b/efk/kibana/templates/NOTES.txt @@ -0,0 +1,18 @@ +To verify that {{ template "kibana.fullname" . }} has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app={{ template "kibana.name" . }}" + +Kibana can be accessed: + + * From outside the cluster, run these commands in the same shell: + {{- if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "kibana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "ClusterIP" .Values.service.type }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "kibana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:5601 to use Kibana" + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 5601:5601 + {{- end }} diff --git a/efk/kibana/templates/_helpers.tpl b/efk/kibana/templates/_helpers.tpl new file mode 100644 index 0000000..c6c30e9 --- /dev/null +++ b/efk/kibana/templates/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "kibana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kibana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kibana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} +{{ default (include "kibana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} +{{- if .Values.serviceAccountName -}} +{{- .Values.serviceAccountName }} +{{- else -}} +{{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/efk/kibana/templates/configmap-dashboardimport.yaml b/efk/kibana/templates/configmap-dashboardimport.yaml new file mode 100644 index 0000000..2155a40 --- /dev/null +++ b/efk/kibana/templates/configmap-dashboardimport.yaml @@ -0,0 +1,67 @@ +{{- if .Values.dashboardImport.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kibana.fullname" . }}-importscript + labels: + app: {{ template "kibana.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + dashboardImport.sh: | + #!/usr/bin/env bash + # + # kibana dashboard import script + # + + cd /kibanadashboards + + echo "Starting Kibana..." 
+ + /usr/local/bin/kibana-docker $@ & + + echo "Waiting up to {{ .Values.dashboardImport.timeout }} seconds for Kibana to get in green overall state..." + + for i in {1..{{ .Values.dashboardImport.timeout }}}; do + curl -s localhost:5601/api/status | python -c 'import sys, json; print json.load(sys.stdin)["status"]["overall"]["state"]' 2> /dev/null | grep green > /dev/null && break || sleep 1 + done + + for DASHBOARD_FILE in *; do + echo -e "Importing ${DASHBOARD_FILE} dashboard..." + + if ! python -c 'import sys, json; print json.load(sys.stdin)' < "${DASHBOARD_FILE}" &> /dev/null ; then + echo "${DASHBOARD_FILE} is not valid JSON, assuming it's an URL..." + TMP_FILE="$(mktemp)" + curl -s $(cat ${DASHBOARD_FILE}) > ${TMP_FILE} + curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @${TMP_FILE} + rm ${TMP_FILE} + else + echo "Valid JSON found in ${DASHBOARD_FILE}, importing..." + curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @./${DASHBOARD_FILE} + fi + + if [ "$?" != "0" ]; then + echo -e "\nImport of ${DASHBOARD_FILE} dashboard failed... Exiting..." + exit 1 + else + echo -e "\nImport of ${DASHBOARD_FILE} dashboard finished :-)" + fi + + done +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kibana.fullname" . }}-dashboards + labels: + app: {{ template "kibana.name" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.dashboardImport.dashboards }} + {{ $key }}: |- +{{ $value | indent 4 }} +{{- end -}} +{{- end -}} diff --git a/efk/kibana/templates/configmap.yaml b/efk/kibana/templates/configmap.yaml new file mode 100644 index 0000000..610da72 --- /dev/null +++ b/efk/kibana/templates/configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kibana.fullname" . }} + labels: + app: {{ template "kibana.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{- range $key, $value := .Values.files }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} +{{- end -}} diff --git a/efk/kibana/templates/deployment.yaml b/efk/kibana/templates/deployment.yaml new file mode 100644 index 0000000..33208ea --- /dev/null +++ b/efk/kibana/templates/deployment.yaml @@ -0,0 +1,238 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + labels: + app: {{ template "kibana.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "kibana.fullname" . }} +{{- if .Values.deployment.annotations }} + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + labels: + app: {{ template "kibana.name" . 
}} + release: "{{ .Release.Name }}" +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "kibana.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} +{{- if or (.Values.initContainers) (.Values.dashboardImport.enabled) (.Values.plugins.enabled) }} + initContainers: +{{- if .Values.initContainers }} +{{- range $key, $value := .Values.initContainers }} + - name: "{{ $key }}" +{{ toYaml $value | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.dashboardImport.enabled }} + - name: {{ .Chart.Name }}-dashboardimport + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: ["/bin/bash"] + args: + - "-c" + - "/tmp/dashboardImport.sh" +{{- if .Values.commandline.args }} +{{ toYaml .Values.commandline.args | indent 10 }} +{{- end }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: {{ template "kibana.fullname" . }}-dashboards + mountPath: "/kibanadashboards" + - name: {{ template "kibana.fullname" . }}-importscript + mountPath: "/tmp/dashboardImport.sh" + subPath: dashboardImport.sh + {{- range $configFile := (keys .Values.files) }} + - name: {{ template "kibana.name" $ }} + mountPath: "/usr/share/kibana/config/{{ $configFile }}" + subPath: {{ $configFile }} + {{- end }} +{{- end }} +{{- if .Values.plugins.enabled}} + - name: {{ .Chart.Name }}-plugins-install + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/bash + - "-c" + - | + set -e + rm -rf plugins/lost+found + plugins=( + {{- range .Values.plugins.values }} + {{ . 
}} + {{- end }} + ) + if {{ .Values.plugins.reset }} + then + for p in $(./bin/kibana-plugin list | cut -d "@" -f1) + do + ./bin/kibana-plugin remove ${p} + done + fi + for i in "${plugins[@]}" + do + IFS=',' read -ra PLUGIN <<< "$i" + pluginInstalledCheck=$(./bin/kibana-plugin list | grep "${PLUGIN[0]}" | cut -d '@' -f1 || true) + pluginVersionCheck=$(./bin/kibana-plugin list | grep "${PLUGIN[0]}" | cut -d '@' -f2 || true) + if [ "${pluginInstalledCheck}" = "${PLUGIN[0]}" ] + then + if [ "${pluginVersionCheck}" != "${PLUGIN[1]}" ] + then + ./bin/kibana-plugin remove "${PLUGIN[0]}" + ./bin/kibana-plugin install "${PLUGIN[2]}" + fi + else + ./bin/kibana-plugin install "${PLUGIN[2]}" + fi + done + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + volumeMounts: + - name: plugins + mountPath: /usr/share/kibana/plugins + {{- range $configFile := (keys .Values.files) }} + - name: {{ template "kibana.name" $ }} + mountPath: "/usr/share/kibana/config/{{ $configFile }}" + subPath: {{ $configFile }} + {{- end }} +{{- if .Values.securityContext.enabled }} + securityContext: + allowPrivilegeEscalation: {{ .Values.securityContext.allowPrivilegeEscalation }} +{{- end }} +{{- end }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.commandline.args }} + args: + - "/bin/bash" + - "/usr/local/bin/kibana-docker" +{{ toYaml .Values.commandline.args | indent 10 }} + {{- end }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} +{{- if (not .Values.authProxyEnabled) }} + ports: + - containerPort: {{ .Values.service.internalPort }} + name: {{ template "kibana.name" . 
}} + protocol: TCP +{{- end }} +{{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: {{ .Values.livenessProbe.path }} + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} +{{- end }} +{{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: {{ .Values.readinessProbe.path }} + port: {{ .Values.service.internalPort }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + volumeMounts: + {{- range $configFile := (keys .Values.files) }} + - name: {{ template "kibana.name" $ }} + mountPath: "/usr/share/kibana/config/{{ $configFile }}" + subPath: {{ $configFile }} + {{- end }} +{{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} +{{- end }} +{{- if .Values.plugins.enabled}} + - name: plugins + mountPath: /usr/share/kibana/plugins +{{- end }} +{{- with .Values.extraContainers }} +{{ tpl . 
$ | indent 6 }} +{{- end }} +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} +{{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + fsGroup: {{ .Values.securityContext.fsGroup }} +{{- end }} + volumes: + - name: {{ template "kibana.name" . }} + configMap: + name: {{ template "kibana.fullname" . }} +{{- if .Values.plugins.enabled}} + - name: plugins + {{- if .Values.persistentVolumeClaim.enabled }} + persistentVolumeClaim: + claimName: {{ template "kibana.fullname" . }} + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} +{{- if .Values.dashboardImport.enabled }} + - name: {{ template "kibana.fullname" . }}-dashboards + configMap: + name: {{ template "kibana.fullname" . }}-dashboards + - name: {{ template "kibana.fullname" . }}-importscript + configMap: + name: {{ template "kibana.fullname" . }}-importscript + defaultMode: 0777 +{{- end }} +{{- range .Values.extraConfigMapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8 }} +{{- end }} diff --git a/efk/kibana/templates/ingress.yaml b/efk/kibana/templates/ingress.yaml new file mode 100644 index 0000000..de14ae9 --- /dev/null +++ b/efk/kibana/templates/ingress.yaml @@ -0,0 +1,33 @@ +{{- if .Values.ingress.enabled -}} +{{- $serviceName := include "kibana.fullname" . 
-}} +{{- $servicePort := .Values.service.externalPort -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + labels: + app: {{ template "kibana.name" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "kibana.fullname" . }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range .Values.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: + - path: /{{ rest $url | join "/" }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/efk/kibana/templates/service.yaml b/efk/kibana/templates/service.yaml new file mode 100644 index 0000000..4416c45 --- /dev/null +++ b/efk/kibana/templates/service.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: {{ template "kibana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.service.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + name: {{ template "kibana.fullname" . }} + {{- with .Values.service.annotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +spec: + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - port: {{ .Values.service.externalPort }} +{{- if not .Values.authProxyEnabled }} + targetPort: {{ .Values.service.internalPort }} +{{- else }} + targetPort: {{ .Values.service.authProxyPort }} +{{- end }} + protocol: TCP +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} +{{ end }} +{{- if .Values.service.portName }} + name: {{ .Values.service.portName }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + selector: + app: {{ template "kibana.name" . }} + release: {{ .Release.Name }} +{{- range $key, $value := .Values.service.selector }} + {{ $key }}: {{ $value | quote }} +{{- end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} diff --git a/efk/kibana/templates/serviceaccount.yaml b/efk/kibana/templates/serviceaccount.yaml new file mode 100644 index 0000000..948390a --- /dev/null +++ b/efk/kibana/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kibana.serviceAccountName" . }} + labels: + app: {{ template "kibana.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +{{- end -}} diff --git a/efk/kibana/templates/tests/test-configmap.yaml b/efk/kibana/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..912755e --- /dev/null +++ b/efk/kibana/templates/tests/test-configmap.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kibana.fullname" . }}-test + labels: + app: {{ template "kibana.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" +data: + run.sh: |- + @test "Test Status" { + {{- if .Values.service.selector }} + skip "Can't guarentee pod names with selector" + {{- else }} + {{- $port := .Values.service.externalPort }} + url="http://{{ template "kibana.fullname" . }}{{ if $port }}:{{ $port }}{{ end }}/api{{ .Values.livenessProbe.path }}" + + # retry for 1 minute + run curl -s -o /dev/null -I -w "%{http_code}" --retry 30 --retry-delay 2 $url + + code=$(curl -s -o /dev/null -I -w "%{http_code}" $url) + body=$(curl $url) + if [ "$code" == "503" ] + then + skip "Kibana Unavailable (503), can't get status - see pod logs: $body" + fi + + result=$(echo $body | jq -cr '.status.statuses[]') + [ "$result" != "" ] + + result=$(echo $body | jq -cr '.status.statuses[] | select(.state != "green")') + [ "$result" == "" ] + {{- end }} + } diff --git a/efk/kibana/templates/tests/test.yaml b/efk/kibana/templates/tests/test.yaml new file mode 100644 index 0000000..8a518fd --- /dev/null +++ b/efk/kibana/templates/tests/test.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "kibana.fullname" . }}-test + labels: + app: {{ template "kibana.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + heritage: "{{ .Release.Service }}" + release: "{{ .Release.Name }}" + annotations: + "helm.sh/hook": test-success +spec: + initContainers: + - name: test-framework + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + command: + - "bash" + - "-c" + - | + set -ex + # copy bats to tools dir + cp -R /usr/local/libexec/ /tools/bats/ + volumeMounts: + - mountPath: /tools + name: tools + containers: + - name: {{ .Release.Name }}-test + image: "dwdraju/alpine-curl-jq" + command: ["/tools/bats/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + - mountPath: /tools + name: tools + volumes: + - name: tests + configMap: + name: {{ template "kibana.fullname" . }}-test + - name: tools + emptyDir: {} + restartPolicy: Never diff --git a/efk/kibana/templates/volume-claim.yaml b/efk/kibana/templates/volume-claim.yaml new file mode 100644 index 0000000..2939712 --- /dev/null +++ b/efk/kibana/templates/volume-claim.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.plugins.enabled .Values.persistentVolumeClaim.enabled -}} +{{- if not .Values.persistentVolumeClaim.existingClaim -}} +apiVersion: "v1" +kind: "PersistentVolumeClaim" +metadata: +{{- if .Values.persistentVolumeClaim.annotations }} + annotations: +{{ toYaml .Values.persistentVolumeClaim.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kibana.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + component: "{{ .Values.persistentVolumeClaim.name }}" + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "kibana.fullname" . 
}} +spec: + accessModes: +{{ toYaml .Values.persistentVolumeClaim.accessModes | indent 4 }} +{{- if .Values.persistentVolumeClaim.storageClass }} +{{- if (eq "-" .Values.persistentVolumeClaim.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.persistentVolumeClaim.storageClass }}" +{{- end }} +{{- end }} + resources: + requests: + storage: "{{ .Values.persistentVolumeClaim.size }}" +{{- end -}} +{{- end -}} diff --git a/efk/kibana/values.yaml b/efk/kibana/values.yaml new file mode 100644 index 0000000..c7bc81f --- /dev/null +++ b/efk/kibana/values.yaml @@ -0,0 +1,228 @@ +image: + repository: "docker.elastic.co/kibana/kibana-oss" + tag: "6.7.0" + pullPolicy: "IfNotPresent" + +testFramework: + image: "dduportal/bats" + tag: "0.4.0" + +commandline: + args: [] + +env: + CLUSTER_NAME: "nynja" + # All Kibana configuration options are adjustable via env vars. + # To adjust a config option to an env var uppercase + replace `.` with `_` + # Ref: https://www.elastic.co/guide/en/kibana/current/settings.html + # + # ELASTICSEARCH_URL: http://elasticsearch-client:9200 + # SERVER_PORT: 5601 + # LOGGING_VERBOSE: "true" + # SERVER_DEFAULTROUTE: "/app/kibana" + +files: + kibana.yml: + ## Default Kibana configuration from kibana-docker. 
+ server.name: kibana + server.host: "0" + elasticsearch.url: http://elasticsearch-client:9200 + + ## Custom config properties below + ## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html + # server.port: 5601 + # logging.verbose: "true" + # server.defaultRoute: "/app/kibana" + +deployment: + annotations: {} + +service: + type: ClusterIP + # clusterIP: None + # portName: kibana-svc + externalPort: 80 + internalPort: 5601 + # authProxyPort: 5602 To be used with authProxyEnabled and a proxy extraContainer + ## External IP addresses of service + ## Default: nil + ## + # externalIPs: + # - 192.168.0.1 + # + ## LoadBalancer IP if service.type is LoadBalancer + ## Default: nil + ## + # loadBalancerIP: 10.2.2.2 + annotations: {} + # Annotation example: setup ssl with aws cert when service.type is LoadBalancer + # service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT + labels: {} + ## Label example: show service URL in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + ## Limit load balancer source ips to list of CIDRs (where available) + # loadBalancerSourceRanges: [] + selector: {} + +ingress: + enabled: false + # hosts: + # - kibana.localhost.localdomain + # - localhost.localdomain/kibana + # annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # tls: + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +serviceAccount: + # Specifies whether a service account should be created + create: false + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + # If set and create is false, the service account must be existing + name: + +livenessProbe: + enabled: false + path: /status + initialDelaySeconds: 30 + timeoutSeconds: 10 + +readinessProbe: + enabled: true + path: /status + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 10 + successThreshold: 5 + +# Enable an authproxy. Specify container in extraContainers +authProxyEnabled: false + +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - --resource=uri=/* +# - --discovery-url=https://discovery-url +# - --client-id=client +# - --client-secret=secret +# - --listen=0.0.0.0:5602 +# - --upstream-url=http://127.0.0.1:5601 +# ports: +# - name: web +# containerPort: 9090 + +extraVolumeMounts: [] + +extraVolumes: [] + +resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + +priorityClassName: "" + +# Affinity for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} + +# Tolerations for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +podAnnotations: {} +replicaCount: 1 +revisionHistoryLimit: 3 + +# Custom labels for pod assignment +podLabels: {} + +# To export a dashboard from a running Kibana 6.3.x use: +# curl --user : -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard= > my-dashboard.json +# A dashboard is defined by a name and a string with the json payload or the download url +dashboardImport: + enabled: false + timeout: 60 + xpackauth: + enabled: false + username: myuser + password: mypass + dashboards: {} + # k8s: 
https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json + +# List of plugins to install using initContainer +# NOTE : We notice that lower resource constraints given to the chart + plugins are likely not going to work well. +plugins: + # set to true to enable plugins installation + enabled: false + # set to true to remove all kibana plugins before installation + reset: false + # Use to add/upgrade plugin + values: + # - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip + # - logtrail,0.1.31,https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-6.6.0-0.1.31.zip + # - other_plugin + +persistentVolumeClaim: + # set to true to use pvc + enabled: false + # set to true to use you own pvc + existingClaim: false + annotations: {} + + accessModes: + - ReadWriteOnce + size: "5Gi" + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + +# default security context +securityContext: + enabled: false + allowPrivilegeEscalation: false + runAsUser: 1000 + fsGroup: 2000 + +extraConfigMapMounts: [] + # - name: logtrail-configs + # configMap: kibana-logtrail + # mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json + # subPath: logtrail.json + +# Add your own init container or uncomment and modify the given example. +initContainers: {} + ## Don't start kibana till Elasticsearch is reachable. 
+ ## Ensure that it is available at http://elasticsearch:9200 + ## + # es-check: # <- will be used as container name + # image: "appropriate/curl:latest" + # imagePullPolicy: "IfNotPresent" + # command: + # - "/bin/sh" + # - "-c" + # - | + # is_down=true + # while "$is_down"; do + # if curl -sSf --fail-early --connect-timeout 5 http://elasticsearch:9200; then + # is_down=false + # else + # sleep 5 + # fi + # done diff --git a/efk/templates/NOTES.txt b/efk/templates/NOTES.txt deleted file mode 100644 index 8b13789..0000000 --- a/efk/templates/NOTES.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/efk/templates/es-curator-config.yaml b/efk/templates/es-curator-config.yaml deleted file mode 100644 index 783be5f..0000000 --- a/efk/templates/es-curator-config.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "fullname" . }}-curator-config - labels: - app: {{ template "fullname" . }} -data: - action_file.yml: |- - --- - # Remember, leave a key empty if there is no value. None will be a string, - # not a Python "NoneType" - # - # Also remember that all examples have 'disable_action' set to True. If you - # want to use this action as a template, be sure to set this to False after - # copying it. - actions: - 1: - action: delete_indices - description: "Clean up ES by deleting old indices" - options: - timeout_override: - continue_if_exception: True - ignore_empty_list: True - disable_action: False - filters: - - filtertype: age - source: name - direction: older - timestring: '{{ .Values.curator.age.timestring }}' - unit: {{ .Values.curator.age.unit }} - unit_count: {{ .Values.curator.age.unit_count }} - field: - stats_result: - epoch: - exclude: False - config.yml: |- - --- - # Remember, leave a key empty if there is no value. 
None will be a string, - # not a Python "NoneType" - client: - hosts: - - elasticsearch - port: 9200 - url_prefix: - use_ssl: False - certificate: - client_cert: - client_key: - ssl_no_validate: False - http_auth: - timeout: 30 - master_only: False - logging: - loglevel: INFO - logfile: - logformat: default - blacklist: ['elasticsearch', 'urllib3'] diff --git a/efk/templates/es-curator.yaml b/efk/templates/es-curator.yaml deleted file mode 100644 index 5e20d70..0000000 --- a/efk/templates/es-curator.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: {{ template "fullname" . }}-curator - labels: - app: {{ template "fullname" . }} -spec: - schedule: {{ .Values.curator.schedule }} - successfulJobsHistoryLimit: {{ .Values.curator.successfulJobsHistoryLimit }} - failedJobsHistoryLimit: {{ .Values.curator.failedJobsHistoryLimit }} - concurrencyPolicy: Forbid - startingDeadlineSeconds: {{ .Values.curator.startingDeadlineSeconds }} - jobTemplate: - spec: - template: - spec: - containers: - - name: curator - image: "{{ .Values.image.curator.repository }}:{{ .Values.image.curator.tag }}" - imagePullPolicy: {{ .Values.image.curator.pullPolicy }} - args: ["--config", "/etc/config/config.yml", "/etc/config/action_file.yml"] - volumeMounts: - - name: config-volume - mountPath: /etc/config - volumes: - - name: config-volume - configMap: - name: {{ template "fullname" . }}-curator-config - restartPolicy: OnFailure diff --git a/efk/templates/es-data-stateful.yaml b/efk/templates/es-data-stateful.yaml deleted file mode 100644 index 2ec82dd..0000000 --- a/efk/templates/es-data-stateful.yaml +++ /dev/null @@ -1,98 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "fullname" . }}-data - labels: - component: elasticsearch - role: data -spec: - selector: - matchLabels: - component: elasticsearch - role: data - serviceName: {{ template "fullname" . 
}}-data - replicas: {{ .Values.data.replicas}} - template: - metadata: - labels: - component: elasticsearch - role: data - spec: - initContainers: - - name: init-sysctl - image: "{{ .Values.image.init.repository }}:{{ .Values.image.init.tag }}" - command: - - sysctl - - -w - - vm.max_map_count=262144 - securityContext: - privileged: true - containers: - - name: es-data - image: "{{ .Values.image.es.repository}}:{{ .Values.image.es.tag }}" - securityContext: - privileged: false - capabilities: - add: - - IPC_LOCK - - SYS_RESOURCE - ulimits: - memlock: - soft: -1 - hard: -1 - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: DISCOVERY_SERVICE - value: {{ template "fullname" . }}-discovery - - name: MEMORY_LOCK - value: "{{ .Values.data.env.MEMORY_LOCK }}" - - name: CLUSTER_NAME - value: "{{ .Values.global.env.CLUSTER_NAME }}" - - name: NODE_MASTER - value: "{{ .Values.data.env.NODE_MASTER }}" - - name: NODE_INGEST - value: "{{ .Values.data.env.NODE_INGEST }}" - - name: HTTP_ENABLE - value: "{{ .Values.data.env.HTTP_ENABLE }}" - - name: ES_JAVA_OPTS - value: "-Xms{{ .Values.data.heapMemory }} -Xmx{{ .Values.data.heapMemory }}" - - name: PROCESSORS - valueFrom: - resourceFieldRef: - resource: limits.cpu - resources: -{{ toYaml .Values.data.resources | indent 10 }} - ports: - - containerPort: 9200 - name: http - - containerPort: 9300 - name: transport - livenessProbe: - tcpSocket: - port: transport - initialDelaySeconds: 120 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /_cluster/health - port: http - initialDelaySeconds: 20 - timeoutSeconds: 5 - volumeMounts: - - name: storage - mountPath: /data - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: [ "{{ .Values.data.storage.access }}" ] - resources: - requests: - storage: {{ .Values.data.storage.size }} diff --git a/efk/templates/es-data-svc.yaml 
b/efk/templates/es-data-svc.yaml deleted file mode 100644 index 020b01d..0000000 --- a/efk/templates/es-data-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }}-data - labels: - component: elasticsearch - role: data -spec: - ports: - - port: 9300 - name: transport - clusterIP: None - selector: - component: elasticsearch - role: data diff --git a/efk/templates/es-discovery-svc.yaml b/efk/templates/es-discovery-svc.yaml deleted file mode 100644 index 225c6bf..0000000 --- a/efk/templates/es-discovery-svc.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }}-discovery - labels: - component: elasticsearch - role: master -spec: - selector: - component: elasticsearch - role: master - ports: - - name: transport - port: 9300 - protocol: TCP - clusterIP: None diff --git a/efk/templates/es-ingest-svc.yaml b/efk/templates/es-ingest-svc.yaml deleted file mode 100644 index 5dd1683..0000000 --- a/efk/templates/es-ingest-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . }}-ingest - labels: - component: elasticsearch - role: ingest -spec: - selector: - component: elasticsearch - role: ingest - ports: - - name: http - port: 9200 -#type: LoadBalancer diff --git a/efk/templates/es-ingest.yaml b/efk/templates/es-ingest.yaml deleted file mode 100644 index a71257c..0000000 --- a/efk/templates/es-ingest.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: {{ template "fullname" . 
}}-ingest - labels: - component: elasticsearch - role: ingest -spec: - replicas: {{ .Values.ingest.replicas }} - template: - metadata: - labels: - component: elasticsearch - role: ingest - spec: - initContainers: - - name: init-sysctl - image: "{{ .Values.image.init.repository }}:{{ .Values.image.init.tag }}" - command: - - sysctl - - -w - - vm.max_map_count=262144 - securityContext: - privileged: true - containers: - - name: {{ template "fullname" . }}-ingest - image: "{{ .Values.image.es.repository}}:{{ .Values.image.es.tag }}" - securityContext: - privileged: false - capabilities: - add: - - IPC_LOCK - - SYS_RESOURCE - ulimits: - memlock: - soft: -1 - hard: -1 - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: DISCOVERY_SERVICE - value: {{ template "fullname" . }}-discovery - - name: MEMORY_LOCK - value: "{{ .Values.ingest.env.MEMORY_LOCK }}" - - name: CLUSTER_NAME - value: "{{ .Values.global.env.CLUSTER_NAME }}" - - name: NODE_MASTER - value: "{{ .Values.ingest.env.NODE_MASTER }}" - - name: NODE_DATA - value: "{{ .Values.ingest.env.NODE_DATA }}" - - name: HTTP_ENABLE - value: "{{ .Values.ingest.env.HTTP_ENABLE }}" - - name: ES_JAVA_OPTS - value: "-Xms{{ .Values.ingest.heapMemory }} -Xmx{{ .Values.ingest.heapMemory }}" - - name: NETWORK_HOST - value: _site_,_lo_ - - name: PROCESSORS - valueFrom: - resourceFieldRef: - resource: limits.cpu - resources: -{{ toYaml .Values.ingest.resources | indent 10 }} - ports: - - containerPort: 9200 - name: http - - containerPort: 9300 - name: transport - livenessProbe: - tcpSocket: - port: transport - initialDelaySeconds: 120 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /_cluster/health - port: http - initialDelaySeconds: 20 - timeoutSeconds: 5 - volumeMounts: - - name: storage - mountPath: /data - volumes: - - emptyDir: - medium: "" - name: storage diff --git a/efk/templates/es-master-stateful.yaml 
b/efk/templates/es-master-stateful.yaml deleted file mode 100644 index 6af7fb6..0000000 --- a/efk/templates/es-master-stateful.yaml +++ /dev/null @@ -1,94 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "fullname" . }}-master - labels: - component: elasticsearch - role: master -spec: - selector: - matchLabels: - component: elasticsearch - role: master - serviceName: {{ template "fullname" . }}-master - replicas: {{ .Values.master.replicas }} - template: - metadata: - labels: - component: elasticsearch - role: master - spec: - initContainers: - - name: init-sysctl - image: "{{ .Values.image.init.repository }}:{{ .Values.image.init.tag }}" - command: - - sysctl - - -w - - vm.max_map_count=262144 - securityContext: - privileged: true - containers: - - name: {{ template "fullname" . }}-master - image: "{{ .Values.image.es.repository}}:{{ .Values.image.es.tag }}" - securityContext: - privileged: false - capabilities: - add: - - IPC_LOCK - - SYS_RESOURCE - ulimits: - memlock: - soft: -1 - hard: -1 - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: DISCOVERY_SERVICE - value: {{ template "fullname" . 
}}-discovery - - name: MEMORY_LOCK - value: "{{ .Values.master.env.MEMORY_LOCK }}" - - name: CLUSTER_NAME - value: "{{ .Values.global.env.CLUSTER_NAME }}" - - name: NUMBER_OF_MASTERS - value: "{{ .Values.master.env.NUMBER_OF_MASTERS }}" - - name: NODE_MASTER - value: "{{ .Values.master.env.NODE_MASTER }}" - - name: NODE_INGEST - value: "{{ .Values.master.env.NODE_INGEST }}" - - name: NODE_DATA - value: "{{ .Values.master.env.NODE_DATA }}" - - name: HTTP_ENABLE - value: "{{ .Values.master.env.HTTP_ENABLE }}" - - name: ES_JAVA_OPTS - value: "-Xms{{ .Values.master.heapMemory }} -Xmx{{ .Values.master.heapMemory }}" - - name: PROCESSORS - valueFrom: - resourceFieldRef: - resource: limits.cpu - resources: -{{ toYaml .Values.master.resources | indent 10 }} - ports: - - containerPort: 9300 - name: transport - livenessProbe: - tcpSocket: - port: transport - initialDelaySeconds: 120 - periodSeconds: 10 - volumeMounts: - - name: storage - mountPath: /data - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: [ "{{ .Values.master.storage.access }}" ] - resources: - requests: - storage: {{ .Values.master.storage.size }} diff --git a/efk/templates/es-master-svc.yaml b/efk/templates/es-master-svc.yaml deleted file mode 100644 index 236b19c..0000000 --- a/efk/templates/es-master-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ template "fullname" . 
}}-master - labels: - component: elasticsearch - role: master -spec: - ports: - - port: 9300 - name: transport - clusterIP: None - selector: - component: elasticsearch - role: master diff --git a/efk/templates/es-svc.yaml b/efk/templates/es-svc.yaml deleted file mode 100644 index d765c01..0000000 --- a/efk/templates/es-svc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: elasticsearch - labels: - component: elasticsearch - role: data -spec: - selector: - component: elasticsearch - role: data - ports: - - name: http - port: 9200 -#type: LoadBalancer diff --git a/efk/templates/fluentbit-configmap.yaml b/efk/templates/fluentbit-configmap.yaml deleted file mode 100644 index 955fc20..0000000 --- a/efk/templates/fluentbit-configmap.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: fluent-bit-config - namespace: logging - labels: - k8s-app: fluent-bit -data: - # Configuration files: server, input, filters and output - # ====================================================== - fluent-bit.conf: | - [SERVICE] - Flush 1 - Log_Level info - Daemon off - Retry_Limit 1 - Parsers_File parsers.conf - HTTP_Server On - HTTP_Listen 0.0.0.0 - HTTP_Port 2020 - @INCLUDE input-kubernetes.conf - @INCLUDE filter-kubernetes.conf - @INCLUDE output-elasticsearch.conf - input-kubernetes.conf: | - [INPUT] - Name tail - Tag kube.* - Path /var/log/containers/*.log - Parser docker - Tag_Regex (?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?[^_]+)_(?.+)- - Mem_Buf_Limit 5MB - Skip_Long_Lines On - Refresh_Interval 5 - filter-kubernetes.conf: | - [FILTER] - Name kubernetes - Match * - Kube_URL https://kubernetes.default.svc.cluster.local:443 - Merge_Log On - K8S-Logging.Parser On - K8S-Logging.Exclude On - output-elasticsearch.conf: | - [OUTPUT] - Name es - Match * - Host ${FLUENT_ELASTICSEARCH_HOST} - Port ${FLUENT_ELASTICSEARCH_PORT} - Logstash_Format On - Retry_Limit False - parsers.conf: | - [PARSER] - 
Name json - Format json - Time_Key time - [PARSER] - Name docker - Format json - Time_Key time - # Command | Decoder | Field | Optional Action - # =============|==================|================= - Decode_Field_As escaped log diff --git a/efk/templates/fluentbit-ds.yaml b/efk/templates/fluentbit-ds.yaml deleted file mode 100644 index f38b3ad..0000000 --- a/efk/templates/fluentbit-ds.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: fluent-bit - namespace: logging - labels: - k8s-app: fluent-bit-logging - version: v1 - kubernetes.io/cluster-service: "true" -spec: - template: - metadata: - labels: - k8s-app: fluent-bit-logging - version: v1 - kubernetes.io/cluster-service: "true" - spec: - initContainers: - - name: init-sysctl - image: "{{ .Values.image.init.repository }}:{{ .Values.image.init.tag }}" - command: - - sysctl - - -w - - vm.max_map_count=262144 - securityContext: - privileged: true - containers: - - name: fluent-bit - image: {{ .Values.image.fluentbit.repository }}:{{ .Values.image.fluentbit.tag }} - imagePullPolicy: Always - ports: - - containerPort: 2020 - env: - - name: FLUENT_ELASTICSEARCH_HOST - value: "elasticsearch.logging.svc.cluster.local" - - name: FLUENT_ELASTICSEARCH_PORT - value: "9200" - volumeMounts: - - name: varlog - mountPath: /var/log - - name: varlibdockercontainers - mountPath: /var/lib/docker/containers - readOnly: true - - name: fluent-bit-config - mountPath: /fluent-bit/etc/ - terminationGracePeriodSeconds: 10 - volumes: - - name: varlog - hostPath: - path: /var/log - - name: varlibdockercontainers - hostPath: - path: /var/lib/docker/containers - - name: fluent-bit-config - configMap: - name: fluent-bit-config - serviceAccountName: fluent-bit - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule diff --git a/efk/templates/fluentbit-roles.yaml b/efk/templates/fluentbit-roles.yaml deleted file mode 100644 index 5e0b69a..0000000 --- 
a/efk/templates/fluentbit-roles.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: fluent-bit - namespace: logging ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: fluent-bit-read -rules: -- apiGroups: [""] - resources: - - namespaces - - pods - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: fluent-bit-read -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: fluent-bit-read -subjects: -- kind: ServiceAccount - name: fluent-bit - namespace: logging diff --git a/efk/templates/kibana-svc.yaml b/efk/templates/kibana-svc.yaml deleted file mode 100644 index e60e262..0000000 --- a/efk/templates/kibana-svc.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kibana - labels: - component: kibana -spec: - selector: - component: kibana - ports: - - name: http - port: 80 - targetPort: http diff --git a/efk/templates/kibana.yaml b/efk/templates/kibana.yaml deleted file mode 100644 index f695041..0000000 --- a/efk/templates/kibana.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: {{ template "fullname" . 
}}-kibana - labels: - component: kibana -spec: - replicas: 1 - selector: - matchLabels: - component: kibana - template: - metadata: - labels: - component: kibana - spec: - containers: - - name: kibana - image: "{{ .Values.image.kibana.repository }}:{{ .Values.image.kibana.tag }}" - env: - - name: CLUSTER_NAME - value: {{ .Values.global.env.CLUSTER_NAME }} - - resources: - limits: - cpu: 1000m - requests: - cpu: 100m - ports: - - containerPort: 5601 - name: http - readinessProbe: - httpGet: - path: /api/status - port: http - initialDelaySeconds: 20 - timeoutSeconds: 5 diff --git a/efk/values.yaml b/efk/values.yaml deleted file mode 100644 index f16fceb..0000000 --- a/efk/values.yaml +++ /dev/null @@ -1,168 +0,0 @@ -image: - curator: - repository: bobrik/curator - tag: latest - pullPolicy: IfNotPresent - fluentbit: - repository: fluent/fluent-bit - tag: 0.14.7 - es: - repository: ivanovua/elastic - tag: 6.4.2 - kibana: - repository: docker.elastic.co/kibana/kibana-oss - tag: 6.3.2 - init: - repository: busybox - tag: 1.27.2 - -global: - env: - CLUSTER_NAME: "nynja" - - -master: - replicas: 3 - heapMemory: 256m - resources: - requests: - cpu: 0.25 - limits: - cpu: 1 - env: - NUMBER_OF_MASTERS: "2" - NODE_MASTER: "true" - NODE_INGEST: "false" - NODE_DATA: "false" - HTTP_ENABLE: "false" - MEMORY_LOCK: "true" - storage: - access: "ReadWriteOnce" - size: 5Gi - -ingest: - replicas: 3 - heapMemory: 1024m - resources: - requests: - cpu: 0.25 - limits: - cpu: 1 - env: - NODE_MASTER: "false" - NODE_DATA: "false" - HTTP_ENABLE: "true" - MEMORY_LOCK: "true" -data: - replicas: 3 - heapMemory: 2048m - resources: - requests: - cpu: 0.5 - limits: - cpu: 1 - env: - NODE_MASTER: "false" - NODE_INGEST: "false" - HTTP_ENABLE: "true" - MEMORY_LOCK: "true" - storage: - access: "ReadWriteOnce" - size: 25Gi - - -curator: - schedule: "0 1 * * *" - successfulJobsHistoryLimit: 1 - failedJobsHistoryLimit: 3 - startingDeadlineSeconds: 120 - age: - timestring: "%Y.%m.%d" - unit: "days" - 
unit_count: 7 - -elasticsearchexporter: - ## number of exporter instances - ## - replicaCount: 1 - - ## restart policy for all containers - ## - restartPolicy: Always - - image: - repository: justwatch/elasticsearch_exporter - tag: 1.0.2 - pullPolicy: IfNotPresent - - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 100m - memory: 128Mi - - priorityClassName: "" - - nodeSelector: {} - - tolerations: {} - - podAnnotations: {} - - service: - type: ClusterIP - httpPort: 9108 - annotations: - nynja.biz/scrape: "true" - nynja.biz/scrape_port: "9108" - nynja.biz/env: "dev" - nynja.biz/probe: "efkexporter" - - es: - ## Address (host and port) of the Elasticsearch node we should connect to. - ## This could be a local node (localhost:9200, for instance), or the address - ## of a remote Elasticsearch server. When basic auth is needed, - ## specify as: ://:@:. e.g., http://admin:pass@localhost:9200. - ## - uri: http://elasticsearch:9200 - - ## If true, query stats for all nodes in the cluster, rather than just the - ## node we connect to. - ## - all: true - - ## If true, query stats for all indices in the cluster. - ## - indices: true - - ## Timeout for trying to get stats from Elasticsearch. (ex: 20s) - ## - timeout: 30s - - ssl: - ## If true, a secure connection to ES cluster is used (requires SSL certs below) - ## - enabled: false - - ca: - - ## PEM that contains trusted CAs used for setting up secure Elasticsearch connection - ## - # pem: - - client: - - ## PEM that contains the client cert to connect to Elasticsearch. - ## - # pem: - - ## Private key for client auth when connecting to Elasticsearch - ## - # key: - - web: - ## Path under which to expose metrics. 
- ## - path: /metrics -- GitLab From c923498714d71131150a90626635436d21e05e4a Mon Sep 17 00:00:00 2001 From: Bogdan Alov Date: Wed, 15 May 2019 17:20:52 +0300 Subject: [PATCH 2/4] removing dev values for deploy --- efk/elasticsearch-exporter/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/efk/elasticsearch-exporter/values.yaml b/efk/elasticsearch-exporter/values.yaml index 3b796ec..f1bc4de 100644 --- a/efk/elasticsearch-exporter/values.yaml +++ b/efk/elasticsearch-exporter/values.yaml @@ -33,7 +33,7 @@ service: annotations: nynja.biz/scrape: "true" nynja.biz/scrape_port: "9108" - nynja.biz/env: "dev" + nynja.biz/env: "" nynja.biz/probe: "efkexporter" es: -- GitLab From 040084d19845e354b93e7c53af55931ae7da8b57 Mon Sep 17 00:00:00 2001 From: Bogdan Alov Date: Thu, 27 Jun 2019 11:27:34 +0300 Subject: [PATCH 3/4] updating updating fluent-bit to 1.1.3; excluding kubernetes annotations --- efk/fluent-bit/Chart.yaml | 13 +++-- efk/fluent-bit/OWNERS | 4 ++ efk/fluent-bit/README.md | 5 +- efk/fluent-bit/release.txt | Bin 0 -> 25430 bytes efk/fluent-bit/templates/config.yaml | 4 +- efk/fluent-bit/templates/daemonset.yaml | 3 + efk/fluent-bit/templates/service.yaml | 5 ++ .../templates/tests/test-configmap.yaml | 9 ++- efk/fluent-bit/templates/tests/test.yaml | 2 +- efk/fluent-bit/values.yaml | 53 ++++++++++++------ 10 files changed, 71 insertions(+), 27 deletions(-) create mode 100644 efk/fluent-bit/release.txt diff --git a/efk/fluent-bit/Chart.yaml b/efk/fluent-bit/Chart.yaml index a0bca37..8cf7b7d 100644 --- a/efk/fluent-bit/Chart.yaml +++ b/efk/fluent-bit/Chart.yaml @@ -1,6 +1,7 @@ +apiVersion: v1 name: fluent-bit -version: 1.9.2 -appVersion: 1.0.6 +version: 2.0.6 +appVersion: 1.1.3 description: Fast and Lightweight Log/Data Forwarder for Linux, BSD and OSX keywords: - logging @@ -8,11 +9,13 @@ keywords: - fluent - fluentd sources: -- http://fluentbit.io -icon: http://fluentbit.io/assets/img/logo1-default.png -home: http://fluentbit.io +- 
https://fluentbit.io +icon: https://fluentbit.io/assets/img/logo1-default.png +home: https://fluentbit.io maintainers: - name: kfox1111 email: Kevin.Fox@pnnl.gov - name: edsiper email: eduardo@treasure-data.com +- name: hectorj2f + email: hfernandez@mesosphere.com diff --git a/efk/fluent-bit/OWNERS b/efk/fluent-bit/OWNERS index 3a9219e..fa1f012 100644 --- a/efk/fluent-bit/OWNERS +++ b/efk/fluent-bit/OWNERS @@ -1,6 +1,10 @@ approvers: - kfox1111 - edsiper +- hectorj2f +- Towmeykaw reviewers: - kfox1111 - edsiper +- hectorj2f +- Towmeykaw diff --git a/efk/fluent-bit/README.md b/efk/fluent-bit/README.md index 5608ffd..102c8f0 100644 --- a/efk/fluent-bit/README.md +++ b/efk/fluent-bit/README.md @@ -95,9 +95,10 @@ The following table lists the configurable parameters of the Fluent-Bit chart an | `filter.kubeCAFile` | Optional custom configmaps | `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` | | `filter.kubeTokenFile` | Optional custom configmaps | `/var/run/secrets/kubernetes.io/serviceaccount/token` | | `filter.kubeTag` | Optional top-level tag for matching in filter | `kube` | +| `filter.kubeTagPrefix` | Optional tag prefix used by Tail | `kube.var.log.containers.` | | `filter.mergeJSONLog` | If the log field content is a JSON string map, append the map fields as part of the log structure | `true` | | `image.fluent_bit.repository` | Image | `fluent/fluent-bit` | -| `image.fluent_bit.tag` | Image tag | `1.0.6` | +| `image.fluent_bit.tag` | Image tag | `1.1.3` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `nameOverride` | Override name of app | `nil` | | `fullnameOverride` | Override full name of app | `nil` | @@ -117,11 +118,13 @@ The following table lists the configurable parameters of the Fluent-Bit chart an | `resources` | Pod resource requests & limits | `{}` | | `hostNetwork` | Use host's network | `false` | | `dnsPolicy` | Specifies the dnsPolicy to use | `ClusterFirst` | +| `priorityClassName` | Specifies the priorityClassName to use | 
`NULL` | | `tolerations` | Optional daemonset tolerations | `NULL` | | `nodeSelector` | Node labels for fluent-bit pod assignment | `NULL` | | `affinity` | Expressions for affinity | `NULL` | | `metrics.enabled` | Specifies whether a service for metrics should be exposed | `false` | | `metrics.service.annotations` | Optional metrics service annotations | `NULL` | +| `metrics.service.labels` | Additional labels for the fluent-bit metrics service definition, specified as a map. | None | | `metrics.service.port` | Port on where metrics should be exposed | `2020` | | `metrics.service.type` | Service type for metrics | `ClusterIP` | | `trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left after pod restarts but requires access to a `hostPath` | `false` | diff --git a/efk/fluent-bit/release.txt b/efk/fluent-bit/release.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a117b4347add5b6044ba213ed50bca0722dd09d GIT binary patch literal 25430 zcmeI5eNP-ol85W>lkPk8F(-{@bAl7zt#Ph&2O6<&CTX|b4U08 z+WbrUw()E=_nXb;Y5Gg61+`gjzG;@!&b~JUEkjzkfV8PLPn$>0mcHxFTEcllt-(au zs=g~Kz2DqRW!ue5(LHK@pL#M%SVpQ3{OiK-t?s^7`Lcf95Y523+dLGE&1O46I8Yzo zi34xc!$tAxRk|)u=)9F$JV_WgRle0cX*Tp(6@Ab!616M(^+dn61P}OsZvL9ChHJff ztol!ct*`!WbD$5L2lt0WmBVLStpv_{Ha?M(J!aN{r`j`t?e! z&s2L?Bem$qlkFOm^P?~V9#q))vzQu9L4`)blJr0~~N z{+W76t;I~OI#sV1XjqxWU=qweXMm(sUg)jOGJO>TX30sC!|gEn-^;n41c zf_iz@bLcdEDRJyH??uz8s6CiaxuBkPd-FjwzDT&xjOVEj_RPosKOg-z9lOV#gPBoQZE(gEt!WlFP$+ zhdyG#b~Osd{;VPR-vXxQ6{@uW?Fq!iNs&D?ZDfK zmoSQQDIUc&8m`cref8l?Ug@nkjfXJZx|{g!R``KRPF3qXwV>DUQ*WOsk^lx4{Z%Tr znBi`+CfMaedA!}EBY1wJgxcqcXZHn(JA6>|VYEC_iTjcj)q5kD*iEb`JU)}8tS2k! 
zV*>6INsC7(U#P|p&G)i^BemPt2cK%KEt>gB7(Pheo~qnDKi)LcbWK>kQft#`%F2F@ zf4-^m+)oO339dhC0)UN9qc2{u`G6V+G@b{_ZIKh6S5is6i*(n>V5rw=@aCu803Dqh=5{B)Wf+P2CL69xXHUSmV8YjMSru+rlt z^@LaiJjfz)iS)sTGd=A>b&dQ~TsRk97AdxAd7@}{HSxgx7cw&LFH`fB1(TY?S}oFm2E@C|v476Lq2Lz6bIi*y2iU6{dw zy>RJtU2=c0rW)t6#pn}01Ko`~rdf~J^taBXeFrM~MT*1;rUTu75e77J;nNERVSRSQf+MKjdIvkyBk$@+hGrM78`yHjoL++N|k{?!Y1Ulr~q1{+H6f$aXqEepFPu zCMaaG?#Vvl9lD-vQM`$E+GC-EOT)=N?dmY1|GGMq7^#;~PUT~WG_#zd%bq_q3CwXa zCCD{%T4L0w6U?&Avd5>$;GX2`T^cL%K*txHq}04ajy=d2+?HQ>bx{kfKMMl*YN8qrv3rI-jkokAYF*3% zxnJGaXk}>3`{0dRIE&rz6!_4wsC=dR{qY;|1RTZpn?B|E(m5RCO03Dg`s8+;_;Z}( zhf(nsIYJ;0{&;IW8RKg|u`}9O zGxEfiMR7QkKpXkDdOu=K^4*NKd+<8uoarw;GLeT_SD!mzPq)=`Vs^^P`bZnkvBGYB zYZ!B@A@R_H`T*Qx$qJe`*7vPGuZ0~R_^4r--zCjLZcXCIq@e+M92hnHg?R$g9G}7F zT5!4Tku#oyB#Df*YeeKZZA-makUaEj2CFo#37IoM;e(wx)(1|tXE~HZj9p^lNO=@!uO&h6e&;|qPE?L?k;bfM*p##{a7FZxWj%Ir`L29_TjoBBJ} z*qw>5*djN&-e%doR`$+=VS#ruT6mOE-w0*8)O+pICUog7_fTSM%p_&8B}`dB&EHm4k4IbO4B zW6iMS>!SZiQf{^;M>`JHuhVIBeHcbtyLRMKjGw?cyH_TOW!9-pM}#s=4!gM9RKpqO z_9v2v4~nUYN8qB*vnYqlL>cC#@T4&xPhQKiU(owlvw}56=?@g0Z>66g$iRQCB+ z+2MopqAtPs0N+b6ONJka*Q?S{tlw3o*f{EAd@5erid4@0IM2DVJK|gxWSV4C&*AmS z%uljGER}k%cti?>) zGkiApDx=!@?=pzkGmYEQCtHcHQTj1yWX4+V0f-c0Eo{Z!biU7K^?9bBlzrY(9Bw}P zP;!RVSStJ`_%7SN+=0^HzP>I(eRf36whQDr*uBT!x?OO)LNA^PF8g+edN{0RZ10?p z`kvQ=y_D-ijT(NQd$N7x5Xl1%-Mj0sqR+4GZS{IKem3(ofAc^ToXV46nfm#Umm(AW zVd5!lO@p`)A4R-b>;JaulZga7_PVyG)G6c2?1`vhM^=ZuTB9kuX>0hPGVb8Lr@oP| z>F-yUAEhOA3mPqZZPzl?_&4^5dw=c}*F0BUCLPke*64sY6jm)3^d4LWPTeD41yWsy zox9M~7xC9)*bVuU&B^TNa{j-zO6_A+>XSdMuWWm1SMIF@OMpJaK2q{bmg%ar&x&Y2 z7X98=zur^7n76YQZ@v=y$NpN@;oGb5b}r>GabjNCjF_+W`!bh+RXmZuxN1gh{+SM^ z)AeWmS&qJZT+V^z{2OCg%Yw~swXB=*h?(`8UjHbIk3YtnL5JVpZ+TaI7Wn~-#+h}o zVq@#~3u^o4Bx^3MTx})cdqMSB*LG44_1oX-6cTgs>xxzFTn9T?v9lTTf67m0fBgN$ zewdQyQ1d+L&Ij#y{`cmZ=r-y``ek=tb*u)hY@TVmJFO*J$jTxu9VS^KEY{EZi$;*O z_)>bnnzWU{J+dsUvR_k)^@0^EE4PfC&GFx8G_HwjLwxCic3^)cTt0?^+L_}7;Ya@;dvQh5YsA2LJ zvsMYIH?J|Z^B71IW4ov?YlAP;;~`Oq?_68b_#lZ`cJ{LTDM_I1=UI{^zAfqZowe+t 
zr3Z0uPr1`{b3y~P{yF;XGsAF&sDx2!uTtCk>3^kBWRF-Zce!On*;nfPnfa`2S^L^y z4fh2V8oaHo96#^H#okl1Q6F1o-KJyN$rgQn3I5&0%NRxfl;=LH=T>J^vT_jik+tQS znBej@$E9_c#X+uzSYY6eDhG1>iW$zuH*meRQNk0$u$?`TY zohyC+Fx{$I`jQvZHDbuV#ZPsHV>%vN0Yy6d&TmX>KlJQ|%keBZS)6!HJTaMFZ}*QF zFLn2x`V4^4DB}u0!GaDU&+K2d7-dCPhs>evTD4thYtkjw*PMGk6KHF?KL=l2`*uww zcD1#TbLG*pq*!Fr9wS=C@vYZ_dY=yivE1>9)f~2Sd3anpFFQgM?>jZ<1?wY2{4{fY zVpL}LHfF4`VPROAgvZ^eGVTh&?p$V{Sf8h2eExsv5%HtVxn3x1ZRY|QfkTZ~DQ{8Z zEOuviAu-sK0K@eN7;#5n39CQecYW&TzSkHo|K4J_9(W6`L;RE1XI@XF@LKZAerex9 zg4MBeyLZIxWqrTVhqpo8(h1&WN%t*f@OJg=j-K7o?u?t0D{nH{QyFzQuS3ao)xM+O zRtufjQ_EdF-P6xHR2e1Z@K#EpZLu0Yi)+i7ri~UpnfLJYxs5g88xrvSH_(k ze|}!RduGjR`W34C&#-wdzn0XLXWG1uU)!|gGj3iZUTYlnD555hv~A0z*~wb;5HWS- zJw_n4XeG9`n3bI#z_Igo+lr~|9B{13ecU}?zDK0i-*$g&H;v6-YW2a;cYefo;`F!f z!^YXXxI)?GSB{?B{hdvXo&1aN-Rk>Fg;}7!{vxlJJsS~YN9&Dwm_8hRy@)Vhe#`lwrXJ%_wkjy-hFem)m5Zk|_x30w;Z9dQP&pt26*1e6< zWdc69{m(m8>%O~>imUqVzn{vh`t){BX8EmH&c4p}n;@N=T%*sgdDL>35#zEcnaS&s zWG!p$*zdr~Hv6NvU{lg65w0a9EG!lqx8-X z-fFwv{6Rb6?ri9mq0y875HUFT0`>9s83ne^lJN5}ND#`s91^!}q``b8u3QkYDS<}*%R?X#E# z-Lsth5RGfg+_rRYL)zufFJp07`>CJy23kA^&F`pL*&fw@8VX z56+A&>ipQoMQ=xX_TQ5dxVIY0H`D{H%{AE@-Z0B&mdz>iNwg?^o#Q693pzWIK*&Eg10Z;FywwtMR`uNk{ybX8$nf0V*GwYCe+AkZFmK?VcUKjoF%p$Rx z2P5Zxrq)Q^FS^=pZTDIBu3TU}(%6ht!*=JfRzTj^_lB~P!>&Dh*G~C18ILK!LMD{I ntmH7m<6X5m`erYw#S*NKOg~e_dz=1 additional Key/Value entrie(s) for existing Input section + # >=1 additional Key/Value entrie(s) for existing Input section filter: |- + Annotations Off # # >=1 additional Key/Value entrie(s) for existing Filter section output: |- # # >=1 additional Key/Value entrie(s) for existing Ouput section +# WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ ## Extra ports to add to the daemonset ports section extraPorts: [] @@ -162,12 +181,12 @@ extraVolumes: [] extraVolumeMounts: [] resources: - limits: - cpu: 100m - memory: 500Mi - requests: - cpu: 10m - memory: 8Mi + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 10m + memory: 8Mi # When enabled, pods will bind to the node's network namespace. hostNetwork: false @@ -216,9 +235,11 @@ filter: kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token kubeTag: kube + kubeTagPrefix: kube.var.log.containers. + # If true, check to see if the log field content is a JSON string map, if so, # it append the map fields as part of the log structure. - mergeJSONLog: false + mergeJSONLog: true # If true, enable the use of monitoring for a pod annotation of # fluentbit.io/parser: parser_name. parser_name must be the name -- GitLab From 25407f0941ae21c865ff6c275ceef57753777b0f Mon Sep 17 00:00:00 2001 From: Bogdan Alov Date: Thu, 27 Jun 2019 12:14:47 +0300 Subject: [PATCH 4/4] removing efk folder; adding separate chart for each service --- .../.helmignore | 0 .../Chart.yaml | 0 .../OWNERS | 0 .../README.md | 0 .../ci/initcontainer-values.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/configmap.yaml | 0 .../templates/cronjob.yaml | 0 .../templates/hooks/job.install.yaml | 0 .../values.yaml | 0 .../.helmignore | 0 .../Chart.yaml | 0 .../OWNERS | 0 .../README.md | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/cert-secret.yaml | 0 .../templates/deployment.yaml | 0 .../templates/service.yaml | 0 .../templates/servicemonitor.yaml | 0 .../values.yaml | 0 {efk/elasticsearch => elasticsearch}/.helmignore | 0 {efk/elasticsearch => elasticsearch}/Chart.yaml | 0 {efk/elasticsearch => elasticsearch}/OWNERS | 0 {efk/elasticsearch => elasticsearch}/README.md | 0 .../ci/extrainitcontainers-values.yaml | 0 .../ci/plugin-initcontainer-values.yaml | 0 
.../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/client-auth.yaml | 0 .../templates/client-deployment.yaml | 0 .../templates/client-ingress.yaml | 0 .../templates/client-pdb.yaml | 0 .../templates/client-serviceaccount.yaml | 0 .../templates/client-svc.yaml | 0 .../templates/configmap.yaml | 0 .../templates/data-pdb.yaml | 0 .../templates/data-serviceaccount.yaml | 0 .../templates/data-statefulset.yaml | 0 .../templates/job.yaml | 0 .../templates/master-pdb.yaml | 0 .../templates/master-serviceaccount.yaml | 0 .../templates/master-statefulset.yaml | 0 .../templates/master-svc.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../templates/role.yaml | 0 .../templates/rolebinding.yaml | 0 .../templates/tests/test-configmap.yaml | 0 .../templates/tests/test.yaml | 0 {efk/elasticsearch => elasticsearch}/values.yaml | 0 {efk/fluent-bit => fluent-bit}/Chart.yaml | 0 {efk/fluent-bit => fluent-bit}/OWNERS | 0 {efk/fluent-bit => fluent-bit}/README.md | 0 {efk/fluent-bit => fluent-bit}/release.txt | Bin {efk/fluent-bit => fluent-bit}/templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/cluster-role.yaml | 0 .../templates/cluster-rolebinding.yaml | 0 .../fluent-bit => fluent-bit}/templates/config.yaml | 0 .../templates/daemonset.yaml | 0 .../fluent-bit => fluent-bit}/templates/secret.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/tests/test-configmap.yaml | 0 .../templates/tests/test.yaml | 0 {efk/fluent-bit => fluent-bit}/values.yaml | 0 {efk/kibana => kibana}/.helmignore | 0 {efk/kibana => kibana}/Chart.yaml | 0 {efk/kibana => kibana}/OWNERS | 0 {efk/kibana => kibana}/README.md | 0 {efk/kibana => kibana}/ci/authproxy-enabled.yaml | 0 {efk/kibana => kibana}/ci/dashboard-values.yaml | 0 .../ci/extra-configmap-mounts.yaml | 0 {efk/kibana => kibana}/ci/ingress-hosts-paths.yaml | 0 {efk/kibana => kibana}/ci/ingress-hosts.yaml | 0 .../ci/initcontainers-all-values.yaml | 0 .../kibana => 
kibana}/ci/initcontainers-values.yaml | 0 {efk/kibana => kibana}/ci/plugin-install.yaml | 0 {efk/kibana => kibana}/ci/pvc.yaml | 0 {efk/kibana => kibana}/ci/security-context.yaml | 0 {efk/kibana => kibana}/ci/service-values.yaml | 0 {efk/kibana => kibana}/ci/url_dashboard-values.yaml | 0 {efk/kibana => kibana}/templates/NOTES.txt | 0 {efk/kibana => kibana}/templates/_helpers.tpl | 0 .../templates/configmap-dashboardimport.yaml | 0 {efk/kibana => kibana}/templates/configmap.yaml | 0 {efk/kibana => kibana}/templates/deployment.yaml | 0 {efk/kibana => kibana}/templates/ingress.yaml | 0 {efk/kibana => kibana}/templates/service.yaml | 0 .../kibana => kibana}/templates/serviceaccount.yaml | 0 .../templates/tests/test-configmap.yaml | 0 {efk/kibana => kibana}/templates/tests/test.yaml | 0 {efk/kibana => kibana}/templates/volume-claim.yaml | 0 {efk/kibana => kibana}/values.yaml | 0 95 files changed, 0 insertions(+), 0 deletions(-) rename {efk/elasticsearch-curator => elasticsearch-curator}/.helmignore (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/Chart.yaml (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/OWNERS (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/README.md (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/ci/initcontainer-values.yaml (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/templates/NOTES.txt (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/templates/_helpers.tpl (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/templates/configmap.yaml (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/templates/cronjob.yaml (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/templates/hooks/job.install.yaml (100%) rename {efk/elasticsearch-curator => elasticsearch-curator}/values.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/.helmignore (100%) rename {efk/elasticsearch-exporter => 
elasticsearch-exporter}/Chart.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/OWNERS (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/README.md (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/NOTES.txt (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/_helpers.tpl (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/cert-secret.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/deployment.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/service.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/templates/servicemonitor.yaml (100%) rename {efk/elasticsearch-exporter => elasticsearch-exporter}/values.yaml (100%) rename {efk/elasticsearch => elasticsearch}/.helmignore (100%) rename {efk/elasticsearch => elasticsearch}/Chart.yaml (100%) rename {efk/elasticsearch => elasticsearch}/OWNERS (100%) rename {efk/elasticsearch => elasticsearch}/README.md (100%) rename {efk/elasticsearch => elasticsearch}/ci/extrainitcontainers-values.yaml (100%) rename {efk/elasticsearch => elasticsearch}/ci/plugin-initcontainer-values.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/NOTES.txt (100%) rename {efk/elasticsearch => elasticsearch}/templates/_helpers.tpl (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-auth.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-deployment.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-ingress.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-pdb.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-serviceaccount.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/client-svc.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/configmap.yaml (100%) rename {efk/elasticsearch => 
elasticsearch}/templates/data-pdb.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/data-serviceaccount.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/data-statefulset.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/job.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/master-pdb.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/master-serviceaccount.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/master-statefulset.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/master-svc.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/podsecuritypolicy.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/role.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/rolebinding.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/tests/test-configmap.yaml (100%) rename {efk/elasticsearch => elasticsearch}/templates/tests/test.yaml (100%) rename {efk/elasticsearch => elasticsearch}/values.yaml (100%) rename {efk/fluent-bit => fluent-bit}/Chart.yaml (100%) rename {efk/fluent-bit => fluent-bit}/OWNERS (100%) rename {efk/fluent-bit => fluent-bit}/README.md (100%) rename {efk/fluent-bit => fluent-bit}/release.txt (100%) rename {efk/fluent-bit => fluent-bit}/templates/NOTES.txt (100%) rename {efk/fluent-bit => fluent-bit}/templates/_helpers.tpl (100%) rename {efk/fluent-bit => fluent-bit}/templates/cluster-role.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/cluster-rolebinding.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/config.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/daemonset.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/secret.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/service.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/serviceaccount.yaml (100%) rename {efk/fluent-bit => fluent-bit}/templates/tests/test-configmap.yaml 
(100%) rename {efk/fluent-bit => fluent-bit}/templates/tests/test.yaml (100%) rename {efk/fluent-bit => fluent-bit}/values.yaml (100%) rename {efk/kibana => kibana}/.helmignore (100%) rename {efk/kibana => kibana}/Chart.yaml (100%) rename {efk/kibana => kibana}/OWNERS (100%) rename {efk/kibana => kibana}/README.md (100%) rename {efk/kibana => kibana}/ci/authproxy-enabled.yaml (100%) rename {efk/kibana => kibana}/ci/dashboard-values.yaml (100%) rename {efk/kibana => kibana}/ci/extra-configmap-mounts.yaml (100%) rename {efk/kibana => kibana}/ci/ingress-hosts-paths.yaml (100%) rename {efk/kibana => kibana}/ci/ingress-hosts.yaml (100%) rename {efk/kibana => kibana}/ci/initcontainers-all-values.yaml (100%) rename {efk/kibana => kibana}/ci/initcontainers-values.yaml (100%) rename {efk/kibana => kibana}/ci/plugin-install.yaml (100%) rename {efk/kibana => kibana}/ci/pvc.yaml (100%) rename {efk/kibana => kibana}/ci/security-context.yaml (100%) rename {efk/kibana => kibana}/ci/service-values.yaml (100%) rename {efk/kibana => kibana}/ci/url_dashboard-values.yaml (100%) rename {efk/kibana => kibana}/templates/NOTES.txt (100%) rename {efk/kibana => kibana}/templates/_helpers.tpl (100%) rename {efk/kibana => kibana}/templates/configmap-dashboardimport.yaml (100%) rename {efk/kibana => kibana}/templates/configmap.yaml (100%) rename {efk/kibana => kibana}/templates/deployment.yaml (100%) rename {efk/kibana => kibana}/templates/ingress.yaml (100%) rename {efk/kibana => kibana}/templates/service.yaml (100%) rename {efk/kibana => kibana}/templates/serviceaccount.yaml (100%) rename {efk/kibana => kibana}/templates/tests/test-configmap.yaml (100%) rename {efk/kibana => kibana}/templates/tests/test.yaml (100%) rename {efk/kibana => kibana}/templates/volume-claim.yaml (100%) rename {efk/kibana => kibana}/values.yaml (100%) diff --git a/efk/elasticsearch-curator/.helmignore b/elasticsearch-curator/.helmignore similarity index 100% rename from efk/elasticsearch-curator/.helmignore rename 
to elasticsearch-curator/.helmignore diff --git a/efk/elasticsearch-curator/Chart.yaml b/elasticsearch-curator/Chart.yaml similarity index 100% rename from efk/elasticsearch-curator/Chart.yaml rename to elasticsearch-curator/Chart.yaml diff --git a/efk/elasticsearch-curator/OWNERS b/elasticsearch-curator/OWNERS similarity index 100% rename from efk/elasticsearch-curator/OWNERS rename to elasticsearch-curator/OWNERS diff --git a/efk/elasticsearch-curator/README.md b/elasticsearch-curator/README.md similarity index 100% rename from efk/elasticsearch-curator/README.md rename to elasticsearch-curator/README.md diff --git a/efk/elasticsearch-curator/ci/initcontainer-values.yaml b/elasticsearch-curator/ci/initcontainer-values.yaml similarity index 100% rename from efk/elasticsearch-curator/ci/initcontainer-values.yaml rename to elasticsearch-curator/ci/initcontainer-values.yaml diff --git a/efk/elasticsearch-curator/templates/NOTES.txt b/elasticsearch-curator/templates/NOTES.txt similarity index 100% rename from efk/elasticsearch-curator/templates/NOTES.txt rename to elasticsearch-curator/templates/NOTES.txt diff --git a/efk/elasticsearch-curator/templates/_helpers.tpl b/elasticsearch-curator/templates/_helpers.tpl similarity index 100% rename from efk/elasticsearch-curator/templates/_helpers.tpl rename to elasticsearch-curator/templates/_helpers.tpl diff --git a/efk/elasticsearch-curator/templates/configmap.yaml b/elasticsearch-curator/templates/configmap.yaml similarity index 100% rename from efk/elasticsearch-curator/templates/configmap.yaml rename to elasticsearch-curator/templates/configmap.yaml diff --git a/efk/elasticsearch-curator/templates/cronjob.yaml b/elasticsearch-curator/templates/cronjob.yaml similarity index 100% rename from efk/elasticsearch-curator/templates/cronjob.yaml rename to elasticsearch-curator/templates/cronjob.yaml diff --git a/efk/elasticsearch-curator/templates/hooks/job.install.yaml b/elasticsearch-curator/templates/hooks/job.install.yaml 
similarity index 100% rename from efk/elasticsearch-curator/templates/hooks/job.install.yaml rename to elasticsearch-curator/templates/hooks/job.install.yaml diff --git a/efk/elasticsearch-curator/values.yaml b/elasticsearch-curator/values.yaml similarity index 100% rename from efk/elasticsearch-curator/values.yaml rename to elasticsearch-curator/values.yaml diff --git a/efk/elasticsearch-exporter/.helmignore b/elasticsearch-exporter/.helmignore similarity index 100% rename from efk/elasticsearch-exporter/.helmignore rename to elasticsearch-exporter/.helmignore diff --git a/efk/elasticsearch-exporter/Chart.yaml b/elasticsearch-exporter/Chart.yaml similarity index 100% rename from efk/elasticsearch-exporter/Chart.yaml rename to elasticsearch-exporter/Chart.yaml diff --git a/efk/elasticsearch-exporter/OWNERS b/elasticsearch-exporter/OWNERS similarity index 100% rename from efk/elasticsearch-exporter/OWNERS rename to elasticsearch-exporter/OWNERS diff --git a/efk/elasticsearch-exporter/README.md b/elasticsearch-exporter/README.md similarity index 100% rename from efk/elasticsearch-exporter/README.md rename to elasticsearch-exporter/README.md diff --git a/efk/elasticsearch-exporter/templates/NOTES.txt b/elasticsearch-exporter/templates/NOTES.txt similarity index 100% rename from efk/elasticsearch-exporter/templates/NOTES.txt rename to elasticsearch-exporter/templates/NOTES.txt diff --git a/efk/elasticsearch-exporter/templates/_helpers.tpl b/elasticsearch-exporter/templates/_helpers.tpl similarity index 100% rename from efk/elasticsearch-exporter/templates/_helpers.tpl rename to elasticsearch-exporter/templates/_helpers.tpl diff --git a/efk/elasticsearch-exporter/templates/cert-secret.yaml b/elasticsearch-exporter/templates/cert-secret.yaml similarity index 100% rename from efk/elasticsearch-exporter/templates/cert-secret.yaml rename to elasticsearch-exporter/templates/cert-secret.yaml diff --git a/efk/elasticsearch-exporter/templates/deployment.yaml 
b/elasticsearch-exporter/templates/deployment.yaml similarity index 100% rename from efk/elasticsearch-exporter/templates/deployment.yaml rename to elasticsearch-exporter/templates/deployment.yaml diff --git a/efk/elasticsearch-exporter/templates/service.yaml b/elasticsearch-exporter/templates/service.yaml similarity index 100% rename from efk/elasticsearch-exporter/templates/service.yaml rename to elasticsearch-exporter/templates/service.yaml diff --git a/efk/elasticsearch-exporter/templates/servicemonitor.yaml b/elasticsearch-exporter/templates/servicemonitor.yaml similarity index 100% rename from efk/elasticsearch-exporter/templates/servicemonitor.yaml rename to elasticsearch-exporter/templates/servicemonitor.yaml diff --git a/efk/elasticsearch-exporter/values.yaml b/elasticsearch-exporter/values.yaml similarity index 100% rename from efk/elasticsearch-exporter/values.yaml rename to elasticsearch-exporter/values.yaml diff --git a/efk/elasticsearch/.helmignore b/elasticsearch/.helmignore similarity index 100% rename from efk/elasticsearch/.helmignore rename to elasticsearch/.helmignore diff --git a/efk/elasticsearch/Chart.yaml b/elasticsearch/Chart.yaml similarity index 100% rename from efk/elasticsearch/Chart.yaml rename to elasticsearch/Chart.yaml diff --git a/efk/elasticsearch/OWNERS b/elasticsearch/OWNERS similarity index 100% rename from efk/elasticsearch/OWNERS rename to elasticsearch/OWNERS diff --git a/efk/elasticsearch/README.md b/elasticsearch/README.md similarity index 100% rename from efk/elasticsearch/README.md rename to elasticsearch/README.md diff --git a/efk/elasticsearch/ci/extrainitcontainers-values.yaml b/elasticsearch/ci/extrainitcontainers-values.yaml similarity index 100% rename from efk/elasticsearch/ci/extrainitcontainers-values.yaml rename to elasticsearch/ci/extrainitcontainers-values.yaml diff --git a/efk/elasticsearch/ci/plugin-initcontainer-values.yaml b/elasticsearch/ci/plugin-initcontainer-values.yaml similarity index 100% rename 
from efk/elasticsearch/ci/plugin-initcontainer-values.yaml rename to elasticsearch/ci/plugin-initcontainer-values.yaml diff --git a/efk/elasticsearch/templates/NOTES.txt b/elasticsearch/templates/NOTES.txt similarity index 100% rename from efk/elasticsearch/templates/NOTES.txt rename to elasticsearch/templates/NOTES.txt diff --git a/efk/elasticsearch/templates/_helpers.tpl b/elasticsearch/templates/_helpers.tpl similarity index 100% rename from efk/elasticsearch/templates/_helpers.tpl rename to elasticsearch/templates/_helpers.tpl diff --git a/efk/elasticsearch/templates/client-auth.yaml b/elasticsearch/templates/client-auth.yaml similarity index 100% rename from efk/elasticsearch/templates/client-auth.yaml rename to elasticsearch/templates/client-auth.yaml diff --git a/efk/elasticsearch/templates/client-deployment.yaml b/elasticsearch/templates/client-deployment.yaml similarity index 100% rename from efk/elasticsearch/templates/client-deployment.yaml rename to elasticsearch/templates/client-deployment.yaml diff --git a/efk/elasticsearch/templates/client-ingress.yaml b/elasticsearch/templates/client-ingress.yaml similarity index 100% rename from efk/elasticsearch/templates/client-ingress.yaml rename to elasticsearch/templates/client-ingress.yaml diff --git a/efk/elasticsearch/templates/client-pdb.yaml b/elasticsearch/templates/client-pdb.yaml similarity index 100% rename from efk/elasticsearch/templates/client-pdb.yaml rename to elasticsearch/templates/client-pdb.yaml diff --git a/efk/elasticsearch/templates/client-serviceaccount.yaml b/elasticsearch/templates/client-serviceaccount.yaml similarity index 100% rename from efk/elasticsearch/templates/client-serviceaccount.yaml rename to elasticsearch/templates/client-serviceaccount.yaml diff --git a/efk/elasticsearch/templates/client-svc.yaml b/elasticsearch/templates/client-svc.yaml similarity index 100% rename from efk/elasticsearch/templates/client-svc.yaml rename to elasticsearch/templates/client-svc.yaml diff 
--git a/efk/elasticsearch/templates/configmap.yaml b/elasticsearch/templates/configmap.yaml similarity index 100% rename from efk/elasticsearch/templates/configmap.yaml rename to elasticsearch/templates/configmap.yaml diff --git a/efk/elasticsearch/templates/data-pdb.yaml b/elasticsearch/templates/data-pdb.yaml similarity index 100% rename from efk/elasticsearch/templates/data-pdb.yaml rename to elasticsearch/templates/data-pdb.yaml diff --git a/efk/elasticsearch/templates/data-serviceaccount.yaml b/elasticsearch/templates/data-serviceaccount.yaml similarity index 100% rename from efk/elasticsearch/templates/data-serviceaccount.yaml rename to elasticsearch/templates/data-serviceaccount.yaml diff --git a/efk/elasticsearch/templates/data-statefulset.yaml b/elasticsearch/templates/data-statefulset.yaml similarity index 100% rename from efk/elasticsearch/templates/data-statefulset.yaml rename to elasticsearch/templates/data-statefulset.yaml diff --git a/efk/elasticsearch/templates/job.yaml b/elasticsearch/templates/job.yaml similarity index 100% rename from efk/elasticsearch/templates/job.yaml rename to elasticsearch/templates/job.yaml diff --git a/efk/elasticsearch/templates/master-pdb.yaml b/elasticsearch/templates/master-pdb.yaml similarity index 100% rename from efk/elasticsearch/templates/master-pdb.yaml rename to elasticsearch/templates/master-pdb.yaml diff --git a/efk/elasticsearch/templates/master-serviceaccount.yaml b/elasticsearch/templates/master-serviceaccount.yaml similarity index 100% rename from efk/elasticsearch/templates/master-serviceaccount.yaml rename to elasticsearch/templates/master-serviceaccount.yaml diff --git a/efk/elasticsearch/templates/master-statefulset.yaml b/elasticsearch/templates/master-statefulset.yaml similarity index 100% rename from efk/elasticsearch/templates/master-statefulset.yaml rename to elasticsearch/templates/master-statefulset.yaml diff --git a/efk/elasticsearch/templates/master-svc.yaml 
b/elasticsearch/templates/master-svc.yaml similarity index 100% rename from efk/elasticsearch/templates/master-svc.yaml rename to elasticsearch/templates/master-svc.yaml diff --git a/efk/elasticsearch/templates/podsecuritypolicy.yaml b/elasticsearch/templates/podsecuritypolicy.yaml similarity index 100% rename from efk/elasticsearch/templates/podsecuritypolicy.yaml rename to elasticsearch/templates/podsecuritypolicy.yaml diff --git a/efk/elasticsearch/templates/role.yaml b/elasticsearch/templates/role.yaml similarity index 100% rename from efk/elasticsearch/templates/role.yaml rename to elasticsearch/templates/role.yaml diff --git a/efk/elasticsearch/templates/rolebinding.yaml b/elasticsearch/templates/rolebinding.yaml similarity index 100% rename from efk/elasticsearch/templates/rolebinding.yaml rename to elasticsearch/templates/rolebinding.yaml diff --git a/efk/elasticsearch/templates/tests/test-configmap.yaml b/elasticsearch/templates/tests/test-configmap.yaml similarity index 100% rename from efk/elasticsearch/templates/tests/test-configmap.yaml rename to elasticsearch/templates/tests/test-configmap.yaml diff --git a/efk/elasticsearch/templates/tests/test.yaml b/elasticsearch/templates/tests/test.yaml similarity index 100% rename from efk/elasticsearch/templates/tests/test.yaml rename to elasticsearch/templates/tests/test.yaml diff --git a/efk/elasticsearch/values.yaml b/elasticsearch/values.yaml similarity index 100% rename from efk/elasticsearch/values.yaml rename to elasticsearch/values.yaml diff --git a/efk/fluent-bit/Chart.yaml b/fluent-bit/Chart.yaml similarity index 100% rename from efk/fluent-bit/Chart.yaml rename to fluent-bit/Chart.yaml diff --git a/efk/fluent-bit/OWNERS b/fluent-bit/OWNERS similarity index 100% rename from efk/fluent-bit/OWNERS rename to fluent-bit/OWNERS diff --git a/efk/fluent-bit/README.md b/fluent-bit/README.md similarity index 100% rename from efk/fluent-bit/README.md rename to fluent-bit/README.md diff --git 
a/efk/fluent-bit/release.txt b/fluent-bit/release.txt similarity index 100% rename from efk/fluent-bit/release.txt rename to fluent-bit/release.txt diff --git a/efk/fluent-bit/templates/NOTES.txt b/fluent-bit/templates/NOTES.txt similarity index 100% rename from efk/fluent-bit/templates/NOTES.txt rename to fluent-bit/templates/NOTES.txt diff --git a/efk/fluent-bit/templates/_helpers.tpl b/fluent-bit/templates/_helpers.tpl similarity index 100% rename from efk/fluent-bit/templates/_helpers.tpl rename to fluent-bit/templates/_helpers.tpl diff --git a/efk/fluent-bit/templates/cluster-role.yaml b/fluent-bit/templates/cluster-role.yaml similarity index 100% rename from efk/fluent-bit/templates/cluster-role.yaml rename to fluent-bit/templates/cluster-role.yaml diff --git a/efk/fluent-bit/templates/cluster-rolebinding.yaml b/fluent-bit/templates/cluster-rolebinding.yaml similarity index 100% rename from efk/fluent-bit/templates/cluster-rolebinding.yaml rename to fluent-bit/templates/cluster-rolebinding.yaml diff --git a/efk/fluent-bit/templates/config.yaml b/fluent-bit/templates/config.yaml similarity index 100% rename from efk/fluent-bit/templates/config.yaml rename to fluent-bit/templates/config.yaml diff --git a/efk/fluent-bit/templates/daemonset.yaml b/fluent-bit/templates/daemonset.yaml similarity index 100% rename from efk/fluent-bit/templates/daemonset.yaml rename to fluent-bit/templates/daemonset.yaml diff --git a/efk/fluent-bit/templates/secret.yaml b/fluent-bit/templates/secret.yaml similarity index 100% rename from efk/fluent-bit/templates/secret.yaml rename to fluent-bit/templates/secret.yaml diff --git a/efk/fluent-bit/templates/service.yaml b/fluent-bit/templates/service.yaml similarity index 100% rename from efk/fluent-bit/templates/service.yaml rename to fluent-bit/templates/service.yaml diff --git a/efk/fluent-bit/templates/serviceaccount.yaml b/fluent-bit/templates/serviceaccount.yaml similarity index 100% rename from 
efk/fluent-bit/templates/serviceaccount.yaml rename to fluent-bit/templates/serviceaccount.yaml diff --git a/efk/fluent-bit/templates/tests/test-configmap.yaml b/fluent-bit/templates/tests/test-configmap.yaml similarity index 100% rename from efk/fluent-bit/templates/tests/test-configmap.yaml rename to fluent-bit/templates/tests/test-configmap.yaml diff --git a/efk/fluent-bit/templates/tests/test.yaml b/fluent-bit/templates/tests/test.yaml similarity index 100% rename from efk/fluent-bit/templates/tests/test.yaml rename to fluent-bit/templates/tests/test.yaml diff --git a/efk/fluent-bit/values.yaml b/fluent-bit/values.yaml similarity index 100% rename from efk/fluent-bit/values.yaml rename to fluent-bit/values.yaml diff --git a/efk/kibana/.helmignore b/kibana/.helmignore similarity index 100% rename from efk/kibana/.helmignore rename to kibana/.helmignore diff --git a/efk/kibana/Chart.yaml b/kibana/Chart.yaml similarity index 100% rename from efk/kibana/Chart.yaml rename to kibana/Chart.yaml diff --git a/efk/kibana/OWNERS b/kibana/OWNERS similarity index 100% rename from efk/kibana/OWNERS rename to kibana/OWNERS diff --git a/efk/kibana/README.md b/kibana/README.md similarity index 100% rename from efk/kibana/README.md rename to kibana/README.md diff --git a/efk/kibana/ci/authproxy-enabled.yaml b/kibana/ci/authproxy-enabled.yaml similarity index 100% rename from efk/kibana/ci/authproxy-enabled.yaml rename to kibana/ci/authproxy-enabled.yaml diff --git a/efk/kibana/ci/dashboard-values.yaml b/kibana/ci/dashboard-values.yaml similarity index 100% rename from efk/kibana/ci/dashboard-values.yaml rename to kibana/ci/dashboard-values.yaml diff --git a/efk/kibana/ci/extra-configmap-mounts.yaml b/kibana/ci/extra-configmap-mounts.yaml similarity index 100% rename from efk/kibana/ci/extra-configmap-mounts.yaml rename to kibana/ci/extra-configmap-mounts.yaml diff --git a/efk/kibana/ci/ingress-hosts-paths.yaml b/kibana/ci/ingress-hosts-paths.yaml similarity index 100% rename 
from efk/kibana/ci/ingress-hosts-paths.yaml rename to kibana/ci/ingress-hosts-paths.yaml diff --git a/efk/kibana/ci/ingress-hosts.yaml b/kibana/ci/ingress-hosts.yaml similarity index 100% rename from efk/kibana/ci/ingress-hosts.yaml rename to kibana/ci/ingress-hosts.yaml diff --git a/efk/kibana/ci/initcontainers-all-values.yaml b/kibana/ci/initcontainers-all-values.yaml similarity index 100% rename from efk/kibana/ci/initcontainers-all-values.yaml rename to kibana/ci/initcontainers-all-values.yaml diff --git a/efk/kibana/ci/initcontainers-values.yaml b/kibana/ci/initcontainers-values.yaml similarity index 100% rename from efk/kibana/ci/initcontainers-values.yaml rename to kibana/ci/initcontainers-values.yaml diff --git a/efk/kibana/ci/plugin-install.yaml b/kibana/ci/plugin-install.yaml similarity index 100% rename from efk/kibana/ci/plugin-install.yaml rename to kibana/ci/plugin-install.yaml diff --git a/efk/kibana/ci/pvc.yaml b/kibana/ci/pvc.yaml similarity index 100% rename from efk/kibana/ci/pvc.yaml rename to kibana/ci/pvc.yaml diff --git a/efk/kibana/ci/security-context.yaml b/kibana/ci/security-context.yaml similarity index 100% rename from efk/kibana/ci/security-context.yaml rename to kibana/ci/security-context.yaml diff --git a/efk/kibana/ci/service-values.yaml b/kibana/ci/service-values.yaml similarity index 100% rename from efk/kibana/ci/service-values.yaml rename to kibana/ci/service-values.yaml diff --git a/efk/kibana/ci/url_dashboard-values.yaml b/kibana/ci/url_dashboard-values.yaml similarity index 100% rename from efk/kibana/ci/url_dashboard-values.yaml rename to kibana/ci/url_dashboard-values.yaml diff --git a/efk/kibana/templates/NOTES.txt b/kibana/templates/NOTES.txt similarity index 100% rename from efk/kibana/templates/NOTES.txt rename to kibana/templates/NOTES.txt diff --git a/efk/kibana/templates/_helpers.tpl b/kibana/templates/_helpers.tpl similarity index 100% rename from efk/kibana/templates/_helpers.tpl rename to 
kibana/templates/_helpers.tpl diff --git a/efk/kibana/templates/configmap-dashboardimport.yaml b/kibana/templates/configmap-dashboardimport.yaml similarity index 100% rename from efk/kibana/templates/configmap-dashboardimport.yaml rename to kibana/templates/configmap-dashboardimport.yaml diff --git a/efk/kibana/templates/configmap.yaml b/kibana/templates/configmap.yaml similarity index 100% rename from efk/kibana/templates/configmap.yaml rename to kibana/templates/configmap.yaml diff --git a/efk/kibana/templates/deployment.yaml b/kibana/templates/deployment.yaml similarity index 100% rename from efk/kibana/templates/deployment.yaml rename to kibana/templates/deployment.yaml diff --git a/efk/kibana/templates/ingress.yaml b/kibana/templates/ingress.yaml similarity index 100% rename from efk/kibana/templates/ingress.yaml rename to kibana/templates/ingress.yaml diff --git a/efk/kibana/templates/service.yaml b/kibana/templates/service.yaml similarity index 100% rename from efk/kibana/templates/service.yaml rename to kibana/templates/service.yaml diff --git a/efk/kibana/templates/serviceaccount.yaml b/kibana/templates/serviceaccount.yaml similarity index 100% rename from efk/kibana/templates/serviceaccount.yaml rename to kibana/templates/serviceaccount.yaml diff --git a/efk/kibana/templates/tests/test-configmap.yaml b/kibana/templates/tests/test-configmap.yaml similarity index 100% rename from efk/kibana/templates/tests/test-configmap.yaml rename to kibana/templates/tests/test-configmap.yaml diff --git a/efk/kibana/templates/tests/test.yaml b/kibana/templates/tests/test.yaml similarity index 100% rename from efk/kibana/templates/tests/test.yaml rename to kibana/templates/tests/test.yaml diff --git a/efk/kibana/templates/volume-claim.yaml b/kibana/templates/volume-claim.yaml similarity index 100% rename from efk/kibana/templates/volume-claim.yaml rename to kibana/templates/volume-claim.yaml diff --git a/efk/kibana/values.yaml b/kibana/values.yaml similarity index 100% 
rename from efk/kibana/values.yaml rename to kibana/values.yaml -- GitLab