diff --git a/kafka/Chart.yaml b/kafka/Chart.yaml index 5901888564aaeb3f7f7b3f299fa56f1178f5c76b..c9630fbbefce6226aab882203636a1752ab50a16 100644 --- a/kafka/Chart.yaml +++ b/kafka/Chart.yaml @@ -2,9 +2,8 @@ apiVersion: v1 description: Apache Kafka is publish-subscribe messaging rethought as a distributed commit log. name: kafka -version: 0.10.1 -#appVersion: 4.1.2 -appVersion: 2.0.0 +version: 0.15.0 +appVersion: 5.0.1 keywords: - kafka - zookeeper diff --git a/kafka/README.md b/kafka/README.md index e3d10b99f291fad569e87a49c52b83517bdef7a6..b041ac9e636f73356190cd36365c15ade52e7b6d 100644 --- a/kafka/README.md +++ b/kafka/README.md @@ -57,7 +57,7 @@ following configurable parameters: | Parameter | Description | Default | |------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------| | `image` | Kafka Container image name | `confluentinc/cp-kafka` | -| `imageTag` | Kafka Container image tag | `4.1.2-2` | +| `imageTag` | Kafka Container image tag | `5.0.1` | | `imagePullPolicy` | Kafka Container pull policy | `IfNotPresent` | | `replicas` | Kafka Brokers | `3` | | `component` | Kafka k8s selector key | `kafka` | @@ -71,15 +71,17 @@ following configurable parameters: | `headless.targetPort` | Target port to be used for the headless service. This is not a required value. | `nil` | | `headless.port` | Port to be used for the headless service. 
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | `9092` | | `external.enabled` | If True, exposes Kafka brokers via NodePort (PLAINTEXT by default) | `false` | +| `external.dns.useInternal` | If True, add Annotation for internal DNS service | `false` | +| `external.dns.useExternal` | If True, add Annotation for external DNS service | `true` | | `external.servicePort` | TCP port configured at external services (one per pod) to relay from NodePort to the external listener port. | '19092' | | `external.firstListenerPort` | TCP port which is added pod index number to arrive at the port used for NodePort and external listener port. | '31090' | | `external.domain` | Domain in which to advertise Kafka external listeners. | `cluster.local` | -| `external.init` | External init container settings. | (see `values.yaml`) | | `external.type` | Service Type. | `NodePort` | | `external.distinct` | Distinct DNS entries for each created A record. | `false` | | `external.annotations` | Additional annotations for the external service. | `{}` | | `podAnnotations` | Annotation to be added to Kafka pods | `{}` | -| `rbac.enabled` | Enable a service account and role for the init container to use in an RBAC enabled cluster | `false` | +| `loadBalancerIP` | Add Static IP to the type Load Balancer. Depends on the provider if enabled | `[]` +| `envOverrides` | Add additional Environment Variables in the dictionary format | `{ zookeeper.sasl.enabled: "False" }` | | `configurationOverrides` | `Kafka ` [configuration setting][brokerconfigs] overrides in the dictionary format | `{ offsets.topic.replication.factor: 3 }` | | `secrets` | `{}` | Pass any secrets to the kafka pods. Each secret will be passed as an environment variable by default. The secret can also be mounted to a specific path if required. Environment variable names are generated as: `_` (All upper case)| | `additionalPorts` | Additional ports to expose on brokers. 
Useful when the image exposes metrics (like prometheus, etc.) through a javaagent instead of a sidecar | `{}` | @@ -105,16 +107,21 @@ following configurable parameters: | `prometheus.jmx.image` | JMX Exporter container image | `solsson/kafka-prometheus-jmx-exporter@sha256` | | `prometheus.jmx.imageTag` | JMX Exporter container image tag | `a23062396cd5af1acdf76512632c20ea6be76885dfc20cd9ff40fb23846557e8` | | `prometheus.jmx.interval` | Interval that Prometheus scrapes JMX metrics when using Prometheus Operator | `10s` | +| `prometheus.jmx.scrapeTimeout` | Timeout that Prometheus scrapes JMX metrics when using Prometheus Operator | `10s` | | `prometheus.jmx.port` | JMX Exporter Port which exposes metrics in Prometheus format for scraping | `5556` | | `prometheus.kafka.enabled` | Whether or not to create a separate Kafka exporter | `false` | | `prometheus.kafka.image` | Kafka Exporter container image | `danielqsj/kafka-exporter` | | `prometheus.kafka.imageTag` | Kafka Exporter container image tag | `v1.2.0` | | `prometheus.kafka.interval` | Interval that Prometheus scrapes Kafka metrics when using Prometheus Operator | `10s` | +| `prometheus.kafka.scrapeTimeout` | Timeout that Prometheus scrapes Kafka metrics when using Prometheus Operator | `10s` | | `prometheus.kafka.port` | Kafka Exporter Port which exposes metrics in Prometheus format for scraping | `9308` | | `prometheus.kafka.resources` | Allows setting resource limits for kafka-exporter pod | `{}` | +| `prometheus.kafka.affinity` | Defines affinities and anti-affinities for pods as defined in: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity preferences | `{}` | +| `prometheus.kafka.tolerations` | List of node tolerations for the pods. 
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | `[]` | | `prometheus.operator.enabled` | True if using the Prometheus Operator, False if not | `false` | | `prometheus.operator.serviceMonitor.namespace` | Namespace which Prometheus is running in. Default to kube-prometheus install. | `monitoring` | | `prometheus.operator.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `configJob.backoffLimit` | Number of retries before considering kafka-config job as failed | `6` | | `topics` | List of topics to create & configure. Can specify name, partitions, replicationFactor, reassignPartitions, config. See values.yaml | `[]` (Empty list) | | `zookeeper.enabled` | If True, installs Zookeeper Chart | `true` | | `zookeeper.resources` | Zookeeper resource requests and limits | `{}` | @@ -203,7 +210,7 @@ such port at a time, setting the range at every Kafka pod is a reasonably safe c #### Load Balancer External Service Type -The load balancer external service type differs from the node port type by routing to the `port` specified in the service for each statefulset container. Because of this `external.servicePort` is unused and will be set to the sum of `external.firstListenerPort` and the replica number. It is important to note that `external.firstListenerPort` does not have to be within the configured node port range for the cluster, however a node port will be allocated. +The load balancer external service type differs from the node port type by routing to the `external.servicePort` specified in the service for each statefulset container (if `external.distinct` is set). If `external.distinct` is false, `external.servicePort` is unused and will be set to the sum of `external.firstListenerPort` and the replica number. 
It is important to note that `external.firstListenerPort` does not have to be within the configured node port range for the cluster, however a node port will be allocated. ## Known Limitations diff --git a/kafka/charts/zookeeper/Chart.yaml b/kafka/charts/zookeeper/Chart.yaml index c58087e0993f50c9090a7678c915a05c3fde54b7..fee4b2ae8acdb9e1ad9d5382f3b74d1b8033afbc 100644 --- a/kafka/charts/zookeeper/Chart.yaml +++ b/kafka/charts/zookeeper/Chart.yaml @@ -1,6 +1,6 @@ name: zookeeper home: https://zookeeper.apache.org/ -version: 1.2.0 +version: 1.2.2 appVersion: 3.4.10 description: Centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. diff --git a/kafka/charts/zookeeper/values.yaml b/kafka/charts/zookeeper/values.yaml index 53daa41f611cf2b6af333a40c4fe9f72104c38d0..57cbb249a7310c500cf9a4775508994755c95c8e 100644 --- a/kafka/charts/zookeeper/values.yaml +++ b/kafka/charts/zookeeper/values.yaml @@ -286,7 +286,7 @@ env: ## The Log Level that for the ZooKeeper processes logger. ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`. - ZK_LOG_LEVEL: INFO + ZK_LOG_LEVEL: WARN ## The maximum number of concurrent client connections that ## a server in the ensemble will accept. 
diff --git a/kafka/requirements.lock b/kafka/requirements.lock index 802e6a9ad9a0c89cd91953074b34b5807b352856..59e9ca7d247981f3d8e2dcf8134b83b28234eae6 100644 --- a/kafka/requirements.lock +++ b/kafka/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: zookeeper repository: https://kubernetes-charts-incubator.storage.googleapis.com/ - version: 1.0.2 -digest: sha256:0ea890c77e32aee10c564b732c9fa27b17fa5c398bc50a6bf342ecbb79094cdc -generated: 2018-07-09T20:04:07.73379146+03:00 + version: 1.2.0 +digest: sha256:48de211cbffc0b7df9995edc4fd5d693e8bbc94e684aa83c11e6f94803f0e8b9 +generated: 2018-11-26T17:47:36.893674-05:00 diff --git a/kafka/requirements.yaml b/kafka/requirements.yaml index 839e0f3b9b3c3a44516bb4e1347e54f883a032d9..2bee53a96d6d3db035976825198d87fc29d1d500 100644 --- a/kafka/requirements.yaml +++ b/kafka/requirements.yaml @@ -1,6 +1,5 @@ dependencies: - name: zookeeper - #version: 1.0.2 version: 1.2.0 repository: https://kubernetes-charts-incubator.storage.googleapis.com/ condition: zookeeper.enabled diff --git a/kafka/templates/NOTES.txt b/kafka/templates/NOTES.txt index 11eade7b55779554b8ec4e61b7ea3c08d7be8bab..08ea4e45cb08ae5679333ed52fd9b2a460ad1c31 100644 --- a/kafka/templates/NOTES.txt +++ b/kafka/templates/NOTES.txt @@ -19,20 +19,20 @@ You can connect to Kafka by running a simple pod in the K8s cluster like this wi Once you have the testclient pod above running, you can list all kafka topics with: - kubectl -n {{ .Release.Namespace }} exec testclient -- /usr/bin/kafka-topics --zookeeper {{ .Release.Name }}-zookeeper:2181 --list - + kubectl -n {{ .Release.Namespace }} exec testclient -- /opt/kafka/bin/kafka-topics.sh --zookeeper {{ .Release.Name }}-zookeeper:2181 --list + To create a new topic: - kubectl -n {{ .Release.Namespace }} exec testclient -- /usr/bin/kafka-topics --zookeeper {{ .Release.Name }}-zookeeper:2181 --topic test1 --create --partitions 1 --replication-factor 1 + kubectl -n {{ .Release.Namespace }} exec testclient -- 
/opt/kafka/bin/kafka-topics.sh --zookeeper {{ .Release.Name }}-zookeeper:2181 --topic test1 --create --partitions 1 --replication-factor 1 To listen for messages on a topic: - kubectl -n {{ .Release.Namespace }} exec -ti testclient -- /usr/bin/kafka-console-consumer --bootstrap-server {{ .Release.Name }}-kafka:9092 --topic test1 --from-beginning + kubectl -n {{ .Release.Namespace }} exec -ti testclient -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server {{ include "kafka.fullname" . }}:9092 --topic test1 --from-beginning To stop the listener session above press: Ctrl+C To start an interactive message producer session: - kubectl -n {{ .Release.Namespace }} exec -ti testclient -- /usr/bin/kafka-console-producer --broker-list {{ .Release.Name }}-kafka-headless:9092 --topic test1 + kubectl -n {{ .Release.Namespace }} exec -ti testclient -- /opt/kafka/bin/kafka-console-producer.sh --broker-list {{ include "kafka.fullname" . }}-headless:9092 --topic test1 To create a message in the above session, simply type the message and press "enter" To end the producer session try: Ctrl+C @@ -51,12 +51,12 @@ clients to point at the following brokers. Please allow a few minutes for all associated resources to become healthy. {{ $fullName := include "kafka.fullname" . }} {{- $replicas := .Values.replicas | int }} - {{- $servicePort := .Values.external.servicePort }} + {{- $servicePort := .Values.external.servicePort | int}} {{- $root := . 
}} {{- range $i, $e := until $replicas }} {{- $externalListenerPort := add $root.Values.external.firstListenerPort $i }} {{- if $root.Values.external.distinct }} -{{ printf "%s-%d.%s:%d" $root.Release.Name $i $root.Values.external.domain $externalListenerPort | indent 2 }} +{{ printf "%s-%d.%s:%d" $root.Release.Name $i $root.Values.external.domain $servicePort | indent 2 }} {{- else }} {{ printf "%s.%s:%d" $root.Release.Name $root.Values.external.domain $externalListenerPort | indent 2 }} {{- end }} diff --git a/kafka/templates/configmap-config.yaml b/kafka/templates/configmap-config.yaml index 062041c26ea1752e49d8b285f4e8d8ad98614c40..9af770235c09d1d0db65ec57428db31811e9c73e 100644 --- a/kafka/templates/configmap-config.yaml +++ b/kafka/templates/configmap-config.yaml @@ -25,7 +25,7 @@ data: sleep 20 done echo "Applying runtime configuration using {{ .Values.image }}:{{ .Values.imageTag }}" - {{- range $n, $topic := .Values.topics }} + {{- range $n, $topic := .Values.topics }} {{- if and $topic.partitions $topic.replicationFactor $topic.reassignPartitions }} cat << EOF > {{ $topic.name }}-increase-replication-factor.json {"version":1, "partitions":[ @@ -50,5 +50,12 @@ data: kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --alter --force --add-config {{ nospace $topic.config }} {{- end }} kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --describe + {{- if $topic.acls }} + {{- range $a, $acl := $topic.acls }} + {{ if and $acl.user $acl.operations }} + kafka-acls --authorizer-properties zookeeper.connect={{ $zk }} --force --add --allow-principal User:{{ $acl.user }}{{- range $operation := $acl.operations }} --operation {{ $operation }} {{- end }} --topic {{ $topic.name }} {{ $topic.extraParams }} + {{- end }} + {{- end }} {{- end }} + {{- end }} {{- end -}} diff --git a/kafka/templates/deployment-kafka-exporter.yaml b/kafka/templates/deployment-kafka-exporter.yaml index 
d43aab1f773e82a969a8795f2bca19c1c686bddc..709ea0c743e6609d13752c6ef64a4629ca76c3c4 100644 --- a/kafka/templates/deployment-kafka-exporter.yaml +++ b/kafka/templates/deployment-kafka-exporter.yaml @@ -35,4 +35,16 @@ spec: - containerPort: {{ .Values.prometheus.kafka.port }} resources: {{ toYaml .Values.prometheus.kafka.resources | indent 10 }} +{{- if .Values.prometheus.kafka.tolerations }} + tolerations: +{{ toYaml .Values.prometheus.kafka.tolerations | indent 8 }} +{{- end }} +{{- if .Values.prometheus.kafka.affinity }} + affinity: +{{ toYaml .Values.prometheus.kafka.affinity | indent 8 }} +{{- end }} +{{- if .Values.prometheus.kafka.nodeSelector }} + nodeSelector: +{{ toYaml .Values.prometheus.kafka.nodeSelector | indent 8 }} +{{- end }} {{- end }} diff --git a/kafka/templates/job-config.yaml b/kafka/templates/job-config.yaml index 1bd747f9662e35868291d0e03674a5ca9d830cfa..54bf4f73be589a353f1c9ca9b87b78d1f175cf58 100644 --- a/kafka/templates/job-config.yaml +++ b/kafka/templates/job-config.yaml @@ -10,13 +10,14 @@ metadata: heritage: "{{ .Release.Service }}" release: "{{ .Release.Name }}" spec: + backoffLimit: {{ .Values.configJob.backoffLimit }} template: metadata: labels: app: {{ template "kafka.fullname" . 
}} release: "{{ .Release.Name }}" spec: - restartPolicy: Never + restartPolicy: OnFailure volumes: - name: config-volume configMap: diff --git a/kafka/templates/service-brokers-external.yaml b/kafka/templates/service-brokers-external.yaml index e8084f83351ac882b283cd9c1c73fc852d7030c8..d1e2bfdea9452ea8f39fd16f60dab778250f9b3a 100644 --- a/kafka/templates/service-brokers-external.yaml +++ b/kafka/templates/service-brokers-external.yaml @@ -8,17 +8,27 @@ {{- $externalListenerPort := add $root.Values.external.firstListenerPort $i }} {{- $responsiblePod := printf "%s-%d" (printf "%s" $fullName) $i }} {{- $distinctPrefix := printf "%s-%d" $dnsPrefix $i }} + {{- $loadBalancerIPLen := len $root.Values.external.loadBalancerIP }} + --- apiVersion: v1 kind: Service metadata: annotations: {{- if $root.Values.external.distinct }} + {{- if $root.Values.external.dns.useInternal }} dns.alpha.kubernetes.io/internal: "{{ $distinctPrefix }}.{{ $root.Values.external.domain }}" + {{- end }} + {{- if $root.Values.external.dns.useExternal }} external-dns.alpha.kubernetes.io/hostname: "{{ $distinctPrefix }}.{{ $root.Values.external.domain }}" + {{- end }} {{- else }} + {{- if $root.Values.external.dns.useInternal }} dns.alpha.kubernetes.io/internal: "{{ $dnsPrefix }}.{{ $root.Values.external.domain }}" + {{- end }} + {{- if $root.Values.external.dns.useExternal }} external-dns.alpha.kubernetes.io/hostname: "{{ $dnsPrefix }}.{{ $root.Values.external.domain }}" + {{- end }} {{- end }} {{- if $root.Values.external.annotations }} {{ toYaml $root.Values.external.annotations | indent 4 }} @@ -34,7 +44,7 @@ spec: type: {{ $root.Values.external.type }} ports: - name: external-broker - {{- if eq $root.Values.external.type "LoadBalancer" }} + {{- if and (eq $root.Values.external.type "LoadBalancer") (not $root.Values.external.distinct) }} port: {{ $externalListenerPort }} {{- else }} port: {{ $servicePort }} @@ -44,9 +54,12 @@ spec: nodePort: {{ $externalListenerPort }} {{- end }} protocol: TCP + 
{{- if and (eq $root.Values.external.type "LoadBalancer") (eq $loadBalancerIPLen $replicas) }} + loadBalancerIP: {{ index $root.Values.external.loadBalancerIP $i }} + {{- end }} selector: app: {{ include "kafka.name" $root }} release: {{ $root.Release.Name }} - pod: {{ $responsiblePod | quote }} + statefulset.kubernetes.io/pod-name: {{ $responsiblePod | quote }} {{- end }} {{- end }} diff --git a/kafka/templates/servicemonitors.yaml b/kafka/templates/servicemonitors.yaml index 92eb1252e864d4f7ea55fee6e0a61b5118bddb9e..2bff00c7dfef1629e26e36e5a89040255fdbf4ea 100644 --- a/kafka/templates/servicemonitors.yaml +++ b/kafka/templates/servicemonitors.yaml @@ -14,6 +14,9 @@ spec: endpoints: - port: jmx-exporter interval: {{ .Values.prometheus.jmx.interval }} + {{- if .Values.prometheus.jmx.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.jmx.scrapeTimeout }} + {{- end }} namespaceSelector: any: true {{ end }} @@ -34,6 +37,9 @@ spec: endpoints: - port: kafka-exporter interval: {{ .Values.prometheus.kafka.interval }} + {{- if .Values.prometheus.kafka.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.kafka.scrapeTimeout }} + {{- end }} namespaceSelector: any: true {{ end }} diff --git a/kafka/templates/statefulset.yaml b/kafka/templates/statefulset.yaml index acbdc060c36bb405261e9695c662d631f127f79b..e92157fb92dab6bbf4c870aed1702cabfe825da8 100644 --- a/kafka/templates/statefulset.yaml +++ b/kafka/templates/statefulset.yaml @@ -31,29 +31,6 @@ spec: {{- if .Values.schedulerName }} schedulerName: "{{ .Values.schedulerName }}" {{- end }} -{{- if .Values.rbac.enabled }} - serviceAccountName: {{ .Release.Name }} -{{- end }} - {{- if .Values.external.enabled }} - ## ref: https://github.com/Yolean/kubernetes-kafka/blob/master/kafka/50kafka.yml - initContainers: - - name: init-ext - image: "{{ .Values.external.init.image }}:{{ .Values.external.init.imageTag }}" - imagePullPolicy: "{{ .Values.external.init.imagePullPolicy }}" - command: - - sh - - -euxc - - "kubectl 
label pods ${POD_NAME} --namespace ${POD_NAMESPACE} pod=${POD_NAME} --overwrite" - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- end }} {{- if .Values.tolerations }} tolerations: {{ toYaml .Values.tolerations | indent 8 }} @@ -154,10 +131,14 @@ spec: - name: JMX_PORT value: "{{ .Values.jmx.port }}" {{- end }} - - name: POD_IP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE valueFrom: fieldRef: - fieldPath: status.podIP + fieldPath: metadata.namespace - name: KAFKA_HEAP_OPTS value: {{ .Values.kafkaHeapOptions }} {{- if not (hasKey .Values.configurationOverrides "zookeeper.connect") }} @@ -187,6 +172,10 @@ spec: {{- end }} {{- end }} {{- end }} + {{- range $key, $value := .Values.envOverrides }} + - name: {{ printf "%s" $key | replace "." "_" | upper | quote }} + value: {{ $value | quote }} + {{- end }} # This is required because the Downward API does not yet support identification of # pod numbering in statefulsets. Thus, we are required to specify a command which # allows us to extract the pod ID for usage as the Kafka Broker ID. @@ -196,18 +185,24 @@ spec: - -exc - | unset KAFKA_PORT && \ - export KAFKA_BROKER_ID=${HOSTNAME##*-} && \ - export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${POD_IP}:9092{{ if kindIs "string" $advertisedListenersOverride }}{{ printf ",%s" $advertisedListenersOverride }}{{ end }} && \ + export KAFKA_BROKER_ID=${POD_NAME##*-} && \ + export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${POD_NAME}.{{ include "kafka.fullname" . 
}}-headless.${POD_NAMESPACE}:9092{{ if kindIs "string" $advertisedListenersOverride }}{{ printf ",%s" $advertisedListenersOverride }}{{ end }} && \ exec /etc/confluent/docker/run volumeMounts: - name: datadir mountPath: {{ .Values.persistence.mountPath | quote }} {{- range $secret := .Values.secrets }} {{- if $secret.mountPath }} - {{- range $key := $secret.keys }} + {{- if $secret.keys }} + {{- range $key := $secret.keys }} - name: {{ include "kafka.fullname" $ }}-{{ $secret.name }} mountPath: {{ $secret.mountPath }}/{{ $key }} subPath: {{ $key }} + readOnly: true + {{- end }} + {{- else }} + - name: {{ include "kafka.fullname" $ }}-{{ $secret.name }} + mountPath: {{ $secret.mountPath }} readOnly: true {{- end }} {{- end }} diff --git a/kafka/values.yaml b/kafka/values.yaml index c3305a2caf39c1586a58e6d908b4c7acd516ae7d..20493d4047f37ba0741e8fcc0c6fa54847fa3f84 100644 --- a/kafka/values.yaml +++ b/kafka/values.yaml @@ -9,8 +9,7 @@ replicas: 3 image: "confluentinc/cp-kafka" ## The kafka image tag -#imageTag: "4.1.2-2" -imageTag: "5.0.0-2" +imageTag: "5.0.1" # Confluent image for Kafka 2.0.0 ## Specify a imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images @@ -36,11 +35,6 @@ updateStrategy: ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy podManagementPolicy: OrderedReady -## If RBAC is enabled on the cluster, the Kafka init container needs a service account -## with permissisions sufficient to apply pod labels -rbac: - enabled: false - ## Useful if using any custom authorizer ## Pass in some secrets to use (if required) # secrets: @@ -56,9 +50,6 @@ rbac: # mountPath: /opt/zookeeper/secret -## The name of the storage class which the cluster should use. -# storageClass: default - ## The subpath within the Kafka container's PV where logs will be stored. 
## This is combined with `persistence.mountPath`, to create, by default: /opt/kafka/data/logs logSubPath: "logs" @@ -139,13 +130,16 @@ external: type: NodePort # annotations: # service.beta.kubernetes.io/openstack-internal-load-balancer: "true" - + dns: + useInternal: false + useExternal: true # create an A record for each statefulset pod distinct: false enabled: false servicePort: 19092 firstListenerPort: 31090 domain: cluster.local + loadBalancerIP: [] init: image: "lwolf/kubectl_deployer" imageTag: "0.4" @@ -160,6 +154,7 @@ podAnnotations: {} ## configurationOverrides: "offsets.topic.replication.factor": 3 + "confluent.support.metrics.enable": false # Disables confluent metric submission # "auto.leader.rebalance.enable": true # "auto.create.topics.enable": true # "controlled.shutdown.enable": true @@ -176,6 +171,11 @@ configurationOverrides: # "listener.security.protocol.map": |- # PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT +## set extra ENVs +# key: "value" +envOverrides: {} + + ## A collection of additional ports to expose on brokers (formatted as normal containerPort yaml) # Useful when the image exposes metrics (like prometheus, etc.) 
through a javaagent instead of a sidecar additionalPorts: {} @@ -248,8 +248,7 @@ jmx: prometheus: ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics jmx: - #enabled: false - enabled: true + enabled: true # default: false ## The image to use for the metrics collector image: solsson/kafka-prometheus-jmx-exporter@sha256 @@ -260,6 +259,9 @@ prometheus: ## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator interval: 10s + ## Timeout at which Prometheus timeouts scrape run, note: only used by Prometheus Operator + scrapeTimeout: 10s + ## Port jmx-exporter exposes Prometheus format metrics to scrape port: 5556 @@ -273,8 +275,7 @@ prometheus: ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter kafka: - #enabled: false - enabled: true + enabled: true # default: false ## The image to use for the metrics collector image: danielqsj/kafka-exporter @@ -285,6 +286,9 @@ prometheus: ## Interval at which Prometheus scrapes metrics, note: only used by Prometheus Operator interval: 10s + ## Timeout at which Prometheus timeouts scrape run, note: only used by Prometheus Operator + scrapeTimeout: 10s + ## Port kafka-exporter exposes for Prometheus to scrape metrics port: 9308 @@ -297,6 +301,39 @@ prometheus: cpu: 100m memory: 100Mi + # Tolerations for nodes that have taints on them. + # Useful if you want to dedicate nodes to just run kafka-exporter + # https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + # tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Pod scheduling preferences (by default keep pods within a release on separate nodes). 
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## By default we don't set affinity + affinity: {} + ## Alternatively, this typical example defines: + ## affinity (to encourage Kafka Exporter pods to be collocated with Kafka pods) + # affinity: + # podAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 50 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app + # operator: In + # values: + # - kafka + # topologyKey: "kubernetes.io/hostname" + + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + nodeSelector: {} + operator: ## Are you using Prometheus Operator? enabled: false @@ -311,6 +348,13 @@ prometheus: selector: prometheus: kube-prometheus +## Kafka Config job configuration +## +configJob: + ## Specify the number of retries before considering kafka-config job as failed. + ## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy + backoffLimit: 6 + ## Topic creation and configuration. ## The job will be run on a deployment only when the config has been changed. ## - If 'partitions' and 'replicationFactor' are specified we create the topic (with --if-not-exists.) 
@@ -339,6 +383,17 @@ topics: [] # replicationFactor: 3 # defaultConfig: "segment.bytes,segment.ms" # config: "cleanup.policy=compact,delete.retention.ms=604800000" + # - name: myAclTopicPartitions + # partitions: 8 + # acls: + # - user: read + # operations: [ Read ] + # - user: read_and_write + # operations: + # - Read + # - Write + # - user: all + # operations: [ All ] # ------------------------------------------------------------------------------ # Zookeeper: @@ -349,15 +404,8 @@ zookeeper: ## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper enabled: true - ## As weighted quorums are not supported, it is imperative that an odd number of replicas - ## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7. - ## - ## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set - replicaCount: 3 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7) - ## Configure Zookeeper resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - #resources: ~ resources: limits: cpu: 200m @@ -365,17 +413,15 @@ zookeeper: requests: cpu: 100m memory: 128Mi - + ## Environmental variables to set in Zookeeper env: ## The JVM heap size to allocate to Zookeeper ZK_HEAP_SIZE: "500M" persistence: - #enabled: false - enabled: true + enabled: true # default: false ## The amount of PV storage allocated to each Zookeeper pod in the statefulset - # size: "2Gi" size: "2Gi" ## Specify a Zookeeper imagePullPolicy @@ -397,41 +443,3 @@ zookeeper: # labelSelector: # matchLabels: # release: zookeeper - -exporters: - - jmx: - #enabled: false - enabled: true - config: - lowercaseOutputName: false - ## ref: https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml - env: {} - #resources: {} - resources: - limits: - cpu: 150m - memory: 200Mi - requests: - cpu: 100m - memory: 128Mi - - zookeeper: - ## refs: - ## - 
https://github.com/carlpett/zookeeper_exporter - ## - https://hub.docker.com/r/josdotso/zookeeper-exporter/ - ## - https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics - #enabled: false - enabled: true - config: - logLevel: info - resetOnScrape: "true" - env: {} - #resources: {} - resources: - limits: - cpu: 200m - memory: 200Mi - requests: - cpu: 50m - memory: 100Mi diff --git a/prometheus/bogi_test_release.tx b/prometheus/bogi_test_release.tx new file mode 100644 index 0000000000000000000000000000000000000000..32b7e7ae806837a21c8f459eaeb91133715392e5 Binary files /dev/null and b/prometheus/bogi_test_release.tx differ