From 52625aee68c5ce97eca86bed64128d17c2aa9199 Mon Sep 17 00:00:00 2001 From: Jose Armesto Date: Mon, 6 Nov 2023 16:14:15 +0100 Subject: [PATCH] Use vendir to generate chart (#104) * WIP * Generate cilium chart using our fork and vendir * Apply more changes from our fork * More changes from our fork * Target fork main branch * Remove sha hashes from images in values file * Added instructions on how to upgrade the app --- .gitignore | 1 + CHANGELOG.md | 4 + Makefile.gen.app.mk | 11 +- README.md | 62 +- helm/cilium/Chart.yaml | 2 +- helm/cilium/README.md | 136 +- .../hubble-l7-http-metrics-by-workload.json | 1170 ++++++++ helm/cilium/files/nodeinit/poststart-eni.bash | 21 - .../tls-certmanager/_helpers.tpl | 9 - .../clustermesh-apiserver-issuer.yaml | 21 - helm/cilium/templates/hubble-ui/_nginx.tpl | 2 +- .../hubble/tls-certmanager/_helpers.tpl | 9 - .../hubble/tls-certmanager/hubble-issuer.yaml | 21 - helm/cilium/values.yaml | 11 +- helm/cilium/values.yaml.tmpl | 2553 +++++++++++++++++ vendir.lock.yml | 15 + vendir.yml | 16 + 17 files changed, 3868 insertions(+), 196 deletions(-) create mode 100644 .gitignore create mode 100644 helm/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json delete mode 100644 helm/cilium/files/nodeinit/poststart-eni.bash delete mode 100644 helm/cilium/templates/clustermesh-apiserver/tls-certmanager/_helpers.tpl delete mode 100644 helm/cilium/templates/clustermesh-apiserver/tls-certmanager/clustermesh-apiserver-issuer.yaml delete mode 100644 helm/cilium/templates/hubble/tls-certmanager/_helpers.tpl delete mode 100644 helm/cilium/templates/hubble/tls-certmanager/hubble-issuer.yaml create mode 100644 helm/cilium/values.yaml.tmpl create mode 100644 vendir.lock.yml create mode 100644 vendir.yml diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..22d0d82f --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +vendor diff --git a/CHANGELOG.md b/CHANGELOG.md index 0aff0776..6622367c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed + +- Generate cilium chart using our fork and `vendir`. + ## [0.16.0] - 2023-10-25 ### Changed diff --git a/Makefile.gen.app.mk b/Makefile.gen.app.mk index 3787b357..d7f7e2b6 100644 --- a/Makefile.gen.app.mk +++ b/Makefile.gen.app.mk @@ -26,16 +26,9 @@ lint-chart: check-env ## Runs ct against the default chart. update-chart: check-env ## Sync chart with upstream repo. @echo "====> $@" + rm -rf ./helm/$(APPLICATION) vendir sync - $(MAKE) update-deps - -update-deps: check-env $(DEPS) ## Update Helm dependencies. - cd $(APPLICATION) && helm dependency update - -$(DEPS): check-env ## Update main Chart.yaml with new local dep versions. - dep_name=$(shell basename $@) && \ - new_version=`$(YQ) .version $(APPLICATION)/charts/$$dep_name/Chart.yaml` && \ - $(YQ) -i e "with(.dependencies[]; select(.name == \"$$dep_name\") | .version = \"$$new_version\")" $(APPLICATION)/Chart.yaml + sed -i 's/@sha256:[a-f0-9]\+//g' ./helm/$(APPLICATION)/values.yaml helm-docs: check-env ## Update $(APPLICATION) README. $(HELM_DOCS) -c $(APPLICATION) -g $(APPLICATION) diff --git a/README.md b/README.md index cab9ce07..94b10348 100644 --- a/README.md +++ b/README.md @@ -3,13 +3,6 @@ # cilium chart Giant Swarm offers a cilium App which can be installed in workload clusters. -Here we define the cilium chart with its templates and default configuration. 
-
-**What is this app?**
-
-**Why did we add it?**
-
-**Who can use it?**
 
 ## Installing
 
 There are several ways to install this app onto a workload cluster.
 
 - [Using our web interface](https://docs.giantswarm.io/ui-api/web/app-platform/#installing-an-app).
 - By creating an [App resource](https://docs.giantswarm.io/ui-api/management-api/crd/apps.application.giantswarm.io/) in the management cluster as explained in [Getting started with App Platform](https://docs.giantswarm.io/app-platform/getting-started/).
 
-## Configuring
-
-### values.yaml
-
-**This is an example of a values file you could upload using our web interface.**
-
-```yaml
-# values.yaml
-
-```
-
-### Sample App CR and ConfigMap for the management cluster
-
-If you have access to the Kubernetes API on the management cluster, you could create
-the App CR and ConfigMap directly.
+## Upgrading cilium version
 
-Here is an example that would install the app to
-workload cluster `abc12`:
-
-```yaml
-# appCR.yaml
+The contents of the `helm` folder are generated by the `make update-chart` target.
+This target uses [`vendir`](https://carvel.dev/vendir/) to fetch the Helm chart from [the fork of the cilium repository that we maintain](https://github.com/giantswarm/cilium-upstream).
+Currently, the fork's `main` branch contains the upstream `v1.13` branch with our custom changes on top.
+To upgrade this cilium-app to a newer version of cilium, prepare the fork first:
+create a new branch on the fork based off the upstream branch of the version you want to upgrade to. For example, to upgrade to cilium v1.14, create a new branch based off the upstream `v1.14` branch.
+Then apply our custom changes on top of that new branch. You can use `git cherry-pick` for that, for example:
 ```
-
-```yaml
-# user-values-configmap.yaml
-
+git cherry-pick a4b22dee87ba3663f967f6dd6d8e666c849c742d^..25c449534cc325a5798fc7c839b8ac33591b3516
 ```
 
-See our [full reference on how to configure apps](https://docs.giantswarm.io/app-platform/app-configuration/) for more details.
-
-## Compatibility
-
-This app has been tested to work with the following workload cluster release versions:
-
-- _add release version_
-
-## Limitations
-
-Some apps have restrictions on how they can be deployed.
-Not following these limitations will most likely result in a broken deployment.
-
-- _add limitation_
-
-## Credit
+Next, update the `vendir` configuration in `vendir.yml` in this repository to point at the new branch, and run `APPLICATION=cilium make update-chart`.
+With the generated changes, open a pull request so that everyone can review what will change in the cilium chart.
+If further customizations are needed, keep adding commits to the new branch and re-run `make update-chart` to refresh the generated files.
 
-- {APP HELM REPOSITORY}
+Once we are happy with the changes, merge the new branch into the fork's `main` branch. After merging, `main` in the fork should contain cilium `v1.14` with our customizations on top.
+Then update the pull request to point `vendir.yml` at the fork's `main` branch again, and merge it.
+Finally, create a new release of this cilium-app.
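For reference, the upgrade workflow above revolves around `vendir.yml`. The following is a minimal sketch of such a configuration, assuming the chart is taken from the upstream path `install/kubernetes/cilium`; the `ref`, the paths, and the use of `newRootPath` are illustrative assumptions, not the exact contents of the file this patch adds:

```yaml
# vendir.yml -- illustrative sketch only, not this repository's actual file
apiVersion: vendir.k14s.io/v1alpha1
kind: Config
directories:
  # sync into ./helm/cilium, the directory that `make update-chart` wipes first
  - path: helm
    contents:
      - path: cilium
        git:
          # our maintained fork of upstream cilium
          url: https://github.com/giantswarm/cilium-upstream
          # branch to track: point this at the new vX.Y-based branch while
          # preparing an upgrade, then back to main once the fork is merged
          ref: main
        # keep only the Helm chart out of the upstream repository (assumed path)
        includePaths:
          - install/kubernetes/cilium/**/*
        # re-root so the chart files land directly under helm/cilium
        newRootPath: install/kubernetes/cilium
```

`vendir sync` pins the resolved commit in `vendir.lock.yml` (also added by this patch), and `update-chart` afterwards strips `@sha256:` digest pins from the generated `values.yaml` with `sed`, as the Makefile hunk above shows.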
diff --git a/helm/cilium/Chart.yaml b/helm/cilium/Chart.yaml index a9bb6ab3..9e9d007a 100644 --- a/helm/cilium/Chart.yaml +++ b/helm/cilium/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cilium displayName: Cilium home: https://cilium.io/ -version: 0.16.0 +version: 1.13.6 appVersion: 1.13.6 kubeVersion: ">= 1.16.0-0" icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.13/Documentation/images/logo-solo.svg diff --git a/helm/cilium/README.md b/helm/cilium/README.md index 33b69299..8cc75c33 100644 --- a/helm/cilium/README.md +++ b/helm/cilium/README.md @@ -1,6 +1,6 @@ # cilium -![Version: 1.13.0](https://img.shields.io/badge/Version-1.13.0-informational?style=flat-square) ![AppVersion: 1.13.0](https://img.shields.io/badge/AppVersion-1.13.0-informational?style=flat-square) +![Version: 1.13.6](https://img.shields.io/badge/Version-1.13.6-informational?style=flat-square) ![AppVersion: 1.13.6](https://img.shields.io/badge/AppVersion-1.13.6-informational?style=flat-square) Cilium is open source software for providing and transparently securing network connectivity and loadbalancing between application workloads such as @@ -71,13 +71,12 @@ contributors across the globe, there is almost always someone available to help. | bgp.enabled | bool | `false` | Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside cilium-agent and cilium-operator | | bgpControlPlane | object | `{"enabled":false}` | This feature set enables virtual BGP routers to be created via CiliumBGPPeeringPolicy CRDs. | | bgpControlPlane.enabled | bool | `false` | Enables the BGP control plane. | -| bpf.clockProbe | bool | `false` | Enable BPF clock source probing for more efficient tick retrieval. | | bpf.ctAnyMax | int | `262144` | Configure the maximum number of entries for the non-TCP connection tracking table. | | bpf.ctTcpMax | int | `524288` | Configure the maximum number of entries in the TCP connection tracking table. | | bpf.hostLegacyRouting | bool | `false` | Configure whether direct routing mode should route traffic via host stack (true) or directly and more efficiently out of BPF (false) if the kernel supports it. The latter has the implication that it will also bypass netfilter in the host namespace. | | bpf.lbExternalClusterIP | bool | `false` | Allow cluster external access to ClusterIP services. | | bpf.lbMapMax | int | `65536` | Configure the maximum number of service entries in the load balancer maps. | -| bpf.mapDynamicSizeRatio | float64 | `0.0025` | Configure auto-sizing for all BPF maps based on available memory. ref: https://docs.cilium.io/en/stable/concepts/ebpf/maps/#ebpf-maps | +| bpf.mapDynamicSizeRatio | float64 | `0.0025` | Configure auto-sizing for all BPF maps based on available memory. ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ | | bpf.masquerade | bool | `false` | Enable native IP masquerade support in eBPF | | bpf.monitorAggregation | string | `"medium"` | Configure the level of aggregation for monitor notifications. Valid options are none, low, medium, maximum. | | bpf.monitorFlags | string | `"all"` | Configure which TCP flags trigger notifications when seen for the first time in a connection. | @@ -89,9 +88,12 @@ contributors across the globe, there is almost always someone available to help. | bpf.root | string | `"/sys/fs/bpf"` | Configure the mount point for the BPF filesystem | | bpf.tproxy | bool | `false` | Configure the eBPF-based TPROXY to reduce reliance on iptables rules for implementing Layer 7 policy. 
| | bpf.vlanBypass | list | `[]` | Configure explicitly allowed VLAN id's for bpf logic bypass. [0] will allow all VLAN id's without any filtering. | -| certgen | object | `{"image":{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | +| bpfClockProbe | bool | `false` | Enable BPF clock source probing for more efficient tick retrieval. | +| certgen | object | `{"extraVolumeMounts":[],"extraVolumes":[],"image":{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/certgen","tag":"v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"},"podLabels":{},"tolerations":[],"ttlSecondsAfterFinished":1800}` | Configure certificate generation for Hubble integration. If hubble.tls.auto.method=cronJob, these values are used for the Kubernetes CronJob which will be scheduled regularly to (re)generate any certificates not provided manually. | +| certgen.extraVolumeMounts | list | `[]` | Additional certgen volumeMounts. | +| certgen.extraVolumes | list | `[]` | Additional certgen volumes. | | certgen.podLabels | object | `{}` | Labels to be added to hubble-certgen pods | -| certgen.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| certgen.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | certgen.ttlSecondsAfterFinished | int | `1800` | Seconds after which the completed job pod will be deleted | | cgroup | object | `{"autoMount":{"enabled":true,"resources":{}},"hostRoot":"/run/cilium/cgroupv2"}` | Configure cgroup related configuration | | cgroup.autoMount.enabled | bool | `true` | Enable auto mount of cgroup2 filesystem. When `autoMount` is enabled, cgroup2 filesystem is mounted at `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. If users disable `autoMount`, it's expected that users have mounted cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the volume will be mounted inside the cilium agent pod at the same path. | @@ -105,23 +107,28 @@ contributors across the globe, there is almost always someone available to help. | clustermesh.apiserver.etcd.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/coreos/etcd","tag":"v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3"}` | Clustermesh API server etcd image. | | clustermesh.apiserver.etcd.init.resources | object | `{}` | Specifies the resources for etcd init container in the apiserver | | clustermesh.apiserver.etcd.resources | object | `{}` | Specifies the resources for etcd container in the apiserver | +| clustermesh.apiserver.etcd.securityContext | object | `{}` | Security context to be added to clustermesh-apiserver etcd containers | | clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. 
| -| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.13.0","useDigest":false}` | Clustermesh API server image. | -| clustermesh.apiserver.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. | +| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. | +| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.13.6","useDigest":false}` | Clustermesh API server image. | +| clustermesh.apiserver.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | clustermesh.apiserver.podAnnotations | object | `{}` | Annotations to be added to clustermesh-apiserver pods | | clustermesh.apiserver.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | clustermesh.apiserver.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | | clustermesh.apiserver.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | | clustermesh.apiserver.podLabels | object | `{}` | Labels to be added to clustermesh-apiserver pods | +| clustermesh.apiserver.podSecurityContext | object | `{}` | Security context to be added to clustermesh-apiserver pods | | clustermesh.apiserver.priorityClassName | string | `""` | The priority class to use for clustermesh-apiserver | | clustermesh.apiserver.replicas | int | `1` | Number of replicas run for the clustermesh-apiserver deployment. | | clustermesh.apiserver.resources | object | `{}` | Resource requests and limits for the clustermesh-apiserver | +| clustermesh.apiserver.securityContext | object | `{}` | Security context to be added to clustermesh-apiserver containers | | clustermesh.apiserver.service.annotations | object | `{}` | Annotations for the clustermesh-apiserver For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 | -| clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. | +| clustermesh.apiserver.service.nodePort | int | `32379` | Optional port to use as the node port for apiserver access. WARNING: make sure to configure a different NodePort in each cluster if kube-proxy replacement is enabled, as Cilium is currently affected by a known bug (#24692) when NodePorts are handled by the KPR implementation. If a service with the same NodePort exists both in the local and the remote cluster, all traffic originating from inside the cluster and targeting the corresponding NodePort will be redirected to a local backend, regardless of whether the destination node belongs to the local or the remote cluster. | | clustermesh.apiserver.service.type | string | `"NodePort"` | The type of service used for apiserver access. 
| | clustermesh.apiserver.tls.admin | object | `{"cert":"","key":""}` | base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. Used if 'auto' is not enabled. | | clustermesh.apiserver.tls.auto | object | `{"certManagerIssuerRef":{},"certValidityDuration":1095,"enabled":true,"method":"helm"}` | Configure automatic TLS certificates generation. A Kubernetes CronJob is used the generate any certificates not provided by the user at installation time. | -| clustermesh.apiserver.tls.auto.certManagerIssuerRef | object | `{}` | certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. If not specified, a CA issuer will be created. | +| clustermesh.apiserver.tls.auto.certManagerIssuerRef | object | `{}` | certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. | | clustermesh.apiserver.tls.auto.certValidityDuration | int | `1095` | Generated certificates validity duration in days. | | clustermesh.apiserver.tls.auto.enabled | bool | `true` | When set to true, automatically generate a CA and certificates to enable mTLS between clustermesh-apiserver and external workload instances. If set to false, the certs to be provided by setting appropriate values below. | | clustermesh.apiserver.tls.ca | object | `{"cert":"","key":""}` | base64 encoded PEM values for the ExternalWorkload CA certificate and private key. | @@ -132,7 +139,7 @@ contributors across the globe, there is almost always someone available to help. | clustermesh.apiserver.tls.server | object | `{"cert":"","extraDnsNames":[],"extraIpAddresses":[],"key":""}` | base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. Used if 'auto' is not enabled. | | clustermesh.apiserver.tls.server.extraDnsNames | list | `[]` | Extra DNS names added to certificate when it's auto generated | | clustermesh.apiserver.tls.server.extraIpAddresses | list | `[]` | Extra IP addresses added to certificate when it's auto generated | -| clustermesh.apiserver.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| clustermesh.apiserver.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | clustermesh.apiserver.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for clustermesh-apiserver | | clustermesh.apiserver.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | clustermesh-apiserver update strategy | | clustermesh.config | object | `{"clusters":[],"domain":"mesh.cilium.io","enabled":false}` | Clustermesh explicit configuration. | @@ -150,6 +157,7 @@ contributors across the globe, there is almost always someone available to help. | cni.hostConfDirMountPath | string | `"/host/etc/cni/net.d"` | Configure the path to where the CNI configuration directory is mounted inside the agent pod. | | cni.install | bool | `true` | Install the CNI configuration and binary files into the filesystem. | | cni.logFile | string | `"/var/run/cilium/cilium-cni.log"` | Configure the log file for CNI logging with retention policy of 7 days. Disable CNI file logging by setting this field to empty explicitly. | +| cni.uninstall | bool | `true` | Remove the CNI configuration and binary files on agent shutdown. Enable this if you're removing Cilium from the cluster. 
Disable this to prevent the CNI configuration file from being removed during agent upgrade, which can cause nodes to go unmanageable. | | conntrackGCInterval | string | `"0s"` | Configure how frequently garbage collection should occur for the datapath connection tracking table. | | containerRuntime | object | `{"integration":"none"}` | Configure container runtime specific integration. | | containerRuntime.integration | string | `"none"` | Enables specific integrations for container runtimes. Supported values: - containerd - crio - docker - none - auto (automatically detect the container runtime) | @@ -189,6 +197,7 @@ contributors across the globe, there is almost always someone available to help. | encryption.interface | string | `""` | Deprecated in favor of encryption.ipsec.interface. The interface to use for encrypted traffic. This option is only effective when encryption.type is set to ipsec. | | encryption.ipsec.interface | string | `""` | The interface to use for encrypted traffic. | | encryption.ipsec.keyFile | string | `""` | Name of the key file inside the Kubernetes secret configured via secretName. | +| encryption.ipsec.keyWatcher | bool | `true` | Enable the key watcher. If disabled, a restart of the agent will be necessary on key rotations. | | encryption.ipsec.mountPath | string | `""` | Path to mount the secret inside the Cilium pod. | | encryption.ipsec.secretName | string | `""` | Name of the Kubernetes secret containing the encryption keys. | | encryption.keyFile | string | `"keys"` | Deprecated in favor of encryption.ipsec.keyFile. Name of the key file inside the Kubernetes secret configured via secretName. This option is only effective when encryption.type is set to ipsec. | @@ -212,23 +221,30 @@ contributors across the globe, there is almost always someone available to help. | eni.subnetIDsFilter | list | `[]` | Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. | | eni.subnetTagsFilter | list | `[]` | Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. | | eni.updateEC2AdapterLimitViaAPI | bool | `false` | Update ENI Adapter limits from the EC2 API | +| envoyConfig.enabled | bool | `false` | Enable CiliumEnvoyConfig CRD CiliumEnvoyConfig CRD can also be implicitly enabled by other options. | +| envoyConfig.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. | +| envoyConfig.secretsNamespace.create | bool | `true` | Create secrets namespace for CiliumEnvoyConfig CRDs. | +| envoyConfig.secretsNamespace.name | string | `"cilium-secrets"` | Name of secret namespace which Cilium agents are given read access to. | | etcd.clusterDomain | string | `"cluster.local"` | Cluster domain for cilium-etcd-operator. | | etcd.enabled | bool | `false` | Enable etcd mode for the agent. | | etcd.endpoints | list | `["https://CHANGE-ME:2379"]` | List of etcd endpoints (not needed when using managed=true). 
| | etcd.extraArgs | list | `[]` | Additional cilium-etcd-operator container arguments. | +| etcd.extraVolumeMounts | list | `[]` | Additional cilium-etcd-operator volumeMounts. | +| etcd.extraVolumes | list | `[]` | Additional cilium-etcd-operator volumes. | | etcd.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-etcd-operator","tag":"v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"}` | cilium-etcd-operator image. | | etcd.k8sService | bool | `false` | If etcd is behind a k8s service set this option to true so that Cilium does the service translation automatically without requiring a DNS to be running. | -| etcd.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-etcd-operator pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| etcd.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-etcd-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | etcd.podAnnotations | object | `{}` | Annotations to be added to cilium-etcd-operator pods | | etcd.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | etcd.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | | etcd.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | | etcd.podLabels | object | `{}` | Labels to be added to cilium-etcd-operator pods | +| etcd.podSecurityContext | object | `{}` | Security context to be added to cilium-etcd-operator pods | | etcd.priorityClassName | string | `""` | The priority class to use for cilium-etcd-operator | -| etcd.resources | object | `{}` | cilium-etcd-operator resource limits & requests ref: https://kubernetes.io/docs/user-guide/compute-resources/ | +| etcd.resources | object | `{}` | cilium-etcd-operator resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | etcd.securityContext | object | `{}` | Security context to be added to cilium-etcd-operator pods | | etcd.ssl | bool | `false` | Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if managed=true) | -| etcd.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-etcd-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| etcd.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-etcd-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | etcd.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for cilium-etcd-operator | | etcd.updateStrategy | object | `{"rollingUpdate":{"maxSurge":1,"maxUnavailable":1},"type":"RollingUpdate"}` | cilium-etcd-operator update strategy | | externalIPs.enabled | bool | `false` | Enable ExternalIPs service support. | @@ -254,13 +270,13 @@ contributors across the globe, there is almost always someone available to help. | hostPort.enabled | bool | `false` | Enable hostPort service support. | | hubble.enabled | bool | `true` | Enable Hubble (true by default). 
| | hubble.listenAddress | string | `":4244"` | An additional address for Hubble to listen to. Set this field ":4244" if you are enabling Hubble Relay, as it assumes that Hubble is listening on port 4244. | -| hubble.metrics | object | `{"dashboards":{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null},"enableOpenMetrics":false,"enabled":null,"port":9965,"serviceAnnotations":{},"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Hubble metrics configuration. See https://docs.cilium.io/en/stable/operations/metrics/#hubble-metrics for more comprehensive documentation about Hubble metrics. | +| hubble.metrics | object | `{"dashboards":{"annotations":{},"enabled":false,"label":"grafana_dashboard","labelValue":"1","namespace":null},"enableOpenMetrics":false,"enabled":null,"port":9965,"serviceAnnotations":{},"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Hubble metrics configuration. See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics for more comprehensive documentation about Hubble metrics. | | hubble.metrics.enableOpenMetrics | bool | `false` | Enables exporting hubble metrics in OpenMetrics format. | | hubble.metrics.enabled | string | `nil` | Configures the list of metrics to collect. If empty or null, metrics are disabled. Example: enabled: - dns:query;ignoreAAAA - drop - tcp - flow - icmp - http You can specify the list of metrics from the helm CLI: --set metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" | | hubble.metrics.port | int | `9965` | Configure the port the hubble metric server listens on. | | hubble.metrics.serviceAnnotations | object | `{}` | Annotations to be added to hubble-metrics service. | | hubble.metrics.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor hubble | -| hubble.metrics.serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resources for Prometheus Operator. This requires the prometheus CRDs to be available. ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | +| hubble.metrics.serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resources for Prometheus Operator. This requires the prometheus CRDs to be available. ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | | hubble.metrics.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. | | hubble.metrics.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor hubble | | hubble.metrics.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor hubble | @@ -273,10 +289,10 @@ contributors across the globe, there is almost always someone available to help. | hubble.relay.dialTimeout | string | `nil` | Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). 
| | hubble.relay.enabled | bool | `false` | Enable Hubble Relay (requires hubble.enabled=true) | | hubble.relay.extraEnv | list | `[]` | Additional hubble-relay environment variables. | -| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.13.0","useDigest":false}` | Hubble-relay container image. | +| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.13.6","useDigest":false}` | Hubble-relay container image. | | hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. | | hubble.relay.listenPort | string | `"4245"` | Port to listen to. | -| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | hubble.relay.podAnnotations | object | `{}` | Annotations to be added to hubble-relay pods | | hubble.relay.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | hubble.relay.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | @@ -288,7 +304,7 @@ contributors across the globe, there is almost always someone available to help. | hubble.relay.priorityClassName | string | `""` | The priority class to use for hubble-relay | | hubble.relay.prometheus | object | `{"enabled":false,"port":9966,"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":null}}` | Enable prometheus metrics for hubble-relay on the configured port at /metrics | | hubble.relay.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor hubble-relay | -| hubble.relay.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | +| hubble.relay.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | | hubble.relay.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. | | hubble.relay.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor hubble-relay | | hubble.relay.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor hubble-relay | @@ -309,18 +325,18 @@ contributors across the globe, there is almost always someone available to help. 
| hubble.relay.tls.server | object | `{"cert":"","enabled":false,"extraDnsNames":[],"extraIpAddresses":[],"key":""}` | base64 encoded PEM values for the hubble-relay server certificate and private key | | hubble.relay.tls.server.extraDnsNames | list | `[]` | extra DNS names added to certificate when its auto gen | | hubble.relay.tls.server.extraIpAddresses | list | `[]` | extra IP addresses added to certificate when its auto gen | -| hubble.relay.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| hubble.relay.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | hubble.relay.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for hubble-relay | | hubble.relay.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-relay update strategy | | hubble.skipUnknownCGroupIDs | bool | `true` | Skip Hubble events with unknown cgroup ids | | hubble.socketPath | string | `"/var/run/cilium/hubble.sock"` | Unix domain socket path to listen to when Hubble is enabled. | | hubble.tls | object | `{"auto":{"certManagerIssuerRef":{},"certValidityDuration":1095,"enabled":true,"method":"helm","schedule":"0 0 1 */4 *"},"ca":{"cert":"","key":""},"enabled":true,"server":{"cert":"","extraDnsNames":[],"extraIpAddresses":[],"key":""}}` | TLS configuration for Hubble | | hubble.tls.auto | object | `{"certManagerIssuerRef":{},"certValidityDuration":1095,"enabled":true,"method":"helm","schedule":"0 0 1 */4 *"}` | Configure automatic TLS certificates generation. | -| hubble.tls.auto.certManagerIssuerRef | object | `{}` | certmanager issuer used when hubble.tls.auto.method=certmanager. If not specified, a CA issuer will be created. | +| hubble.tls.auto.certManagerIssuerRef | object | `{}` | certmanager issuer used when hubble.tls.auto.method=certmanager. | | hubble.tls.auto.certValidityDuration | int | `1095` | Generated certificates validity duration in days. | | hubble.tls.auto.enabled | bool | `true` | Auto-generate certificates. When set to true, automatically generate a CA and certificates to enable mTLS between Hubble server and Hubble Relay instances. If set to false, the certs for Hubble server need to be provided by setting appropriate values below. | | hubble.tls.auto.method | string | `"helm"` | Set the method to auto-generate certificates. Supported values: - helm: This method uses Helm to generate all certificates. - cronJob: This method uses a Kubernetes CronJob the generate any certificates not provided by the user at installation time. - certmanager: This method use cert-manager to generate & rotate certificates. | -| hubble.tls.auto.schedule | string | `"0 0 1 */4 *"` | Schedule for certificates regeneration (regardless of their expiration date). Only used if method is "cronJob". If nil, then no recurring job will be created. Instead, only the one-shot job is deployed to generate the certificates at installation time. Defaults to midnight of the first day of every fourth month. For syntax, see https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule | +| hubble.tls.auto.schedule | string | `"0 0 1 */4 *"` | Schedule for certificates regeneration (regardless of their expiration date). Only used if method is "cronJob". If nil, then no recurring job will be created. 
Instead, only the one-shot job is deployed to generate the certificates at installation time. Defaults to midnight of the first day of every fourth month. For syntax, see https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax | | hubble.tls.ca | object | `{"cert":"","key":""}` | Deprecated in favor of tls.ca. To be removed in 1.13. base64 encoded PEM values for the Hubble CA certificate and private key. | | hubble.tls.ca.cert | string | `""` | Deprecated in favor of tls.ca.cert. To be removed in 1.13. | | hubble.tls.ca.key | string | `""` | Deprecated in favor of tls.ca.key. To be removed in 1.13. The CA private key (optional). If it is provided, then it will be used by hubble.tls.auto.method=cronJob to generate all other certificates. Otherwise, a ephemeral CA is generated if hubble.tls.auto.enabled=true. | @@ -330,15 +346,22 @@ contributors across the globe, there is almost always someone available to help. | hubble.tls.server.extraIpAddresses | list | `[]` | Extra IP addresses added to certificate when it's auto generated | | hubble.ui.affinity | object | `{}` | Affinity for hubble-ui | | hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. | -| hubble.ui.backend.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.10.0@sha256:cc5e2730b3be6f117b22176e25875f2308834ced7c3aa34fb598aa87a2c0a6a4"}` | Hubble-ui backend image. | +| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. | +| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. | +| hubble.ui.backend.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.11.0@sha256:14c04d11f78da5c363f88592abae8d2ecee3cbe009f443ef11df6ac5f692d839"}` | Hubble-ui backend image. | | hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. | +| hubble.ui.backend.securityContext | object | `{}` | Hubble-ui backend security context. | +| hubble.ui.baseUrl | string | `"/"` | Defines base url prefix for all hubble-ui http requests. It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. Trailing `/` is required for custom path, ex. `/service-map/` | | hubble.ui.enabled | bool | `false` | Whether to enable the Hubble UI. | | hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. | -| hubble.ui.frontend.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.10.0@sha256:118ad2fcfd07fabcae4dde35ec88d33564c9ca7abe520aa45b1eb13ba36c6e0a"}` | Hubble-ui frontend image. | +| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. | +| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. | +| hubble.ui.frontend.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.11.0@sha256:bcb369c47cada2d4257d63d3749f7f87c91dde32e010b223597306de95d1ecc8"}` | Hubble-ui frontend image. | | hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. | +| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. 
| | hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 | | hubble.ui.ingress | object | `{"annotations":{},"className":"","enabled":false,"hosts":["chart-example.local"],"tls":[]}` | hubble-ui ingress configuration. | -| hubble.ui.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| hubble.ui.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | hubble.ui.podAnnotations | object | `{}` | Annotations to be added to hubble-ui pods | | hubble.ui.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | hubble.ui.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | @@ -349,18 +372,19 @@ contributors across the globe, there is almost always someone available to help. | hubble.ui.rollOutPods | bool | `false` | Roll out Hubble-ui pods automatically when configmap is updated. | | hubble.ui.securityContext | object | `{"enabled":true,"fsGroup":1001,"runAsGroup":1001,"runAsUser":1001}` | Security context to be added to Hubble UI pods | | hubble.ui.securityContext.enabled | bool | `true` | Deprecated in favor of hubble.ui.securityContext. Whether to set the security context on the Hubble UI pods. | -| hubble.ui.service | object | `{"nodePort":31235,"type":"ClusterIP"}` | hubble-ui service configuration. | +| hubble.ui.service | object | `{"annotations":{},"nodePort":31235,"type":"ClusterIP"}` | hubble-ui service configuration. | +| hubble.ui.service.annotations | object | `{}` | Annotations to be added for the Hubble UI service | | hubble.ui.service.nodePort | int | `31235` | - The port to use when the service type is set to NodePort. | | hubble.ui.service.type | string | `"ClusterIP"` | - The type of service used for Hubble UI access, either ClusterIP or NodePort. | | hubble.ui.standalone.enabled | bool | `false` | When true, it will allow installing the Hubble UI only, without checking dependencies. It is useful if a cluster already has cilium and Hubble relay installed and you just want Hubble UI to be deployed. When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` | | hubble.ui.standalone.tls.certsVolume | object | `{}` | When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required to provide a volume for mounting the client certificates. | | hubble.ui.tls.client | object | `{"cert":"","key":""}` | base64 encoded PEM values used to connect to hubble-relay This keypair is presented to Hubble Relay instances for mTLS authentication and is required when hubble.relay.tls.server.enabled is true. These values need to be set manually if hubble.tls.auto.enabled is false. 
| -| hubble.ui.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| hubble.ui.tolerations | list | `[]` | Node tolerations for pod assignment on nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | hubble.ui.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for hubble-ui | | hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. | | identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). | | identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. | -| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.13.0","useDigest":false}` | Agent container image. | +| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.13.6","useDigest":false}` | Agent container image. | | imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images | | ingressController.enabled | bool | `false` | Enable cilium ingress controller This will automatically set enable-envoy-config as well. | | ingressController.enforceHttps | bool | `true` | Enforce https for host having matching TLS host in Ingress. Incoming traffic to http listener will return 308 http error code with respective location in header. | @@ -370,17 +394,19 @@ contributors across the globe, there is almost always someone available to help. | ingressController.secretsNamespace.create | bool | `true` | Create secrets namespace for Ingress. | | ingressController.secretsNamespace.name | string | `"cilium-secrets"` | Name of Ingress secret namespace. | | ingressController.secretsNamespace.sync | bool | `true` | Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. If disabled, TLS secrets must be maintained externally. | -| ingressController.service | object | `{"annotations":{},"insecureNodePort":null,"labels":{},"name":"cilium-ingress","secureNodePort":null,"type":"LoadBalancer"}` | Load-balancer service in shared mode. This is a single load-balancer service for all Ingress resources. | +| ingressController.service | object | `{"allocateLoadBalancerNodePorts":null,"annotations":{},"insecureNodePort":null,"labels":{},"loadBalancerClass":null,"loadBalancerIP":null,"name":"cilium-ingress","secureNodePort":null,"type":"LoadBalancer"}` | Load-balancer service in shared mode. This is a single load-balancer service for all Ingress resources. 
| +| ingressController.service.allocateLoadBalancerNodePorts | string | `nil` | Configure if node port allocation is required for LB service ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation | | ingressController.service.annotations | object | `{}` | Annotations to be added for the shared LB service | | ingressController.service.insecureNodePort | string | `nil` | Configure a specific nodePort for insecure HTTP traffic on the shared LB service | | ingressController.service.labels | object | `{}` | Labels to be added for the shared LB service | +| ingressController.service.loadBalancerClass | string | `nil` | Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) | +| ingressController.service.loadBalancerIP | string | `nil` | Configure a specific loadBalancerIP on the shared LB service | | ingressController.service.name | string | `"cilium-ingress"` | Service name | | ingressController.service.secureNodePort | string | `nil` | Configure a specific nodePort for secure HTTPS traffic on the shared LB service | | ingressController.service.type | string | `"LoadBalancer"` | Service type for the shared LB service | -| installIptablesRules | bool | `true` | Configure whether to install iptables rules to allow for TPROXY (L7 proxy injection), iptables-based masquerading and compatibility with kube-proxy. | | installNoConntrackIptablesRules | bool | `false` | Install Iptables rules to skip netfilter connection tracking on all pod traffic. This option is only effective when Cilium is running in direct routing and full KPR mode. Moreover, this option cannot be enabled when Cilium is running in a managed Kubernetes environment or in a chained CNI setup. | | ipMasqAgent | object | `{"enabled":false}` | Configure the eBPF-based ip-masq-agent | -| ipam.mode | string | `"cluster-pool"` | Configure IP Address Management mode. ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/ | +| ipam.mode | string | `"cluster-pool"` | Configure IP Address Management mode. ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ | | ipam.operator.clusterPoolIPv4MaskSize | int | `24` | IPv4 CIDR mask size to delegate to individual nodes for IPAM. | | ipam.operator.clusterPoolIPv4PodCIDR | string | `"10.0.0.0/8"` | Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList. IPv4 CIDR range to delegate to individual nodes for IPAM. | | ipam.operator.clusterPoolIPv4PodCIDRList | list | `[]` | IPv4 CIDR list range to delegate to individual nodes for IPAM. | @@ -408,7 +434,7 @@ contributors across the globe, there is almost always someone available to help. | loadBalancer | object | `{"l7":{"algorithm":"round_robin","backend":"disabled","ports":[]}}` | Configure service load balancing | | loadBalancer.l7 | object | `{"algorithm":"round_robin","backend":"disabled","ports":[]}` | L7 LoadBalancer | | loadBalancer.l7.algorithm | string | `"round_robin"` | Default LB algorithm The default LB algorithm to be used for services, which can be overridden by the service annotation (e.g. service.cilium.io/lb-l7-algorithm) Applicable values: round_robin, least_request, random | -| loadBalancer.l7.backend | string | `"disabled"` | Enable L7 service load balancing via envoy proxy. The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, will be forwarded to the local backend proxy to be load balanced to the service endpoints. Please refer to docs for supported annotations for more configuration. 
Applicable values: - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. - disabled: Disable L7 load balancing. | +| loadBalancer.l7.backend | string | `"disabled"` | Enable L7 service load balancing via envoy proxy. The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, will be forwarded to the local backend proxy to be load balanced to the service endpoints. Please refer to docs for supported annotations for more configuration. Applicable values: - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. - disabled: Disable L7 load balancing via service annotation. | | loadBalancer.l7.ports | list | `[]` | List of ports from service to be automatically redirected to above backend. Any service exposing one of these ports will be automatically redirected. Fine-grained control can be achieved by using the service annotation. | | localRedirectPolicy | bool | `false` | Enable Local Redirect Policy. | | logSystemLoad | bool | `false` | Enables periodic logging of system load | @@ -428,14 +454,14 @@ contributors across the globe, there is almost always someone available to help. | nodeinit.bootstrapFile | string | `"/tmp/cilium-bootstrap.d/cilium-bootstrap-time"` | bootstrapFile is the location of the file where the bootstrap timestamp is written by the node-init DaemonSet | | nodeinit.enabled | bool | `false` | Enable the node initialization DaemonSet | | nodeinit.extraEnv | list | `[]` | Additional nodeinit environment variables. | -| nodeinit.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"d69851597ea019af980891a4628fb36b7880ec26"}` | node-init image. | -| nodeinit.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| nodeinit.image | object | `{"override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/startup-script","tag":"62093c5c233ea914bfa26a10ba41f8780d9b737f"}` | node-init image. | +| nodeinit.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for nodeinit pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | nodeinit.podAnnotations | object | `{}` | Annotations to be added to node-init pods. | | nodeinit.podLabels | object | `{}` | Labels to be added to node-init pods. | | nodeinit.priorityClassName | string | `""` | The priority class to use for the nodeinit pod. | -| nodeinit.resources | object | `{"requests":{"cpu":"100m","memory":"100Mi"}}` | nodeinit resource limits & requests ref: https://kubernetes.io/docs/user-guide/compute-resources/ | +| nodeinit.resources | object | `{"requests":{"cpu":"100m","memory":"100Mi"}}` | nodeinit resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | nodeinit.securityContext | object | `{"capabilities":{"add":["SYS_MODULE","NET_ADMIN","SYS_ADMIN","SYS_CHROOT","SYS_PTRACE"]},"privileged":false,"seLinuxOptions":{"level":"s0","type":"spc_t"}}` | Security context to be added to nodeinit pods. 
| -| nodeinit.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for nodeinit scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| nodeinit.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for nodeinit scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | nodeinit.updateStrategy | object | `{"type":"RollingUpdate"}` | node-init update strategy | | operator.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"io.cilium/app":"operator"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-operator | | operator.dnsPolicy | string | `""` | DNS policy for Cilium operator pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | @@ -448,34 +474,35 @@ contributors across the globe, there is almost always someone available to help. | operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. | | operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. | | operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. | -| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.13.0","useDigest":false}` | cilium-operator image. | +| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.13.6","useDigest":false}` | cilium-operator image. | | operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. | -| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods | | operator.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | operator.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | | operator.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. 
When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | | operator.podLabels | object | `{}` | Labels to be added to cilium-operator pods | +| operator.podSecurityContext | object | `{}` | Security context to be added to cilium-operator pods | | operator.pprof.address | string | `"localhost"` | Configure pprof listen address for cilium-operator | | operator.pprof.enabled | bool | `false` | Enable pprof for cilium-operator | | operator.pprof.port | int | `6061` | Configure pprof listen port for cilium-operator | | operator.priorityClassName | string | `""` | The priority class to use for cilium-operator | | operator.prometheus | object | `{"enabled":false,"port":9963,"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":null}}` | Enable prometheus metrics for cilium-operator on the configured port at /metrics | | operator.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-operator | -| operator.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | +| operator.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) | | operator.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. | | operator.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-operator | | operator.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-operator | | operator.prometheus.serviceMonitor.relabelings | string | `nil` | Relabeling configs for the ServiceMonitor cilium-operator | | operator.removeNodeTaints | bool | `true` | Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium pod running. | | operator.replicas | int | `2` | Number of replicas to run for the cilium-operator deployment | -| operator.resources | object | `{}` | cilium-operator resource limits & requests ref: https://kubernetes.io/docs/user-guide/compute-resources/ | +| operator.resources | object | `{}` | cilium-operator resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | operator.rollOutPods | bool | `false` | Roll out cilium-operator pods automatically when configmap is updated. | | operator.securityContext | object | `{}` | Security context to be added to cilium-operator pods | | operator.setNodeNetworkStatus | bool | `true` | Set Node condition NetworkUnavailable to 'false' with the reason 'CiliumIsUp' for nodes that have a healthy Cilium pod. | | operator.skipCNPStatusStartupClean | bool | `false` | Skip CNP node status clean up at operator startup. 
|
 | operator.skipCRDCreation | bool | `false` | Skip CRD creation for cilium-operator |
-| operator.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
+| operator.tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for cilium-operator scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
 | operator.topologySpreadConstraints | list | `[]` | Pod topology spread constraints for cilium-operator |
 | operator.unmanagedPodWatcher.intervalSeconds | int | `15` | Interval, in seconds, to check if there are any pods that are not managed by Cilium. |
 | operator.unmanagedPodWatcher.restart | bool | `true` | Restart any pods that are not managed by Cilium. |
@@ -483,37 +510,42 @@ contributors across the globe, there is almost always someone available to help.
 | pmtuDiscovery.enabled | bool | `false` | Enable path MTU discovery to send ICMP fragmentation-needed replies to the client. |
 | podAnnotations | object | `{}` | Annotations to be added to agent pods |
 | podLabels | object | `{}` | Labels to be added to agent pods |
-| policyEnforcementMode | string | `"default"` | The agent can be put into one of the three policy enforcement modes: default, always and never. ref: https://docs.cilium.io/en/stable/policy/intro/#policy-enforcement-modes |
+| podSecurityContext | object | `{}` | Security Context for cilium-agent pods. |
+| policyEnforcementMode | string | `"default"` | The agent can be put into one of the three policy enforcement modes: default, always and never. ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes |
 | pprof.address | string | `"localhost"` | Configure pprof listen address for cilium-agent |
 | pprof.enabled | bool | `false` | Enable pprof for cilium-agent |
 | pprof.port | int | `6060` | Configure pprof listen port for cilium-agent |
 | preflight.affinity | object | `{"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-preflight |
 | preflight.enabled | bool | `false` | Enable Cilium pre-flight resources (required for upgrade) |
 | preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
-| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.13.0","useDigest":false}` | Cilium pre-flight image. |
-| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ |
+| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
+| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
+| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.13.6","useDigest":false}` | Cilium pre-flight image.
| +| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | | preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods | | preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | preflight.podDisruptionBudget.maxUnavailable | int | `1` | Maximum number/percentage of pods that may be made unavailable | | preflight.podDisruptionBudget.minAvailable | string | `nil` | Minimum number/percentage of pods that should remain scheduled. When it's set, maxUnavailable must be disabled by `maxUnavailable: null` | | preflight.podLabels | object | `{}` | Labels to be added to the preflight pod. | +| preflight.podSecurityContext | object | `{}` | Security context to be added to preflight pods. | | preflight.priorityClassName | string | `""` | The priority class to use for the preflight pod. | -| preflight.resources | object | `{}` | preflight resource limits & requests ref: https://kubernetes.io/docs/user-guide/compute-resources/ | +| preflight.resources | object | `{}` | preflight resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | preflight.securityContext | object | `{}` | Security context to be added to preflight pods | | preflight.terminationGracePeriodSeconds | int | `1` | Configure termination grace period for preflight Deployment and DaemonSet. | | preflight.tofqdnsPreCache | string | `""` | Path to write the `--tofqdns-pre-cache` file to. | -| preflight.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | Node tolerations for preflight scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | +| preflight.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | Node tolerations for preflight scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | preflight.updateStrategy | object | `{"type":"RollingUpdate"}` | preflight update strategy | | preflight.validateCNPs | bool | `true` | By default we should always validate the installed CNPs before upgrading Cilium. This will make sure the user will have the policies deployed in the cluster with the right schema. | | priorityClassName | string | `""` | The priority class to use for cilium-agent. 
|
-| prometheus | object | `{"enabled":false,"metrics":null,"port":9962,"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure prometheus metrics on the configured port at /metrics |
-| prometheus.metrics | string | `nil` | Metrics that should be enabled or disabled from the default metric list. (+metric_foo to enable metric_foo , -metric_bar to disable metric_bar). ref: https://docs.cilium.io/en/stable/operations/metrics/#exported-metrics |
+| prometheus | object | `{"enabled":false,"metrics":null,"port":9962,"serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}],"trustCRDsExist":false}}` | Configure prometheus metrics on the configured port at /metrics |
+| prometheus.metrics | string | `nil` | Metrics that should be enabled or disabled from the default metric list. (+metric_foo to enable metric_foo , -metric_bar to disable metric_bar). ref: https://docs.cilium.io/en/stable/observability/metrics/ |
 | prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-agent |
-| prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) |
+| prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) |
 | prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. |
 | prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-agent |
 | prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-agent |
 | prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-agent |
+| prometheus.serviceMonitor.trustCRDsExist | bool | `false` | Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying |
 | proxy | object | `{"prometheus":{"enabled":true,"port":"9964"},"sidecarImageRegex":"cilium/istio_proxy"}` | Configure Istio proxy options. |
 | proxy.sidecarImageRegex | string | `"cilium/istio_proxy"` | Regular expression matching compatible Istio sidecar istio-proxy container image names |
 | rbac.create | bool | `true` | Enable creation of Role-Based Access Control (RBAC) configuration. |
@@ -521,7 +553,7 @@ contributors across the globe, there is almost always someone available to help.
 | readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
 | remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity |
 | resourceQuotas | object | `{"cilium":{"hard":{"pods":"10k"}},"enabled":false,"operator":{"hard":{"pods":"15"}}}` | Enable resource quotas for priority classes used in the cluster. |
-| resources | object | `{}` | Agent resource limits & requests ref: https://kubernetes.io/docs/user-guide/compute-resources/ |
+| resources | object | `{}` | Agent resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
 | rollOutCiliumPods | bool | `false` | Roll out cilium agent pods automatically when configmap is updated. |
 | sctp | object | `{"enabled":false}` | SCTP Configuration Values |
 | sctp.enabled | bool | `false` | Enable SCTP support. NOTE: Currently, SCTP support does not include rewriting ports or multihoming. |
@@ -532,8 +564,8 @@ contributors across the globe, there is almost always someone available to help.
 | securityContext.privileged | bool | `false` | Run the pod with elevated privileges |
 | securityContext.seLinuxOptions | object | `{"level":"s0","type":"spc_t"}` | SELinux options for the `cilium-agent` and init containers |
 | serviceAccounts | object | Component's fully qualified name. | Define serviceAccount names for components. |
-| serviceAccounts.clustermeshcertgen | object | `{"annotations":{},"create":true,"name":"clustermesh-apiserver-generate-certs"}` | Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob |
-| serviceAccounts.hubblecertgen | object | `{"annotations":{},"create":true,"name":"hubble-generate-certs"}` | Hubblecertgen is used if hubble.tls.auto.method=cronJob |
+| serviceAccounts.clustermeshcertgen | object | `{"annotations":{},"automount":true,"create":true,"name":"clustermesh-apiserver-generate-certs"}` | Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob |
+| serviceAccounts.hubblecertgen | object | `{"annotations":{},"automount":true,"create":true,"name":"hubble-generate-certs"}` | Hubblecertgen is used if hubble.tls.auto.method=cronJob |
 | sleepAfterInit | bool | `false` | Do not run the Cilium agent when running in clean mode. Useful for completely uninstalling Cilium, as it stops Cilium from starting and from creating artifacts on the node. |
 | socketLB | object | `{"enabled":false}` | Configure socket LB |
 | socketLB.enabled | bool | `false` | Enable socket LB |
@@ -549,7 +581,7 @@ contributors across the globe, there is almost always someone available to help.
 | tls.ca.certValidityDuration | int | `1095` | Generated certificates validity duration in days. This will be used for the auto-generated CA. |
 | tls.ca.key | string | `""` | Optional CA private key. If it is provided, it will be used by cilium to generate all other certificates. Otherwise, an ephemeral CA is generated. |
 | tls.secretsBackend | string | `"local"` | This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies (namely the secrets referenced by terminatingTLS and originatingTLS). Possible values: - local - k8s |
-| tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for agent scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
+| tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for agent scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
 | tunnel | string | `"vxlan"` | Configure the encapsulation configuration for communication between nodes. Possible values: - disabled - vxlan (default) - geneve |
 | tunnelPort | int | Port 8472 for VXLAN, Port 6081 for Geneve | Configure VXLAN and Geneve tunnel port.
| | updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | Cilium agent update strategy | diff --git a/helm/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json b/helm/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json new file mode 100644 index 00000000..b8ae55e9 --- /dev/null +++ b/helm/cilium/files/hubble/dashboards/hubble-l7-http-metrics-by-workload.json @@ -0,0 +1,1170 @@ +{ + "__inputs": [], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.0.5" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 14, + "panels": [], + "title": "General", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 16, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "round(sum(rate(hubble_http_requests_total{reporter=~\"${reporter}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\"}[$__rate_interval])), 0.001)", + "refId": "A" + } + ], + "title": "Incoming Request Volume", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 17, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", 
destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", status!~\"5.*\"}[$__rate_interval]))\n/\nsum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{ cluster }} {{ method }} {{ source_namespace }}/{{ source_workload }}", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Request Success Rate (non-5xx responses)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 18, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.0.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.50, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.95, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\"}[$__rate_interval])) by (le))", + "hide": false, + "interval": "", + "legendFormat": "P95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\"}[$__rate_interval])) by (le))", + "hide": false, + "interval": "", + "legendFormat": "P99", + "range": true, + "refId": "C" + } + ], + "title": "Request Duration", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 6, + "panels": [], + "title": "Requests by Source", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "max", + "mean", + "sum", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "round(sum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, source_namespace, source_workload, status), 0.001)", + "interval": "", + "legendFormat": "{{ cluster }} {{ method }} {{ source_namespace }}/{{ source_workload }}: {{ status }}", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Requests by Source and Response Code", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "mean", + "min", + "max", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\",status!~\"5.*\"}[$__rate_interval])) by (cluster, source_namespace, 
source_workload)\n/\nsum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, source_namespace, source_workload)", + "interval": "", + "legendFormat": "{{ cluster }} {{ method }} {{ source_namespace }}/{{ source_workload }}", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Request Success Rate (non-5xx responses) By Source", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.50, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, source_namespace, source_workload, le))", + "interval": "", + "legendFormat": "{{ cluster }} {{ source_namespace }}/{{ source_workload }} P50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, source_namespace, source_workload, le))", + "hide": false, + "interval": "", + "legendFormat": "{{ cluster }} {{ source_namespace }}/{{ source_workload }} P95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", 
source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, source_namespace, source_workload, le))", + "hide": false, + "interval": "", + "legendFormat": "{{ cluster }} {{ source_namespace }}/{{ source_workload }} P99", + "range": true, + "refId": "C" + } + ], + "title": "HTTP Request Duration by Source", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 9, + "panels": [], + "title": "Requests by Destination", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 28 + }, + "id": 10, + "options": { + "legend": { + "calcs": [ + "max", + "mean", + "sum", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "round(sum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, destination_namespace, destination_workload, status), 0.001)", + "interval": "", + "legendFormat": "{{ cluster }} {{ method }} {{ destination_namespace }}/{{ destination_workload }}: {{ status }}", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Requests by Destination and Response Code", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 28 + }, + "id": 11, + "options": { + "legend": { + "calcs": [ + "mean", + "min", + "max", + "lastNotNull" + ], + "displayMode": "table", + 
"placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\",status!~\"5.*\"}[$__rate_interval])) by (cluster, destination_namespace, destination_workload)\n/\nsum(rate(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, destination_namespace, destination_workload)", + "interval": "", + "legendFormat": "{{ cluster }} {{ method }} {{ destination_namespace }}/{{ destination_workload }}", + "range": true, + "refId": "A" + } + ], + "title": "Incoming Request Success Rate (non-5xx responses) By Destination", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.50, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, destination_namespace, destination_workload, le))", + "interval": "", + "legendFormat": "{{ cluster }} {{ destination_namespace }}/{{ destination_workload }} P50", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, 
destination_namespace, destination_workload, le))", + "hide": false, + "interval": "", + "legendFormat": "{{ cluster }} {{ destination_namespace }}/{{ destination_workload }} P95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(hubble_http_request_duration_seconds_bucket{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", reporter=\"${reporter}\", source_namespace=~\"${source_namespace}\", source_workload=~\"${source_workload}\"}[$__rate_interval])) by (cluster, destination_namespace, destination_workload, le))", + "hide": false, + "interval": "", + "legendFormat": "{{ cluster }} {{ destination_namespace }}/{{ destination_workload }} P99", + "range": true, + "refId": "C" + } + ], + "title": "HTTP Request Duration by Destination", + "type": "timeseries" + } + ], + "refresh": "30s", + "schemaVersion": 36, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "label": "Prometheus", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total, cluster)", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\"}, destination_namespace)", + "description": "", + "hide": 0, + "includeAll": false, + "label": "Destination Namespace", + "multi": false, + "name": "destination_namespace", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\"}, destination_namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\"}, destination_workload)", + "hide": 0, + "includeAll": false, + "label": "Destination Workload", + "multi": false, + "name": "destination_workload", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\"}, destination_workload)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total, reporter)", + "hide": 0, + "includeAll": false, + "label": "Reporter", + "multi": false, + "name": "reporter", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total, reporter)", + "refId": 
"StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\"}, source_namespace)", + "hide": 0, + "includeAll": true, + "label": "Source Namespace", + "multi": true, + "name": "source_namespace", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\"}, source_namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", source_namespace=~\"${source_namespace}\"}, source_workload)", + "hide": 0, + "includeAll": true, + "label": "Source Workload", + "multi": true, + "name": "source_workload", + "options": [], + "query": { + "query": "label_values(hubble_http_requests_total{cluster=~\"${cluster}\", destination_namespace=~\"${destination_namespace}\", destination_workload=~\"${destination_workload}\", source_namespace=~\"${source_namespace}\"}, source_workload)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Hubble L7 HTTP Metrics by Workload", + "uid": "3g264CZVz", + "version": 2, + "weekStart": "" +} diff --git a/helm/cilium/files/nodeinit/poststart-eni.bash b/helm/cilium/files/nodeinit/poststart-eni.bash deleted file mode 100644 index 3c75f12a..00000000 --- a/helm/cilium/files/nodeinit/poststart-eni.bash +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o pipefail -set -o nounset - -# When running in AWS ENI mode, it's likely that 'aws-node' has -# had a chance to install SNAT iptables rules. These can result -# in dropped traffic, so we should attempt to remove them. -# We do it using a 'postStart' hook since this may need to run -# for nodes which might have already been init'ed but may still -# have dangling rules. This is safe because there are no -# dependencies on anything that is part of the startup script -# itself, and can be safely run multiple times per node (e.g. in -# case of a restart). -if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; -then - echo 'Deleting iptables rules created by the AWS CNI VPC plugin' - iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore -fi -echo 'Done!' 
diff --git a/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/_helpers.tpl b/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/_helpers.tpl deleted file mode 100644 index 782f252e..00000000 --- a/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/_helpers.tpl +++ /dev/null @@ -1,9 +0,0 @@ -{{- define "clustermesh-apiserver-generate-certs.certmanager.issuer" }} -{{- if .Values.clustermesh.apiserver.tls.auto.certManagerIssuerRef }} - {{- toYaml .Values.clustermesh.apiserver.tls.auto.certManagerIssuerRef }} -{{- else }} - group: cert-manager.io - kind: Issuer - name: clustermesh-apiserver-issuer -{{- end }} -{{- end }} diff --git a/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/clustermesh-apiserver-issuer.yaml b/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/clustermesh-apiserver-issuer.yaml deleted file mode 100644 index 5a8fa6a3..00000000 --- a/helm/cilium/templates/clustermesh-apiserver/tls-certmanager/clustermesh-apiserver-issuer.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and (or .Values.externalWorkloads.enabled .Values.clustermesh.useAPIServer) .Values.clustermesh.apiserver.tls.auto.enabled (eq .Values.clustermesh.apiserver.tls.auto.method "certmanager") (not .Values.clustermesh.apiserver.tls.auto.certManagerIssuerRef) }} -{{- $_ := include "clustermesh-apiserver-generate-certs.helm.setup-ca" . -}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: clustermesh-apiserver-ca-cert - namespace: {{ .Release.Namespace }} -data: - ca.crt: {{ .cmca.Cert | b64enc }} - ca.key: {{ .cmca.Key | b64enc }} ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: clustermesh-apiserver-issuer - namespace: {{ .Release.Namespace }} -spec: - ca: - secretName: clustermesh-apiserver-ca-cert -{{- end }} diff --git a/helm/cilium/templates/hubble-ui/_nginx.tpl b/helm/cilium/templates/hubble-ui/_nginx.tpl index 1e7656c1..3b409207 100644 --- a/helm/cilium/templates/hubble-ui/_nginx.tpl +++ b/helm/cilium/templates/hubble-ui/_nginx.tpl @@ -46,7 +46,7 @@ server { {{- if not (eq .Values.hubble.ui.baseUrl "/") }} rewrite ^{{ (trimSuffix "/" .Values.hubble.ui.baseUrl) }}(/.*)$ $1 break; {{- end }} - # double `/index.html` is required here + # double `/index.html` is required here try_files $uri $uri/ /index.html /index.html; } } diff --git a/helm/cilium/templates/hubble/tls-certmanager/_helpers.tpl b/helm/cilium/templates/hubble/tls-certmanager/_helpers.tpl deleted file mode 100644 index 6b00dd5a..00000000 --- a/helm/cilium/templates/hubble/tls-certmanager/_helpers.tpl +++ /dev/null @@ -1,9 +0,0 @@ -{{- define "hubble-generate-certs.certmanager.issuer" }} -{{- if .Values.hubble.tls.auto.certManagerIssuerRef }} - {{- toYaml .Values.hubble.tls.auto.certManagerIssuerRef }} -{{- else }} - group: cert-manager.io - kind: Issuer - name: hubble-issuer -{{- end }} -{{- end }} diff --git a/helm/cilium/templates/hubble/tls-certmanager/hubble-issuer.yaml b/helm/cilium/templates/hubble/tls-certmanager/hubble-issuer.yaml deleted file mode 100644 index 8b60b1af..00000000 --- a/helm/cilium/templates/hubble/tls-certmanager/hubble-issuer.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and (or .Values.agent .Values.hubble.relay.enabled .Values.hubble.ui.enabled) .Values.hubble.enabled .Values.hubble.tls.enabled .Values.hubble.tls.auto.enabled (eq .Values.hubble.tls.auto.method "certmanager") (not .Values.hubble.tls.auto.certManagerIssuerRef) }} -{{- $_ := include "hubble-generate-certs.helm.setup-ca" . 
-}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: hubble-ca-secret - namespace: {{ .Release.Namespace }} -data: - ca.crt: {{ .ca.Cert | b64enc }} - ca.key: {{ .ca.Key | b64enc }} ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: hubble-issuer - namespace: {{ .Release.Namespace }} -spec: - ca: - secretName: hubble-ca-secret -{{- end }} diff --git a/helm/cilium/values.yaml b/helm/cilium/values.yaml index 4d66e8bf..4ee1b080 100644 --- a/helm/cilium/values.yaml +++ b/helm/cilium/values.yaml @@ -1,3 +1,6 @@ +# File generated by install/kubernetes/Makefile; DO NOT EDIT. +# This file is based on install/kubernetes/cilium/values.yaml.tmpl. + # upgradeCompatibility helps users upgrading to ensure that the configMap for # Cilium will not change critical values to ensure continued operation # This is flag is not required for new installations. @@ -805,8 +808,6 @@ eni: # are going to be used to create new ENIs instanceTagsFilter: [] -eksMode: false - externalIPs: # -- Enable ExternalIPs service support. enabled: false @@ -1491,7 +1492,7 @@ l7Proxy: true localRedirectPolicy: true # To include or exclude matched resources from cilium identity evaluation -labels: "k8s:!.*/enforce k8s:!.*fluxcd.io/.* k8s:!.*kubernetes.io/managed-by.* k8s:!controller-uid k8s:!job-name" +labels: "k8s:!.*/enforce k8s:!.*fluxcd.io/.* k8s:!.*kubernetes.io/managed-by.* k8s:!controller-uid k8s:!job-name" # logOptions allows you to define logging options. eg: # logOptions: @@ -2565,6 +2566,8 @@ sctp: # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming. enabled: false +eksMode: false + defaultPolicies: enabled: false remove: false @@ -2589,7 +2592,7 @@ extraPolicies: - kube-system tolerations: - - operator: Exists + - operator: Exists # If true, it adds an initContainer to cilium-agent pods that cleans up any legacy kube-proxy iptables rules from the node before running cilium. # Only makes sense when `kubeProxyReplacement` is enabled (i.e. not set to 'disabled'). diff --git a/helm/cilium/values.yaml.tmpl b/helm/cilium/values.yaml.tmpl new file mode 100644 index 00000000..2139aacf --- /dev/null +++ b/helm/cilium/values.yaml.tmpl @@ -0,0 +1,2553 @@ +# upgradeCompatibility helps users upgrading to ensure that the configMap for +# Cilium will not change critical values to ensure continued operation +# This is flag is not required for new installations. +# For example: 1.7, 1.8, 1.9 +# upgradeCompatibility: '1.8' + +debug: + # -- Enable debug logging + enabled: false + # -- Configure verbosity levels for debug logging + # This option is used to enable debug messages for operations related to such + # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is + # for enabling debug messages emitted per request, message and connection. + # + # Applicable values: + # - flow + # - kvstore + # - envoy + # - datapath + # - policy + verbose: ~ + +rbac: + # -- Enable creation of Resource-Based Access Control configuration. + create: true + +# -- Configure image pull secrets for pulling container images +imagePullSecrets: +# - name: "image-pull-secret" + +# -- (string) Kubernetes config path +# @default -- `"~/.kube/config"` +kubeConfigPath: "" +# -- (string) Kubernetes service host +k8sServiceHost: "" +# -- (string) Kubernetes service port +k8sServicePort: "" + +cluster: + # -- Name of the cluster. Only required for Cluster Mesh. + name: default + # -- (int) Unique ID of the cluster. 
Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 0 + +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + automount: true + annotations: {} + etcd: + create: true + name: cilium-etcd-operator + automount: true + annotations: {} + operator: + create: true + name: cilium-operator + automount: true + annotations: {} + preflight: + create: true + name: cilium-pre-flight + automount: true + annotations: {} + relay: + create: true + name: hubble-relay + automount: false + annotations: {} + ui: + create: true + name: hubble-ui + automount: true + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + automount: true + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + automount: true + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + automount: true + annotations: {} + +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 + +# -- Install the cilium agent resources. +agent: true + +# -- Agent container name. +name: cilium + +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: false + +# -- Agent container image. +image: + override: ~ + repository: "${CILIUM_REPO}" + tag: "${CILIUM_VERSION}" + pullPolicy: "${PULL_POLICY}" + # cilium-digest + digest: ${CILIUM_DIGEST} + useDigest: ${USE_DIGESTS} + +# -- Affinity for cilium-agent. +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + +# -- Node selector for cilium-agent. +nodeSelector: + kubernetes.io/os: linux + +# -- Node tolerations for agent scheduling to nodes with taints +# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: +- operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +# -- The priority class to use for cilium-agent. +priorityClassName: "" + +# -- DNS policy for Cilium agent pods. +# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" + +# -- Additional containers added to the cilium DaemonSet. +extraContainers: [] + +# -- Additional agent container arguments. +extraArgs: [] + +# -- Additional agent container environment variables. +extraEnv: [] + +# -- Additional agent hostPath mounts. +extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + +# -- Additional agent volumes. +extraVolumes: [] + +# -- Additional agent volumeMounts. +extraVolumeMounts: [] + +# -- extraConfig allows you to specify additional configuration parameters to be +# included in the cilium-config configmap. +extraConfig: {} +# my-config-a: "1234" +# my-config-b: |- +# test 1 +# test 2 +# test 3 + +# -- Security Context for cilium-agent pods. 
+podSecurityContext: {} + +# -- Annotations to be added to agent pods +podAnnotations: {} + +# -- Labels to be added to agent pods +podLabels: {} + +# -- Agent resource limits & requests +# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + +securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-agent` and init containers + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-agent` container + ciliumAgent: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + # Allow discretionary access control (e.g. required for package installation) + - DAC_OVERRIDE + # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation) + - FOWNER + # Allow to execute program that changes GID (e.g. required for package installation) + - SETGID + # Allow to execute program that changes UID (e.g. required for package installation) + - SETUID + # -- Capabilities for the `mount-cgroup` init container + mountCgroup: + # Only used for 'mount' cgroup + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- capabilities for the `apply-sysctl-overwrites` init container + applySysctlOverwrites: + # Required in order to access host's /etc/sysctl.d dir + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- Capabilities for the `clean-cilium-state` init container + cleanCiliumState: + # Most of the capabilities here are the same ones used in the + # cilium-agent's container because this container can be used to + # uninstall all Cilium resources, and therefore it is likely that + # will need the same capabilities. + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. 
+ #- PERFMON + #- BPF + +# -- Cilium agent update strategy +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 2 + +# Configuration Values for cilium-agent + +aksbyocni: + # -- Enable AKS BYOCNI integration. + # Note that this is incompatible with AKS clusters not created in BYOCNI mode: + # use Azure integration (`azure.enabled`) instead. + enabled: false + +# -- Enable installation of PodCIDR routes between worker +# nodes if worker nodes share a common L2 network segment. +autoDirectNodeRoutes: false + +# -- Annotate k8s node upon initialization with Cilium's metadata. +annotateK8sNode: false + +azure: + # -- Enable Azure integration. + # Note that this is incompatible with AKS clusters created in BYOCNI mode: use + # AKS BYOCNI integration (`aksbyocni.enabled`) instead. + enabled: false + # usePrimaryAddress: false + # resourceGroup: group1 + # subscriptionID: 00000000-0000-0000-0000-000000000000 + # tenantID: 00000000-0000-0000-0000-000000000000 + # clientID: 00000000-0000-0000-0000-000000000000 + # clientSecret: 00000000-0000-0000-0000-000000000000 + # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000 + +alibabacloud: + # -- Enable AlibabaCloud ENI integration + enabled: false + +# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow +# for rate-limiting traffic from individual Pods with EDT (Earliest Departure +# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. +bandwidthManager: + # -- Enable bandwidth manager infrastructure (also prerequirement for BBR) + enabled: false + # -- Activate BBR TCP congestion control for Pods + bbr: false + +# -- Configure standalone NAT46/NAT64 gateway +nat46x64Gateway: + # -- Enable RFC8215-prefixed translation + enabled: false + +# -- Configure BGP +bgp: + # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside + # cilium-agent and cilium-operator + enabled: false + announce: + # -- Enable allocation and announcement of service LoadBalancer IPs + loadbalancerIP: false + # -- Enable announcement of node pod CIDR + podCIDR: false + +# -- This feature set enables virtual BGP routers to be created via +# CiliumBGPPeeringPolicy CRDs. +bgpControlPlane: + # -- Enables the BGP control plane. + enabled: false + +pmtuDiscovery: + # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to + # the client. + enabled: false + +bpf: + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + + # -- (int) Configure the maximum number of entries in the TCP connection tracking + # table. + # @default -- `524288` + ctTcpMax: ~ + + # -- (int) Configure the maximum number of entries for the non-TCP connection + # tracking table. + # @default -- `262144` + ctAnyMax: ~ + + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + + # -- (int) Configure the maximum number of entries for the NAT table. + # @default -- `524288` + natMax: ~ + + # -- (int) Configure the maximum number of entries for the neighbor table. + # @default -- `524288` + neighMax: ~ + + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). + policyMapMax: 16384 + + # -- (float64) Configure auto-sizing for all BPF maps based on available memory. 
+ # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
+ # @default -- `0.0025`
+ mapDynamicSizeRatio: ~
+
+ # -- Configure the level of aggregation for monitor notifications.
+ # Valid options are none, low, medium, maximum.
+ monitorAggregation: medium
+
+ # -- Configure the typical time between monitor notifications for
+ # active connections.
+ monitorInterval: "5s"
+
+ # -- Configure which TCP flags trigger notifications when seen for the
+ # first time in a connection.
+ monitorFlags: "all"
+
+ # -- Allow cluster external access to ClusterIP services.
+ lbExternalClusterIP: false
+
+ # -- (bool) Enable native IP masquerade support in eBPF
+ # @default -- `false`
+ masquerade: ~
+
+ # -- (bool) Configure whether direct routing mode should route traffic via
+ # host stack (true) or directly and more efficiently out of BPF (false) if
+ # the kernel supports it. The latter has the implication that it will also
+ # bypass netfilter in the host namespace.
+ # @default -- `false`
+ hostLegacyRouting: ~
+
+ # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
+ # for implementing Layer 7 policy.
+ # @default -- `false`
+ tproxy: ~
+
+ # -- (list) Configure explicitly allowed VLAN IDs for bpf logic bypass.
+ # [0] will allow all VLAN IDs without any filtering.
+ # @default -- `[]`
+ vlanBypass: ~
+
+# -- Enable BPF clock source probing for more efficient tick retrieval.
+bpfClockProbe: false
+
+# -- Clean all eBPF datapath state from the initContainer of the cilium-agent
+# DaemonSet.
+#
+# WARNING: Use with care!
+cleanBpfState: false
+
+# -- Clean all local Cilium state from the initContainer of the cilium-agent
+# DaemonSet. Implies cleanBpfState: true.
+#
+# WARNING: Use with care!
+cleanState: false
+
+# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
+# init container before launching cilium-agent.
+# More context can be found in the commit message of the PR below:
+# https://github.com/cilium/cilium/pull/20123
+waitForKubeProxy: false
+
+cni:
+ # -- Install the CNI configuration and binary files into the filesystem.
+ install: true
+
+ # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
+ # if you're removing Cilium from the cluster. Disable this to prevent the CNI
+ # configuration file from being removed during agent upgrade, which can cause
+ # nodes to become unmanageable.
+ uninstall: true
+
+ # -- Configure chaining on top of other CNI plugins. Possible values:
+ # - none
+ # - aws-cni
+ # - flannel
+ # - generic-veth
+ # - portmap
+ chainingMode: none
+
+ # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+ # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+ # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+ # agent downtime.
+ exclusive: true
+
+ # -- Configure the log file for CNI logging with a retention policy of 7 days.
+ # Disable CNI file logging by setting this field to empty explicitly.
+ logFile: /var/run/cilium/cilium-cni.log
+
+ # -- Skip writing of the CNI configuration. This can be used if
+ # writing of the CNI configuration is performed by external automation.
+ customConf: false
+
+ # -- Configure the path to the CNI configuration directory on the host.
+ confPath: /etc/cni/net.d
+
+ # -- Configure the path to the CNI binary directory on the host.
+ binPath: /opt/cni/bin
+
+ # -- Specify the path to a CNI config to read from on agent start.
+ # This can be useful if you want to manage your CNI
+ # configuration outside of a Kubernetes environment. This parameter is
+ # mutually exclusive with the 'cni.configMap' parameter.
+ # readCniConf: /host/etc/cni/net.d/05-cilium.conf
+
+ # -- When defined, configMap will mount the provided value as ConfigMap and
+ # interpret the cniConf variable as a CNI configuration file and write it
+ # when the agent starts up.
+ # configMap: cni-configuration
+
+ # -- Configure the key in the CNI ConfigMap to read the contents of
+ # the CNI configuration from.
+ configMapKey: cni-config
+
+ # -- Configure the path to where to mount the ConfigMap inside the agent pod.
+ confFileMountPath: /tmp/cni-configuration
+
+ # -- Configure the path to where the CNI configuration directory is mounted
+ # inside the agent pod.
+ hostConfDirMountPath: /host/etc/cni/net.d
+
+# -- (string) Configure how frequently garbage collection should occur for the datapath
+# connection tracking table.
+# @default -- `"0s"`
+conntrackGCInterval: ""
+
+# -- Configure container runtime specific integration.
+containerRuntime:
+ # -- Enables specific integrations for container runtimes.
+ # Supported values:
+ # - containerd
+ # - crio
+ # - docker
+ # - none
+ # - auto (automatically detect the container runtime)
+ integration: none
+ # -- Configure the path to the container runtime control socket.
+ # socketPath: /path/to/runtime.sock
+
+# -- (string) Configure the timeout after which Cilium will exit if CRDs are not available
+# @default -- `"5m"`
+crdWaitTimeout: ""
+
+# -- Tail call hooks for custom eBPF programs.
+customCalls:
+ # -- Enable tail call hooks for custom eBPF programs.
+ enabled: false
+
+daemon:
+ # -- Configure where Cilium runtime state should be stored.
+ runPath: "/var/run/cilium"
+
+ # -- Configure a custom list of possible configuration override sources
+ # The default is "config-map:cilium-config,cilium-node-config". For supported
+ # values, see the help text for the build-config subcommand.
+ # Note that this value should be a comma-separated string.
+ configSources: ~
+
+ # -- allowedConfigOverrides is a list of config-map keys that can be overridden.
+ # That is to say, if this value is set, config sources (excepting the first one) can
+ # only override keys in this list.
+ #
+ # This takes precedence over blockedConfigOverrides.
+ #
+ # By default, all keys may be overridden. To disable overrides, set this to "none" or
+ # change the configSources variable.
+ allowedConfigOverrides: ~
+
+ # -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
+ # In other words, if any of these keys appear in a configuration source excepting the
+ # first one, they will be ignored.
+ #
+ # This is ignored if allowedConfigOverrides is set.
+ #
+ # By default, all keys may be overridden.
+ blockedConfigOverrides: ~
+
+# -- Specify which network interfaces can run the eBPF datapath. This means
+# that a packet sent from a pod to a destination outside the cluster will be
+# masqueraded (to an output device IPv4 address), if the output device runs the
+# program. When not specified, probing will automatically detect devices.
+# devices: ""
+
+# -- Enables experimental support for the detection of new and removed datapath
+# devices. When devices change, the eBPF datapath is reloaded and services are
+# updated. If "devices" is set, then only those devices, or devices matching a
+# wildcard, will be considered.
+enableRuntimeDeviceDetection: false
+
+# -- Chains to ignore when installing feeder rules.
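+# Example (illustrative, hypothetical value): "OUTPUT" would skip installing
+# feeder rules for the OUTPUT chain.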
+# disableIptablesFeederRules: ""
+
+# -- Limit egress masquerading to interface selector.
+# egressMasqueradeInterfaces: ""
+
+# -- Whether to enable CNP status updates.
+enableCnpStatusUpdates: false
+
+# -- Configures the use of the KVStore to optimize Kubernetes event handling by
+# mirroring it into the KVstore for reduced overhead in large clusters.
+enableK8sEventHandover: false
+
+# -- Enable setting identity mark for local traffic.
+# enableIdentityMark: true
+
+# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
+# enableK8sEndpointSlice: true
+
+# -- Enable CiliumEndpointSlice feature.
+enableCiliumEndpointSlice: false
+
+envoyConfig:
+ # -- Enable CiliumEnvoyConfig CRD
+ # CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
+ enabled: false
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for CiliumEnvoyConfig CRDs.
+ create: true
+
+ # -- Name of secret namespace which Cilium agents are given read access to.
+ name: cilium-secrets
+
+ingressController:
+ # -- Enable cilium ingress controller
+ # This will automatically set enable-envoy-config as well.
+ enabled: false
+
+ # -- Default ingress load balancer mode
+ # Supported values: shared, dedicated
+ # For granular control, use the following annotations on the ingress resource:
+ # ingress.cilium.io/loadbalancer-mode: shared|dedicated
+ loadbalancerMode: dedicated
+
+ # -- Enforce HTTPS for hosts that have a matching TLS host in the Ingress.
+ # Incoming traffic on the HTTP listener is answered with a 308 redirect and the
+ # respective location header.
+ enforceHttps: true
+
+ # -- IngressLBAnnotations are the annotation prefixes, which are used to filter annotations to propagate
+ # from Ingress to the Load Balancer service
+ ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for Ingress.
+ create: true
+
+ # -- Name of Ingress secret namespace.
+ name: cilium-secrets
+
+ # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+ # If disabled, TLS secrets must be maintained externally.
+ sync: true
+
+ # -- Load-balancer service in shared mode.
+ # This is a single load-balancer service for all Ingress resources.
+ service:
+ # -- Service name
+ name: cilium-ingress
+ # -- Labels to be added for the shared LB service
+ labels: {}
+ # -- Annotations to be added for the shared LB service
+ annotations: {}
+ # -- Service type for the shared LB service
+ type: LoadBalancer
+ # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service
+ insecureNodePort: ~
+ # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service
+ secureNodePort: ~
+ # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+)
+ loadBalancerClass: ~
+ # -- Configure a specific loadBalancerIP on the shared LB service
+ loadBalancerIP: ~
+ # -- Configure if node port allocation is required for LB service
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation
+ allocateLoadBalancerNodePorts: ~
+
+gatewayAPI:
+ # -- Enable support for Gateway API in cilium
+ # This will automatically set enable-envoy-config as well.
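+ # Note (assumption based on upstream docs): the Gateway API CRDs must be
+ # installed in the cluster before enabling this.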
+ enabled: false + + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Gateway API. + create: true + + # -- Name of Gateway API secret namespace. + name: cilium-secrets + + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true + +encryption: + # -- Enable transparent network encryption. + enabled: false + + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to ipsec. + nodeEncryption: false + + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: "" + + # -- Path to mount the secret inside the Cilium pod. + mountPath: "" + + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: "" + + # -- The interface to use for encrypted traffic. + interface: "" + + # -- Enable the key watcher. If disabled, a restart of the agent will be + # necessary on key rotations. + keyWatcher: true + + wireguard: + # -- Enables the fallback to the user-space implementation. + userspaceFallback: false + + # -- Deprecated in favor of encryption.ipsec.keyFile. + # Name of the key file inside the Kubernetes secret configured via secretName. + # This option is only effective when encryption.type is set to ipsec. + keyFile: keys + + # -- Deprecated in favor of encryption.ipsec.mountPath. + # Path to mount the secret inside the Cilium pod. + # This option is only effective when encryption.type is set to ipsec. + mountPath: /etc/ipsec + + # -- Deprecated in favor of encryption.ipsec.secretName. + # Name of the Kubernetes secret containing the encryption keys. + # This option is only effective when encryption.type is set to ipsec. + secretName: cilium-ipsec-keys + + # -- Deprecated in favor of encryption.ipsec.interface. + # The interface to use for encrypted traffic. + # This option is only effective when encryption.type is set to ipsec. + interface: "" + +endpointHealthChecking: + # -- Enable connectivity health checking between virtual endpoints. + enabled: true + +# -- Enable endpoint status. +# Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space. +endpointStatus: + enabled: false + status: "" + +endpointRoutes: + # -- Enable use of per endpoint routes instead of routing via + # the cilium_host interface. + enabled: false + +eni: + # -- Enable Elastic Network Interface (ENI) integration. + enabled: false + # -- Update ENI Adapter limits from the EC2 API + updateEC2AdapterLimitViaAPI: false + # -- Release IPs not used from the ENI + awsReleaseExcessIPs: false + # -- Enable ENI prefix delegation + awsEnablePrefixDelegation: false + # -- EC2 API endpoint to use + ec2APIEndpoint: "" + # -- Tags to apply to the newly created ENIs + eniTags: {} + # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable. 
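+ # e.g. (illustrative): gcInterval: "10m"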
+ # @default -- `"5m"`
+ gcInterval: ""
+ # -- Additional tags attached to ENIs created by Cilium.
+ # Dangling ENIs with this tag will be garbage collected.
+ # @default -- `{"io.cilium/cilium-managed":"true","io.cilium/cluster-name":""}`
+ gcTags: {}
+ # -- If using IAM roles for Service Accounts, Cilium will not try to
+ # inject identity values from the cilium-aws Kubernetes secret.
+ # Adds an annotation to the service account if managed by Helm.
+ # See https://github.com/aws/amazon-eks-pod-identity-webhook
+ iamRole: ""
+ # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetIDsFilter: []
+ # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetTagsFilter: []
+ # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
+ # are going to be used to create new ENIs
+ instanceTagsFilter: []
+
+externalIPs:
+ # -- Enable ExternalIPs service support.
+ enabled: false
+
+# fragmentTracking enables IPv4 fragment tracking support in the datapath.
+# fragmentTracking: true
+
+gke:
+ # -- Enable Google Kubernetes Engine integration
+ enabled: false
+
+# -- Enable connectivity health checking.
+healthChecking: true
+
+# -- TCP port for the agent health API. This is not the port for cilium-health.
+healthPort: 9879
+
+# -- Configure the host firewall.
+hostFirewall:
+ # -- Enables the enforcement of host policies in the eBPF datapath.
+ enabled: false
+
+hostPort:
+ # -- Enable hostPort service support.
+ enabled: false
+
+# -- Configure socket LB
+socketLB:
+ # -- Enable socket LB
+ enabled: false
+
+ # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
+ # hostNamespaceOnly: false
+
+# -- Configure certificate generation for Hubble integration.
+# If hubble.tls.auto.method=cronJob, these values are used
+# for the Kubernetes CronJob which will be scheduled regularly to
+# (re)generate any certificates not provided manually.
+certgen:
+ image:
+ override: ~
+ repository: "${CERTGEN_REPO}"
+ tag: "${CERTGEN_VERSION}"
+ pullPolicy: "${PULL_POLICY}"
+ # -- Seconds after which the completed job pod will be deleted
+ ttlSecondsAfterFinished: 1800
+ # -- Labels to be added to hubble-certgen pods
+ podLabels: {}
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional certgen volumes.
+ extraVolumes: []
+
+ # -- Additional certgen volumeMounts.
+ extraVolumeMounts: []
+
+hubble:
+ # -- Enable Hubble (true by default).
+ enabled: true
+
+ # -- Buffer size of the channel Hubble uses to receive monitor events. If this
+ # value is not set, the queue size is set to the default monitor queue size.
+ # eventQueueSize: ""
+
+ # -- Number of recent flows for Hubble to cache. Defaults to 4095.
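+ # (Assumption based on the implementation: valid sizes are of the form
+ # 2^n - 1 because the flow ring buffer is sized to a power of two with one
+ # slot reserved.)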
+ # Possible values are:
+ # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
+ # 2047, 4095, 8191, 16383, 32767, 65535
+ # eventBufferCapacity: "4095"
+
+ # -- Hubble metrics configuration.
+ # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
+ # for more comprehensive documentation about Hubble metrics.
+ metrics:
+ # -- Configures the list of metrics to collect. If empty or null, metrics
+ # are disabled.
+ # Example:
+ #
+ # enabled:
+ # - dns:query;ignoreAAAA
+ # - drop
+ # - tcp
+ # - flow
+ # - icmp
+ # - http
+ #
+ # You can specify the list of metrics from the helm CLI:
+ #
+ # --set metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
+ #
+ enabled: ~
+ # -- Enables exporting hubble metrics in OpenMetrics format.
+ enableOpenMetrics: false
+ # -- Configure the port the hubble metric server listens on.
+ port: 9965
+ # -- Annotations to be added to hubble-metrics service.
+ serviceAnnotations: {}
+ serviceMonitor:
+ # -- Create ServiceMonitor resources for Prometheus Operator.
+ # This requires the prometheus CRDs to be available.
+ # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor hubble
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor hubble
+ metricRelabelings: ~
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Unix domain socket path to listen to when Hubble is enabled.
+ socketPath: /var/run/cilium/hubble.sock
+
+ # -- An additional address for Hubble to listen to.
+ # Set this field to ":4244" if you are enabling Hubble Relay, as it assumes that
+ # Hubble is listening on port 4244.
+ listenAddress: ":4244"
+ # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available.
+ preferIpv6: false
+ # -- (bool) Skip Hubble events with unknown cgroup ids
+ # @default -- `true`
+ skipUnknownCGroupIDs: ~
+
+ peerService:
+ # -- Enable a K8s Service for the Peer service, so that it can be accessed
+ # by a non-local client. This configuration option is deprecated; the peer
+ # service will be non-optional starting with Cilium v1.14.
+ enabled: true
+ # -- Service Port for the Peer service.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+ # -- Target Port for the Peer service, must match the hubble.listenAddress'
+ # port.
+ targetPort: 4244
+ # -- The cluster domain to use to query the Hubble Peer service. It should
+ # be the local cluster.
+ clusterDomain: cluster.local
+ # -- TLS configuration for Hubble
+ tls:
+ # -- Enable mutual TLS for listenAddress. Setting this value to false is
+ # highly discouraged as the Hubble API provides access to potentially
+ # sensitive network flow metadata and is exposed on the host network.
+ enabled: true
+ # -- Configure automatic TLS certificates generation.
+ auto:
+ # -- Auto-generate certificates.
+ # When set to true, automatically generate a CA and certificates to
+ # enable mTLS between Hubble server and Hubble Relay instances. If set to
+ # false, the certs for Hubble server need to be provided by setting
+ # appropriate values below.
+ enabled: true
+ # -- Set the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob to generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method uses cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificates regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Defaults to midnight of the first day of every fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+
+ # -- Deprecated in favor of tls.ca. To be removed in 1.13.
+ # base64 encoded PEM values for the Hubble CA certificate and private key.
+ ca:
+ # -- Deprecated in favor of tls.ca.cert. To be removed in 1.13.
+ cert: ""
+ # -- Deprecated in favor of tls.ca.key. To be removed in 1.13.
+ # The CA private key (optional). If it is provided, then it will be
+ # used by hubble.tls.auto.method=cronJob to generate all other certificates.
+ # Otherwise, an ephemeral CA is generated if hubble.tls.auto.enabled=true.
+ key: ""
+ # -- base64 encoded PEM values for the Hubble server certificate and private key
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+
+ relay:
+ # -- Enable Hubble Relay (requires hubble.enabled=true)
+ enabled: false
+
+ # -- Roll out Hubble Relay pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- Hubble-relay container image.
+ image:
+ override: ~
+ repository: "${HUBBLE_RELAY_REPO}"
+ tag: "${CILIUM_VERSION}"
+ # hubble-relay-digest
+ digest: ${HUBBLE_RELAY_DIGEST}
+ useDigest: ${USE_DIGESTS}
+ pullPolicy: "${PULL_POLICY}"
+
+ # -- Specifies the resources for the hubble-relay pods
+ resources: {}
+
+ # -- Number of replicas run for the hubble-relay deployment.
+ replicas: 1
+
+ # -- Affinity for hubble-relay
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Pod topology spread constraints for hubble-relay
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional hubble-relay environment variables.
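+ # Example (illustrative; the variable shown is an assumption, not a default):
+ # extraEnv:
+ # - name: GODEBUG
+ # value: "madvdontneed=1"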
+ extraEnv: []
+
+ # -- Annotations to be added to hubble-relay pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-relay pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- The priority class to use for hubble-relay
+ priorityClassName: ""
+
+ # -- Configure termination grace period for hubble relay Deployment.
+ terminationGracePeriodSeconds: 1
+
+ # -- hubble-relay update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- hubble-relay security context
+ securityContext: {}
+
+ # -- hubble-relay service configuration.
+ service:
+ # -- The type of service used for Hubble Relay access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # -- The port to use when the service type is set to NodePort.
+ nodePort: 31234
+
+ # -- Host to listen to. Specify an empty string to bind to all the interfaces.
+ listenHost: ""
+
+ # -- Port to listen to.
+ listenPort: "4245"
+
+ # -- TLS configuration for Hubble Relay
+ tls:
+ # -- base64 encoded PEM values for the hubble-relay client certificate and private key
+ # This keypair is presented to Hubble server instances for mTLS
+ # authentication and is required when hubble.tls.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the hubble-relay server certificate and private key
+ server:
+ # When set to true, enable TLS for the Hubble Relay server
+ # (i.e. for clients connecting to the Hubble Relay API).
+ enabled: false
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to the certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to the certificate when it's auto generated
+ extraIpAddresses: []
+
+ # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
+ dialTimeout: ~
+
+ # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
+ retryTimeout: ~
+
+ # -- Max number of flows that can be buffered for sorting before being sent to the
+ # client (per request) (e.g. 100).
+ sortBufferLenMax: ~
+
+ # -- When the per-request flows sort buffer is not full, a flow is drained every
+ # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
+ sortBufferDrainTimeout: ~
+
+ # -- Port to use for the k8s service backed by hubble-relay pods.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+
+ # -- Enable prometheus metrics for hubble-relay on the configured port at
+ # /metrics
+ prometheus:
+ enabled: false
+ port: 9966
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble-relay
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble-relay
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor hubble-relay
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor hubble-relay
+ metricRelabelings: ~
+
+ pprof:
+ # -- Enable pprof for hubble-relay
+ enabled: false
+ # -- Configure pprof listen address for hubble-relay
+ address: localhost
+ # -- Configure pprof listen port for hubble-relay
+ port: 6062
+
+ ui:
+ # -- Whether to enable the Hubble UI.
+ enabled: false
+
+ standalone:
+ # -- When true, it will allow installing the Hubble UI only, without checking dependencies.
+ # It is useful if a cluster already has cilium and Hubble relay installed and you just
+ # want Hubble UI to be deployed.
+ # When installed via Helm, enable the UI with `helm upgrade`; when installed
+ # via the cilium CLI, use `cilium hubble enable --ui`.
+ enabled: false
+
+ tls:
+ # -- When deploying Hubble UI in standalone mode with TLS enabled for Hubble
+ # Relay, a volume must be provided for mounting the client certificates.
+ certsVolume: {}
+ # projected:
+ # defaultMode: 0400
+ # sources:
+ # - secret:
+ # name: hubble-ui-client-certs
+ # items:
+ # - key: tls.crt
+ # path: client.crt
+ # - key: tls.key
+ # path: client.key
+ # - key: ca.crt
+ # path: hubble-relay-ca.crt
+
+ # -- Roll out Hubble-ui pods automatically when configmap is updated.
+ rollOutPods: false
+
+ tls:
+ # -- base64 encoded PEM values used to connect to hubble-relay
+ # This keypair is presented to Hubble Relay instances for mTLS
+ # authentication and is required when hubble.relay.tls.server.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+
+ backend:
+ # -- Hubble-ui backend image.
+ image:
+ override: ~
+ repository: "${HUBBLE_UI_BACKEND_REPO}"
+ tag: "${HUBBLE_UI_BACKEND_VERSION}"
+ pullPolicy: "${PULL_POLICY}"
+
+ # -- Hubble-ui backend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui backend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui backend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui backend volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+
+ frontend:
+ # -- Hubble-ui frontend image.
+ image:
+ override: ~
+ repository: "${HUBBLE_UI_FRONTEND_REPO}"
+ tag: "${HUBBLE_UI_FRONTEND_VERSION}"
+ pullPolicy: "${PULL_POLICY}"
+
+ # -- Hubble-ui frontend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui frontend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui frontend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui frontend volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ server:
+ # -- Controls the server listener for IPv6.
+ ipv6:
+ enabled: true
+
+ # -- The number of replicas of Hubble UI to deploy.
+ replicas: 1
+
+ # -- Annotations to be added to hubble-ui pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-ui pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- Affinity for hubble-ui
+ affinity: {}
+
+ # -- Pod topology spread constraints for hubble-ui
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- The priority class to use for hubble-ui
+ priorityClassName: ""
+
+ # -- hubble-ui update strategy.
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- Security context to be added to Hubble UI pods
+ securityContext:
+ # -- Deprecated in favor of hubble.ui.securityContext.
+ # Whether to set the security context on the Hubble UI pods.
+ enabled: true
+ runAsUser: 1001
+ runAsGroup: 1001
+ fsGroup: 1001
+
+ # -- hubble-ui service configuration.
+ service:
+ # -- Annotations to be added for the Hubble UI service
+ annotations: {}
+ # -- The type of service used for Hubble UI access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # -- The port to use when the service type is set to NodePort.
+ nodePort: 31235
+
+ # -- Defines the base URL prefix for all hubble-ui HTTP requests.
+ # It needs to be changed in case the ingress for hubble-ui is configured under a sub-path.
+ # A trailing `/` is required for a custom path, e.g. `/service-map/`
+ baseUrl: "/"
+
+ # -- hubble-ui ingress configuration.
+ ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ className: ""
+ hosts:
+ - chart-example.local
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+
+# -- Method to use for identity allocation (`crd` or `kvstore`).
+identityAllocationMode: "crd"
+
+# -- (string) Time to wait before using new identity on endpoint identity change.
+# @default -- `"5s"`
+identityChangeGracePeriod: ""
+
+# -- Install iptables rules to skip netfilter connection tracking on all pod
+# traffic. This option is only effective when Cilium is running in direct
+# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
+# is running in a managed Kubernetes environment or in a chained CNI setup.
+installNoConntrackIptablesRules: false
+
+ipam:
+ # -- Configure IP Address Management mode.
+ # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
+ mode: "cluster-pool"
+ operator:
+ # -- Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList.
+ # IPv4 CIDR range to delegate to individual nodes for IPAM.
+ clusterPoolIPv4PodCIDR: "10.0.0.0/8"
+ # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
+ clusterPoolIPv4PodCIDRList: []
+ # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
+ clusterPoolIPv4MaskSize: 24
+ # -- Deprecated in favor of ipam.operator.clusterPoolIPv6PodCIDRList.
+ # IPv6 CIDR range to delegate to individual nodes for IPAM.
+ clusterPoolIPv6PodCIDR: "fd00::/104"
+ # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
+ clusterPoolIPv6PodCIDRList: []
+ # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
+ clusterPoolIPv6MaskSize: 120
+ # -- The maximum burst size when rate limiting access to external APIs.
+ # Also known as the token bucket capacity.
+ # @default -- `20`
+ externalAPILimitBurstSize: ~
+ # -- The maximum queries per second when rate limiting access to
+ # external APIs. Also known as the bucket refill rate, which is used to
+ # refill the bucket up to the burst size capacity.
+ # @default -- `4.0`
+ externalAPILimitQPS: ~
+
+# -- Configure the eBPF-based ip-masq-agent
+ipMasqAgent:
+ enabled: false
+# the config of nonMasqueradeCIDRs
+# config:
+ # nonMasqueradeCIDRs: []
+ # masqLinkLocal: false
+
+# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
+# iptablesLockTimeout: "5s"
+
+ipv4:
+ # -- Enable IPv4 support.
+ enabled: true
+
+ipv6:
+ # -- Enable IPv6 support.
+ enabled: false
+
+# -- Configure Kubernetes specific configuration
+k8s: {}
+ # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv4PodCIDR: false
+
+ # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv6PodCIDR: false
+
+# -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
+keepDeprecatedLabels: false
+
+# -- Keep the deprecated probes when deploying Cilium DaemonSet
+keepDeprecatedProbes: false
+
+startupProbe:
+ # -- failure threshold of startup probe.
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
+ failureThreshold: 105
+ # -- interval between checks of the startup probe
+ periodSeconds: 2
+livenessProbe:
+ # -- failure threshold of liveness probe
+ failureThreshold: 10
+ # -- interval between checks of the liveness probe
+ periodSeconds: 30
+readinessProbe:
+ # -- failure threshold of readiness probe
+ failureThreshold: 3
+ # -- interval between checks of the readiness probe
+ periodSeconds: 30
+
+# -- Configure the kube-proxy replacement in Cilium BPF datapath
+# Valid options are "disabled", "partial", "strict".
+# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/
+#kubeProxyReplacement: "disabled"
+
+# -- healthz server bind address for the kube-proxy replacement.
+# To enable, set the value to '0.0.0.0:10256' for all IPv4
+# addresses or to '[::]:10256' for all IPv6 addresses.
+# By default it is disabled.
+kubeProxyReplacementHealthzBindAddr: ""
+
+l2NeighDiscovery:
+ # -- Enable L2 neighbor discovery in the agent
+ enabled: true
+ # -- Override the agent's default neighbor resolution refresh period.
+ refreshPeriod: "30s"
+
+# -- Enable Layer 7 network policy.
+l7Proxy: true
+
+# -- Enable Local Redirect Policy.
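+# Note (assumption based on upstream docs): Local Redirect Policy requires the
+# socket-level load balancer (socketLB) to be enabled.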
+localRedirectPolicy: false
+
+# To include or exclude matched resources from cilium identity evaluation
+# labels: ""
+
+# logOptions allows you to define logging options. e.g.:
+# logOptions:
+# format: json
+
+# -- Enables periodic logging of system load
+logSystemLoad: false
+
+
+# -- Configure maglev consistent hashing
+maglev: {}
+ # -- tableSize is the size (parameter M) for the backend table of one
+ # service entry
+ # tableSize:
+
+ # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
+ # hashSeed:
+
+# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+enableIPv4Masquerade: true
+
+# -- Enables IPv6 BIG TCP support which increases maximum GSO/GRO limits for nodes and pods
+enableIPv6BIGTCP: false
+
+# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+enableIPv6Masquerade: true
+
+# -- Enables egress gateway to redirect and SNAT the traffic that leaves the
+# cluster.
+egressGateway:
+ enabled: false
+ # -- Install egress gateway IP rules and routes in order to properly steer
+ # egress gateway traffic to the correct ENI interface
+ installRoutes: false
+
+vtep:
+# -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+# Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+ enabled: false
+
+# -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+ endpoint: ""
+# -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+ cidr: ""
+# -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+ mask: ""
+# -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
+ mac: ""
+
+# -- (string) Allows explicitly specifying the IPv4 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+ipv4NativeRoutingCIDR: ""
+
+# -- (string) Allows explicitly specifying the IPv6 CIDR for native routing.
+# When specified, Cilium assumes networking for this CIDR is preconfigured and
+# hands traffic destined for that range to the Linux network stack without
+# applying any SNAT.
+# Generally speaking, specifying a native routing CIDR implies that Cilium can
+# depend on the underlying networking stack to route packets to their
+# destination. To offer a concrete example, if Cilium is configured to use
+# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+# the user must configure the routes to reach pods, either manually or by
+# setting the auto-direct-node-routes flag.
+ipv6NativeRoutingCIDR: ""
+
+# -- cilium-monitor sidecar.
+monitor:
+ # -- Enable the cilium-monitor sidecar.
+ enabled: false
+
+# -- Configure service load balancing
+loadBalancer:
+ # -- standalone enables the standalone L4LB which does not connect to
+ # kube-apiserver.
+ # standalone: false
+
+ # -- algorithm is the name of the load balancing algorithm for backend
+ # selection e.g.
random or maglev + # algorithm: random + + # -- mode is the operation mode of load balancing for remote backends + # e.g. snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # e.g. native, disabled + # acceleration: disabled + + # -- dsrDispatch configures whether IP option or IPIP encapsulation is + # used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + + # -- L7 LoadBalancer + l7: + # -- Enable L7 service load balancing via envoy proxy. + # The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + # will be forwarded to the local backend proxy to be load balanced to the service endpoints. + # Please refer to docs for supported annotations for more configuration. + # + # Applicable values: + # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + # - disabled: Disable L7 load balancing via service annotation. + backend: disabled + # -- List of ports from service to be automatically redirected to above backend. + # Any service exposing one of these ports will be automatically redirected. + # Fine-grained control can be achieved by using the service annotation. + ports: [] + # -- Default LB algorithm + # The default LB algorithm to be used for services, which can be overridden by the + # service annotation (e.g. service.cilium.io/lb-l7-algorithm) + # Applicable values: round_robin, least_request, random + algorithm: round_robin + +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. +# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" + +pprof: + # -- Enable pprof for cilium-agent + enabled: false + # -- Configure pprof listen address for cilium-agent + address: localhost + # -- Configure pprof listen port for cilium-agent + port: 6060 + +# -- Configure prometheus metrics on the configured port at /metrics +prometheus: + enabled: false + port: 9962 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-agent + labels: {} + # -- Annotations to add to ServiceMonitor cilium-agent + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. 
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-agent
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-agent
+ metricRelabelings: ~
+ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
+ trustCRDsExist: false
+ # -- Metrics that should be enabled or disabled from the default metric
+ # list. (+metric_foo to enable metric_foo, -metric_bar to disable
+ # metric_bar).
+ # ref: https://docs.cilium.io/en/stable/observability/metrics/
+ metrics: ~
+
+# -- Configure Istio proxy options.
+proxy:
+ prometheus:
+ enabled: true
+ port: "9964"
+ # -- Regular expression matching compatible Istio sidecar istio-proxy
+ # container image names
+ sidecarImageRegex: "cilium/istio_proxy"
+
+# -- Enable use of the remote node identity.
+# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+remoteNodeIdentity: true
+
+# -- Enable resource quotas for priority classes used in the cluster.
+resourceQuotas:
+ enabled: false
+ cilium:
+ hard:
+ # 5k nodes * 2 DaemonSets (Cilium and cilium node init)
+ pods: "10k"
+ operator:
+ hard:
+ # 15 "clusterwide" Cilium Operator pods for HA
+ pods: "15"
+
+# -- Enable session affinity for Kubernetes services.
+# sessionAffinity: false
+
+# -- Do not run Cilium agent when running with clean mode. Useful to completely
+# uninstall Cilium as it will stop Cilium from starting and creating artifacts
+# on the node.
+sleepAfterInit: false
+
+# -- Configure BPF socket operations configuration
+sockops:
+ # enabled enables installation of socket options acceleration.
+ enabled: false
+
+# -- Enable check of service source ranges (currently, only for LoadBalancer).
+svcSourceRangeCheck: true
+
+# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
+synchronizeK8sNodes: true
+
+# -- Configure TLS settings in the agent.
+tls:
+ # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
+ # (namely the secrets referenced by terminatingTLS and originatingTLS).
+ # Possible values:
+ # - local
+ # - k8s
+ secretsBackend: local
+
+ # -- Base64 encoded PEM values for the CA certificate and private key.
+ # This can be used as a common CA to generate certificates used by hubble and clustermesh components
+ ca:
+ # -- Optional CA cert. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ cert: ""
+
+ # -- Optional CA private key. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ key: ""
+
+ # -- Generated certificates validity duration in days. This will be used for auto generated CA.
+ certValidityDuration: 1095
+
+# -- Configure the encapsulation configuration for communication between nodes.
+# Possible values:
+# - disabled
+# - vxlan (default)
+# - geneve
+tunnel: "vxlan"
+
+# -- Configure VXLAN and Geneve tunnel port.
+# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
+tunnelPort: 0
+
+# -- Configure the underlying network MTU to overwrite auto-detected MTU.
+MTU: 0
+
+# -- Disable the usage of CiliumEndpoint CRD.
+disableEndpointCRD: "false"
+
+wellKnownIdentities:
+ # -- Enable the use of well-known identities.
+ enabled: false
+
+etcd:
+ # -- Enable etcd mode for the agent.
+ enabled: false
+
+ # -- cilium-etcd-operator image.
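+ # Example (illustrative; the image reference is an assumption, not a default):
+ # image:
+ # override: "quay.io/cilium/cilium-etcd-operator:v2.0.7"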
+ image: + override: ~ + repository: "${CILIUM_ETCD_OPERATOR_REPO}" + tag: "${CILIUM_ETCD_OPERATOR_VERSION}" + pullPolicy: "${PULL_POLICY}" + + # -- The priority class to use for cilium-etcd-operator + priorityClassName: "" + + # -- Additional cilium-etcd-operator container arguments. + extraArgs: [] + + # -- Additional cilium-etcd-operator volumes. + extraVolumes: [] + + # -- Additional cilium-etcd-operator volumeMounts. + extraVolumeMounts: [] + + # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Pod topology spread constraints for cilium-etcd-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-etcd-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + + # -- Security context to be added to cilium-etcd-operator pods + podSecurityContext: {} + + # -- Annotations to be added to cilium-etcd-operator pods + podAnnotations: {} + + # -- Labels to be added to cilium-etcd-operator pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- cilium-etcd-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + # -- Security context to be added to cilium-etcd-operator pods + securityContext: {} + # runAsUser: 0 + + # -- cilium-etcd-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + + # -- If etcd is behind a k8s service set this option to true so that Cilium + # does the service translation automatically without requiring a DNS to be + # running. + k8sService: false + + # -- Cluster domain for cilium-etcd-operator. + clusterDomain: cluster.local + + # -- List of etcd endpoints (not needed when using managed=true). + endpoints: + - https://CHANGE-ME:2379 + + # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if + # managed=true) + ssl: false + +operator: + # -- Enable the cilium-operator component (required). + enabled: true + + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: false + + # -- cilium-operator image. 
+ image: + override: ~ + repository: "${CILIUM_OPERATOR_BASE_REPO}" + tag: "${CILIUM_VERSION}" + # operator-generic-digest + genericDigest: ${OPERATOR_GENERIC_DIGEST} + # operator-azure-digest + azureDigest: ${OPERATOR_AZURE_DIGEST} + # operator-aws-digest + awsDigest: ${OPERATOR_AWS_DIGEST} + # operator-alibabacloud-digest + alibabacloudDigest: ${OPERATOR_ALIBABACLOUD_DIGEST} + useDigest: ${USE_DIGESTS} + pullPolicy: "${PULL_POLICY}" + suffix: "${CILIUM_OPERATOR_SUFFIX}" + + # -- Number of replicas to run for the cilium-operator deployment + replicas: 2 + + # -- The priority class to use for cilium-operator + priorityClassName: "" + + # -- DNS policy for Cilium operator pods. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + + # -- Pod topology spread constraints for cilium-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Additional cilium-operator container arguments. + extraArgs: [] + + # -- Additional cilium-operator environment variables. + extraEnv: [] + + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. + extraVolumes: [] + + # -- Additional cilium-operator volumeMounts. + extraVolumeMounts: [] + + # -- Security context to be added to cilium-operator pods + podSecurityContext: {} + + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + + # -- Labels to be added to cilium-operator pods + podLabels: {} + + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: {} + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + + # -- Interval for cilium node garbage collection. 
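+ # e.g. (illustrative): "10m0s" to garbage collect stale CiliumNode objects
+ # less often.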
+ nodeGCInterval: "5m0s"
+
+ # -- Skip CNP node status clean up at operator startup.
+ skipCNPStatusStartupClean: false
+
+ # -- Interval for identity garbage collection.
+ identityGCInterval: "15m0s"
+
+ # -- Timeout for identity heartbeats.
+ identityHeartbeatTimeout: "30m0s"
+
+ pprof:
+ # -- Enable pprof for cilium-operator
+ enabled: false
+ # -- Configure pprof listen address for cilium-operator
+ address: localhost
+ # -- Configure pprof listen port for cilium-operator
+ port: 6061
+
+ # -- Enable prometheus metrics for cilium-operator on the configured port at
+ # /metrics
+ prometheus:
+ enabled: false
+ port: 9963
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-operator
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-operator
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor cilium-operator
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-operator
+ metricRelabelings: ~
+
+ # -- Skip CRD creation for cilium-operator
+ skipCRDCreation: false
+
+ # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
+ # pod running.
+ removeNodeTaints: true
+
+ # -- Set Node condition NetworkUnavailable to 'false' with the reason
+ # 'CiliumIsUp' for nodes that have a healthy Cilium pod.
+ setNodeNetworkStatus: true
+
+ unmanagedPodWatcher:
+ # -- Restart any pods that are not managed by Cilium.
+ restart: true
+ # -- Interval, in seconds, to check if there are any pods that are not
+ # managed by Cilium.
+ intervalSeconds: 15
+
+nodeinit:
+ # -- Enable the node initialization DaemonSet
+ enabled: false
+
+ # -- node-init image.
+ image:
+ override: ~
+ repository: "${CILIUM_NODEINIT_REPO}"
+ tag: "${CILIUM_NODEINIT_VERSION}"
+ pullPolicy: "${PULL_POLICY}"
+
+ # -- The priority class to use for the nodeinit pod.
+ priorityClassName: ""
+
+ # -- node-init update strategy
+ updateStrategy:
+ type: RollingUpdate
+
+ # -- Additional nodeinit environment variables.
+ extraEnv: []
+
+ # -- Affinity for cilium-nodeinit
+ affinity: {}
+
+ # -- Node labels for nodeinit pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for nodeinit scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Annotations to be added to node-init pods.
+ podAnnotations: {}
+
+ # -- Labels to be added to node-init pods.
+ podLabels: {}
+
+ # -- nodeinit resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+
+ # -- Security context to be added to nodeinit pods.
+ securityContext:
+ privileged: false
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ add:
+ # Used in iptables.
+
+nodeinit:
+  # -- Enable the node initialization DaemonSet
+  enabled: false
+
+  # -- node-init image.
+  image:
+    override: ~
+    repository: "${CILIUM_NODEINIT_REPO}"
+    tag: "${CILIUM_NODEINIT_VERSION}"
+    pullPolicy: "${PULL_POLICY}"
+
+  # -- The priority class to use for the nodeinit pod.
+  priorityClassName: ""
+
+  # -- node-init update strategy
+  updateStrategy:
+    type: RollingUpdate
+
+  # -- Additional nodeinit environment variables.
+  extraEnv: []
+
+  # -- Affinity for cilium-nodeinit
+  affinity: {}
+
+  # -- Node labels for nodeinit pod assignment
+  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+  nodeSelector:
+    kubernetes.io/os: linux
+
+  # -- Node tolerations for nodeinit scheduling to nodes with taints
+  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+  tolerations:
+  - operator: Exists
+    # - key: "key"
+    #   operator: "Equal|Exists"
+    #   value: "value"
+    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  # -- Annotations to be added to node-init pods.
+  podAnnotations: {}
+
+  # -- Labels to be added to node-init pods.
+  podLabels: {}
+
+  # -- nodeinit resource limits & requests
+  # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  resources:
+    requests:
+      cpu: 100m
+      memory: 100Mi
+
+  # -- Security context to be added to nodeinit pods.
+  securityContext:
+    privileged: false
+    seLinuxOptions:
+      level: 's0'
+      # Running with spc_t since we have removed the privileged mode.
+      # Users can change it to a different type as long as they have the
+      # type available on the system.
+      type: 'spc_t'
+    capabilities:
+      add:
+      # Used in iptables. Consider removing once we are iptables-free
+      - SYS_MODULE
+      # Used for nsenter
+      - NET_ADMIN
+      - SYS_ADMIN
+      - SYS_CHROOT
+      - SYS_PTRACE
+
+  # -- bootstrapFile is the location of the file where the bootstrap timestamp is
+  # written by the node-init DaemonSet
+  bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"
+
+preflight:
+  # -- Enable Cilium pre-flight resources (required for upgrade)
+  enabled: false
+
+  # -- Cilium pre-flight image.
+  image:
+    override: ~
+    repository: "${CILIUM_REPO}"
+    tag: "${CILIUM_VERSION}"
+    # cilium-digest
+    digest: ${CILIUM_DIGEST}
+    useDigest: ${USE_DIGESTS}
+    pullPolicy: "${PULL_POLICY}"
+
+  # -- The priority class to use for the preflight pod.
+  priorityClassName: ""
+
+  # -- preflight update strategy
+  updateStrategy:
+    type: RollingUpdate
+
+  # -- Additional preflight environment variables.
+  extraEnv: []
+
+  # -- Additional preflight volumes.
+  extraVolumes: []
+
+  # -- Additional preflight volumeMounts.
+  extraVolumeMounts: []
+
+  # -- Affinity for cilium-preflight
+  affinity:
+    podAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - topologyKey: kubernetes.io/hostname
+        labelSelector:
+          matchLabels:
+            k8s-app: cilium
+
+  # -- Node labels for preflight pod assignment
+  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+  nodeSelector:
+    kubernetes.io/os: linux
+
+  # -- Node tolerations for preflight scheduling to nodes with taints
+  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+  tolerations:
+  - key: node.kubernetes.io/not-ready
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/master
+    effect: NoSchedule
+  - key: node-role.kubernetes.io/control-plane
+    effect: NoSchedule
+  - key: node.cloudprovider.kubernetes.io/uninitialized
+    effect: NoSchedule
+    value: "true"
+  - key: CriticalAddonsOnly
+    operator: "Exists"
+  # - key: "key"
+  #   operator: "Equal|Exists"
+  #   value: "value"
+  #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  # -- Security context to be added to preflight pods.
+  podSecurityContext: {}
+
+  # -- Annotations to be added to preflight pods
+  podAnnotations: {}
+
+  # -- Labels to be added to preflight pods.
+  podLabels: {}
+
+  # PodDisruptionBudget settings
+  podDisruptionBudget:
+    # -- enable PodDisruptionBudget
+    # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+    enabled: false
+    # -- Minimum number/percentage of pods that should remain scheduled.
+    # When it's set, maxUnavailable must be disabled by setting `maxUnavailable: null`
+    minAvailable: null
+    # -- Maximum number/percentage of pods that may be made unavailable
+    maxUnavailable: 1
+
+  # -- preflight resource limits & requests
+  # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  resources: {}
+  #   limits:
+  #     cpu: 4000m
+  #     memory: 4Gi
+  #   requests:
+  #     cpu: 100m
+  #     memory: 512Mi
+
+  # -- Security context to be added to preflight containers
+  securityContext: {}
+  #   runAsUser: 0
+
+  # -- Path to write the `--tofqdns-pre-cache` file to.
+  tofqdnsPreCache: ""
+
+  # -- Configure termination grace period for preflight Deployment and DaemonSet.
+  terminationGracePeriodSeconds: 1
+
+  # -- By default, the installed CNPs are validated before upgrading Cilium.
+  # This makes sure the policies deployed in the cluster have the right schema.
+  validateCNPs: true
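+
+# [Illustrative] Upstream's upgrade guide runs the pre-flight checks before the
+# actual upgrade. With this chart that could look like temporarily deploying
+#
+#   preflight:
+#     enabled: true
+#
+# waiting for the preflight pods to become ready, then upgrading Cilium and
+# disabling preflight again.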
+
+# -- Explicitly enable or disable priority class.
+# .Capabilities.KubeVersion cannot be set in `helm template` calls; it depends
+# on the version of the Kubernetes libraries that Helm was compiled against.
+# This option allows explicitly disabling the priority class, which is useful
+# for rendering charts for GKE clusters in advance.
+enableCriticalPriorityClass: true
+
+# disableEnvoyVersionCheck removes the check for Envoy, which can be useful
+# on AArch64 as the images do not currently ship a version of Envoy.
+#disableEnvoyVersionCheck: false
+
+clustermesh:
+  # -- Deploy clustermesh-apiserver for clustermesh
+  useAPIServer: false
+
+  # -- Clustermesh explicit configuration.
+  config:
+    # -- Enable the Clustermesh explicit configuration.
+    enabled: false
+    # -- Default DNS domain for the Clustermesh API servers.
+    # This is used when cluster addresses are not provided
+    # and IPs are used.
+    domain: mesh.cilium.io
+    # -- List of clusters to be peered in the mesh.
+    clusters: []
+    # clusters:
+    # # -- Name of the cluster
+    # - name: cluster1
+    # # -- Address of the cluster, use this if you created DNS records for
+    # # the cluster Clustermesh API server.
+    #   address: cluster1.mesh.cilium.io
+    # # -- Port of the cluster Clustermesh API server.
+    #   port: 2379
+    # # -- IPs of the cluster Clustermesh API server, use multiple ones when
+    # # you have multiple IPs to access the Clustermesh API server.
+    #   ips:
+    #   - 172.18.255.201
+    # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
+    #   tls:
+    #     cert: ""
+    #     key: ""
+
+  apiserver:
+    # -- Clustermesh API server image.
+    image:
+      override: ~
+      repository: "${CLUSTERMESH_APISERVER_REPO}"
+      tag: "${CILIUM_VERSION}"
+      # clustermesh-apiserver-digest
+      digest: ${CLUSTERMESH_APISERVER_DIGEST}
+      useDigest: ${USE_DIGESTS}
+      pullPolicy: "${PULL_POLICY}"
+
+    etcd:
+      # -- Clustermesh API server etcd image.
+      image:
+        override: ~
+        repository: "${ETCD_REPO}"
+        tag: "${ETCD_VERSION}"
+        pullPolicy: "${PULL_POLICY}"
+
+      # -- Specifies the resources for etcd container in the apiserver
+      resources: {}
+      #   requests:
+      #     cpu: 200m
+      #     memory: 256Mi
+      #   limits:
+      #     cpu: 1000m
+      #     memory: 256Mi
+
+      # -- Security context to be added to clustermesh-apiserver etcd containers
+      securityContext: {}
+
+      init:
+        # -- Specifies the resources for etcd init container in the apiserver
+        resources: {}
+        #   requests:
+        #     cpu: 100m
+        #     memory: 100Mi
+        #   limits:
+        #     cpu: 100m
+        #     memory: 100Mi
+
+    service:
+      # -- The type of service used for apiserver access.
+      type: NodePort
+      # -- Optional port to use as the node port for apiserver access.
+      #
+      # WARNING: make sure to configure a different NodePort in each cluster if
+      # kube-proxy replacement is enabled, as Cilium is currently affected by a known
+      # bug (#24692) when NodePorts are handled by the KPR implementation. If a service
+      # with the same NodePort exists both in the local and the remote cluster, all
+      # traffic originating from inside the cluster and targeting the corresponding
+      # NodePort will be redirected to a local backend, regardless of whether the
+      # destination node belongs to the local or the remote cluster.
+      nodePort: 32379
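+      # [Illustrative] Given the warning above, meshed clusters running
+      # kube-proxy replacement should each use a distinct value here, e.g.
+      # 32379 in one cluster and 32380 in the other (example numbers only).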
+      # -- Optional loadBalancer IP address to use with type LoadBalancer.
+      # loadBalancerIP:
+
+      # -- Annotations for the clustermesh-apiserver
+      # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
+      # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+      annotations: {}
+
+    # -- Number of replicas to run for the clustermesh-apiserver deployment.
+    replicas: 1
+
+    # -- Additional clustermesh-apiserver environment variables.
+    extraEnv: []
+
+    # -- Additional clustermesh-apiserver volumes.
+    extraVolumes: []
+
+    # -- Additional clustermesh-apiserver volumeMounts.
+    extraVolumeMounts: []
+
+    # -- Security context to be added to clustermesh-apiserver containers
+    securityContext: {}
+
+    # -- Security context to be added to clustermesh-apiserver pods
+    podSecurityContext: {}
+
+    # -- Annotations to be added to clustermesh-apiserver pods
+    podAnnotations: {}
+
+    # -- Labels to be added to clustermesh-apiserver pods
+    podLabels: {}
+
+    # PodDisruptionBudget settings
+    podDisruptionBudget:
+      # -- enable PodDisruptionBudget
+      # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+      enabled: false
+      # -- Minimum number/percentage of pods that should remain scheduled.
+      # When it's set, maxUnavailable must be disabled by setting `maxUnavailable: null`
+      minAvailable: null
+      # -- Maximum number/percentage of pods that may be made unavailable
+      maxUnavailable: 1
+
+    # -- Resource requests and limits for the clustermesh-apiserver container
+    # of the clustermesh-apiserver deployment.
+    resources: {}
+    #   requests:
+    #     cpu: 100m
+    #     memory: 64Mi
+    #   limits:
+    #     cpu: 1000m
+    #     memory: 1024M
+
+    # -- Affinity for clustermesh-apiserver
+    affinity:
+      podAntiAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - topologyKey: kubernetes.io/hostname
+          labelSelector:
+            matchLabels:
+              k8s-app: clustermesh-apiserver
+
+    # -- Pod topology spread constraints for clustermesh-apiserver
+    topologySpreadConstraints: []
+    #   - maxSkew: 1
+    #     topologyKey: topology.kubernetes.io/zone
+    #     whenUnsatisfiable: DoNotSchedule
+
+    # -- Node labels for pod assignment
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+    nodeSelector:
+      kubernetes.io/os: linux
+
+    # -- Node tolerations for pod assignment on nodes with taints
+    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+    tolerations: []
+
+    # -- clustermesh-apiserver update strategy
+    updateStrategy:
+      type: RollingUpdate
+      rollingUpdate:
+        maxUnavailable: 1
+
+    # -- The priority class to use for clustermesh-apiserver
+    priorityClassName: ""
+
+    tls:
+      # -- Configure automatic TLS certificate generation.
+      # A Kubernetes CronJob is used to generate any
+      # certificates not provided by the user at installation
+      # time.
+      auto:
+        # -- When set to true, automatically generate a CA and certificates to
+        # enable mTLS between clustermesh-apiserver and external workload instances.
+        # If set to false, the certs must be provided by setting the appropriate values below.
+        enabled: true
+        # Sets the method to auto-generate certificates. Supported values:
+        # - helm: This method uses Helm to generate all certificates.
+        # - cronJob: This method uses a Kubernetes CronJob to generate any
+        #     certificates not provided by the user at installation
+        #     time.
+        # - certmanager: This method uses cert-manager to generate & rotate certificates.
+        method: helm
+        # -- Generated certificates validity duration in days.
+        certValidityDuration: 1095
+        # -- Schedule for certificates regeneration (regardless of their expiration date).
+        # Only used if method is "cronJob". If nil, then no recurring job will be created.
+        # Instead, only the one-shot job is deployed to generate the certificates at
+        # installation time.
+        #
+        # Due to the out-of-band distribution of client certs to external workloads the
+        # CA is (re)generated only if it is not provided as a helm value and the k8s
+        # secret is manually deleted.
+        #
+        # Defaults to none. Commented syntax gives midnight of the first day of every
+        # fourth month. For syntax, see
+        # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+        # schedule: "0 0 1 */4 *"
+
+        # [Example]
+        # certManagerIssuerRef:
+        #   group: cert-manager.io
+        #   kind: ClusterIssuer
+        #   name: ca-issuer
+        # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
+        certManagerIssuerRef: {}
+      # -- base64 encoded PEM values for the ExternalWorkload CA certificate and private key.
+      ca:
+        # -- Optional CA cert. If it is provided, it will be used by the 'cronJob' method to
+        # generate all other certificates. Otherwise, an ephemeral CA is generated.
+        cert: ""
+        # -- Optional CA private key. If it is provided, it will be used by the 'cronJob' method to
+        # generate all other certificates. Otherwise, an ephemeral CA is generated.
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
+      # Used if 'auto' is not enabled.
+      server:
+        cert: ""
+        key: ""
+        # -- Extra DNS names added to certificate when it's auto-generated
+        extraDnsNames: []
+        # -- Extra IP addresses added to certificate when it's auto-generated
+        extraIpAddresses: []
+      # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
+      # Used if 'auto' is not enabled.
+      admin:
+        cert: ""
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
+      # Used if 'auto' is not enabled.
+      client:
+        cert: ""
+        key: ""
+      # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
+      # Used if 'auto' is not enabled.
+      remote:
+        cert: ""
+        key: ""
+
+# -- Configure external workloads support
+externalWorkloads:
+  # -- Enable support for external workloads, such as VMs (false by default).
+  enabled: false
+
+# -- cgroup-related configuration
+cgroup:
+  autoMount:
+    # -- Enable auto mount of cgroup2 filesystem.
+    # When `autoMount` is enabled, the cgroup2 filesystem is mounted at
+    # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+    # If `autoMount` is disabled, it is expected that the cgroup2 filesystem is
+    # already mounted at the specified `cgroup.hostRoot` path; the volume is then
+    # mounted inside the cilium agent pod at the same path.
+    enabled: true
+    # -- Init Container Cgroup Automount resource limits & requests
+    resources: {}
+    #   limits:
+    #     cpu: 100m
+    #     memory: 128Mi
+    #   requests:
+    #     cpu: 100m
+    #     memory: 128Mi
+  # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+  hostRoot: /run/cilium/cgroupv2
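+
+# [Illustrative] On hosts that already provide a cgroup2 mount (for example at
+# /sys/fs/cgroup), auto-mounting can be turned off and the existing mount
+# reused, as described above:
+#
+#   cgroup:
+#     autoMount:
+#       enabled: false
+#     hostRoot: /sys/fs/cgroup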
+
+# -- Configure whether to auto-detect the terminating state of endpoints
+# in order to support graceful termination.
+enableK8sTerminatingEndpoint: true
+
+# -- Configure whether to unload DNS policy rules on graceful shutdown
+# dnsPolicyUnloadOnShutdown: false
+
+# -- Configure the key of the taint indicating that Cilium is not ready on the node.
+# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint in its decisions, allowing the cluster to scale up.
+agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
+
+dnsProxy:
+  # -- DNS response code for rejecting DNS requests, available options are 'nameError' and 'refused'.
+  dnsRejectResponseCode: refused
+  # -- Allow the DNS proxy to compress responses to endpoints when they are larger than 512 bytes, or than the EDNS0 maximum if present.
+  enableDnsCompression: true
+  # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+  endpointMaxIpPerHostname: 50
+  # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+  idleConnectionGracePeriod: 0s
+  # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+  maxDeferredConnectionDeletes: 10000
+  # -- The minimum time, in seconds, to use DNS data for toFQDNs policies.
+  minTtl: 3600
+  # -- DNS cache data at this path is preloaded on agent startup.
+  preCache: ""
+  # -- Global port on which the in-agent DNS proxy should listen. The default of 0 means an OS-assigned port.
+  proxyPort: 0
+  # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+  proxyResponseMaxDelay: 100ms
+
+# -- SCTP configuration values
+sctp:
+  # -- Enable SCTP support. NOTE: SCTP support currently does not include rewriting ports or multihoming.
+  enabled: false
diff --git a/vendir.lock.yml b/vendir.lock.yml
new file mode 100644
index 00000000..b2edab59
--- /dev/null
+++ b/vendir.lock.yml
@@ -0,0 +1,15 @@
+apiVersion: vendir.k14s.io/v1alpha1
+directories:
+- contents:
+  - git:
+      commitTitle: Enable PDB for cilium-operator
+      sha: 25c449534cc325a5798fc7c839b8ac33591b3516
+      tags:
+      - 1.13.6-21-g25c449534c
+    path: cilium
+  path: vendor
+- contents:
+  - directory: {}
+    path: .
+  path: helm/cilium
+kind: LockConfig
diff --git a/vendir.yml b/vendir.yml
new file mode 100644
index 00000000..aaefd488
--- /dev/null
+++ b/vendir.yml
@@ -0,0 +1,16 @@
+apiVersion: vendir.k14s.io/v1alpha1
+kind: Config
+directories:
+  - path: vendor
+    contents:
+      - path: cilium
+        git:
+          url: https://github.com/giantswarm/cilium
+          ref: main
+          includePaths:
+            - install/kubernetes/cilium/**/*
+  - path: helm/cilium
+    contents:
+      - path: .
+        directory:
+          path: vendor/cilium/install/kubernetes/cilium
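+# Note (illustrative): vendir's git `ref` accepts a branch, tag, or commit sha,
+# so the sync could also be pinned to the exact commit recorded in
+# vendir.lock.yml, e.g. `ref: 25c449534cc325a5798fc7c839b8ac33591b3516`.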