kuma-cp configuration reference

Kuma CP configuration

# Environment type. Available values are: "kubernetes" or "universal"
environment: universal # ENV: KUMA_ENVIRONMENT
# Mode in which Kuma CP is running. Available values are: "standalone", "global", "zone"
mode: standalone # ENV: KUMA_MODE

# Resource Store configuration
store:
  # Type of Store used in the Control Plane. Available values are: "kubernetes", "postgres" or "memory"
  type: memory # ENV: KUMA_STORE_TYPE

  # Kubernetes Store configuration (used when store.type=kubernetes)
  kubernetes:
    # Namespace where Control Plane is installed to.
    systemNamespace: kuma-system # ENV: KUMA_STORE_KUBERNETES_SYSTEM_NAMESPACE

  # Postgres Store configuration (used when store.type=postgres)
  postgres:
    # Host of the Postgres DB
    host: 127.0.0.1 # ENV: KUMA_STORE_POSTGRES_HOST
    # Port of the Postgres DB
    port: 15432 # ENV: KUMA_STORE_POSTGRES_PORT
    # User of the Postgres DB
    user: kuma # ENV: KUMA_STORE_POSTGRES_USER
    # Password of the Postgres DB
    password: kuma # ENV: KUMA_STORE_POSTGRES_PASSWORD
    # Database name of the Postgres DB
    dbName: kuma # ENV: KUMA_STORE_POSTGRES_DB_NAME
    # Driver to use, one of: pgx, postgres
    driverName: pgx # ENV: KUMA_STORE_POSTGRES_DRIVER_NAME
    # Connection Timeout to the DB in seconds
    connectionTimeout: 5 # ENV: KUMA_STORE_POSTGRES_CONNECTION_TIMEOUT
    # MaxConnectionLifetime (applied only when driverName=pgx) is the duration since creation after which a connection will be automatically closed
    maxConnectionLifetime: "1h" # ENV: KUMA_STORE_POSTGRES_MAX_CONNECTION_LIFETIME
    # MaxConnectionLifetimeJitter (applied only when driverName=pgx) is the duration after maxConnectionLifetime to randomly decide to close a connection.
    # This helps prevent all connections from being closed at the exact same time, starving the pool.
    maxConnectionLifetimeJitter: "1m" # ENV: KUMA_STORE_POSTGRES_MAX_CONNECTION_LIFETIME_JITTER
    # HealthCheckInterval (applied only when driverName=pgx) is the duration between checks of the health of idle connections.
    healthCheckInterval: "30s" # ENV: KUMA_STORE_POSTGRES_HEALTH_CHECK_INTERVAL
    # MinOpenConnections (applied only when driverName=pgx) is the minimum number of open connections to the database
    minOpenConnections: 0 # ENV: KUMA_STORE_POSTGRES_MIN_OPEN_CONNECTIONS
    # MaxOpenConnections is the maximum number of open connections to the database
    # `0` value means number of open connections is unlimited
    maxOpenConnections: 50 # ENV: KUMA_STORE_POSTGRES_MAX_OPEN_CONNECTIONS
    # MaxIdleConnections (applied only when driverName=postgres) is the maximum number of connections in the idle connection pool
    # <0 value means no idle connections and 0 means default max idle connections
    maxIdleConnections: 50  # ENV: KUMA_STORE_POSTGRES_MAX_IDLE_CONNECTIONS
    # TLS settings
    tls:
      # Mode of TLS connection. Available values are: "disable", "verifyNone", "verifyCa", "verifyFull"
      mode: disable # ENV: KUMA_STORE_POSTGRES_TLS_MODE
      # Path to TLS Certificate of the client. Required when server has METHOD=cert
      certPath: # ENV: KUMA_STORE_POSTGRES_TLS_CERT_PATH
      # Path to TLS Key of the client. Required when server has METHOD=cert
      keyPath: # ENV: KUMA_STORE_POSTGRES_TLS_KEY_PATH
      # Path to the root certificate. Used in verifyCa and verifyFull modes.
      caPath: # ENV: KUMA_STORE_POSTGRES_TLS_ROOT_CERT_PATH
    # MinReconnectInterval (applied only when driverName=postgres) controls the duration to wait before trying to
    # re-establish the database connection after connection loss. After each
    # consecutive failure this interval is doubled, until MaxReconnectInterval
    # is reached. Successfully completing the connection establishment procedure
    # resets the interval back to MinReconnectInterval.
    minReconnectInterval: "10s" # ENV: KUMA_STORE_POSTGRES_MIN_RECONNECT_INTERVAL
    # MaxReconnectInterval (applied only when driverName=postgres) controls the maximum possible duration to wait before trying
    # to re-establish the database connection after connection loss.
    maxReconnectInterval: "60s" # ENV: KUMA_STORE_POSTGRES_MAX_RECONNECT_INTERVAL

  # Cache for read only operations. This cache is local to the instance of the control plane.
  cache:
    # If true then cache is enabled
    enabled: true # ENV: KUMA_STORE_CACHE_ENABLED
    # Expiration time for elements in cache.
    expirationTime: 1s # ENV: KUMA_STORE_CACHE_EXPIRATION_TIME

  # Upsert (get and update) configuration
  upsert:
    # Base time for exponential backoff on upsert operations when retry is enabled
    conflictRetryBaseBackoff: 100ms # ENV: KUMA_STORE_UPSERT_CONFLICT_RETRY_BASE_BACKOFF
    # Max retries on upsert (get and update) operation when retry is enabled
    conflictRetryMaxTimes: 5 # ENV: KUMA_STORE_UPSERT_CONFLICT_RETRY_MAX_TIMES

  # If true, skips validation of resource delete.
  # For example you don't have to delete all Dataplane objects before you delete a Mesh
  unsafeDelete: false # ENV: KUMA_STORE_UNSAFE_DELETE

# Configuration of Bootstrap Server, which provides bootstrap config to Dataplanes
bootstrapServer:
  # Parameters of bootstrap configuration
  params:
    # Address of Envoy Admin
    adminAddress: 127.0.0.1 # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_ADMIN_ADDRESS
    # Port of Envoy Admin
    adminPort: 9901 # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_ADMIN_PORT
    # Path to access log file of Envoy Admin
    adminAccessLogPath: /dev/null # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_ADMIN_ACCESS_LOG_PATH
    # Host of XDS Server. By default it is the same host as the one used by kuma-dp to connect to the control plane
    xdsHost: "" # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_XDS_HOST
    # Port of XDS Server. By default it is autoconfigured from KUMA_DP_SERVER_PORT
    xdsPort: 0 # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_XDS_PORT
    # Connection timeout to the XDS Server
    xdsConnectTimeout: 1s # ENV: KUMA_BOOTSTRAP_SERVER_PARAMS_XDS_CONNECT_TIMEOUT

#  Monitoring Assignment Discovery Service (MADS) server configuration
monitoringAssignmentServer:
  # Port of a gRPC server that serves Monitoring Assignment Discovery Service (MADS).
  port: 5676 # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_PORT
  # Which MADS API versions to serve
  apiVersions: ["v1"] # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_API_VERSIONS
  # Interval for re-generating monitoring assignments for clients connected to the Control Plane.
  assignmentRefreshInterval: 1s # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_ASSIGNMENT_REFRESH_INTERVAL
  # The default timeout for a single fetch-based discovery request, if not specified
  defaultFetchTimeout: 30s # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_DEFAULT_FETCH_TIMEOUT
  # Path to TLS certificate file
  tlsCertFile: "" # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_TLS_CERT_FILE
  # Path to TLS key file
  tlsKeyFile: "" # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_TLS_KEY_FILE
  # TlsMinVersion the minimum version of TLS used across all the Kuma Servers.
  tlsMinVersion: "TLSv1_2" # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_TLS_MIN_VERSION
  # TlsMaxVersion the maximum version of TLS used across all the Kuma Servers.
  tlsMaxVersion: # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_TLS_MAX_VERSION
  # TlsCipherSuites the list of cipher suites to be used across all the Kuma Servers.
  tlsCipherSuites: [] # ENV: KUMA_MONITORING_ASSIGNMENT_SERVER_TLS_CIPHER_SUITES

# Envoy XDS server configuration
xdsServer:
  # Interval for re-generating configuration for Dataplanes connected to the Control Plane
  dataplaneConfigurationRefreshInterval: 1s # ENV: KUMA_XDS_SERVER_DATAPLANE_CONFIGURATION_REFRESH_INTERVAL
  # Interval for flushing status of Dataplanes connected to the Control Plane
  dataplaneStatusFlushInterval: 10s # ENV: KUMA_XDS_SERVER_DATAPLANE_STATUS_FLUSH_INTERVAL
  # Backoff that is executed when Control Plane is sending the response that was previously rejected by Dataplane
  nackBackoff: 5s # ENV: KUMA_XDS_SERVER_NACK_BACKOFF
  # A delay between the proxy terminating a connection and the CP trying to deregister the proxy.
  # It is used only in universal mode when you use the direct lifecycle.
  # Setting this to 0s disables the delay.
  # Disabling it may cause a race condition in which one CP instance removes the proxy object
  # while the proxy is still connected to another CP instance.
  dataplaneDeregistrationDelay: 10s # ENV: KUMA_XDS_DATAPLANE_DEREGISTRATION_DELAY

# API Server configuration
apiServer:
  # HTTP configuration of the API Server
  http:
    # If true then API Server will be served on HTTP
    enabled: true # ENV: KUMA_API_SERVER_HTTP_ENABLED
    # Network interface on which HTTP API Server will be exposed
    interface: 0.0.0.0 # ENV: KUMA_API_SERVER_HTTP_INTERFACE
    # Port of the API Server
    port: 5681 # ENV: KUMA_API_SERVER_HTTP_PORT
  # HTTPS configuration of the API Server
  https:
    # If true then API Server will be served on HTTPS
    enabled: true # ENV: KUMA_API_SERVER_HTTPS_ENABLED
    # Network interface on which HTTPS API Server will be exposed
    interface: 0.0.0.0 # ENV: KUMA_API_SERVER_HTTPS_INTERFACE
    # Port of the HTTPS API Server
    port: 5682 # ENV: KUMA_API_SERVER_HTTPS_PORT
    # Path to TLS certificate file. Autoconfigured from KUMA_GENERAL_TLS_CERT_FILE if empty
    tlsCertFile: "" # ENV: KUMA_API_SERVER_HTTPS_TLS_CERT_FILE
    # Path to TLS key file. Autoconfigured from KUMA_GENERAL_TLS_KEY_FILE if empty
    tlsKeyFile: "" # ENV: KUMA_API_SERVER_HTTPS_TLS_KEY_FILE
    # Path to the CA certificate which is used to sign client certificates. It is used only for verifying client certificates.
    tlsCaFile: "" # ENV: KUMA_API_SERVER_HTTPS_CLIENT_CERTS_CA_FILE
    # TlsMinVersion the minimum version of TLS used across all the Kuma Servers.
    tlsMinVersion: "TLSv1_2" # ENV: KUMA_API_SERVER_HTTPS_TLS_MIN_VERSION
    # TlsMaxVersion the maximum version of TLS used across all the Kuma Servers.
    tlsMaxVersion: # ENV: KUMA_API_SERVER_HTTPS_TLS_MAX_VERSION
    # TlsCipherSuites the list of cipher suites to be used across all the Kuma Servers.
    tlsCipherSuites: [] # ENV: KUMA_API_SERVER_HTTPS_TLS_CIPHER_SUITES
    # If true, then HTTPS connection will require client cert.
    requireClientCert: false # ENV: KUMA_API_SERVER_HTTPS_REQUIRE_CLIENT_CERT
  # Authentication configuration for administrative endpoints like Dataplane Token or managing Secrets
  auth:
    # Directory of authorized client certificates (only validated when HTTPS is enabled)
    clientCertsDir: "" # ENV: KUMA_API_SERVER_AUTH_CLIENT_CERTS_DIR
  # API Server authentication configuration
  authn:
    # Type of authentication mechanism (available values: "adminClientCerts", "tokens")
    type: tokens # ENV: KUMA_API_SERVER_AUTHN_TYPE
    # Localhost is authenticated as a user admin of group admin
    localhostIsAdmin: true # ENV: KUMA_API_SERVER_AUTHN_LOCALHOST_IS_ADMIN
    # Configuration for tokens authentication
    tokens:
      # If true then User Token with name admin and group admin will be created and placed as admin-user-token Kuma secret
      bootstrapAdminToken: true # ENV: KUMA_API_SERVER_AUTHN_TOKENS_BOOTSTRAP_ADMIN_TOKEN
      # If true the control plane token issuer is enabled. It's recommended to set it to false when all the tokens are issued offline.
      enableIssuer: true # ENV: KUMA_API_SERVER_AUTHN_TOKENS_ENABLE_ISSUER
      # Token validator configuration
      validator:
        # If true then Kuma secrets with prefix "user-token-signing-key" are considered as signing keys.
        useSecrets: true # ENV: KUMA_API_SERVER_AUTHN_TOKENS_VALIDATOR_USE_SECRETS
        # List of public keys used to validate the token. Example:
        # - kid: 1
        #   key: |
        #     -----BEGIN RSA PUBLIC KEY-----
        #     MIIBCgKCAQEAq....
        #     -----END RSA PUBLIC KEY-----
        # - kid: 2
        #   keyFile: /keys/public.pem
        publicKeys: []

  # If true, then API Server will operate in read only mode (serving GET requests)
  readOnly: false # ENV: KUMA_API_SERVER_READ_ONLY
  # Allowed domains for Cross-Origin Resource Sharing. The value can be either a domain or a regexp
  corsAllowedDomains:
    - ".*" # ENV: KUMA_API_SERVER_CORS_ALLOWED_DOMAINS
  # Can be used if you use a reverse proxy
  rootUrl: "" # ENV: KUMA_API_SERVER_ROOT_URL
  # The path to serve the API from
  basePath: "/" # ENV: KUMA_API_SERVER_BASE_PATH
  # configuration specific to the GUI
  gui:
    # Whether to serve the gui (if mode=zone this has no effect)
    enabled: true # ENV: KUMA_API_SERVER_GUI_ENABLED
    # Can be used if you use a reverse proxy or want to serve the gui from a different path
    rootUrl: "" # ENV: KUMA_API_SERVER_GUI_ROOT_URL
    # The path to serve the GUI from
    basePath: "/gui" # ENV: KUMA_API_SERVER_GUI_BASE_PATH

# Environment-specific configuration
runtime:
  # Kubernetes-specific configuration
  kubernetes:
    # Service name of the Kuma Control Plane. It is used to point Kuma DP to proper URL.
    controlPlaneServiceName: kuma-control-plane # ENV: KUMA_RUNTIME_KUBERNETES_CONTROL_PLANE_SERVICE_NAME
    # Name of Service Account that is used to run the Control Plane
    serviceAccountName: "system:serviceaccount:kuma-system:kuma-control-plane" # ENV: KUMA_RUNTIME_KUBERNETES_SERVICE_ACCOUNT_NAME
    # Taint controller that prevents applications from scheduling until CNI is ready.
    nodeTaintController:
      # If true enables the taint controller.
      enabled: false # ENV: KUMA_RUNTIME_KUBERNETES_NODE_TAINT_CONTROLLER_ENABLED
      # Value of app label on CNI pod that indicates if node can be ready.
      cniApp: "" # ENV: KUMA_RUNTIME_KUBERNETES_NODE_TAINT_CONTROLLER_CNI_APP
      # Value of CNI namespace.
      cniNamespace: "kube-system" # ENV: KUMA_RUNTIME_KUBERNETES_NODE_TAINT_CONTROLLER_CNI_NAMESPACE
    # Admission WebHook Server configuration
    admissionServer:
      # Address the Admission WebHook Server should be listening on
      address: # ENV: KUMA_RUNTIME_KUBERNETES_ADMISSION_SERVER_ADDRESS
      # Port the Admission WebHook Server should be listening on
      port: 5443 # ENV: KUMA_RUNTIME_KUBERNETES_ADMISSION_SERVER_PORT
      # Directory with a TLS cert and private key for the Admission WebHook Server.
      # TLS certificate file must be named `tls.crt`.
      # TLS key file must be named `tls.key`.
      certDir: # ENV: KUMA_RUNTIME_KUBERNETES_ADMISSION_SERVER_CERT_DIR
    # Injector defines configuration of a Kuma Sidecar Injector.
    injector:
      # if true runs kuma-cp in CNI compatible mode
      cniEnabled: false # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_CNI_ENABLED
      # list of exceptions for Kuma injection
      exceptions:
        # a map of labels for exceptions. If a pod matches a label with the given value, Kuma won't be injected. Specify '*' to match any value.
        labels:
          openshift.io/build.name: "*"
          openshift.io/deployer-pod-for.name: "*"
      # VirtualProbesEnabled enables automatic conversion of HttpGet probes to virtual probes.
      # A virtual probe is served on a sub-path of the insecure port 'virtualProbesPort',
      # i.e. :8080/health/readiness -> :9000/8080/health/readiness where 9000 is virtualProbesPort
      virtualProbesEnabled: true # ENV: KUMA_RUNTIME_KUBERNETES_VIRTUAL_PROBES_ENABLED
      # VirtualProbesPort is a port for exposing virtual probes which are not secured by mTLS
      virtualProbesPort: 9000 # ENV: KUMA_RUNTIME_KUBERNETES_VIRTUAL_PROBES_PORT
      # CaCertFile is CA certificate which will be used to verify a connection to the control plane.
      caCertFile:  # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_CA_CERT_FILE
      # SidecarContainer defines configuration of the Kuma sidecar container.
      sidecarContainer:
        # Image name.
        image: kuma/kuma-dp:latest # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_IMAGE
        # Redirect port for inbound traffic.
        redirectPortInbound: 15006 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_REDIRECT_PORT_INBOUND
        # Redirect port for inbound IPv6 traffic.
        redirectPortInboundV6: 15010 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_REDIRECT_PORT_INBOUND_V6
        # Redirect port for outbound traffic.
        redirectPortOutbound: 15001 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_REDIRECT_PORT_OUTBOUND
        # User ID.
        uid: 5678 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_UID
        # Group ID.
        gid: 5678 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_GUI
        # Drain time for listeners.
        drainTime: 30s # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_DRAIN_TIME
        # Readiness probe.
        readinessProbe:
          # Number of seconds after the container has started before readiness probes are initiated.
          initialDelaySeconds: 1 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_READINESS_PROBE_INITIAL_DELAY_SECONDS
          # Number of seconds after which the probe times out.
          timeoutSeconds: 3 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_READINESS_PROBE_TIMEOUT_SECONDS
          # How often (in seconds) to perform the probe.
          periodSeconds: 5 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_READINESS_PROBE_PERIOD_SECONDS
          # Minimum consecutive successes for the probe to be considered successful after having failed.
          successThreshold: 1 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_READINESS_PROBE_SUCCESS_THRESHOLD
          # Minimum consecutive failures for the probe to be considered failed after having succeeded.
          failureThreshold: 12 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_READINESS_PROBE_FAILURE_THRESHOLD
        # Liveness probe.
        livenessProbe:
          # Number of seconds after the container has started before liveness probes are initiated.
          initialDelaySeconds: 60 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_LIVENESS_PROBE_INITIAL_DELAY_SECONDS
          # Number of seconds after which the probe times out.
          timeoutSeconds: 3 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_LIVENESS_PROBE_TIMEOUT_SECONDS
          # How often (in seconds) to perform the probe.
          periodSeconds: 5 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_LIVENESS_PROBE_PERIOD_SECONDS
          # Minimum consecutive failures for the probe to be considered failed after having succeeded.
          failureThreshold: 12 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_LIVENESS_PROBE_FAILURE_THRESHOLD
        # Compute resource requirements.
        resources:
          # Minimum amount of compute resources required.
          requests:
            # CPU, in cores. (500m = .5 cores)
            cpu: 50m # ENV: KUMA_INJECTOR_SIDECAR_CONTAINER_RESOURCES_REQUESTS_CPU
            # Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
            memory: 64Mi # ENV: KUMA_INJECTOR_SIDECAR_CONTAINER_RESOURCES_REQUESTS_MEMORY
          # Maximum amount of compute resources allowed.
          limits:
            # CPU, in cores. (500m = .5 cores)
            cpu: 1000m # ENV: KUMA_INJECTOR_SIDECAR_CONTAINER_RESOURCES_LIMITS_CPU
            # Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
            memory: 512Mi # ENV: KUMA_INJECTOR_SIDECAR_CONTAINER_RESOURCES_LIMITS_MEMORY
        # Additional environment variables that can be placed on Kuma DP sidecar
        envVars: {} # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_SIDECAR_CONTAINER_ENV_VARS
      # InitContainer defines configuration of the Kuma init container
      initContainer:
        # Image name.
        image: kuma/kuma-init:latest # ENV: KUMA_INJECTOR_INIT_CONTAINER_IMAGE
      # ContainerPatches is an optional list of ContainerPatch names which will be applied
      # to init and sidecar containers if workload is not annotated with a patch list.
      containerPatches: [ ] # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_CONTAINER_PATCHES
      # Configuration for traffic intercepted by the sidecar
      sidecarTraffic:
        # List of inbound ports that will be excluded from interception.
        # This setting is applied on every pod unless traffic.kuma.io/exclude-inbound-ports annotation is specified on Pod.
        excludeInboundPorts: [ ] # ENV: KUMA_RUNTIME_KUBERNETES_SIDECAR_TRAFFIC_EXCLUDE_INBOUND_PORTS
        # List of outbound ports that will be excluded from interception.
        # This setting is applied on every pod unless traffic.kuma.io/exclude-outbound-ports annotation is specified on Pod.
        excludeOutboundPorts: [ ] # ENV: KUMA_RUNTIME_KUBERNETES_SIDECAR_TRAFFIC_EXCLUDE_OUTBOUND_PORTS
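        # Example (illustrative port numbers): skipping interception for an inbound
        # health port and an outbound database port on every pod could look like:
        # excludeInboundPorts: [ 9000 ]
        # excludeOutboundPorts: [ 5432 ]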
      builtinDNS:
        # Use the built-in DNS
        enabled: true # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_BUILTIN_DNS_ENABLED
        # Redirect port for DNS
        port: 15053 # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_BUILTIN_DNS_PORT
      transparentProxyV1: false # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_TRANSPARENT_PROXY_V1
      # EBPF defines configuration for eBPF, used when the transparent proxy is marked to be
      # installed using eBPF instead of iptables
      ebpf:
        # Install transparent proxy using ebpf
        enabled: false # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_ENABLED
        # Name of the environment variable which will include the IP address of the pod
        instanceIPEnvVarName: INSTANCE_IP # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_INSTANCE_IP_ENV_VAR_NAME
        # Path where BPF file system will be mounted for pinning ebpf programs and maps
        bpffsPath: /sys/fs/bpf # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_BPFFS_PATH
        # Path of mounted cgroup2
        cgroupPath: /sys/fs/cgroup # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_CGROUP_PATH
        # Name of the network interface to which TC programs should be attached.
        # When not specified, we will try to automatically determine it
        tcAttachIface: "" # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_TC_ATTACH_IFACE
        # Path where compiled eBPF programs are placed
        programsSourcePath: /kuma/ebpf # ENV: KUMA_RUNTIME_KUBERNETES_INJECTOR_EBPF_PROGRAMS_SOURCE_PATH
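    # MarshalingCacheExpirationTime defines how long marshaled objects are kept in the cache (assumed description; the field is undocumented above)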
    marshalingCacheExpirationTime: 5m # ENV: KUMA_RUNTIME_KUBERNETES_MARSHALING_CACHE_EXPIRATION_TIME
    # Kubernetes resource reconciliation concurrency configuration
    controllersConcurrency:
      # PodController defines maximum concurrent reconciliations of Pod resources
      # Default value 10. If set to 0 kube controller-runtime default value of 1 will be used.
      podController: 10 # ENV: KUMA_RUNTIME_KUBERNETES_CONTROLLERS_CONCURRENCY_POD_CONTROLLER
    # Kubernetes client configuration
    clientConfig:
      # Qps defines maximum requests kubernetes client is allowed to make per second.
      # Default value 100. If set to 0 kube-client default value of 5 will be used.
      qps: 100
      # BurstQps defines maximum burst requests kubernetes client is allowed to make per second
      # Default value 100. If set to 0 kube-client default value of 10 will be used.
      burstQps: 100
  # Universal-specific configuration
  universal:
    # DataplaneCleanupAge defines how long a Dataplane must be offline before it is cleaned up by the GC
    dataplaneCleanupAge: 72h0m0s # ENV: KUMA_RUNTIME_UNIVERSAL_DATAPLANE_CLEANUP_AGE

# Default Kuma entities configuration
defaults:
  # If true, it skips creating the default Mesh
  skipMeshCreation: false # ENV: KUMA_DEFAULTS_SKIP_MESH_CREATION
  # If true, it skips creating the default tenant resources
  skipTenantResources: false # ENV: KUMA_DEFAULTS_SKIP_TENANT_RESOURCES

# Metrics configuration
metrics:
  dataplane:
    # How many latest subscriptions will be stored in DataplaneInsight object, if equals 0 then unlimited
    subscriptionLimit: 2 # ENV: KUMA_METRICS_DATAPLANE_SUBSCRIPTION_LIMIT
    # How long data plane proxy can stay Online without active xDS connection
    idleTimeout: 5m # ENV: KUMA_METRICS_DATAPLANE_IDLE_TIMEOUT
  zone:
    # How many latest subscriptions will be stored in ZoneInsights object, if equals 0 then unlimited
    subscriptionLimit: 10 # ENV: KUMA_METRICS_ZONE_SUBSCRIPTION_LIMIT
    # How long zone can stay Online without active KDS connection
    idleTimeout: 5m # ENV: KUMA_METRICS_ZONE_IDLE_TIMEOUT
  mesh:
    # Min time that should pass between MeshInsight resync
    minResyncTimeout: 1s # ENV: KUMA_METRICS_MESH_MIN_RESYNC_TIMEOUT
    # Max time that MeshInsight could spend without resync
    maxResyncTimeout: 20s # ENV: KUMA_METRICS_MESH_MAX_RESYNC_TIMEOUT

# Reports configuration
reports:
  # If true then usage stats will be reported
  enabled: false # ENV: KUMA_REPORTS_ENABLED

# General configuration
general:
  # dnsCacheTTL defines how long Kuma CP will cache the result of resolving a dataplane's domain name
  dnsCacheTTL: 10s # ENV: KUMA_GENERAL_DNS_CACHE_TTL
  # TlsCertFile defines a path to a file with PEM-encoded TLS cert that will be used across all the Kuma Servers.
  tlsCertFile: # ENV: KUMA_GENERAL_TLS_CERT_FILE
  # TlsKeyFile defines a path to a file with PEM-encoded TLS key that will be used across all the Kuma Servers.
  tlsKeyFile: # ENV: KUMA_GENERAL_TLS_KEY_FILE
  # TlsMinVersion the minimum version of TLS used across all the Kuma Servers.
  tlsMinVersion: "TLSv1_2" # ENV: KUMA_GENERAL_TLS_MIN_VERSION
  # TlsMaxVersion the maximum version of TLS used across all the Kuma Servers.
  tlsMaxVersion: # ENV: KUMA_GENERAL_TLS_MAX_VERSION
  # TlsCipherSuites the list of cipher suites to be used across all the Kuma Servers.
  tlsCipherSuites: [] # ENV: KUMA_GENERAL_TLS_CIPHER_SUITES
  # WorkDir defines a path to the working directory
  # Kuma stores in this directory autogenerated entities like certificates.
  # If empty then the working directory is $HOME/.kuma
  workDir: "" # ENV: KUMA_GENERAL_WORK_DIR

# DNS Server configuration
dnsServer:
  # The domain that the server will resolve the services for
  domain: "mesh" # ENV: KUMA_DNS_SERVER_DOMAIN
  # The CIDR range used to allocate virtual IPs (VIPs) for services
  CIDR: "240.0.0.0/4" # ENV: KUMA_DNS_SERVER_CIDR
  # Will create a "<kuma.io/service>.mesh" DNS entry for every service.
  serviceVipEnabled: true # ENV: KUMA_DNS_SERVER_SERVICE_VIP_ENABLED
  # The port to use along with the `<kuma.io/service>.mesh` dns entry
  serviceVipPort: 80 # ENV: KUMA_DNS_SERVICE_SERVICE_VIP_PORT
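  # Example (illustrative service name): with the defaults above, a data plane
  # tagged kuma.io/service: backend is reachable inside the mesh as backend.mesh:80.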

# Multizone mode
multizone:
  global:
    kds:
      # Port of a gRPC server that serves Kuma Discovery Service (KDS).
      grpcPort: 5685 # ENV: KUMA_MULTIZONE_GLOBAL_KDS_GRPC_PORT
      # Interval for refreshing state of the world
      refreshInterval: 1s # ENV: KUMA_MULTIZONE_GLOBAL_KDS_REFRESH_INTERVAL
      # Interval for flushing Zone Insights (stats of multi-zone communication)
      zoneInsightFlushInterval: 10s # ENV: KUMA_MULTIZONE_GLOBAL_KDS_ZONE_INSIGHT_FLUSH_INTERVAL
      # TlsEnabled turns on TLS for KDS
      tlsEnabled: true # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_ENABLED
      # TlsCertFile defines a path to a file with PEM-encoded TLS cert.
      tlsCertFile: # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_CERT_FILE
      # TlsKeyFile defines a path to a file with PEM-encoded TLS key.
      tlsKeyFile: # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_KEY_FILE
      # TlsMinVersion the minimum version of TLS
      tlsMinVersion: "TLSv1_2" # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_MIN_VERSION
      # TlsMaxVersion the maximum version of TLS
      tlsMaxVersion: # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_MAX_VERSION
      # TlsCipherSuites the list of cipher suites
      tlsCipherSuites: [] # ENV: KUMA_MULTIZONE_GLOBAL_KDS_TLS_CIPHER_SUITES
      # MaxMsgSize defines a maximum size of the message in bytes that is exchanged using KDS.
      # In practice this means a limit on full list of one resource type.
      maxMsgSize: 10485760 # ENV: KUMA_MULTIZONE_GLOBAL_KDS_MAX_MSG_SIZE
      # MsgSendTimeout defines a timeout on sending a single KDS message.
      # KDS stream between control planes is terminated if the control plane hits this timeout.
      msgSendTimeout: 60s # ENV: KUMA_MULTIZONE_GLOBAL_KDS_MSG_SEND_TIMEOUT
      # Backoff that is executed when the global control plane is sending the response that was previously rejected by zone control plane
      nackBackoff: 5s # ENV: KUMA_MULTIZONE_GLOBAL_KDS_NACK_BACKOFF
  zone:
    # Kuma Zone name used to mark the zone dataplane resources
    name: "" # ENV: KUMA_MULTIZONE_ZONE_NAME
    # GlobalAddress is the URL of the global Kuma CP
    globalAddress: # ENV: KUMA_MULTIZONE_ZONE_GLOBAL_ADDRESS
    kds:
      # Interval for refreshing state of the world
      refreshInterval: 1s # ENV: KUMA_MULTIZONE_ZONE_KDS_REFRESH_INTERVAL
      # RootCAFile defines a path to a file with PEM-encoded Root CA. Client will verify server by using it.
      rootCaFile: # ENV: KUMA_MULTIZONE_ZONE_KDS_ROOT_CA_FILE
      # If true, TLS connection to the server won't be verified.
      tlsSkipVerify: false # ENV: KUMA_MULTIZONE_ZONE_KDS_TLS_SKIP_VERIFY
      # MaxMsgSize defines a maximum size of the message in bytes that is exchanged using KDS.
      # In practice this means a limit on full list of one resource type.
      maxMsgSize: 10485760 # ENV: KUMA_MULTIZONE_ZONE_KDS_MAX_MSG_SIZE
      # MsgSendTimeout defines a timeout on sending a single KDS message.
      # KDS stream between control planes is terminated if the control plane hits this timeout.
      msgSendTimeout: 60s # ENV: KUMA_MULTIZONE_ZONE_KDS_MSG_SEND_TIMEOUT
      # Backoff that is executed when the zone control plane is sending the response that was previously rejected by global control plane
      nackBackoff: 5s # ENV: KUMA_MULTIZONE_ZONE_KDS_NACK_BACKOFF
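    # Example (illustrative name and placeholder address): a zone control plane
    # typically only needs a zone name and the global CP's KDS address, e.g.:
    # multizone:
    #   zone:
    #     name: zone-1
    #     globalAddress: grpcs://<global-kds-address>:5685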

# Diagnostics configuration
diagnostics:
  # Port of Diagnostic Server for checking health and readiness of the Control Plane
  serverPort: 5680 # ENV: KUMA_DIAGNOSTICS_SERVER_PORT
  # If true, enables https://golang.org/pkg/net/http/pprof/ debug endpoints
  debugEndpoints: false # ENV: KUMA_DIAGNOSTICS_DEBUG_ENDPOINTS
  # Whether tls is enabled or not
  tlsEnabled: false # ENV: KUMA_DIAGNOSTICS_TLS_ENABLED
  # TlsCertFile defines a path to a file with PEM-encoded TLS cert. If empty, autoconfigured from general.tlsCertFile
  tlsCertFile: # ENV: KUMA_DIAGNOSTICS_TLS_CERT_FILE
  # TlsKeyFile defines a path to a file with PEM-encoded TLS key. If empty, autoconfigured from general.tlsKeyFile
  tlsKeyFile: # ENV: KUMA_DIAGNOSTICS_TLS_KEY_FILE
  # TlsMinVersion the minimum version of TLS
  tlsMinVersion: "TLSv1_2" # ENV: KUMA_DIAGNOSTICS_TLS_MIN_VERSION
  # TlsMaxVersion the maximum version of TLS
  tlsMaxVersion: # ENV: KUMA_DIAGNOSTICS_TLS_MAX_VERSION
  # TlsCipherSuites the list of cipher suites
  tlsCipherSuites: [] # ENV: KUMA_DIAGNOSTICS_TLS_CIPHER_SUITES

# Dataplane Server configuration that serves APIs like Bootstrap/XDS for the Dataplane.
dpServer:
  # Port of the DP Server
  port: 5678 # ENV: KUMA_DP_SERVER_PORT
  # TlsCertFile defines a path to a file with PEM-encoded TLS cert. If empty, autoconfigured from general.tlsCertFile
  tlsCertFile: # ENV: KUMA_DP_SERVER_TLS_CERT_FILE
  # TlsKeyFile defines a path to a file with PEM-encoded TLS key. If empty, autoconfigured from general.tlsKeyFile
  tlsKeyFile: # ENV: KUMA_DP_SERVER_TLS_KEY_FILE
  # TlsMinVersion the minimum version of TLS
  tlsMinVersion: "TLSv1_2" # ENV: KUMA_DP_SERVER_TLS_MIN_VERSION
  # TlsMaxVersion the maximum version of TLS
  tlsMaxVersion: # ENV: KUMA_DP_SERVER_TLS_MAX_VERSION
  # TlsCipherSuites the list of cipher suites
  tlsCipherSuites: [] # ENV: KUMA_DP_SERVER_TLS_CIPHER_SUITES
  # ReadHeaderTimeout defines the amount of time DP server will be allowed
  # to read request headers. The connection's read deadline is reset
  # after reading the headers and the Handler can decide what is considered
  # too slow for the body. If ReadHeaderTimeout is zero there is no timeout.
  # The timeout is configurable because, in rare cases when Kuma CP was restarting,
  # the 1s timeout explicitly set in other servers was insufficient and DPs
  # were failing to reconnect (we observed this in the Projected Service Account
  # Tokens e2e tests, which started flaking a lot after introducing the explicit
  # 1s timeout)
  readHeaderTimeout: 5s # ENV: KUMA_DP_SERVER_READ_HEADER_TIMEOUT
  # Auth defines an authentication configuration for the DP Server
  # DEPRECATED: use "authn" section.
  auth:
    # Type of authentication. Available values: "serviceAccountToken", "dpToken", "none".
    # If empty, autoconfigured based on the environment - "serviceAccountToken" on Kubernetes, "dpToken" on Universal.
    type: "" # ENV: KUMA_DP_SERVER_AUTH_TYPE
  # Authn defines an authentication configuration for the DP Server
  authn:
    # Configuration for data plane proxy authentication.
    dpProxy:
      # Type of authentication. Available values: "serviceAccountToken", "dpToken", "none".
      # If empty, autoconfigured based on the environment - "serviceAccountToken" on Kubernetes, "dpToken" on Universal.
      type: ""
      # Configuration of dpToken authentication method
      dpToken:
        # If true the control plane token issuer is enabled. It's recommended to set it to false when all the tokens are issued offline.
        enableIssuer: true
        # DP Token validator configuration.
        validator:
          # If true then Kuma secrets with prefix "dataplane-token-signing-key-{mesh}" are considered as signing keys.
          useSecrets: true
          # List of public keys used to validate the token. Example:
          # - kid: 1
          #   mesh: default
          #   key: |
          #     -----BEGIN RSA PUBLIC KEY-----
          #     MIIBCgKCAQEAq....
          #     -----END RSA PUBLIC KEY-----
          # - kid: 2
          #   mesh: demo
          #   keyFile: /keys/public.pem
          publicKeys: []
    # Configuration for zone proxy authentication.
    zoneProxy:
      # Type of authentication. Available values: "serviceAccountToken", "zoneToken", "none".
      # If empty, autoconfigured based on the environment - "serviceAccountToken" on Kubernetes, "zoneToken" on Universal.
      type: ""
      # Configuration for zoneToken authentication method.
      zoneToken:
        # If true the control plane token issuer is enabled. It's recommended to set it to false when all the tokens are issued offline.
        enableIssuer: true
        # Zone Token validator configuration.
        validator:
          # If true then Kuma secrets with prefix "zone-token-signing-key" are considered as signing keys.
          useSecrets: true
          # List of public keys used to validate the token. Example:
          # - kid: 1
          #   key: |
          #     -----BEGIN RSA PUBLIC KEY-----
          #     MIIBCgKCAQEAq....
          #     -----END RSA PUBLIC KEY-----
          # - kid: 2
          #   keyFile: /keys/public.pem
          publicKeys: []
    # If true then Envoy uses Google gRPC instead of Envoy gRPC which lets a proxy reload the auth data (service account token, dp token etc.) stored in the file without proxy restart.
    enableReloadableTokens: false # ENV: KUMA_DP_SERVER_AUTHN_ENABLE_RELOADABLE_TOKENS
  # Hds defines a Health Discovery Service configuration
  hds:
    # Enabled if true then Envoy will actively check the application's ports, but only on Universal.
    # On Kubernetes this feature is disabled for now regardless of the flag value
    enabled: true # ENV: KUMA_DP_SERVER_HDS_ENABLED
    # Interval for Envoy to send statuses for HealthChecks
    interval: 5s # ENV: KUMA_DP_SERVER_HDS_INTERVAL
    # RefreshInterval is an interval for re-generating configuration for Dataplanes connected to the Control Plane
    refreshInterval: 10s # ENV: KUMA_DP_SERVER_HDS_REFRESH_INTERVAL
    # Check defines a HealthCheck configuration
    checkDefaults:
      # Timeout is a time to wait for a health check response. If the timeout is reached the
      # health check attempt will be considered a failure
      timeout: 2s # ENV: KUMA_DP_SERVER_HDS_CHECK_TIMEOUT
      # Interval between health checks
      interval: 1s # ENV: KUMA_DP_SERVER_HDS_CHECK_INTERVAL
      # NoTrafficInterval is a special health check interval that is used when a cluster has
      # never had traffic routed to it
      noTrafficInterval: 1s # ENV: KUMA_DP_SERVER_HDS_CHECK_NO_TRAFFIC_INTERVAL
      # HealthyThreshold is a number of healthy health checks required before a host is marked healthy
      healthyThreshold: 1 # ENV: KUMA_DP_SERVER_HDS_CHECK_HEALTHY_THRESHOLD
      # UnhealthyThreshold is a number of unhealthy health checks required before a host is marked unhealthy
      unhealthyThreshold: 1 # ENV: KUMA_DP_SERVER_HDS_CHECK_UNHEALTHY_THRESHOLD

# Intercommunication CP configuration
interCp:
  # Catalog configuration. Catalog keeps a record of all live CP instances in the zone.
  catalog:
    # Indicates an address on which other control planes can communicate with this CP.
    # If empty then it's autoconfigured by taking the first IP of the nonloopback network interface.
    instanceAddress: "" # ENV: KUMA_INTER_CP_CATALOG_INSTANCE_ADDRESS
    # Interval on which CP will send heartbeat to a leader.
    heartbeatInterval: 5s # ENV: KUMA_INTER_CP_CATALOG_HEARTBEAT_INTERVAL
    # Interval on which CP will write all instances to a catalog.
    writerInterval: 15s # ENV: KUMA_INTER_CP_CATALOG_WRITER_INTERVAL
  # Intercommunication CP server configuration
  server:
    # Port of the inter-cp server
    port: 5683 # ENV: KUMA_INTER_CP_SERVER_PORT
    # TlsMinVersion the minimum version of TLS
    tlsMinVersion: "TLSv1_2" # ENV: KUMA_INTER_CP_SERVER_TLS_MIN_VERSION
    # TlsMaxVersion the maximum version of TLS
    tlsMaxVersion: # ENV: KUMA_INTER_CP_SERVER_TLS_MAX_VERSION
    # TlsCipherSuites the list of cipher suites
    tlsCipherSuites: [ ] # ENV: KUMA_INTER_CP_SERVER_TLS_CIPHER_SUITES

# Access Control configuration
access:
  # Type of access strategy (available values: "static")
  type: static
  # Configuration of static access strategy
  static:
    # AdminResources defines an access to admin resources (Secret/GlobalSecret)
    adminResources:
      # List of users that are allowed to access admin resources
      users: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_ADMIN_RESOURCES_USERS
      # List of groups that are allowed to access admin resources
      groups: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_ADMIN_RESOURCES_GROUPS
    # GenerateDPToken defines an access to generating dataplane token
    generateDpToken:
      # List of users that are allowed to generate dataplane token
      users: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_DP_TOKEN_USERS
      # List of groups that are allowed to generate dataplane token
      groups: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_DP_TOKEN_GROUPS
    # GenerateUserToken defines an access to generating user token
    generateUserToken:
      # List of users that are allowed to generate user token
      users: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_USER_TOKEN_USERS
      # List of groups that are allowed to generate user token
      groups: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_USER_TOKEN_GROUPS
    # GenerateZoneToken defines an access to generating zone token
    generateZoneToken:
      # List of users that are allowed to generate zone token
      users: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_ZONE_TOKEN_USERS
      # List of groups that are allowed to generate zone token
      groups: ["mesh-system:admin"] # ENV: KUMA_ACCESS_STATIC_GENERATE_ZONE_TOKEN_GROUPS
    viewConfigDump:
      # List of users that are allowed to get envoy config dump
      users: [ ] # ENV: KUMA_ACCESS_STATIC_GET_CONFIG_DUMP_USERS
      # List of groups that are allowed to get envoy config dump
      groups: ["mesh-system:unauthenticated","mesh-system:authenticated"] # ENV: KUMA_ACCESS_STATIC_GET_CONFIG_DUMP_GROUPS
    viewStats:
      # List of users that are allowed to get envoy stats
      users: [ ] # ENV: KUMA_ACCESS_STATIC_VIEW_STATS_USERS
      # List of groups that are allowed to get envoy stats
      groups: ["mesh-system:unauthenticated","mesh-system:authenticated"] # ENV: KUMA_ACCESS_STATIC_VIEW_STATS_GROUPS
    viewClusters:
      # List of users that are allowed to get envoy clusters
      users: [ ] # ENV: KUMA_ACCESS_STATIC_VIEW_CLUSTERS_USERS
      # List of groups that are allowed to get envoy clusters
      groups: ["mesh-system:unauthenticated","mesh-system:authenticated"] # ENV: KUMA_ACCESS_STATIC_VIEW_CLUSTERS_GROUPS

# Configuration of experimental features of Kuma
experimental:
  # If true, experimental Gateway API is enabled
  gatewayAPI: false # ENV: KUMA_EXPERIMENTAL_GATEWAY_API
  # If true, instead of embedding kubernetes outbounds into the Dataplane object, they are persisted next to VIPs in the ConfigMap
  # This can improve performance, but it should be enabled only after all instances are migrated to a version that supports this config
  kubeOutboundsAsVIPs: true # ENV: KUMA_EXPERIMENTAL_KUBE_OUTBOUNDS_AS_VIPS
  # The tag-first virtual outbound model is a compressed version of the default virtual outbound model.
  # It is recommended to use the tag-first model for deployments with more than 2k services.
  # You can enable this flag on an existing deployment. In order to downgrade a CP with this flag enabled,
  # you need to first disable the flag and redeploy the CP; after the config is rewritten to the default
  # format you can downgrade your CP
  useTagFirstVirtualOutboundModel: false # ENV: KUMA_EXPERIMENTAL_USE_TAG_FIRST_VIRTUAL_OUTBOUND_MODEL
  # If true, KDS will sync using incremental xDS updates
  kdsDeltaEnabled: false # ENV: KUMA_EXPERIMENTAL_KDS_DELTA_ENABLED

proxy:
  gateway:
    # Sets the Envoy runtime value that limits the maximum number of incoming
    # connections to a builtin gateway data plane proxy
    globalDownstreamMaxConnections: 50000 # ENV: KUMA_PROXY_GATEWAY_GLOBAL_DOWNSTREAM_MAX_CONNECTIONS
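# Example (illustrative values): the settings above can also be provided through
# the environment variables listed next to each field, which is convenient for
# container deployments, e.g.:
#   KUMA_MODE=global \
#   KUMA_STORE_TYPE=postgres \
#   KUMA_STORE_POSTGRES_HOST=postgres.example.internal \
#   kuma-cp run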

Helm values.yaml

global:
  image:
    # -- Default registry for all Kuma Images
    registry: "docker.io/kumahq"
    # -- The default tag for all Kuma images, which itself defaults to .Chart.AppVersion
    tag:
  # -- Add `imagePullSecrets` to all the service accounts used for Kuma components
  imagePullSecrets: []

# -- Whether to patch the target namespace with the system label
patchSystemNamespace: true

installCrdsOnUpgrade:
  # -- Whether to install new CRDs before upgrade (if any were introduced with the new version of Kuma)
  enabled: true
  # -- The `imagePullSecrets` to attach to the Service Account running CRD installation.
  # This field will be deprecated in a future release; please use .global.imagePullSecrets
  imagePullSecrets: []

# -- Whether to disable all helm hooks
noHelmHooks: false

controlPlane:
  # -- Environment that the control plane runs in; useful when running a universal global control plane on k8s
  environment: "kubernetes"

  # -- Labels to add to resources in addition to default labels
  extraLabels: {}

  # -- Kuma CP log level: one of off,info,debug
  logLevel: "info"

  # -- Kuma CP log output path: Defaults to /dev/stdout
  logOutputPath: ""

  # -- Kuma CP modes: one of standalone,zone,global
  mode: "standalone"

  # -- (string) Kuma CP zone, if running multizone
  zone:

  # -- Only used in `zone` mode
  kdsGlobalAddress: ""

  # -- Number of replicas of the Kuma CP. Ignored when autoscaling is enabled
  replicas: 1

  # -- Control Plane Pod Annotations
  podAnnotations: {}

  # Horizontal Pod Autoscaling configuration
  autoscaling:
    # -- Whether to enable Horizontal Pod Autoscaling, which requires the [Metrics Server](https://github.com/kubernetes-sigs/metrics-server) in the cluster
    enabled: false

    # -- The minimum CP pods to allow
    minReplicas: 2
    # -- The max CP pods to scale to
    maxReplicas: 5

    # -- For clusters that don't support autoscaling/v2, autoscaling/v1 is used
    targetCPUUtilizationPercentage: 80
    # -- For clusters that do support autoscaling/v2, use metrics
    metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 80

  # -- Node selector for the Kuma Control Plane pods
  nodeSelector:
    kubernetes.io/os: linux

  # -- Tolerations for the Kuma Control Plane pods
  tolerations: []

  podDisruptionBudget:
    # -- Whether to create a pod disruption budget
    enabled: false
    # -- The maximum number of unavailable pods allowed by the budget
    maxUnavailable: 1

  # -- Affinity placement rule for the Kuma Control Plane pods.
  # This is rendered as a template, so you can reference other helm variables or includes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            # These match the selector labels used on the deployment.
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - '{{ include "kuma.name" . }}'
              - key: app.kubernetes.io/instance
                operator: In
                values:
                  - '{{ .Release.Name }}'
              - key: app
                operator: In
                values:
                  - '{{ include "kuma.name" . }}-control-plane'
          topologyKey: kubernetes.io/hostname

  # -- Topology spread constraints rule for the Kuma Control Plane pods.
  # This is rendered as a template, so you can use variables to generate match labels.
  topologySpreadConstraints:

  # -- Failure policy of the mutating webhook implemented by the Kuma Injector component
  injectorFailurePolicy: Fail

  service:
    apiServer:
      http:
        # -- Port on which the HTTP API server Service is exposed on the Node for a Service of type NodePort
        nodePort: 30681
      https:
        # -- Port on which the HTTPS API server Service is exposed on the Node for a Service of type NodePort
        nodePort: 30682

    # -- Whether to create a service resource.
    enabled: true

    # -- (string) Optional override of the Kuma Control Plane Service's name
    name:

    # -- Service type of the Kuma Control Plane
    type: ClusterIP

    # -- Additional annotations to put on the Kuma Control Plane
    annotations: { }

  # Kuma API and GUI ingress settings. Useful if you want to expose the
  # API and GUI of Kuma outside the k8s cluster.
  ingress:
    # -- Install K8s Ingress resource that exposes GUI and API
    enabled: false
    # -- IngressClass defines which controller will implement the resource
    ingressClassName:
    # -- Ingress hostname
    hostname:
    # -- Map of ingress annotations.
    annotations: {}
    # -- Ingress path.
    path: /
    # -- Each path in an Ingress is required to have a corresponding path type. (ImplementationSpecific/Exact/Prefix)
    pathType: ImplementationSpecific
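    # Example (illustrative class and hostname): exposing the GUI and API
    # through an existing ingress controller could look like:
    # enabled: true
    # ingressClassName: nginx
    # hostname: kuma.example.com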

  globalZoneSyncService:
    # -- Whether to create a k8s service for the global zone sync
    # service. It will only be created when enabled and deploying the global
    # control plane.
    enabled: true
    # -- Service type of the Global-zone sync
    type: LoadBalancer
    # -- (string) Optionally specify IP to be used by cloud provider when configuring load balancer
    loadBalancerIP:
    # -- Additional annotations to put on the Global Zone Sync Service
    annotations: { }
    # -- Port on which Global Zone Sync Service is exposed on Node for service of type NodePort
    nodePort: 30685
    # -- Port on which Global Zone Sync Service is exposed
    port: 5685
    # -- Protocol of the Global Zone Sync service port
    protocol: grpc

  defaults:
    # -- Whether to skip creating the default Mesh
    skipMeshCreation: false

  # -- Whether to automountServiceAccountToken for cp. Optionally set to false
  automountServiceAccountToken: true

  # -- Optionally override the resource spec
  resources:
    requests:
       cpu: 500m
       memory: 256Mi
    limits:
       memory: 256Mi

  # -- Pod lifecycle settings (useful for adding a preStop hook, when
  # using AWS ALB or NLB)
  lifecycle: {}

  # -- Number of seconds to wait before force killing the pod. Make sure to
  # update this if you add a preStop hook.
  terminationGracePeriodSeconds: 30

  # TLS for various servers
  tls:
    general:
      # -- Secret that contains tls.crt, tls.key [and ca.crt when no
      # controlPlane.tls.general.caSecretName specified] for protecting
      # Kuma in-cluster communication
      secretName: ""
      # -- Secret that contains ca.crt that was used to sign the cert for protecting
      # Kuma in-cluster communication (ca.crt present in this secret
      # has precedence over the one provided in the controlPlane.tls.general.secretName)
      caSecretName: ""
      # -- Base64 encoded CA certificate (the same as in controlPlane.tls.general.secret#ca.crt)
      caBundle: ""
    apiServer:
      # -- Secret that contains tls.crt, tls.key for protecting Kuma API on HTTPS
      secretName: ""
      # -- Secret that contains list of .pem certificates that can access admin endpoints of Kuma API on HTTPS
      clientCertsSecretName: ""
    # - if not creating the global control plane, then do nothing
    # - if secretName is empty and create is false, then do nothing
    # - if secretName is non-empty and create is false, then use the secret made outside of helm with the name secretName
    # - if secretName is empty and create is true, then create a secret with a default name and use it
    # - if secretName is non-empty and create is true, then create the secret using the provided name
    kdsGlobalServer:
      # -- Name of the K8s TLS Secret resource. If you set this and don't set
      # create=true, you have to create the secret manually.
      secretName: ""
      # -- Whether to create the TLS secret in helm.
      create: false
      # -- The TLS certificate to offer.
      cert: ""
      # -- The TLS key to use.
      key: ""
    # - if not creating the zonal control plane, then do nothing
    # - if secretName is empty and create is false, then do nothing
    # - if secretName is non-empty and create is false, then use the secret made outside of helm with the name secretName
    # - if secretName is empty and create is true, then create a secret with a default name and use it
    # - if secretName is non-empty and create is true, then create the secret using the provided name
    kdsZoneClient:
      # -- Name of the K8s Secret resource that contains ca.crt which was
      # used to sign the certificate of KDS Global Server. If you set this
      # and don't set create=true, you have to create the secret manually.
      secretName: ""
      # -- Whether to create the TLS secret in helm.
      create: false
      # -- CA bundle that was used to sign the certificate of KDS Global Server.
      cert: ""
      # -- If true, TLS cert of the server is not verified.
      skipVerify: false

  image:
    # -- Kuma CP ImagePullPolicy
    pullPolicy: IfNotPresent
    # -- Kuma CP image repository
    repository: "kuma-cp"
    # -- Kuma CP Image tag. When not specified, the value is copied from global.tag
    tag:

  # -- (object with { Env: string, Secret: string, Key: string }) Secrets to add as environment variables,
  # where `Env` is the name of the env variable,
  # `Secret` is the name of the Secret,
  # and `Key` is the key of the Secret value to use
  secrets:
  #  someSecret:
  #    Secret: some-secret
  #    Key: secret_key
  #    Env: SOME_SECRET

  # -- Additional environment variables that will be passed to the control plane
  envVars: { }

  # -- Additional config maps to mount into the control plane, with optional inline values
  extraConfigMaps: [ ]
#    - name: extra-config
#      mountPath: /etc/extra-config
#      readOnly: true
#      values:
#        extra-config-key: |
#          extra-config-value

  # -- (object with { name: string, mountPath: string, readOnly: string }) Additional secrets to mount into the control plane,
  # where `name` is the name of the Secret,
  # `mountPath` is the path at which to mount it,
  # and `readOnly` controls whether the mount is read-only
  extraSecrets:
  #  extraConfig:
  #    name: extra-config
  #    mountPath: /etc/extra-config
  #    readOnly: true

  webhooks:
    validator:
      # -- Additional rules to apply on Kuma validator webhook. Useful when building custom policy on top of Kuma.
      additionalRules: ""
    ownerReference:
      # -- Additional rules to apply on Kuma owner reference webhook. Useful when building custom policy on top of Kuma.
      additionalRules: ""

  # -- Specifies if the deployment should be started in hostNetwork mode.
  hostNetwork: false
  # -- Define a new server port for the admission controller. Recommended to set in combination with
  # hostNetwork to prevent multiple port bindings on the same port (like Calico in AWS EKS).
  admissionServerPort: 5443

  # -- Security context at the pod level for control plane.
  podSecurityContext:
    runAsNonRoot: true

  # -- Security context at the container level for control plane.
  containerSecurityContext:
    readOnlyRootFilesystem: true

cni:
  # -- Install Kuma with CNI instead of proxy init container
  enabled: false
  # -- Install CNI in chained mode
  chained: false
  # -- Set the CNI install directory
  netDir: /etc/cni/multus/net.d
  # -- Set the CNI bin directory
  binDir: /var/lib/cni/bin
  # -- Set the CNI configuration name
  confName: kuma-cni.conf
  # -- CNI log level: one of off,info,debug
  logLevel: info
  # -- Node Selector for the CNI pods
  nodeSelector:
    kubernetes.io/os: linux
  # -- Tolerations for the CNI pods
  tolerations: []
  # -- Additional pod annotations
  podAnnotations: { }
  # -- Set the CNI namespace
  namespace: kube-system
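  # Example (illustrative paths; verify them for your cluster CNI): a chained
  # installation alongside an existing CNI such as Calico could look like:
  # enabled: true
  # chained: true
  # netDir: /etc/cni/net.d
  # binDir: /opt/cni/bin
  # confName: 10-calico.conflist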

  image:
    # -- CNI image repository
    repository: "kuma-cni"
    # -- CNI image tag - defaults to .Chart.AppVersion
    tag:
    # -- CNI image pull policy
    imagePullPolicy: IfNotPresent

  # -- it's only useful in tests to trigger a possible race condition
  delayStartupSeconds: 0

  # -- use new CNI (experimental)
  experimental:
    imageEbpf:
      # -- CNI experimental eBPF image registry
      registry: "docker.io/kumahq"
      # -- CNI experimental eBPF image repository
      repository: "merbridge"
      # -- CNI experimental eBPF image tag
      tag: "0.8.5"

  resources:
    requests:
      cpu: 100m
      memory: 100Mi
    limits:
      memory: 100Mi

  # -- Security context at the pod level for cni
  podSecurityContext: {}

  # -- Security context at the container level for cni
  containerSecurityContext:
    readOnlyRootFilesystem: true
    runAsNonRoot: false
    runAsUser: 0
    runAsGroup: 0

dataPlane:
  image:
    # -- The Kuma DP image repository
    repository: "kuma-dp"
    # -- Kuma DP ImagePullPolicy
    pullPolicy: IfNotPresent
    # -- Kuma DP Image Tag. When not specified, the value is copied from global.tag
    tag:

  initImage:
    # -- The Kuma DP init image repository
    repository: "kuma-init"
    # -- Kuma DP init image tag. When not specified, the value is copied from global.tag
    tag:

ingress:
  # -- If true, it deploys Ingress for cross-cluster communication
  enabled: false

  # -- Labels to add to resources, in addition to default labels
  extraLabels: {}

  # -- Time for which old listener will still be active as draining
  drainTime: 30s

  # -- Number of replicas of the Ingress. Ignored when autoscaling is enabled.
  replicas: 1

  # -- Define the resources to allocate to mesh ingress
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 1000m
      memory: 512Mi

  # -- Pod lifecycle settings (useful for adding a preStop hook, when
  # using AWS ALB or NLB)
  lifecycle: {}

  # -- Number of seconds to wait before force killing the pod. Make sure to
  # update this if you add a preStop hook.
  terminationGracePeriodSeconds: 40

  # Horizontal Pod Autoscaling configuration
  autoscaling:
    # -- Whether to enable Horizontal Pod Autoscaling, which requires the [Metrics Server](https://github.com/kubernetes-sigs/metrics-server) in the cluster
    enabled: false

    # -- The minimum Ingress pods to allow
    minReplicas: 2
    # -- The max Ingress pods to scale to
    maxReplicas: 5

    # -- For clusters that don't support autoscaling/v2, autoscaling/v1 is used
    targetCPUUtilizationPercentage: 80
    # -- For clusters that do support autoscaling/v2, use metrics
    metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 80
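
    # Example (illustrative): an additional autoscaling/v2 memory metric that can
    # be appended to the list above; the 80% target is an assumption, not a
    # recommendation.
    #   - type: Resource
    #     resource:
    #       name: memory
    #       target:
    #         type: Utilization
    #         averageUtilization: 80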

  service:
    # -- Whether to create a Service resource.
    enabled: true
    # -- Service type of the Ingress
    type: LoadBalancer
    # -- (string) Optionally specify the IP to be used by the cloud provider when configuring the load balancer
    loadBalancerIP:
    # -- Additional annotations to put on the Ingress service
    annotations: { }
    # -- Port on which Ingress is exposed
    port: 10001
    # -- Port on which service is exposed on Node for service of type NodePort
    nodePort:
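    # Example (illustrative): exposing the Ingress on a fixed NodePort instead of
    # a LoadBalancer; 30001 is an arbitrary port in the default NodePort range.
    # type: NodePort
    # nodePort: 30001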
  # -- Additional pod annotations (deprecated; use `podAnnotations` instead)
  annotations: { }
  # -- Additional pod annotations
  podAnnotations: { }
  # -- Node Selector for the Ingress pods
  nodeSelector:
    kubernetes.io/os: linux
  # -- Tolerations for the Ingress pods
  tolerations: []
  podDisruptionBudget:
    # -- Whether to create a pod disruption budget
    enabled: false
    # -- The maximum number of unavailable pods allowed by the budget
    maxUnavailable: 1

  # -- Affinity placement rule for the Kuma Ingress pods
  # This is rendered as a template, so you can reference other helm variables
  # or includes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            # These match the selector labels used on the deployment.
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - '{{ include "kuma.name" . }}'
              - key: app.kubernetes.io/instance
                operator: In
                values:
                  - '{{ .Release.Name }}'
              - key: app
                operator: In
                values:
                  - kuma-ingress
          topologyKey: kubernetes.io/hostname

  # -- Topology spread constraints rule for the Kuma Mesh Ingress pods.
  # This is rendered as a template, so you can use variables to generate match labels.
  topologySpreadConstraints:
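  # Example (illustrative): spread Ingress pods across zones. The label values
  # reuse the same templated names as the affinity rule above; maxSkew and
  # whenUnsatisfiable are assumptions to adjust for your cluster.
  #   - maxSkew: 1
  #     topologyKey: topology.kubernetes.io/zone
  #     whenUnsatisfiable: ScheduleAnyway
  #     labelSelector:
  #       matchLabels:
  #         app.kubernetes.io/name: '{{ include "kuma.name" . }}'
  #         app: kuma-ingress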

  # -- Security context at the pod level for ingress
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 5678
    runAsGroup: 5678

  # -- Security context at the container level for ingress
  containerSecurityContext:
    readOnlyRootFilesystem: true

egress:
  # -- If true, it deploys Egress for cross-cluster communication
  enabled: false
  # -- Labels to add to resources, in addition to the default labels.
  extraLabels: {}
  # -- Time for which the old listener will remain active in a draining state
  drainTime: 30s
  # -- Number of replicas of the Egress. Ignored when autoscaling is enabled.
  replicas: 1

  # Horizontal Pod Autoscaling configuration
  autoscaling:
    # -- Whether to enable Horizontal Pod Autoscaling, which requires the [Metrics Server](https://github.com/kubernetes-sigs/metrics-server) in the cluster
    enabled: false

    # -- The minimum number of Egress pods to allow
    minReplicas: 2
    # -- The maximum number of Egress pods to scale to
    maxReplicas: 5

    # -- For clusters that don't support autoscaling/v2, autoscaling/v1 is used
    targetCPUUtilizationPercentage: 80
    # -- For clusters that do support autoscaling/v2, use metrics
    metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 80
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      cpu: 1000m
      memory: 512Mi

  service:
    # -- Whether to create the service object
    enabled: true
    # -- Service type of the Egress
    type: ClusterIP
    # -- (string) Optionally specify the IP to be used by the cloud provider when configuring the load balancer
    loadBalancerIP:
    # -- Additional annotations to put on the Egress service
    annotations: { }
    # -- Port on which Egress is exposed
    port: 10002
    # -- Port on which service is exposed on Node for service of type NodePort
    nodePort:
  # -- Additional pod annotations (deprecated; use `podAnnotations` instead)
  annotations: { }
  # -- Additional pod annotations
  podAnnotations: { }
  # -- Node Selector for the Egress pods
  nodeSelector:
    kubernetes.io/os: linux
  # -- Tolerations for the Egress pods
  tolerations: []
  podDisruptionBudget:
    # -- Whether to create a pod disruption budget
    enabled: false
    # -- The maximum number of unavailable pods allowed by the budget
    maxUnavailable: 1

  # -- Affinity placement rule for the Kuma Egress pods.
  # This is rendered as a template, so you can reference other helm variables or includes.
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            # These match the selector labels used on the deployment.
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - '{{ include "kuma.name" . }}'
              - key: app.kubernetes.io/instance
                operator: In
                values:
                  - '{{ .Release.Name }}'
              - key: app
                operator: In
                values:
                  - kuma-egress
          topologyKey: kubernetes.io/hostname

  # -- Topology spread constraints rule for the Kuma Egress pods.
  # This is rendered as a template, so you can use variables to generate match labels.
  topologySpreadConstraints:

  # -- Security context at the pod level for egress
  podSecurityContext:
    runAsNonRoot: true
    runAsUser: 5678
    runAsGroup: 5678

  # -- Security context at the container level for egress
  containerSecurityContext:
    readOnlyRootFilesystem: true

kumactl:
  image:
    # -- The kumactl image repository
    repository: kumactl
    # -- The kumactl image tag. When not specified, the value is copied from global.tag
    tag:

kubectl:
  # kubectl image that supports Kubernetes v1.20.15
  # see: https://hub.docker.com/r/kumahq/kubectl
  image:
    # -- The kubectl image registry
    registry: kumahq
    # -- The kubectl image repository
    repository: kubectl
    # -- The kubectl image tag
    tag: "v1.20.15"
hooks:
  # -- Node selector for the Helm hooks
  nodeSelector:
    kubernetes.io/os: linux
  # -- Tolerations for the Helm hooks
  tolerations: []
  # -- Security context at the pod level for crd/webhook/ns
  podSecurityContext:
    runAsNonRoot: true

  # -- Security context at the container level for crd/webhook/ns
  containerSecurityContext:
    readOnlyRootFilesystem: true

  # -- The ebpf-cleanup hook needs write access to the root filesystem to clean up eBPF programs.
  # Changing the values below may break eBPF cleanup completely,
  # so be cautious when doing so.
  ebpfCleanup:
    # -- Security context at the pod level for crd/webhook/cleanup-ebpf
    podSecurityContext:
      runAsNonRoot: false
    # -- Security context at the container level for crd/webhook/cleanup-ebpf
    containerSecurityContext:
      readOnlyRootFilesystem: false

experimental:
  # -- If true, it installs experimental Gateway API support
  gatewayAPI: false
  # Configuration for the experimental ebpf mode for transparent proxy
  ebpf:
    # -- If true, eBPF will be used instead of iptables to install/configure the transparent proxy
    enabled: false
    # -- Name of the environment variable that will contain the IP address of a pod
    instanceIPEnvVarName: INSTANCE_IP
    # -- Path where BPF file system should be mounted
    bpffsPath: /sys/fs/bpf
    # -- Host's cgroup2 path
    cgroupPath: /sys/fs/cgroup
    # -- Name of the network interface which TC programs should be attached to; if empty, it will be determined automatically
    tcAttachIface: ""
    # -- Path where compiled eBPF programs which will be installed can be found
    programsSourcePath: /kuma/ebpf
  # -- If true, it uses the new API for resource synchronization
  deltaKds: false
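
  # Example (illustrative): enabling the eBPF transparent proxy; the remaining
  # ebpf settings usually keep their defaults (see hooks.ebpfCleanup above for
  # the security context used by the cleanup hook).
  # experimental:
  #   ebpf:
  #     enabled: true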

legacy:
  # -- If true, use the legacy transparent proxy engine
  transparentProxy: false
  cni:
    # -- If true, it installs legacy version of the CNI
    enabled: false
    image:
      # -- CNI v1 image registry
      registry: "docker.io/kumahq"
      # -- CNI v1 image repository
      repository: "install-cni"
      # -- CNI v1 image tag
      tag: "0.0.10"

# Postgres settings for a universal control plane running on Kubernetes
postgres:
  # -- Postgres port. The password should be provided as a secret reference in "controlPlane.secrets"
  # with the Env value "KUMA_STORE_POSTGRES_PASSWORD".
  # Example:
  # controlPlane:
  #   secrets:
  #     - Secret: postgres-postgresql
  #       Key: postgresql-password
  #       Env: KUMA_STORE_POSTGRES_PASSWORD
  port: "5432"
  # TLS settings
  tls:
    # -- Mode of TLS connection. Available values are: "disable", "verifyNone", "verifyCa", "verifyFull"
    mode: disable # ENV: KUMA_STORE_POSTGRES_TLS_MODE
    # -- Whether to disable SNI (the postgres `sslsni` option).
    disableSSLSNI: false # ENV: KUMA_STORE_POSTGRES_TLS_DISABLE_SSLSNI
    # -- Secret name that contains the ca.crt
    caSecretName:
    # -- Secret name that contains the client tls.crt, tls.key
    secretName:
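
  # Example (illustrative): verifyFull TLS using pre-created Kubernetes secrets;
  # the secret names below are placeholders, not chart defaults.
  # postgres:
  #   tls:
  #     mode: verifyFull
  #     caSecretName: postgres-ca
  #     secretName: postgres-client-tls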

# @ignored for helm-docs
plugins:
  policies:
    meshaccesslogs: {}
    meshcircuitbreakers: {}
    meshfaultinjections: {}
    meshhealthchecks: {}
    meshhttproutes: {}
    meshloadbalancingstrategies: {}
    meshproxypatches: {}
    meshratelimits: {}
    meshretries: {}
    meshtcproutes: {}
    meshtimeouts: {}
    meshtraces: {}
    meshtrafficpermissions: {}