Helm Chart Values
This is the generated reference for the Loki Helm Chart values.
Key | Type | Description | Default |
---|---|---|---|
enterprise.adminApi | object | If enabled, the correct admin_client storage will be configured. If disabled while running enterprise, make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`. |
{
"enabled": true
}
|
enterprise.adminTokenSecret | string | Alternative name for admin token secret, needed by tokengen and provisioner jobs |
null
|
enterprise.canarySecret | string | Alternative name of the secret used to store the token for the canary |
null
|
enterprise.cluster_name | string | Optional name of the GEL cluster; if not set, `.Release.Name` is used. The cluster name must match what is in your GEL license |
null
|
enterprise.config | string |
"{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") }}\nadmin_client:\n storage:\n s3:\n bucket_name: {{ .Values.loki.storage.bucketNames.admin }}\n{{- end }}\n{{- end }}\nauth:\n type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n path: /etc/loki/license/license.jwt\n"
| |
enterprise.enabled | bool |
false
| |
enterprise.externalLicenseName | string | Name of the external license secret to use |
null
|
enterprise.image.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
enterprise.image.registry | string | The Docker registry |
"docker.io"
|
enterprise.image.repository | string | Docker image repository |
"grafana/enterprise-logs"
|
enterprise.image.tag | string | Overrides the image tag whose default is the chart's appVersion |
"v1.4.0"
|
enterprise.license | object | Grafana Enterprise Logs license. In order to use Grafana Enterprise Logs features, you will need to provide the contents of your Grafana Enterprise Logs license, either by providing the contents of the license.jwt, or the name of the Kubernetes Secret that contains your license.jwt. To set the license contents, use the flag `--set-file 'license.contents=./license.jwt'` |
{
"contents": "NOTAVALIDLICENSE"
}
|
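
As an alternative to the `--set-file` flag mentioned above, the license can be supplied in a values file; a minimal sketch where the inlined JWT is a placeholder:

```yaml
# values.yaml -- sketch only; paste the real contents of license.jwt
enterprise:
  enabled: true
  license:
    contents: |
      <contents of license.jwt>
```
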
enterprise.nginxConfig.file | string |
"worker_processes 5; ## Default: 1\nerror_log /dev/stderr;\npid /tmp/nginx.pid;\nworker_rlimit_nofile 8192;\n\nevents {\n worker_connections 4096; ## Default: 1024\n}\n\nhttp {\n client_body_temp_path /tmp/client_temp;\n proxy_temp_path /tmp/proxy_temp_path;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n\n proxy_http_version 1.1;\n\n default_type application/octet-stream;\n log_format {{ .Values.gateway.nginxConfig.logFormat }}\n\n {{- if .Values.gateway.verboseLogging }}\n access_log /dev/stderr main;\n {{- else }}\n\n map $status $loggable {\n ~^[23] 0;\n default 1;\n }\n access_log /dev/stderr main if=$loggable;\n {{- end }}\n\n sendfile on;\n tcp_nopush on;\n resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }}.;\n\n {{- with .Values.gateway.nginxConfig.httpSnippet }}\n {{ . | nindent 2 }}\n {{- end }}\n\n server {\n listen 8080;\n\n {{- if .Values.gateway.basicAuth.enabled }}\n auth_basic \"Loki\";\n auth_basic_user_file /etc/nginx/secrets/.htpasswd;\n {{- end }}\n\n location = / {\n return 200 'OK';\n auth_basic off;\n }\n\n location = /api/prom/push {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /api/prom/tail {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n }\n\n location ~ /api/prom/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /prometheus/api/v1/alerts.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /prometheus/api/v1/rules.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /loki/api/v1/push {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /loki/api/v1/tail {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n }\n\n location ~ /loki/api/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /admin/api/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /compactor/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /distributor/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ring {\n proxy_pass http://{{ include \"loki.writeFullname\" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ingester/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ruler/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /scheduler/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n {{- with .Values.gateway.nginxConfig.serverSnippet }}\n {{ . | nindent 4 }}\n {{- end }}\n }\n}\n"
| |
enterprise.provisioner | object | Configuration for `provisioner` target |
{
"annotations": {},
"enabled": true,
"env": [],
"image": {
"pullPolicy": "IfNotPresent",
"registry": "docker.io",
"repository": "grafana/enterprise-logs-provisioner",
"tag": null
},
"labels": {},
"priorityClassName": null,
"provisionedSecretPrefix": "{{ include \"loki.name\" . }}-provisioned",
"securityContext": {
"fsGroup": 10001,
"runAsGroup": 10001,
"runAsNonRoot": true,
"runAsUser": 10001
},
"tenants": []
}
|
enterprise.provisioner.annotations | object | Additional annotations for the `provisioner` Job |
{}
|
enterprise.provisioner.enabled | bool | Whether the job should be part of the deployment |
true
|
enterprise.provisioner.env | list | Additional Kubernetes environment variables |
[]
|
enterprise.provisioner.image | object | Provisioner image to use |
{
"pullPolicy": "IfNotPresent",
"registry": "docker.io",
"repository": "grafana/enterprise-logs-provisioner",
"tag": null
}
|
enterprise.provisioner.image.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
enterprise.provisioner.image.registry | string | The Docker registry |
"docker.io"
|
enterprise.provisioner.image.repository | string | Docker image repository |
"grafana/enterprise-logs-provisioner"
|
enterprise.provisioner.image.tag | string | Overrides the image tag whose default is the chart's appVersion |
null
|
enterprise.provisioner.labels | object | Additional labels for the `provisioner` Job |
{}
|
enterprise.provisioner.priorityClassName | string | The name of the PriorityClass for provisioner Job |
null
|
enterprise.provisioner.provisionedSecretPrefix | string | Name of the secret to store provisioned tokens in |
"{{ include \"loki.name\" . }}-provisioned"
|
enterprise.provisioner.securityContext | object | Run containers as user `enterprise-logs(uid=10001)` |
{
"fsGroup": 10001,
"runAsGroup": 10001,
"runAsNonRoot": true,
"runAsUser": 10001
}
|
enterprise.provisioner.tenants | list | Tenants to be created. Each tenant will get a read and write policy and associated token. |
[]
|
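
A hedged sketch of provisioning tenants under `enterprise.provisioner.tenants`; the tenant names below are hypothetical, and the exact entry schema may carry additional fields in your chart version:

```yaml
enterprise:
  provisioner:
    enabled: true
    tenants:
      - name: team-a   # hypothetical tenant; gets a read and a write policy plus tokens
      - name: team-b
```
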
enterprise.tokengen | object | Configuration for `tokengen` target |
{
"annotations": {},
"enabled": true,
"env": [],
"extraArgs": [],
"extraEnvFrom": [],
"extraVolumeMounts": [],
"extraVolumes": [],
"labels": {},
"securityContext": {
"fsGroup": 10001,
"runAsGroup": 10001,
"runAsNonRoot": true,
"runAsUser": 10001
},
"tolerations": []
}
|
enterprise.tokengen.annotations | object | Additional annotations for the `tokengen` Job |
{}
|
enterprise.tokengen.enabled | bool | Whether the job should be part of the deployment |
true
|
enterprise.tokengen.env | list | Additional Kubernetes environment variables |
[]
|
enterprise.tokengen.extraArgs | list | Additional CLI arguments for the `tokengen` target |
[]
|
enterprise.tokengen.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the tokengen pods |
[]
|
enterprise.tokengen.extraVolumeMounts | list | Additional volume mounts for Pods |
[]
|
enterprise.tokengen.extraVolumes | list | Additional volumes for Pods |
[]
|
enterprise.tokengen.labels | object | Additional labels for the `tokengen` Job |
{}
|
enterprise.tokengen.securityContext | object | Run containers as user `enterprise-logs(uid=10001)` |
{
"fsGroup": 10001,
"runAsGroup": 10001,
"runAsNonRoot": true,
"runAsUser": 10001
}
|
enterprise.tokengen.tolerations | list | Tolerations for tokengen Job |
[]
|
enterprise.useExternalLicense | bool | Set to true when providing an external license |
false
|
enterprise.version | string |
"v1.5.2"
| |
fullnameOverride | string | Overrides the chart's computed fullname |
null
|
gateway.affinity | string | Affinity for gateway pods. Passed through `tpl` and, thus, to be configured as a string | Hard node and soft zone anti-affinity |
gateway.autoscaling.enabled | bool | Enable autoscaling for the gateway |
false
|
gateway.autoscaling.maxReplicas | int | Maximum autoscaling replicas for the gateway |
3
|
gateway.autoscaling.minReplicas | int | Minimum autoscaling replicas for the gateway |
1
|
gateway.autoscaling.targetCPUUtilizationPercentage | int | Target CPU utilisation percentage for the gateway |
60
|
gateway.autoscaling.targetMemoryUtilizationPercentage | string | Target memory utilisation percentage for the gateway |
null
|
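
Putting the `gateway.autoscaling.*` values together, a minimal sketch that enables a horizontal autoscaler for the gateway:

```yaml
gateway:
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 6
    targetCPUUtilizationPercentage: 60
    # targetMemoryUtilizationPercentage: 80   # optional; unset (null) by default
```
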
gateway.basicAuth.enabled | bool | Enables basic authentication for the gateway |
false
|
gateway.basicAuth.existingSecret | string | Existing basic auth secret to use. Must contain '.htpasswd' |
null
|
gateway.basicAuth.htpasswd | string | Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function. The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load. |
"{{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }}"
|
gateway.basicAuth.password | string | The basic auth password for the gateway |
null
|
gateway.basicAuth.username | string | The basic auth username for the gateway |
null
|
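
A sketch that enables basic auth on the gateway with inline credentials (replace the example username and password); alternatively, point `existingSecret` at a pre-created secret containing `.htpasswd`:

```yaml
gateway:
  basicAuth:
    enabled: true
    username: loki        # example credentials -- replace with your own
    password: changeme
    # existingSecret: loki-gateway-htpasswd   # use instead of username/password if preferred
```
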
gateway.containerSecurityContext | object | The SecurityContext for gateway containers |
{
"allowPrivilegeEscalation": false,
"capabilities": {
"drop": [
"ALL"
]
},
"readOnlyRootFilesystem": true
}
|
gateway.deploymentStrategy | object | ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy |
{
"type": "RollingUpdate"
}
|
gateway.enabled | bool | Specifies whether the gateway should be enabled |
true
|
gateway.extraArgs | list | Additional CLI args for the gateway |
[]
|
gateway.extraEnv | list | Environment variables to add to the gateway pods |
[]
|
gateway.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the gateway pods |
[]
|
gateway.extraVolumeMounts | list | Volume mounts to add to the gateway pods |
[]
|
gateway.extraVolumes | list | Volumes to add to the gateway pods |
[]
|
gateway.image.pullPolicy | string | The gateway image pull policy |
"IfNotPresent"
|
gateway.image.registry | string | The Docker registry for the gateway image |
"docker.io"
|
gateway.image.repository | string | The gateway image repository |
"nginxinc/nginx-unprivileged"
|
gateway.image.tag | string | The gateway image tag |
"1.19-alpine"
|
gateway.ingress.annotations | object | Annotations for the gateway ingress |
{}
|
gateway.ingress.enabled | bool | Specifies whether an ingress for the gateway should be created |
false
|
gateway.ingress.hosts | list | Hosts configuration for the gateway ingress |
[
{
"host": "gateway.loki.example.com",
"paths": [
{
"path": "/"
}
]
}
]
|
gateway.ingress.tls | list | TLS configuration for the gateway ingress |
[
{
"hosts": [
"gateway.loki.example.com"
],
"secretName": "loki-gateway-tls"
}
]
|
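
Combining the `gateway.ingress.*` values, a sketch that exposes the gateway on a custom host with TLS; the host name, annotation, and secret name are placeholders:

```yaml
gateway:
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx   # example -- adjust for your ingress controller
    hosts:
      - host: loki.example.com
        paths:
          - path: /
    tls:
      - hosts:
          - loki.example.com
        secretName: loki-gateway-tls
```
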
gateway.nginxConfig.file | string | Config file contents for Nginx. Passed through the `tpl` function to allow templating | See values.yaml |
gateway.nginxConfig.httpSnippet | string | Allows appending custom configuration to the http block |
""
|
gateway.nginxConfig.logFormat | string | NGINX log format |
"main '$remote_addr - $remote_user [$time_local] $status '\n '\"$request\" $body_bytes_sent \"$http_referer\" '\n '\"$http_user_agent\" \"$http_x_forwarded_for\"';"
|
gateway.nginxConfig.serverSnippet | string | Allows appending custom configuration to the server block |
""
|
gateway.nodeSelector | object | Node selector for gateway pods |
{}
|
gateway.podAnnotations | object | Annotations for gateway pods |
{}
|
gateway.podSecurityContext | object | The SecurityContext for gateway pods |
{
"fsGroup": 101,
"runAsGroup": 101,
"runAsNonRoot": true,
"runAsUser": 101
}
|
gateway.priorityClassName | string | The name of the PriorityClass for gateway pods |
null
|
gateway.readinessProbe.httpGet.path | string |
"/"
| |
gateway.readinessProbe.httpGet.port | string |
"http"
| |
gateway.readinessProbe.initialDelaySeconds | int |
15
| |
gateway.readinessProbe.timeoutSeconds | int |
1
| |
gateway.replicas | int | Number of replicas for the gateway |
1
|
gateway.resources | object | Resource requests and limits for the gateway |
{}
|
gateway.service.annotations | object | Annotations for the gateway service |
{}
|
gateway.service.clusterIP | string | ClusterIP of the gateway service |
null
|
gateway.service.labels | object | Labels for gateway service |
{}
|
gateway.service.loadBalancerIP | string | Load balancer IP address if service type is LoadBalancer |
null
|
gateway.service.nodePort | int | Node port if service type is NodePort |
null
|
gateway.service.port | int | Port of the gateway service |
80
|
gateway.service.type | string | Type of the gateway service |
"ClusterIP"
|
gateway.terminationGracePeriodSeconds | int | Grace period to allow the gateway to shut down before it is killed |
30
|
gateway.tolerations | list | Tolerations for gateway pods |
[]
|
gateway.verboseLogging | bool | Enable logging of 2xx and 3xx HTTP requests |
true
|
global.clusterDomain | string | configures cluster domain ("cluster.local" by default) |
"cluster.local"
|
global.dnsNamespace | string | configures DNS service namespace |
"kube-system"
|
global.dnsService | string | configures DNS service name |
"kube-dns"
|
global.image.registry | string | Overrides the Docker registry globally for all images |
null
|
global.priorityClassName | string | Overrides the priorityClassName for all pods |
null
|
imagePullSecrets | list | Image pull secrets for Docker images |
[]
|
ingress.annotations | object |
{}
| |
ingress.enabled | bool |
false
| |
ingress.hosts[0] | string |
"loki.example.com"
| |
ingress.paths.read[0] | string |
"/api/prom/tail"
| |
ingress.paths.read[1] | string |
"/loki/api/v1/tail"
| |
ingress.paths.read[2] | string |
"/loki/api"
| |
ingress.paths.read[3] | string |
"/api/prom/rules"
| |
ingress.paths.read[4] | string |
"/loki/api/v1/rules"
| |
ingress.paths.read[5] | string |
"/prometheus/api/v1/rules"
| |
ingress.paths.read[6] | string |
"/prometheus/api/v1/alerts"
| |
ingress.paths.write[0] | string |
"/api/prom/push"
| |
ingress.paths.write[1] | string |
"/loki/api/v1/push"
| |
kubectlImage.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
kubectlImage.registry | string | The Docker registry |
"docker.io"
|
kubectlImage.repository | string | Docker image repository |
"bitnami/kubectl"
|
kubectlImage.tag | string | Overrides the image tag whose default is the chart's appVersion |
null
|
loki.analytics | object | Optional analytics configuration |
{}
|
loki.auth_enabled | bool |
true
| |
loki.commonConfig | object | Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration |
{
"path_prefix": "/var/loki",
"replication_factor": 3
}
|
loki.compactor | object | Optional compactor configuration |
{}
|
loki.config | string | Config file contents for Loki | See values.yaml |
loki.containerSecurityContext | object | The SecurityContext for Loki containers |
{
"allowPrivilegeEscalation": false,
"capabilities": {
"drop": [
"ALL"
]
},
"readOnlyRootFilesystem": true
}
|
loki.existingSecretForConfig | string | Specify an existing secret containing loki configuration. If non-empty, overrides `loki.config` |
""
|
loki.image.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
loki.image.registry | string | The Docker registry |
"docker.io"
|
loki.image.repository | string | Docker image repository |
"grafana/loki"
|
loki.image.tag | string | Overrides the image tag whose default is the chart's appVersion |
null
|
loki.limits_config | object | Limits config |
{
"enforce_metric_name": false,
"max_cache_freshness_per_query": "10m",
"reject_old_samples": true,
"reject_old_samples_max_age": "168h",
"split_queries_by_interval": "15m"
}
|
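
Values under `loki.limits_config` are rendered into Loki's `limits_config` block; a sketch that loosens a couple of the defaults shown above (the field names are standard Loki options):

```yaml
loki:
  limits_config:
    reject_old_samples: true
    reject_old_samples_max_age: 336h   # accept samples up to 14 days old
    split_queries_by_interval: 30m
```
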
loki.memcached | object | Configure memcached as an external cache for chunk and results cache. Disabled by default; you must enable and specify a host for each cache you would like to use. |
{
"chunk_cache": {
"batch_size": 256,
"enabled": false,
"host": "",
"parallelism": 10,
"service": "memcached-client"
},
"results_cache": {
"default_validity": "12h",
"enabled": false,
"host": "",
"service": "memcached-client",
"timeout": "500ms"
}
}
|
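
To use external memcached instances, enable the cache(s) you need and point `host` at your memcached service; the service names below are hypothetical:

```yaml
loki:
  memcached:
    chunk_cache:
      enabled: true
      host: memcached-chunks.loki.svc    # hypothetical memcached service
      service: memcached-client
    results_cache:
      enabled: true
      host: memcached-results.loki.svc   # hypothetical memcached service
      service: memcached-client
      default_validity: 12h
```
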
loki.podAnnotations | object | Common annotations for all pods |
{}
|
loki.podSecurityContext | object | The SecurityContext for Loki pods |
{
"fsGroup": 10001,
"runAsGroup": 10001,
"runAsNonRoot": true,
"runAsUser": 10001
}
|
loki.query_scheduler | object | Additional query scheduler config |
{}
|
loki.readinessProbe.httpGet.path | string |
"/ready"
| |
loki.readinessProbe.httpGet.port | string |
"http-metrics"
| |
loki.readinessProbe.initialDelaySeconds | int |
30
| |
loki.readinessProbe.timeoutSeconds | int |
1
| |
loki.revisionHistoryLimit | int | The number of old ReplicaSets to retain to allow rollback |
10
|
loki.rulerConfig | object | Check https://grafana.com/docs/loki/latest/configuration/#ruler for more info on configuring ruler |
{}
|
loki.schemaConfig | object | Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas |
{}
|
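
`loki.schemaConfig` is rendered into Loki's `schema_config`; a sketch of a single boltdb-shipper period on the `s3` object store, with an illustrative start date and index prefix:

```yaml
loki:
  schemaConfig:
    configs:
      - from: "2023-01-01"
        store: boltdb-shipper
        object_store: s3
        schema: v12
        index:
          prefix: loki_index_
          period: 24h
```
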
loki.server | object | Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration. |
{
"grpc_listen_port": 9095,
"http_listen_port": 3100
}
|
loki.storage | object | Storage config. Providing this will automatically populate all necessary storage configs in the templated config. |
{
"bucketNames": {
"admin": "admin",
"chunks": "chunks",
"ruler": "ruler"
},
"filesystem": {
"chunks_directory": "/var/loki/chunks",
"rules_directory": "/var/loki/rules"
},
"gcs": {
"chunkBufferSize": 0,
"enableHttp2": true,
"requestTimeout": "0s"
},
"s3": {
"accessKeyId": null,
"endpoint": null,
"http_config": {},
"insecure": false,
"region": null,
"s3": null,
"s3ForcePathStyle": false,
"secretAccessKey": null
},
"type": "s3"
}
|
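
A sketch of `loki.storage` configured for S3-compatible object storage; bucket names, endpoint, and credentials are placeholders:

```yaml
loki:
  storage:
    type: s3
    bucketNames:
      chunks: my-loki-chunks
      ruler: my-loki-ruler
      admin: my-loki-admin       # only needed for the enterprise admin API
    s3:
      region: us-east-1
      # endpoint, accessKeyId and secretAccessKey may stay null when an IAM role is used;
      # set them explicitly for S3-compatible stores such as MinIO:
      # endpoint: https://minio.example.com
      # accessKeyId: <access key>
      # secretAccessKey: <secret key>
      # s3ForcePathStyle: true
```
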
loki.storage_config | object | Additional storage config |
{
"hedging": {
"at": "250ms",
"max_per_second": 20,
"up_to": 3
}
}
|
loki.structuredConfig | object | Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig` |
{}
|
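
`loki.structuredConfig` overrides individual fields of the rendered configuration without replacing the whole file; a sketch that adjusts one ingester setting (the field shown is a standard Loki option, not something documented by this chart):

```yaml
loki:
  structuredConfig:
    ingester:
      chunk_idle_period: 1h   # example Loki configuration override
```
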
migrate | object | Options that may be necessary when performing a migration from another helm chart |
{
"fromDistributed": {
"enabled": false,
"memberlistService": ""
}
}
|
migrate.fromDistributed | object | When migrating from a distributed chart like loki-distributed or enterprise-logs |
{
"enabled": false,
"memberlistService": ""
}
|
migrate.fromDistributed.enabled | bool | Set to true if migrating from a distributed helm chart |
false
|
migrate.fromDistributed.memberlistService | string | If migrating from a distributed service, provide the distributed deployment's memberlist service DNS so the new deployment can join its ring. |
""
|
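
When migrating from the `loki-distributed` chart, a sketch of the migration values; the service name is a placeholder for your existing deployment's memberlist service:

```yaml
migrate:
  fromDistributed:
    enabled: true
    memberlistService: loki-distributed-memberlist   # existing memberlist service DNS name
```
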
minio | object | Configuration for the `minio` subchart |
{
"buckets": [
{
"name": "chunks",
"policy": "none",
"purge": false
},
{
"name": "ruler",
"policy": "none",
"purge": false
},
{
"name": "admin",
"policy": "none",
"purge": false
}
],
"drivesPerNode": 2,
"enabled": false,
"persistence": {
"size": "5Gi"
},
"replicas": 1,
"resources": {
"requests": {
"cpu": "100m",
"memory": "128Mi"
}
},
"rootPassword": "supersecret",
"rootUser": "enterprise-logs"
}
|
monitoring.alerts.annotations | object | Additional annotations for the alerts PrometheusRule resource |
{}
|
monitoring.alerts.enabled | bool | If enabled, create PrometheusRule resource with Loki alerting rules |
true
|
monitoring.alerts.labels | object | Additional labels for the alerts PrometheusRule resource |
{}
|
monitoring.alerts.namespace | string | Alternative namespace to create alerting rules PrometheusRule resource in |
null
|
monitoring.dashboards.annotations | object | Additional annotations for the dashboards ConfigMap |
{}
|
monitoring.dashboards.enabled | bool | If enabled, create configmap with dashboards for monitoring Loki |
true
|
monitoring.dashboards.labels | object | Additional labels for the dashboards ConfigMap |
{}
|
monitoring.dashboards.namespace | string | Alternative namespace to create dashboards ConfigMap in |
null
|
monitoring.rules.additionalGroups | list | Additional groups to add to the rules file |
[]
|
monitoring.rules.alerting | bool | Include alerting rules |
true
|
monitoring.rules.annotations | object | Additional annotations for the rules PrometheusRule resource |
{}
|
monitoring.rules.enabled | bool | If enabled, create PrometheusRule resource with Loki recording rules |
true
|
monitoring.rules.labels | object | Additional labels for the rules PrometheusRule resource |
{}
|
monitoring.rules.namespace | string | Alternative namespace to create recording rules PrometheusRule resource in |
null
|
monitoring.selfMonitoring.enabled | bool |
true
| |
monitoring.selfMonitoring.grafanaAgent.annotations | object | Grafana Agent annotations |
{}
|
monitoring.selfMonitoring.grafanaAgent.enableConfigReadAPI | bool | Enable the config read api on port 8080 of the agent |
false
|
monitoring.selfMonitoring.grafanaAgent.installOperator | bool | Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds |
true
|
monitoring.selfMonitoring.grafanaAgent.labels | object | Additional Grafana Agent labels |
{}
|
monitoring.selfMonitoring.grafanaAgent.namespace | string | Alternative namespace for Grafana Agent resources |
null
|
monitoring.selfMonitoring.logsInstance.annotations | object | LogsInstance annotations |
{}
|
monitoring.selfMonitoring.logsInstance.clients | string | Additional clients for remote write |
null
|
monitoring.selfMonitoring.logsInstance.labels | object | Additional LogsInstance labels |
{}
|
monitoring.selfMonitoring.logsInstance.namespace | string | Alternative namespace for LogsInstance resources |
null
|
monitoring.selfMonitoring.lokiCanary.annotations | object | Additional annotations for the `loki-canary` Daemonset |
{}
|
monitoring.selfMonitoring.lokiCanary.enabled | bool |
true
| |
monitoring.selfMonitoring.lokiCanary.extraArgs | list | Additional CLI arguments for the `loki-canary` command |
[]
|
monitoring.selfMonitoring.lokiCanary.extraEnv | list | Environment variables to add to the canary pods |
[]
|
monitoring.selfMonitoring.lokiCanary.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the canary pods |
[]
|
monitoring.selfMonitoring.lokiCanary.image | object | Image to use for loki canary |
{
"pullPolicy": "IfNotPresent",
"registry": "docker.io",
"repository": "grafana/loki-canary",
"tag": null
}
|
monitoring.selfMonitoring.lokiCanary.image.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
monitoring.selfMonitoring.lokiCanary.image.registry | string | The Docker registry |
"docker.io"
|
monitoring.selfMonitoring.lokiCanary.image.repository | string | Docker image repository |
"grafana/loki-canary"
|
monitoring.selfMonitoring.lokiCanary.image.tag | string | Overrides the image tag whose default is the chart's appVersion |
null
|
monitoring.selfMonitoring.lokiCanary.nodeSelector | object | Node selector for canary pods |
{}
|
monitoring.selfMonitoring.lokiCanary.resources | object | Resource requests and limits for the canary |
{}
|
monitoring.selfMonitoring.lokiCanary.tolerations | list | Tolerations for canary pods |
[]
|
monitoring.selfMonitoring.podLogs.annotations | object | PodLogs annotations |
{}
|
monitoring.selfMonitoring.podLogs.labels | object | Additional PodLogs labels |
{}
|
monitoring.selfMonitoring.podLogs.namespace | string | Alternative namespace for PodLogs resources |
null
|
monitoring.selfMonitoring.podLogs.relabelings | list | PodLogs relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig |
[]
|
monitoring.selfMonitoring.tenant | string | Tenant to use for self monitoring |
"self-monitoring"
|
monitoring.serviceMonitor.annotations | object | ServiceMonitor annotations |
{}
|
monitoring.serviceMonitor.enabled | bool | If enabled, ServiceMonitor resources for Prometheus Operator are created |
true
|
monitoring.serviceMonitor.interval | string | ServiceMonitor scrape interval |
null
|
monitoring.serviceMonitor.labels | object | Additional ServiceMonitor labels |
{}
|
monitoring.serviceMonitor.metricsInstance | object | If defined, will create a MetricsInstance for the Grafana Agent Operator. |
{
"annotations": {},
"labels": {},
"remoteWrite": null
}
|
monitoring.serviceMonitor.metricsInstance.annotations | object | MetricsInstance annotations |
{}
|
monitoring.serviceMonitor.metricsInstance.labels | object | Additional MetricsInstance labels |
{}
|
monitoring.serviceMonitor.metricsInstance.remoteWrite | string | If defined, a MetricsInstance will be created to remote write metrics. |
null
|
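
A hedged sketch of `metricsInstance.remoteWrite`, assuming the Grafana Agent Operator's usual remote-write spec where each entry takes a `url`; the endpoint below is hypothetical:

```yaml
monitoring:
  serviceMonitor:
    metricsInstance:
      remoteWrite:
        - url: http://prometheus.monitoring.svc:9090/api/v1/write   # hypothetical Prometheus-compatible endpoint
```
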
monitoring.serviceMonitor.namespace | string | Alternative namespace for ServiceMonitor resources |
null
|
monitoring.serviceMonitor.namespaceSelector | object | Namespace selector for ServiceMonitor resources |
{}
|
monitoring.serviceMonitor.relabelings | list | ServiceMonitor relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig |
[]
|
monitoring.serviceMonitor.scheme | string | ServiceMonitor will use http by default, but you can pick https as well |
"http"
|
monitoring.serviceMonitor.scrapeTimeout | string | ServiceMonitor scrape timeout in Go duration format (e.g. 15s) |
null
|
monitoring.serviceMonitor.tlsConfig | string | ServiceMonitor will use these tlsConfig settings to make the health check requests |
null
|
nameOverride | string | Overrides the chart's name |
null
|
networkPolicy.alertmanager.namespaceSelector | object | Specifies the namespace the alertmanager is running in |
{}
|
networkPolicy.alertmanager.podSelector | object | Specifies the alertmanager Pods. As this is cross-namespace communication, you also need the namespaceSelector. |
{}
|
networkPolicy.alertmanager.port | int | Specify the alertmanager port used for alerting |
9093
|
networkPolicy.discovery.namespaceSelector | object | Specifies the namespace the discovery Pods are running in |
{}
|
networkPolicy.discovery.podSelector | object | Specifies the Pods labels used for discovery. As this is cross-namespace communication, you also need the namespaceSelector. |
{}
|
networkPolicy.discovery.port | int | Specify the port used for discovery |
null
|
networkPolicy.enabled | bool | Specifies whether Network Policies should be created |
false
|
networkPolicy.externalStorage.cidrs | list | Specifies specific network CIDRs you want to limit access to |
[]
|
networkPolicy.externalStorage.ports | list | Specify the ports used for external storage, e.g. AWS S3 |
[]
|
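
A sketch that restricts egress to external object storage when network policies are enabled; the CIDR and port are placeholders for your storage endpoint:

```yaml
networkPolicy:
  enabled: true
  externalStorage:
    cidrs:
      - 10.0.0.0/8   # example network containing your object store
    ports:
      - 443
```
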
networkPolicy.ingress.namespaceSelector | object | Specifies the namespaces which are allowed to access the http port |
{}
|
networkPolicy.ingress.podSelector | object | Specifies the Pods which are allowed to access the http port. As this is cross-namespace communication, you also need the namespaceSelector. |
{}
|
networkPolicy.metrics.cidrs | list | Specifies specific network CIDRs which are allowed to access the metrics port. In case you use namespaceSelector, you also have to specify your kubelet networks here. The metrics ports are also used for probes. |
[]
|
networkPolicy.metrics.namespaceSelector | object | Specifies the namespaces which are allowed to access the metrics port |
{}
|
networkPolicy.metrics.podSelector | object | Specifies the Pods which are allowed to access the metrics port. As this is cross-namespace communication, you also need the namespaceSelector. |
{}
|
rbac.pspEnabled | bool | If pspEnabled is true, a PodSecurityPolicy is created for Kubernetes clusters that use PSPs. |
false
|
rbac.sccEnabled | bool | For OpenShift, set pspEnabled to 'false' and sccEnabled to 'true' to use SecurityContextConstraints. |
false
|
read.affinity | string | Affinity for read pods. Passed through `tpl` and, thus, to be configured as a string | Hard node and soft zone anti-affinity |
read.autoscaling.enabled | bool | Enable autoscaling for the read; this is only used if `queryIndex.enabled: true` |
false
|
read.autoscaling.maxReplicas | int | Maximum autoscaling replicas for the read |
3
|
read.autoscaling.minReplicas | int | Minimum autoscaling replicas for the read |
1
|
read.autoscaling.targetCPUUtilizationPercentage | int | Target CPU utilisation percentage for the read |
60
|
read.autoscaling.targetMemoryUtilizationPercentage | string | Target memory utilisation percentage for the read |
null
|
read.extraArgs | list | Additional CLI args for the read |
[]
|
read.extraEnv | list | Environment variables to add to the read pods |
[]
|
read.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the read pods |
[]
|
read.extraVolumeMounts | list | Volume mounts to add to the read pods |
[]
|
read.extraVolumes | list | Volumes to add to the read pods |
[]
|
read.image.registry | string | The Docker registry for the read image. Overrides `loki.image.registry` |
null
|
read.image.repository | string | Docker image repository for the read image. Overrides `loki.image.repository` |
null
|
read.image.tag | string | Docker image tag for the read image. Overrides `loki.image.tag` |
null
|
read.nodeSelector | object | Node selector for read pods |
{}
|
read.persistence.size | string | Size of persistent disk |
"10Gi"
|
read.persistence.storageClass | string | Storage class to be used. If defined, sets `storageClassName: <storageClass>` on the PersistentVolumeClaim |
null
|
read.podAnnotations | object | Annotations for read pods |
{}
|
read.priorityClassName | string | The name of the PriorityClass for read pods |
null
|
read.replicas | int | Number of replicas for the read |
3
|
read.resources | object | Resource requests and limits for the read |
{}
|
read.selectorLabels | object | Additional selector labels for each `read` pod |
{}
|
read.serviceLabels | object | Labels for read service |
{}
|
read.terminationGracePeriodSeconds | int | Grace period to allow the read to shut down before it is killed |
30
|
read.tolerations | list | Tolerations for read pods |
[]
|
serviceAccount.annotations | object | Annotations for the service account |
{}
|
serviceAccount.automountServiceAccountToken | bool | Set this toggle to false to opt out of automounting API credentials for the service account |
true
|
serviceAccount.create | bool | Specifies whether a ServiceAccount should be created |
true
|
serviceAccount.imagePullSecrets | list | Image pull secrets for the service account |
[]
|
serviceAccount.name | string | The name of the ServiceAccount to use. If not set and create is true, a name is generated using the fullname template |
null
|
singleBinary.affinity | string | Affinity for single binary pods. Passed through `tpl` and, thus, to be configured as a string | Hard node and soft zone anti-affinity |
singleBinary.autoscaling.enabled | bool | Enable autoscaling; this is only used if `queryIndex.enabled: true` |
false
|
singleBinary.autoscaling.maxReplicas | int | Maximum autoscaling replicas for the single binary |
3
|
singleBinary.autoscaling.minReplicas | int | Minimum autoscaling replicas for the single binary |
1
|
singleBinary.autoscaling.targetCPUUtilizationPercentage | int | Target CPU utilisation percentage for the single binary |
60
|
singleBinary.autoscaling.targetMemoryUtilizationPercentage | string | Target memory utilisation percentage for the single binary |
null
|
singleBinary.extraArgs | list | Additional CLI args for the single binary |
[]
|
singleBinary.extraEnv | list | Environment variables to add to the single binary pods |
[]
|
singleBinary.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the single binary pods |
[]
|
singleBinary.extraVolumeMounts | list | Volume mounts to add to the single binary pods |
[]
|
singleBinary.extraVolumes | list | Volumes to add to the single binary pods |
[]
|
singleBinary.image.registry | string | The Docker registry for the single binary image. Overrides `loki.image.registry` |
null
|
singleBinary.image.repository | string | Docker image repository for the single binary image. Overrides `loki.image.repository` |
null
|
singleBinary.image.tag | string | Docker image tag for the single binary image. Overrides `loki.image.tag` |
null
|
singleBinary.nodeSelector | object | Node selector for single binary pods |
{}
|
singleBinary.persistence.size | string | Size of persistent disk |
"10Gi"
|
singleBinary.persistence.storageClass | string | Storage class to be used. If defined, sets `storageClassName: <storageClass>` on the PersistentVolumeClaim |
null
|
singleBinary.podAnnotations | object | Annotations for single binary pods |
{}
|
singleBinary.priorityClassName | string | The name of the PriorityClass for single binary pods |
null
|
singleBinary.replicas | int | Number of replicas for the single binary |
1
|
singleBinary.resources | object | Resource requests and limits for the single binary |
{}
|
singleBinary.selectorLabels | object | Additional selector labels for each `single binary` pod |
{}
|
singleBinary.terminationGracePeriodSeconds | int | Grace period to allow the single binary to shut down before it is killed |
30
|
singleBinary.tolerations | list | Tolerations for single binary pods |
[]
|
test | object | Section for configuring optional Helm test |
{
"annotations": {},
"enabled": true,
"image": {
"pullPolicy": "IfNotPresent",
"registry": "docker.io",
"repository": "grafana/loki-helm-test",
"tag": null
},
"labels": {},
"prometheusAddress": "http://prometheus:9090",
"timeout": "1m"
}
|
test.annotations | object | Additional annotations for test pods |
{}
|
test.image | object | Image to use for the test pods |
{
"pullPolicy": "IfNotPresent",
"registry": "docker.io",
"repository": "grafana/loki-helm-test",
"tag": null
}
|
test.image.pullPolicy | string | Docker image pull policy |
"IfNotPresent"
|
test.image.registry | string | The Docker registry |
"docker.io"
|
test.image.repository | string | Docker image repository |
"grafana/loki-helm-test"
|
test.image.tag | string | Overrides the image tag whose default is the chart's appVersion |
null
|
test.labels | object | Additional labels for the test pods |
{}
|
test.prometheusAddress | string | Address of the prometheus server to query for the test |
"http://prometheus:9090"
|
test.timeout | string | Timeout for the test to complete before failing |
"1m"
|
tracing.jaegerAgentHost | string |
""
| |
write.affinity | string | Affinity for write pods. Passed through `tpl` and, thus, to be configured as a string | Hard node and soft zone anti-affinity |
write.extraArgs | list | Additional CLI args for the write |
[]
|
write.extraEnv | list | Environment variables to add to the write pods |
[]
|
write.extraEnvFrom | list | Environment variables from secrets or configmaps to add to the write pods |
[]
|
write.extraVolumeMounts | list | Volume mounts to add to the write pods |
[]
|
write.extraVolumes | list | Volumes to add to the write pods |
[]
|
write.image.registry | string | The Docker registry for the write image. Overrides `loki.image.registry` |
null
|
write.image.repository | string | Docker image repository for the write image. Overrides `loki.image.repository` |
null
|
write.image.tag | string | Docker image tag for the write image. Overrides `loki.image.tag` |
null
|
write.nodeSelector | object | Node selector for write pods |
{}
|
write.persistence.size | string | Size of persistent disk |
"10Gi"
|
write.persistence.storageClass | string | Storage class to be used. If defined, sets `storageClassName: <storageClass>` on the PersistentVolumeClaim |
null
|
write.podAnnotations | object | Annotations for write pods |
{}
|
write.priorityClassName | string | The name of the PriorityClass for write pods |
null
|
write.replicas | int | Number of replicas for the write |
3
|
write.resources | object | Resource requests and limits for the write |
{}
|
write.selectorLabels | object | Additional selector labels for each `write` pod |
{}
|
write.serviceLabels | object | Labels for the ingester (write) service |
{}
|
write.terminationGracePeriodSeconds | int | Grace period to allow the write to shut down before it is killed. Especially for the ingester, this must be increased: it must be long enough for writes to shut down gracefully, flushing or transferring all data and successfully leaving the member ring on shutdown. |
300
|
write.tolerations | list | Tolerations for write pods |
[]
|