Helm show all grafana/grafana

helm show all grafana/grafana
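
The dump that follows is the concatenated output of the command above: the Chart.yaml metadata, the default values.yaml, and the chart README, separated by `---` lines. As a quick sketch (assuming the `grafana` repository alias has already been added, as described in the README section further down), the same pieces can also be inspected one at a time:

```console
helm show chart grafana/grafana    # Chart.yaml metadata only
helm show values grafana/grafana   # default values.yaml only
helm show readme grafana/grafana   # chart README only
helm show all grafana/grafana      # everything, as captured on this page
```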


apiVersion: v2
appVersion: 8.2.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
kubeVersion: ^1.8.0-0
maintainers:
- email: [email protected]
  name: zanhsieh
- email: [email protected]
  name: rtluckie
- email: [email protected]
  name: maorfr
- email: [email protected]
  name: Xtigyro
- email: [email protected]
  name: torstenwalter
name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.17.8

---
rbac:
  create: true
  ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
  # useExistingRole: name-of-some-(cluster)role
  pspEnabled: true
  pspUseAppArmor: true
  namespaced: false
  extraRoleRules: []
  # - apiGroups: []
  #   resources: []
  #   verbs: []
  extraClusterRoleRules: []
  # - apiGroups: []
  #   resources: []
  #   verbs: []
serviceAccount:
  create: true
  name:
  nameTest:
#  annotations:
#    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
  autoMount: true

replicas: 1

## Create HorizontalPodAutoscaler object for deployment type
#
autoscaling:
  enabled: false
#   minReplicas: 1
#   maxReplicas: 10
#   metrics:
#   - type: Resource
#     resource:
#       name: cpu
#       targetAverageUtilization: 60
#   - type: Resource
#     resource:
#       name: memory
#       targetAverageUtilization: 60

## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
#  minAvailable: 1
#  maxUnavailable: 1

## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
  type: RollingUpdate
readinessProbe:
  httpGet:
    path: /api/health
    port: 3000

livenessProbe:
  httpGet:
    path: /api/health
    port: 3000
  initialDelaySeconds: 60
  timeoutSeconds: 30
  failureThreshold: 10
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"

image:
  repository: grafana/grafana
  tag: 8.2.5
  sha: ""
  pullPolicy: IfNotPresent

  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistrKeySecretName

testFramework:
  enabled: true
  image: "bats/bats"
  tag: "v1.4.1"
  imagePullPolicy: IfNotPresent
  securityContext: {}

securityContext:
  runAsUser: 472
  runAsGroup: 472
  fsGroup: 472

containerSecurityContext:
  {}

extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/grafana/ssl/
  #   subPath: certificates.crt # (optional)
  #   configMap: certs-configmap
  #   readOnly: true

extraEmptyDirMounts: []
  # - name: provisioning-notifiers
  #   mountPath: /etc/grafana/provisioning/notifiers


# Apply extra labels to common labels.
extraLabels: {}

## Assign a PriorityClassName to pods if set
# priorityClassName:

downloadDashboardsImage:
  repository: curlimages/curl
  tag: 7.73.0
  sha: ""
  pullPolicy: IfNotPresent

downloadDashboards:
  env: {}
  envFromSecret: ""
  resources: {}

## Pod Annotations
# podAnnotations: {}

## Pod Labels
# podLabels: {}

podPortName: grafana

## Deployment annotations
# annotations: {}

## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  enabled: true
  type: ClusterIP
  port: 80
  targetPort: 3000
    # targetPort: 4181 To be used with a proxy extraContainer
  annotations: {}
  labels: {}
  portName: service

serviceMonitor:
  ## If true, a ServiceMonitor CRD is created for a prometheus operator
  ## https://github.com/coreos/prometheus-operator
  ##
  enabled: false
  path: /metrics
  #  namespace: monitoring  (defaults to use the namespace this chart is deployed to)
  labels: {}
  interval: 1m
  scheme: http
  tlsConfig: {}
  scrapeTimeout: 30s
  relabelings: []

extraExposePorts: []
 # - name: keycloak
 #   port: 8080
 #   targetPort: 8080
 #   type: ClusterIP

# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
  # - ip: "1.2.3.4"
  #   hostnames:
  #     - "my.host.com"

ingress:
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
  # Values can be templated
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /

  # pathType is only for k8s >= 1.18
  pathType: Prefix

  hosts:
    - chart-example.local
  ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
  extraPaths: []
  # - path: /*
  #   backend:
  #     serviceName: ssl-redirect
  #     servicePort: use-annotation
  ## Or for k8s > 1.19
  # - path: /*
  #   pathType: Prefix
  #   backend:
  #     service:
  #       name: ssl-redirect
  #       port:
  #         name: use-annotation


  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
#  limits:
#    cpu: 100m
#    memory: 128Mi
#  requests:
#    cpu: 100m
#    memory: 128Mi

## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}

## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}

extraInitContainers: []

## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
extraContainers: ""
# extraContainers: |
# - name: proxy
#   image: quay.io/gambol99/keycloak-proxy:latest
#   args:
#   - -provider=github
#   - -client-id=
#   - -client-secret=
#   - -github-org=<ORG_NAME>
#   - -email-domain=*
#   - -cookie-secret=
#   - -http-address=http://0.0.0.0:4181
#   - -upstream-url=http://127.0.0.1:3000
#   ports:
#     - name: proxy-web
#       containerPort: 4181

## Volumes that can be used in init containers that will not be mounted to deployment pods
extraContainerVolumes: []
#  - name: volume-from-secret
#    secret:
#      secretName: secret-to-mount
#  - name: empty-dir-volume
#    emptyDir: {}

## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  type: pvc
  enabled: false
  # storageClassName: default
  accessModes:
    - ReadWriteOnce
  size: 10Gi
  # annotations: {}
  finalizers:
    - kubernetes.io/pvc-protection
  # selectorLabels: {}
  # subPath: ""
  # existingClaim:
  ## If persistence is not enabled, this allows to mount the
  ## local storage in-memory to improve performance
  ##
  inMemory:
    enabled: false
    ## The maximum usage on memory medium EmptyDir would be
    ## the minimum value between the SizeLimit specified
    ## here and the sum of memory limits of all containers in a pod
    ##
    # sizeLimit: 300Mi

initChownData:
  ## If false, data ownership will not be reset at startup
  ## This allows the prometheus-server to be run with an arbitrary user
  ##
  enabled: true

  ## initChownData container image
  ##
  image:
    repository: busybox
    tag: "1.31.1"
    sha: ""
    pullPolicy: IfNotPresent

  ## initChownData resource requests and limits
  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources: {}
  #  limits:
  #    cpu: 100m
  #    memory: 128Mi
  #  requests:
  #    cpu: 100m
  #    memory: 128Mi

# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword

# Use an existing secret for the admin user.
admin:
  existingSecret: ""
  userKey: admin-user
  passwordKey: admin-password

## Define command to be executed at startup by grafana container
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
## Default is "run.sh" as defined in grafana's Dockerfile
# command:
# - "sh"
# - "/run.sh"

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## Use an alternate scheduler, e.g. "stork".
##
## Extra environment variables that will be pass onto deployment pods
##
## to provide grafana with access to CloudWatch on AWS EKS:
## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
## same oidc eks provider as noted before (same as the existing line)
## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
##
##  "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
##
## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
##
## env:
##   AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
##   AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
##   AWS_REGION: us-east-1
##
## 5. uncomment the EKS section in extraSecretMounts: below
## 6. uncomment the annotation section in the serviceAccount: above
## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn

env: {}

## "valueFrom" environment variable references that will be added to deployment pods
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
## Renders in container spec as:
##   env:
##     ...
##     - name: <key>
##       valueFrom:
##         <value rendered as YAML>
envValueFrom: {}

## The name of a secret in the same kubernetes namespace which contain values to be added to the environment
## This can be useful for auth tokens, etc. Value is templated.
envFromSecret: ""

## Sensible environment variables that will be rendered as new secret object
## This can be useful for auth tokens, etc
envRenderSecret: {}

## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
envFromSecrets: []
## - name: secret-name
##   optional: true

# Inject Kubernetes services as environment variables.
# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
enableServiceLinks: true

## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
  # - name: secret-files
  #   mountPath: /etc/secrets
  #   secretName: grafana-secret-files
  #   readOnly: true
  #   subPath: ""
  #
  # for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
  # - name: aws-iam-token
  #   mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
  #   readOnly: true
  #   projected:
  #     defaultMode: 420
  #     sources:
  #       - serviceAccountToken:
  #           audience: sts.amazonaws.com
  #           expirationSeconds: 86400
  #           path: token
  #
  # for CSI e.g. Azure Key Vault use the following
  # - name: secrets-store-inline
  #  mountPath: /run/secrets
  #  readOnly: true
  #  csi:
  #    driver: secrets-store.csi.k8s.io
  #    readOnly: true
  #    volumeAttributes:
  #      secretProviderClass: "akv-grafana-spc"
  #    nodePublishSecretRef:                       # Only required when using service principal mode
  #       name: grafana-akv-creds                  # Only required when using service principal mode

## Additional grafana server volume mounts
# Defines additional volume mounts.
extraVolumeMounts: []
  # - name: extra-volume-0
  #   mountPath: /mnt/volume0
  #   readOnly: true
  #   existingClaim: volume-claim
  # - name: extra-volume-1
  #   mountPath: /mnt/volume1
  #   readOnly: true
  #   hostPath: /usr/shared/

## Pass the plugins you want installed as a list.
##
plugins: []
  # - digrich-bubblechart-panel
  # - grafana-clock-panel

## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
#  datasources.yaml:
#    apiVersion: 1
#    datasources:
#    - name: Prometheus
#      type: prometheus
#      url: http://prometheus-prometheus-server
#      access: proxy
#      isDefault: true
#    - name: CloudWatch
#      type: cloudwatch
#      access: proxy
#      uid: cloudwatch
#      editable: false
#      jsonData:
#        authType: default
#        defaultRegion: us-east-1

## Configure notifiers
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
##
notifiers: {}
#  notifiers.yaml:
#    notifiers:
#    - name: email-notifier
#      type: email
#      uid: email1
#      # either:
#      org_id: 1
#      # or
#      org_name: Main Org.
#      is_default: true
#      settings:
#        addresses: [email protected]
#    delete_notifiers:

## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
#  dashboardproviders.yaml:
#    apiVersion: 1
#    providers:
#    - name: 'default'
#      orgId: 1
#      folder: ''
#      type: file
#      disableDeletion: false
#      editable: true
#      options:
#        path: /var/lib/grafana/dashboards/default

## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## dashboards per provider, use provider name as key.
##
dashboards: {}
  # default:
  #   some-dashboard:
  #     json: |
  #       $RAW_JSON
  #   custom-dashboard:
  #     file: dashboards/custom-dashboard.json
  #   prometheus-stats:
  #     gnetId: 2
  #     revision: 2
  #     datasource: Prometheus
  #   local-dashboard:
  #     url: https://example.com/repository/test.json
  #     token: ''
  #   local-dashboard-base64:
  #     url: https://example.com/repository/test-b64.json
  #     token: ''
  #     b64content: true

## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
## ConfigMap data example:
##
## data:
##   example-dashboard.json: |
##     RAW_JSON
##
dashboardsConfigMaps: {}
#  default: ""

## Grafana's primary configuration
## NOTE: values in map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
  paths:
    data: /var/lib/grafana/
    logs: /var/log/grafana
    plugins: /var/lib/grafana/plugins
    provisioning: /etc/grafana/provisioning
  analytics:
    check_for_updates: true
  log:
    mode: console
  grafana_net:
    url: https://grafana.net
## grafana Authentication can be enabled with the following values on grafana.ini
 # server:
      # The full public facing url you use in browser, used for redirects and emails
 #    root_url:
 # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
 # auth.github:
 #    enabled: false
 #    allow_sign_up: false
 #    scopes: user:email,read:org
 #    auth_url: https://github.com/login/oauth/authorize
 #    token_url: https://github.com/login/oauth/access_token
 #    api_url: https://api.github.com/user
 #    team_ids:
 #    allowed_organizations:
 #    client_id:
 #    client_secret:
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
  # auth.ldap:
  #   enabled: true
  #   allow_sign_up: true
  #   config_file: /etc/grafana/ldap.toml

## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
ldap:
  enabled: false
  # `existingSecret` is a reference to an existing secret containing the ldap configuration
  # for Grafana in a key `ldap-toml`.
  existingSecret: ""
  # `config` is the content of `ldap.toml` that will be stored in the created secret
  config: ""
  # config: |-
  #   verbose_logging = true

  #   [[servers]]
  #   host = "my-ldap-server"
  #   port = 636
  #   use_ssl = true
  #   start_tls = false
  #   ssl_skip_verify = false
  #   bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"

## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
  # `existingSecret` is a reference to an existing secret containing the smtp configuration
  # for Grafana.
  existingSecret: ""
  userKey: "user"
  passwordKey: "password"

## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
sidecar:
  image:
    tag: 1.14.2
    sha: ""
  imagePullPolicy: IfNotPresent
  resources: {}
#     memory: 100Mi
#   requests:
#     cpu: 50m
#     memory: 50Mi
  # skipTlsVerify Set to true to skip tls verification for kube api calls
  # skipTlsVerify: true
  enableUniqueFilenames: false
  dashboards:
    enabled: false
    SCProvider: true
    # label that the configmaps with dashboards are marked with
    label: grafana_dashboard
    # value of label that the configmaps with dashboards are set to
    labelValue: null
    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
    folder: /tmp/dashboards
    # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
    defaultFolderName: null
    # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces.
    searchNamespace: null
    watchMethod: WATCH
    # search in configmap, secret or both
    resource: both
    # If specified, the sidecar will look for annotation with this name to create folder and put graph here.
    folderAnnotation: null
    # Absolute path to shell script to execute after a configmap got reloaded
    script: null
    # provider configuration that lets grafana manage the dashboards
    provider:
      # name of the provider, should be unique
      name: sidecarProvider
      # orgid as configured in grafana
      orgid: 1
      # folder in which the dashboards should be imported in grafana
      folder: ''
      # type of the provider
      type: file
      # disableDelete to activate a import-only behaviour
      disableDelete: false
      # allow updating provisioned dashboards from the UI
      allowUiUpdates: false
      # allow Grafana to replicate dashboard structure from filesystem
      foldersFromFilesStructure: false
  datasources:
    enabled: false
    # label that the configmaps with datasources are marked with
    label: grafana_datasource
    # value of label that the configmaps with datasources are set to
    labelValue: null
    # If specified, the sidecar will search for datasource config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
    watchMethod: LIST
    # search in configmap, secret or both
    resource: both
  notifiers:
    enabled: false
    # label that the configmaps with notifiers are marked with
    label: grafana_notifier
    # If specified, the sidecar will search for notifier config-maps inside this namespace.
    # Otherwise the namespace in which the sidecar is running will be used.
    # It's also possible to specify ALL to search in all namespaces
    searchNamespace: null
    # search in configmap, secret or both
    resource: both

## Override the deployment namespace
##
namespaceOverride: ""

## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10

## Add a seperate remote image renderer deployment/service
imageRenderer:
  # Enable the image-renderer deployment & service
  enabled: false
  replicas: 1
  image:
    # image-renderer Image repository
    repository: grafana/grafana-image-renderer
    # image-renderer Image tag
    tag: latest
    # image-renderer Image sha (optional)
    sha: ""
    # image-renderer ImagePullPolicy
    pullPolicy: Always
  # extra environment variables
  env:
    HTTP_HOST: "0.0.0.0"
    # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
    # RENDERING_MODE: clustered
  # image-renderer deployment serviceAccount
  serviceAccountName: ""
  # image-renderer deployment securityContext
  securityContext: {}
  # image-renderer deployment Host Aliases
  hostAliases: []
  # image-renderer deployment priority class
  priorityClassName: ''
  service:
    # Enable the image-renderer service
    enabled: true
    # image-renderer service port name
    portName: 'http'
    # image-renderer service port used by both service and deployment
    port: 8081
    targetPort: 8081
  # In case a sub_path is used this needs to be added to the image renderer callback
  grafanaSubPath: ""
  # name of the image-renderer port on the pod
  podPortName: http
  # number of image-renderer replica sets to keep
  revisionHistoryLimit: 10
  networkPolicy:
    # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
    limitIngress: true
    # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
    limitEgress: false
  resources: {}
#   limits:
#     cpu: 100m
#     memory: 100Mi
#   requests:
#     cpu: 50m
#     memory: 50Mi

---
# Grafana Helm Chart

* Installs the web dashboarding system [Grafana](http://grafana.org/)

## Get Repo Info

```console
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
```

_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install my-release grafana/grafana
```
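
If you want to override the chart defaults, you can pass your own values file at install time. A minimal sketch, assuming a hypothetical `my-values.yaml` in the current directory:

```console
# Install the release (or upgrade it if it already exists) with custom values
helm upgrade --install my-release grafana/grafana -f my-values.yaml
```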

### Example ingress with path

With grafana 6.3 and above:
```yaml
grafana.ini:
  server:
    serve_from_sub_path: true
ingress:
  enabled: true
  hosts:
    - "monitoring.example.com"
  path: "/grafana"
```

### Example of extraVolumeMounts

A volume can be of type persistentVolumeClaim or hostPath, but not both at the same time.
If neither an existingClaim nor a hostPath argument is given, the volume type defaults to emptyDir.

```yaml
- extraVolumeMounts:
  - name: plugins
    mountPath: /var/lib/grafana/plugins
    subPath: configs/grafana/plugins
    existingClaim: existing-grafana-claim
    readOnly: false
  - name: dashboards
    mountPath: /var/lib/grafana/dashboards
    hostPath: /usr/shared/grafana/dashboards
    readOnly: false
```
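
For comparison, a mount that specifies neither `existingClaim` nor `hostPath` falls back to an emptyDir volume. A sketch; the `scratch` name and mount path are purely illustrative:

```yaml
- extraVolumeMounts:
  # no existingClaim or hostPath given, so the chart backs this with an emptyDir volume
  - name: scratch
    mountPath: /var/lib/grafana/scratch
    readOnly: false
```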

## Import dashboards

There are a few methods to import dashboards into Grafana. Below are some examples, all defined under the `dashboards` values key:

```yaml
dashboards:
  default:
    some-dashboard:
      json: |
        {
          "annotations":

          ...

          "title": "Some Dashboard",
          "uid": "abcd1234",
          "version": 1
        }
    custom-dashboard:
      # This is a path to a file inside the dashboards directory inside the chart directory
      file: dashboards/custom-dashboard.json
    prometheus-stats:
      # Ref: https://grafana.com/dashboards/2
      gnetId: 2
      revision: 2
      datasource: Prometheus
    local-dashboard:
      url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
```

## BASE64 dashboards

Dashboards can be stored on a server that does not return JSON directly but a base64-encoded file (e.g. Gerrit). If the `b64content` entry of a `url` dashboard is set to true, the file is decoded after download; if it is not set, or set to false, no decoding is applied before the file is saved to disk.

### Gerrit use case

For a Gerrit server, where the project name and file id must be URL-encoded (`/` replaced by `%2F`), the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>.
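
A sketch of such a `url` dashboard entry with base64 decoding enabled; the dashboard name and Gerrit host are illustrative:

```yaml
dashboards:
  default:
    gerrit-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      # decode the downloaded file from base64 before writing it to disk
      b64content: true
```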

## Sidecar for dashboards

If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
dashboards are deleted or updated accordingly.
A recommendation is to use one configmap per dashboard, as removing some of several dashboards kept in
one configmap is currently not properly mirrored in grafana.

Example dashboard config:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: sample-grafana-dashboard
  labels:
     grafana_dashboard: "1"
data:
  k8s-dashboard.json: |-
  [...]
```
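
For reference, a minimal values sketch that enables the dashboard sidecar and matches the label used in the ConfigMap above (other settings left at their chart defaults):

```yaml
sidecar:
  dashboards:
    enabled: true
    # only ConfigMaps/Secrets carrying this label are picked up
    label: grafana_dashboard
    # set to ALL to watch every namespace instead of only the release namespace
    searchNamespace: null
```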

## Sidecar for datasources

If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported.

Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.


```yaml
datasources:
 datasources.yaml:
   apiVersion: 1
   datasources:
      # <string, required> name of the datasource. Required
    - name: Graphite
      # <string, required> datasource type. Required
      type: graphite
      # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
      access: proxy
      # <int> org id. will default to orgId 1 if not specified
      orgId: 1
      # <string> url
      url: http://localhost:8080
      # <string> database password, if used
      password:
      # <string> database user, if used
      user:
      # <string> database name, if used
      database:
      # <bool> enable/disable basic auth
      basicAuth:
      # <string> basic auth username
      basicAuthUser:
      # <string> basic auth password
      basicAuthPassword:
      # <bool> enable/disable with credentials headers
      withCredentials:
      # <bool> mark as default datasource. Max one per org
      isDefault:
      # <map> fields that will be converted to json and stored in json_data
      jsonData:
         graphiteVersion: "1.1"
         tlsAuth: true
         tlsAuthWithCACert: true
      # <string> json object of data that will be encrypted.
      secureJsonData:
        tlsCACert: "..."
        tlsClientCert: "..."
        tlsClientKey: "..."
      version: 1
      # <bool> allow users to edit datasources from the UI.
      editable: false
```
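
Since secrets are the recommended carrier, here is a sketch of the same kind of configuration delivered through a labelled Secret for the datasource sidecar; the Secret name, file name and Prometheus URL are illustrative assumptions:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: grafana-datasource-prometheus
  labels:
    # must match sidecar.datasources.label
    grafana_datasource: "1"
stringData:
  prometheus.yaml: |-
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        access: proxy
        orgId: 1
        url: http://prometheus-server.monitoring.svc:9090
        isDefault: true
```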

## Sidecar for notifiers

If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the notification channels in grafana can be imported. The secrets must be created before
`helm install` so that the notifiers init container can list the secrets.

Secrets are recommended over configmaps for this use case because alert notification channels usually contain
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.


```yaml
notifiers:
  - name: notification-channel-1
    type: slack
    uid: notifier1
    # either
    org_id: 2
    # or
    org_name: Main Org.
    is_default: true
    send_reminder: true
    frequency: 1h
    disable_resolve_message: false
    # See `Supported Settings` section for settings supported for each
    # alert notification type.
    settings:
      recipient: 'XXX'
      token: 'xoxb'
      uploadImage: true
      url: https://slack.com

delete_notifiers:
  - name: notification-channel-2
    # default org_id: 1
```
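
A minimal values sketch that switches the notifier sidecar on, using the default label shown in the chart values earlier on this page:

```yaml
sidecar:
  notifiers:
    enabled: true
    # Secrets/ConfigMaps carrying this label are loaded as notification channels
    label: grafana_notifier
```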

## How to serve Grafana with a path prefix (/grafana)

In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.

```yaml
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/rewrite-target: /$1
    nginx.ingress.kubernetes.io/use-regex: "true"

  path: /grafana/?(.*)
  hosts:
    - k8s.example.dev

grafana.ini:
  server:
    root_url: http://localhost:3000/grafana # this host can be localhost
```
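
Assuming DNS for `k8s.example.dev` already points at your ingress controller, you can sanity-check the prefix against Grafana's health endpoint:

```console
# should return HTTP 200 with a small JSON body if the rewrite works
curl -i http://k8s.example.dev/grafana/api/health
```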

## How to securely reference secrets in grafana.ini

This example uses Grafana's file provider for secret values together with the `extraSecretMounts` configuration flag to mount the secret into the pod.

In grafana.ini:

```yaml
grafana.ini:
  [auth.generic_oauth]
  enabled = true
  client_id = $__file{/etc/secrets/auth_generic_oauth/client_id}
  client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret}
```

Existing secret, or created along with helm:

```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: auth-generic-oauth-secret
type: Opaque
stringData:
  client_id: <value>
  client_secret: <value>
```

Include in the `extraSecretMounts` configuration flag:

```yaml
- extraSecretMounts:
  - name: auth-generic-oauth-secret-mount
    secretName: auth-generic-oauth-secret
    defaultMode: 0440
    mountPath: /etc/secrets/auth_generic_oauth
    readOnly: true
```

### extraSecretMounts using a Container Storage Interface (CSI) provider

```yaml
- extraSecretMounts:
  - name: secrets-store-inline
    mountPath: /run/secrets
    readOnly: true
    csi:
      driver: secrets-store.csi.k8s.io
      readOnly: true
      volumeAttributes:
        secretProviderClass: "my-provider"
      nodePublishSecretRef:
        name: akv-creds
```

## Image Renderer Plug-In

This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md).

```yaml
imageRenderer:
  enabled: true
```
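
Beyond simply enabling it, the renderer deployment can be sized through the same values block shown earlier on this page. The resource figures below are only the commented-out defaults from the chart values, not a recommendation:

```yaml
imageRenderer:
  enabled: true
  replicas: 1
  resources:
    limits:
      cpu: 100m
      memory: 100Mi
    requests:
      cpu: 50m
      memory: 50Mi
```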

### Image Renderer NetworkPolicy

By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance.
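
The corresponding values, as listed in the chart defaults earlier on this page, would be (a sketch):

```yaml
imageRenderer:
  networkPolicy:
    # only allow ingress to the renderer from the grafana pods
    limitIngress: true
    # optionally also restrict egress from the renderer
    limitEgress: false
```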


