 [[/opt/datadog-agent/etc/]]datadog.yaml

cat /opt/datadog-agent/etc/datadog.yaml | wc -l
3330
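
Most of those 3330 lines are comments and commented-out defaults; a rough way to count only the active (non-comment, non-blank) lines, assuming GNU grep, is:

 grep -cv -e '^[[:space:]]*#' -e '^[[:space:]]*$' /opt/datadog-agent/etc/datadog.yaml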

== Example ==


#########################
## Basic Configuration ##
#########################

## @param api_key - string - required
## @env DD_API_KEY - string - required
## The Datadog API key used by your Agent to submit metrics and events to Datadog.
## Create a new API key here: https://app.datadoghq.com/organization-settings/api-keys .
## Read more about API keys here: https://docs.datadoghq.com/account_management/api-app-keys/#api-keys .
api_key: 123455678890

## @param app_key - string - optional
## The application key used to access Datadog's programmatic API.
## Create a new application key here: https://app.datadoghq.com/organization-settings/application-keys .
## Read more about application keys here: https://docs.datadoghq.com/account_management/api-app-keys/#application-keys .
#
# app_key:

## @param site - string - optional - default: datadoghq.com
## @env DD_SITE - string - optional - default: datadoghq.com
## The site of the Datadog intake to send Agent data to.
## Set to 'datadoghq.eu' to send data to the EU site.
## Set to 'us3.datadoghq.com' to send data to the US3 site.
## Set to 'us5.datadoghq.com' to send data to the US5 site.
## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
## Set to 'ddog-gov.com' to send data to the US1-FED site.
#
site: datadoghq.com
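
## Illustrative sketch (not part of the stock file): pointing an Agent at the EU site
## instead of the default US1 site. The API key below is a placeholder.
#
# api_key: <YOUR_API_KEY>
# site: datadoghq.eu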

## @param dd_url - string - optional - default: https://app.datadoghq.com
## @env DD_DD_URL - string - optional - default: https://app.datadoghq.com
## @env DD_URL - string - optional - default: https://app.datadoghq.com
## The host of the Datadog intake server to send metrics to, only set this option
## if you need the Agent to send metrics to a custom URL, it overrides the site
## setting defined in "site". It does not affect APM, Logs or Live Process intake which have their
## own "*_dd_url" settings.
## If DD_DD_URL and DD_URL are both set, DD_DD_URL is used in priority.
#
# dd_url: https://app.datadoghq.com

## @param proxy - custom object - optional
## @env DD_PROXY_HTTP - string - optional
## @env DD_PROXY_HTTPS - string - optional
## @env DD_PROXY_NO_PROXY - space separated list of strings - optional
## If you need a proxy to connect to the Internet, provide it here (default:
## disabled). Refer to https://docs.datadoghq.com/agent/proxy/ to understand how to use these settings.
## For Logs proxy information, refer to https://docs.datadoghq.com/agent/proxy/#proxy-for-logs
#
# proxy:
#   https: http://<USERNAME>:<PASSWORD>@<PROXY_SERVER_FOR_HTTPS>:<PORT>
#   http: http://<USERNAME>:<PASSWORD>@<PROXY_SERVER_FOR_HTTP>:<PORT>
#   no_proxy:
#     - <HOSTNAME-1>
#     - <HOSTNAME-2>
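
## Illustrative sketch (proxy hostname and port are hypothetical): route Agent traffic
## through a corporate proxy while keeping the cloud metadata endpoint direct.
#
# proxy:
#   https: http://proxy.example.internal:3128
#   http: http://proxy.example.internal:3128
#   no_proxy:
#     - 169.254.169.254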

## @param skip_ssl_validation - boolean - optional - default: false
## @env DD_SKIP_SSL_VALIDATION - boolean - optional - default: false
## Setting this option to "true" tells the Agent to skip validation of SSL/TLS certificates.
#
# skip_ssl_validation: false

## @param sslkeylogfile - string - optional - default: ""
## @env DD_SSLKEYLOGFILE - string - optional - default: ""
## sslkeylogfile specifies a destination for TLS master secrets
## in NSS key log format to allow external programs
## such as Wireshark to decrypt TLS connections.
## For more details, see https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
## Use of sslkeylogfile compromises security and should only be
## used for debugging.
# sslkeylogfile: ""


## @param min_tls_version - string - optional - default: "tlsv1.2"
## @env DD_MIN_TLS_VERSION - string - optional - default: "tlsv1.2"
## This option defines the minimum TLS version that will be used when
## submitting data to the Datadog intake specified in "site" or "dd_url".
## This parameter defaults to "tlsv1.2".
## Possible values are: tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3; values are case-
## insensitive.
#
# min_tls_version: "tlsv1.2"

## @param hostname - string - optional - default: auto-detected
## @env DD_HOSTNAME - string - optional - default: auto-detected
## Force the hostname value.
#
# hostname: <HOSTNAME_NAME>

## @param hostname_file - string - optional
## @env DD_HOSTNAME_FILE - string - optional
## In some environments, auto-detection of the hostname is not adequate and
## environment variables cannot be used to set the value. In such cases, the
## file on the host can also be used to provide an appropriate value. If
## 'hostname' value has been set to a non-empty value, this option is ignored.
#
# hostname_file: /var/lib/cloud/data/instance-id

## @param hostname_fqdn - boolean - optional - default: false
## @env DD_HOSTNAME_FQDN - boolean - optional - default: false
## When the Agent relies on the OS to determine the hostname, make it use the
## FQDN instead of the short hostname. Recommended value: true
## More information at https://dtdg.co/flag-hostname-fqdn
#
# hostname_fqdn: false
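
## Illustrative sketch: derive the hostname from a cloud instance-id file and prefer
## the FQDN whenever the OS is consulted (the path is an example; 'hostname' itself
## stays unset so the file takes effect).
#
# hostname_file: /var/lib/cloud/data/instance-id
# hostname_fqdn: true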

## @param hostname_trust_uts_namespace - boolean - optional - default: false
## @env DD_HOSTNAME_TRUST_UTS_NAMESPACE - boolean - optional - default: false
## By default the Agent does not trust the hostname value retrieved from non-root UTS namespace,
## as it's usually a generated name, unrelated to the host (e.g. when running in a container).
## When enabled, the Agent will trust the value retrieved from non-root UTS namespace instead of failing
## hostname resolution.
## (Linux only)
#
# hostname_trust_uts_namespace: false

## @param host_aliases - list of strings - optional
## @env DD_HOST_ALIASES - space separated list of strings - optional
## List of host aliases to report in addition to any aliases collected
## automatically from cloud providers.
## More information at
## https://docs.datadoghq.com/agent/faq/how-datadog-agent-determines-the-hostname/?tab=agentv6v7#host-aliases
#
# host_aliases:
#   - <ALIAS-1>
#   - <ALIAS-2>

## @param tags  - list of key:value elements - optional
## @env DD_TAGS - space separated list of strings - optional
## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent.
##
## This configuration value merges with `DD_EXTRA_TAGS`, allowing some
## tags to be set in a configuration file (`tags`), and additional tags to be added
## with an environment variable (`DD_EXTRA_TAGS`).
##
## Learn more about tagging: https://docs.datadoghq.com/tagging/
#
# tags:
#   - team:infra
#   - <TAG_KEY>:<TAG_VALUE>

## @param extra_tags  - list of key:value elements - optional
## @env DD_EXTRA_TAGS - space separated list of strings - optional
## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent.
##
## This configuration value merges with `tags`, allowing some
## tags to be set in a configuration file (`tags`), and additional tags to be added
## with an environment variable (`DD_EXTRA_TAGS`).
##
## Learn more about tagging: https://docs.datadoghq.com/tagging/
#
# extra_tags:
#   - region:northerly
#   - <TAG_KEY>:<TAG_VALUE>
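
## Illustrative sketch (values are hypothetical): tags set here merge with any
## DD_EXTRA_TAGS set in the Agent's environment, so a host with the settings below
## and DD_EXTRA_TAGS="region:eu-west-1" carries team:infra, env:prod and region:eu-west-1.
#
# tags:
#   - team:infra
#   - env:prod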

## @param env - string - optional
## @env DD_ENV - string - optional
## The environment name where the agent is running. Attached in-app to every
## metric, event, log, trace, and service check emitted by this Agent.
#
# env: <environment name>

## @param tag_value_split_separator - map - optional
## @env DD_TAG_VALUE_SPLIT_SEPARATOR - list of key:value strings - optional
## Split tag values according to a given separator. Only applies to host tags,
## and tags coming from container integrations. It does not apply to tags on dogstatsd metrics,
## and tags collected by other integrations.
##
## Example use-case:
##
##  With a raw collected tag "foo:1;2;3", using the following configuration:
##
##  tag_value_split_separator:
##    foo: ;
##
##  results in the raw tag being transformed into "foo:1", "foo:2", "foo:3" tags
#
# tag_value_split_separator:
#   <TAG_KEY>: <SEPARATOR>

## @param checks_tag_cardinality - string - optional - default: low
## @env DD_CHECKS_TAG_CARDINALITY - string - optional - default: low
## Configure the level of granularity of tags to send for checks metrics and events. Choices are:
##   * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...)
##   * orchestrator: add tags about orchestrator-level objects (pods in Kubernetes, tasks in ECS or Mesos, ...)
##   * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...)
## WARNING: sending container tags for checks metrics may create more metrics
## (one per container instead of one per host). This may impact your custom metrics billing.
#
# checks_tag_cardinality: low

## @param dogstatsd_tag_cardinality - string - optional - default: low
## @env DD_DOGSTATSD_TAG_CARDINALITY - string - optional - default: low
## Configure the level of granularity of tags to send for DogStatsD metrics and events. Choices are:
##   * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...)
##   * orchestrator: add tags about orchestrator-level objects (pods in Kubernetes, tasks in ECS or Mesos, ...)
##   * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...)
##
## WARNING: sending container tags for dogstatsd metrics may create more metrics
## (one per container instead of one per host). This may impact your custom metrics billing.
#
# dogstatsd_tag_cardinality: low
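
## Illustrative sketch: on a containerized host where per-pod tags are wanted on both
## check and DogStatsD metrics (be mindful of the custom metrics billing warning above).
#
# checks_tag_cardinality: orchestrator
# dogstatsd_tag_cardinality: orchestrator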

## @param histogram_aggregates - list of strings - optional - default: ["max", "median", "avg", "count"]
## @env DD_HISTOGRAM_AGGREGATES - space separated list of strings - optional - default: max median avg count
## Configure which aggregated value to compute.
## Possible values are: min, max, median, avg, sum and count.
#
# histogram_aggregates:
#   - max
#   - median
#   - avg
#   - count

## @param histogram_percentiles - list of strings - optional - default: ["0.95"]
## @env DD_HISTOGRAM_PERCENTILES - space separated list of strings - optional - default: 0.95
## Configure which percentiles are computed by the Agent. It must be a list of floats between 0 and 1.
## Warning: percentiles must be specified as YAML strings.
#
# histogram_percentiles:
#   - "0.95"

## @param histogram_copy_to_distribution - boolean - optional - default: false
## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION - boolean - optional - default: false
## Copy histogram values to distributions for true global distributions (in beta)
## Note: This increases the number of custom metrics created.
#
# histogram_copy_to_distribution: false
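
## Illustrative sketch (aggregates and percentiles are example choices): report only
## max/avg aggregates plus the median and 95th percentile for histograms submitted
## to this Agent. Percentiles are quoted because they must be YAML strings.
#
# histogram_aggregates:
#   - max
#   - avg
# histogram_percentiles:
#   - "0.50"
#   - "0.95"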

## @param histogram_copy_to_distribution_prefix - string - optional
## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION_PREFIX - string - optional
## A prefix to add to distribution metrics created when histogram_copy_to_distribution is true
#
# histogram_copy_to_distribution_prefix: "<PREFIX>"

## @param aggregator_stop_timeout - integer - optional - default: 2
## @env DD_AGGREGATOR_STOP_TIMEOUT - integer - optional - default: 2
## When stopping the agent, the Aggregator will try to flush out data ready for
## aggregation (metrics, events, ...). Data are flushed to the Forwarder in order
## to be sent to Datadog, therefore the Agent might take at most
## 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit.
##
## You can set the maximum amount of time, in seconds, allocated to the
## Aggregator to do so. You can disable this feature by setting
## 'aggregator_stop_timeout' to 0.
#
# aggregator_stop_timeout: 2

## @param aggregator_buffer_size - integer - optional - default: 100
## @env DD_AGGREGATOR_BUFFER_SIZE - integer - optional - default: 100
## The default buffer size for the aggregator is a sane value for most use cases;
## however, it can be useful to set it manually in order to trade
## RSS usage for better performance.
#
# aggregator_buffer_size: 100

## @param forwarder_timeout - integer - optional - default: 20
## @env DD_FORWARDER_TIMEOUT - integer - optional - default: 20
## Forwarder timeout in seconds
#
# forwarder_timeout: 20

## @param forwarder_retry_queue_payloads_max_size - integer - optional - default: 15728640 (15MB)
## @env DD_FORWARDER_RETRY_QUEUE_PAYLOADS_MAX_SIZE - integer - optional - default: 15728640 (15MB)
## It defines the maximum size in bytes of all the payloads in the forwarder's retry queue.
## The actual memory used is greater than the payloads size as there are extra fields like HTTP headers,
## but no more than 2.5 times the payload size.
#
# forwarder_retry_queue_payloads_max_size: 15728640

## @param forwarder_num_workers - integer - optional - default: 1
## The number of workers used by the forwarder.
#
# forwarder_num_workers: 1

## @param forwarder_stop_timeout - integer - optional - default: 2
## @env DD_FORWARDER_STOP_TIMEOUT - integer - optional - default: 2
## When stopping the agent, the Forwarder will try to flush all new
## transactions (not the ones in retry state). New transactions will be created
## as the Aggregator flushes its internal data too, therefore the Agent might take
## at most 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit.
##
## You can set the maximum amount of time, in seconds, allocated to the
## Forwarder to send those transactions.  You can disable this feature by setting
## 'forwarder_stop_timeout' to 0.
#
# forwarder_stop_timeout: 2

## @param forwarder_storage_max_size_in_bytes - integer - optional - default: 0
## @env DD_FORWARDER_STORAGE_MAX_SIZE_IN_BYTES - integer - optional - default: 0
## When the retry queue of the forwarder is full, `forwarder_storage_max_size_in_bytes`
## defines the amount of disk space the Agent can use to store transactions on the disk.
## When `forwarder_storage_max_size_in_bytes` is `0`, the transactions are never stored on the disk.
#
# forwarder_storage_max_size_in_bytes: 50000000

## @param forwarder_storage_max_disk_ratio - float - optional - default: 0.8
## @env DD_FORWARDER_STORAGE_MAX_DISK_RATIO - float - optional - default: 0.8
## `forwarder_storage_max_disk_ratio` defines the disk capacity limit for storing transactions.
## `0.8` means the Agent can store transactions on disk until `forwarder_storage_max_size_in_bytes`
## is reached or when the disk mount for `forwarder_storage_path` exceeds 80% of the disk capacity,
## whichever is lower.
#
# forwarder_storage_max_disk_ratio: 0.8
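
## Illustrative sketch: allow up to 50 MB of on-disk retry storage, capped at 80% of
## the capacity of the disk backing `forwarder_storage_path`, whichever is reached first.
#
# forwarder_storage_max_size_in_bytes: 50000000
# forwarder_storage_max_disk_ratio: 0.8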

## @param forwarder_outdated_file_in_days - integer - optional - default: 10
## @env DD_FORWARDER_OUTDATED_FILE_IN_DAYS - integer - optional - default: 10
## This value specifies how many days the overflow transactions will remain valid before
## being discarded. During the Agent restart, if a retry file contains transactions that were
## created more than `forwarder_outdated_file_in_days` days ago, they are removed.
#
# forwarder_outdated_file_in_days: 10

## @param forwarder_high_prio_buffer_size - int - optional - default: 100
## Defines the size of the high prio buffer.
## Increasing the buffer size can help if payload drops occur due to high prio buffer being full.
#
# forwarder_high_prio_buffer_size: 100

## @param forwarder_low_prio_buffer_size - int - optional - default: 100
## Defines the size of the low prio buffer.
#
# forwarder_low_prio_buffer_size: 100

## @param forwarder_requeue_buffer_size - int - optional - default: 100
## Defines the size of the requeue prio buffer.
#
# forwarder_requeue_buffer_size: 100

## @param forwarder_backoff_base - int - optional - default: 2
## @env DD_FORWARDER_BACKOFF_BASE - integer - optional - default: 2
## Defines the rate of exponential growth, and the first retry interval range.
## Do not set a lower value than the default. You may increase it if you use a proxy that benefits from a
## higher rate of exponential growth.
# forwarder_backoff_base: 2

## @param forwarder_backoff_max - int - optional - default: 64
## @env DD_FORWARDER_BACKOFF_MAX - integer - optional - default: 64
## Defines the maximum number of seconds to wait for a retry.
## Do not set a lower value than the default. You may increase it if you use a proxy that benefits from a
## higher maximum backoff time.
# forwarder_backoff_max: 64

## @param cloud_provider_metadata - list of strings -  optional - default: ["aws", "gcp", "azure", "alibaba", "oracle", "ibm"]
## @env DD_CLOUD_PROVIDER_METADATA - space separated list of strings - optional - default: aws gcp azure alibaba oracle ibm
## This option restricts which cloud provider endpoints will be used by the
## Agent to retrieve metadata. By default the Agent tries the AWS, GCP, Azure,
## Alibaba, Oracle and IBM providers. Some cloud providers are not enabled by
## default so as not to trigger security alerts when querying unknown IPs (for
## example, when enabling Tencent on AWS).
## Setting an empty list disables querying any cloud metadata endpoints
## (falling back on system metadata). Disabling metadata for the cloud provider in
## which an Agent runs may result in duplicated hosts in your Datadog account and
## missing Autodiscovery features.
##
## Possible values are:
## "aws"     AWS EC2, ECS/Fargate
## "gcp"     Google Cloud Provider
## "azure"   Azure
## "alibaba" Alibaba
## "tencent" Tencent
## "oracle"  Oracle Cloud
## "ibm"     IBM Cloud
#
# cloud_provider_metadata:
#   - "aws"
#   - "gcp"
#   - "azure"
#   - "alibaba"
#   - "oracle"
#   - "ibm"

## @param collect_ec2_tags - boolean - optional - default: false
## @env DD_COLLECT_EC2_TAGS - boolean - optional - default: false
## Collect AWS EC2 custom tags as host tags.
## Requires one of:
##  - `collect_ec2_tags_use_imds: true` and configuration of the
##    EC2 instance to allow tags in instance metadata; or
##  - configuration of the EC2 instance to have an IAM role with
##    the `EC2:DescribeTags` permission.
## See docs for further details:
## https://docs.datadoghq.com/integrations/faq/how-do-i-pull-my-ec2-tags-without-using-the-aws-integration/
#
# collect_ec2_tags: false

## @param exclude_ec2_tags - list of strings - optional - default: []
## @env DD_EXCLUDE_EC2_TAGS - space separated list of strings - optional - default: []
## EC2 tags to exclude from being converted into host tags -- only applicable when collect_ec2_tags is true. This does
## not impact tags collected by the AWS Integration (see https://docs.datadoghq.com/integrations/amazon_web_services/
## for more information on the AWS integration).
#
# exclude_ec2_tags: []

## @param collect_ec2_tags_use_imds - boolean - optional - default: false
## @env DD_COLLECT_EC2_TAGS_USE_IMDS - boolean - optional - default: false
## Use instance metadata service (IMDS) instead of EC2 API to collect AWS EC2 custom tags.
## Requires `collect_ec2_tags`.
#
# collect_ec2_tags_use_imds: false
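
## Illustrative sketch: collect EC2 custom tags via the instance metadata service,
## which assumes the instance has been configured to expose tags in instance metadata.
#
# collect_ec2_tags: true
# collect_ec2_tags_use_imds: true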

## @param ec2_metadata_timeout - integer - optional - default: 300
## @env DD_EC2_METADATA_TIMEOUT - integer - optional - default: 300
## Timeout in milliseconds on calls to the AWS EC2 metadata endpoints.
#
# ec2_metadata_timeout: 300

## @param ec2_prefer_imdsv2 - boolean - optional - default: false
## @env DD_EC2_PREFER_IMDSV2 - boolean - optional - default: false
## If this flag is true then the agent will request EC2 metadata using IMDS v2,
## which offers additional security for accessing metadata. However, in some
## situations (such as a containerized agent on a plain EC2 instance) it may
## require additional configuration on the AWS side. See the AWS guidelines
## for further details:
## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#instance-metadata-transition-to-version-2
#
# ec2_prefer_imdsv2: false

## @param collect_gce_tags - boolean - optional - default: true
## @env DD_COLLECT_GCE_TAGS - boolean - optional - default: true
## Collect Google Cloud Engine metadata as host tags
#
# collect_gce_tags: true

## @param exclude_gce_tags - list of strings - optional - default: ["bosh_settings" ,"cli-cert" ,"common-psm1" ,"configure-sh" ,"containerd-configure-sh" ,"disable-address-manager" ,"disable-legacy-endpoints" ,"enable-oslogin" ,"gce-container-declaration" ,"google-container-manifest" ,"ipsec-cert" ,"k8s-node-setup-psm1" ,"kube-env" ,"kubeconfig" ,"kubelet-config" ,"serial-port-logging-enable" ,"shutdown-script" ,"ssh-keys" ,"sshKeys" ,"ssl-cert" ,"startup-script" ,"user-data" ,"windows-keys" ,"windows-startup-script-ps1"]
## @env DD_EXCLUDE_GCE_TAGS - space separated list of strings - optional - default: bosh_settings cli-cert common-psm1 configure-sh containerd-configure-sh disable-address-manager disable-legacy-endpoints enable-oslogin gce-container-declaration google-container-manifest ipsec-cert k8s-node-setup-psm1 kube-env kubeconfig kubelet-config serial-port-logging-enable shutdown-script ssh-keys sshKeys ssl-cert startup-script user-data windows-keys windows-startup-script-ps1
## Google Cloud Engine metadata attribute to exclude from being converted into
## host tags -- only applicable when collect_gce_tags is true.
#
# exclude_gce_tags:
#   - "bosh_settings"
#   - "cli-cert"
#   - "common-psm1"
#   - "configure-sh"
#   - "containerd-configure-sh"
#   - "disable-address-manager"
#   - "disable-legacy-endpoints"
#   - "enable-oslogin"
#   - "gce-container-declaration"
#   - "google-container-manifest"
#   - "ipsec-cert"
#   - "k8s-node-setup-psm1"
#   - "kube-env"
#   - "kubeconfig"
#   - "kubelet-config"
#   - "serial-port-logging-enable"
#   - "shutdown-script"
#   - "ssh-keys"
#   - "sshKeys"
#   - "ssl-cert"
#   - "startup-script"
#   - "user-data"
#   - "windows-keys"
#   - "windows-startup-script-ps1"

## @param gce_send_project_id_tag - bool - optional - default: false
## @env DD_GCE_SEND_PROJECT_ID_TAG - bool - optional - default: false
## Send the project ID host tag with the `project_id:` tag key in addition to
## the `project:` tag key.
#
# gce_send_project_id_tag: false

## @param gce_metadata_timeout - integer - optional - default: 1000
## @env DD_GCE_METADATA_TIMEOUT - integer - optional - default: 1000
## Timeout in milliseconds on calls to the GCE metadata endpoints.
#
# gce_metadata_timeout: 1000

## @param azure_hostname_style - string - optional - default: "os"
## @env DD_AZURE_HOSTNAME_STYLE - string - optional - default: "os"
## Changes how agent hostname is set on Azure virtual machines.
##
## Possible values:
##   "os" - use the hostname reported by the operating system (default)
##   "name" - use the instance name
##   "name_and_resource_group" - use a combination of the instance name and resource group name
##   "full" - use a combination of the instance name, resource group name and subscription id
##   "vmid" - use the instance id
#
# azure_hostname_style: "os"

## @param scrubber - custom object - optional
## Configuration for scrubbing sensitive information from the Agent's logs, configuration and flares.
#
# scrubber:
#
  ## @param scrubber.additional_keys - list of strings - optional
  ## @env DD_SCRUBBER_ADDITIONAL_KEYS - space-separated list of strings - optional
  ## By default, the Agent removes known sensitive keys from Agent and integrations YAML configs before
  ## including them in the flare.
  ## Use this parameter to define additional sensitive keys that the Agent should scrub from
  ## the YAML files included in the flare.
  #
  # additional_keys:
  #   - "sensitive_key_1"
  #   - "sensitive_key_2"

## @param no_proxy_nonexact_match - boolean - optional - default: false
## @env DD_NO_PROXY_NONEXACT_MATCH - boolean - optional - default: false
## Enable more flexible no_proxy matching. See https://godoc.org/golang.org/x/net/http/httpproxy#Config
## for more information on accepted matching criteria.
#
# no_proxy_nonexact_match: false

## @param use_proxy_for_cloud_metadata - boolean - optional - default: false
## @env DD_USE_PROXY_FOR_CLOUD_METADATA - boolean - optional - default: false
## By default cloud provider IPs are added to the transport's `no_proxy` list.
## Use this parameter to remove them from the `no_proxy` list.
#
# use_proxy_for_cloud_metadata: false

## @param inventories_configuration_enabled - boolean - optional - default: true
## @env DD_INVENTORIES_CONFIGURATION_ENABLED - boolean - optional - default: true
## By default the Agent sends its own configuration to Datadog to be displayed in the `Agent Configuration` section of
## the host detail panel. See https://docs.datadoghq.com/infrastructure/list/#agent-configuration for more information.
##
## The Agent configuration is scrubbed of any sensitive information.
#
# inventories_configuration_enabled: true

## @param auto_exit - custom object - optional
## Configuration for the automatic exit mechanism: the Agent stops when some conditions are met.
#
# auto_exit:

  ## @param noprocess - custom object - optional
  ## Configure the `noprocess` automatic exit method.
  ## Detect when no other processes (non-agent) are running to trigger automatic exit. `HOST_PROC` is taken into account when gathering processes.
  ## Feature is only supported on POSIX systems.
  #
  # noprocess:
    ## @param enabled - boolean - optional - default: false
    ## @env DD_AUTO_EXIT_NOPROCESS_ENABLED - boolean - optional - default: false
    ## Enable the `noprocess` method
    #
    # enabled: false

    ## @param excluded_processes - list of strings - optional
    ## @env DD_AUTO_EXIT_NOPROCESS_EXCLUDED_PROCESSES - space separated list of strings - optional
    ## List of regular expressions to exclude extra processes (on top of built-in list).
    #
    # excluded_processes: []

  ## @param validation_period - integer - optional - default: 60
  ## @env DD_AUTO_EXIT_VALIDATION_PERIOD - integer - optional - default: 60
  ## Time (in seconds) delay during which the auto exit validates that the selected method continuously detects an exit condition, before exiting.
  ## The value is verified every 30s. By default, three consecutive checks need to return true to trigger an automatic exit.
  #
  # validation_period: 60
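
## Illustrative sketch: on a short-lived batch host, let the Agent exit automatically
## once no non-agent processes remain for the full validation period.
#
# auto_exit:
#   noprocess:
#     enabled: true
#   validation_period: 60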


## @param fips - custom object - optional
## [BETA] Enter specific configurations for using the FIPS proxy.
## Uncomment this parameter and the one below to enable them.
#
# fips:

  ## @param enabled - boolean - optional - default: false
  ## @env DD_FIPS_ENABLED - boolean - optional - default: false
  ## This feature is in BETA.
  ##
  ## Enable the use of the FIPS proxy to send data to the Datadog backend. Enabling this forces all outgoing traffic
  ## from the Agent through the local proxy.
  ## Note that enabling this does not by itself make the Datadog Agent FIPS compliant; it forces all outgoing
  ## traffic through a local FIPS-compliant proxy. The FIPS proxy needs to be installed locally in addition to the Agent.
  ##
  ## When this is set to true, the following settings are overridden, ignoring the values from the
  ## configuration:
  ## - dd_url
  ## - apm_config.apm_dd_url
  ## - apm_config.profiling_dd_url
  ## - apm_config.telemetry.dd_url
  ## - process_config.process_dd_url
  ## - logs_config.use_http
  ## - logs_config.logs_no_ssl
  ## - logs_config.logs_dd_url
  ## - database_monitoring.metrics.dd_url
  ## - database_monitoring.activity.dd_url
  ## - database_monitoring.samples.dd_url
  ## - compliance_config.endpoints.dd_url
  ## - runtime_security_config.endpoints.dd_url
  ## - network_devices.metadata.dd_url
  #
  ## The agent will also ignore 'proxy.*' settings and environment variables related to proxy (HTTP_PROXY, HTTPS_PROXY,
  ## DD_PROXY_HTTP and DD_PROXY_HTTPS).
  #
  # enabled: false

  ## @param local_address - string - optional - default: localhost
  ## @env DD_FIPS_LOCAL_ADDRESS - string - optional - default: localhost
  ## The local address that the FIPS proxy will bind ports on.
  #
  # local_address: localhost
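
## Illustrative sketch: send Agent traffic through a locally installed FIPS proxy
## bound to localhost. The proxy itself must be installed separately; this setting
## alone does not make the Agent FIPS compliant.
#
# fips:
#   enabled: true
#   local_address: localhost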

## @param observability_pipelines_worker - custom object - optional
## Configuration for forwarding telemetry to an Observability Pipelines Worker instead of Datadog.
## https://www.datadoghq.com/product/observability-pipelines/
## Note: This config is interchangeable with `vector`
#
# observability_pipelines_worker:

  ## @param  metrics - custom object - optional
  ## Specific configurations for metrics
  #
  # metrics:

    ## @param enabled - boolean - optional - default: false
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_ENABLED - boolean - optional - default: false
    ## Enables forwarding of metrics to an Observability Pipelines Worker
    #
    # enabled: false

    ## @param url - string - optional - default: ""
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_URL - string - optional - default: ""
    ## URL endpoint for the Observability Pipelines Worker to send metrics to
    #
    # url: "http//127.0.0.1:8080"

  ## @param  logs - custom object - optional
  ## Specific configurations for logs
  #
  # logs:

    ## @param enabled - boolean - optional - default: false
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_ENABLED - boolean - optional - default: false
    ## Enables forwarding of logs to an Observability Pipelines Worker
    #
    # enabled: false

    ## @param url - string - optional - default: ""
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_URL - string - optional - default: ""
    ## URL endpoint for the Observability Pipelines Worker to send logs to
    #
    # url: "http//127.0.0.1:8080"

  ## @param  traces - custom object - optional
  ## Specific configurations for traces
  #
  # traces:

    ## @param enabled - boolean - optional - default: false
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_ENABLED - boolean - optional - default: false
    ## Enables forwarding of traces to an Observability Pipelines Worker
    #
    # enabled: false

    ## @param url - string - optional - default: ""
    ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_URL - string - optional - default: ""
    ## URL endpoint for the Observability Pipelines Worker to send traces to
    #
    # url: "http//127.0.0.1:8080"
 .../...




== See also ==
