metricbeat.reference.yml

The following reference file is available with your Metricbeat installation. It shows all non-deprecated Metricbeat options. You can copy from this file and paste configurations into the metricbeat.yml file to customize it.
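
For example, to enable Redis monitoring, you might copy the redis module block into metricbeat.yml and adjust it for your environment (a minimal sketch; the host and period shown are illustrative):

metricbeat.modules:
- module: redis
  metricsets: ["info", "keyspace"]
  period: 10s
  hosts: ["127.0.0.1:6379"]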

The reference file is located in the same directory as the metricbeat.yml file. To locate the file, see Directory layout.

The contents of the file are included here for your convenience.

########################## Metricbeat Configuration ###########################

# This file is a full configuration example documenting all non-deprecated
# options in comments. For a shorter configuration example that contains only
# the most common options, please see metricbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html

#============================  Config Reloading ===============================

# Config reloading allows you to dynamically load modules. Each monitored
# file must contain a list of one or more modules.
metricbeat.config.modules:

  # Glob pattern for configuration reloading
  path: ${path.config}/modules.d/*.yml

  # Period on which files under path should be checked for changes
  reload.period: 10s

  # Set to true to enable config reloading
  reload.enabled: false
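
  # For reference, a minimal sketch of a file this glob could match. Each file
  # holds a list of modules; the module and host values here are illustrative:
  #
  # - module: redis
  #   metricsets: ["info"]
  #   period: 10s
  #   hosts: ["127.0.0.1:6379"]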

# Maximum amount of time to randomly delay the start of a metricset. Use 0 to
# disable startup delay.
metricbeat.max_start_delay: 10s

#============================== Autodiscover ===================================

# Autodiscover allows you to detect changes in the system and spawn new modules
# as they happen.

#metricbeat.autodiscover:
  # List of enabled autodiscover providers
#  providers:
#    - type: docker
#      templates:
#        - condition:
#            equals.docker.container.image: etcd
#          config:
#            - module: etcd
#              metricsets: ["leader", "self", "store"]
#              period: 10s
#              hosts: ["${host}:2379"]

#=========================== Timeseries instance ===============================

# Enabling this will add a `timeseries.instance` keyword field to all metric
# events. For a given metricset, this field will be unique for every single item
# being monitored.
# This setting is experimental.

#timeseries.enabled: false


#==========================  Modules configuration =============================
metricbeat.modules:

#-------------------------------- System Module --------------------------------
- module: system
  metricsets:
    - cpu             # CPU usage
    - load            # CPU load averages
    - memory          # Memory usage
    - network         # Network IO
    - process         # Per process metrics
    - process_summary # Process summary
    - uptime          # System Uptime
    - socket_summary  # Socket summary
    #- core           # Per CPU core usage
    #- diskio         # Disk IO
    #- filesystem     # File system usage for each mountpoint
    #- fsstat         # File system summary metrics
    #- raid           # Raid
    #- socket         # Sockets and connection info (linux only)
    #- service        # systemd service information
  enabled: true
  period: 10s
  processes: ['.*']

  # Configure the mount point of the host's filesystem for use in monitoring a host from within a container
  #system.hostfs: "/hostfs"

  # Configure the metric types that are included by these metricsets.
  cpu.metrics:  ["percentages","normalized_percentages"]  # The other available option is ticks.
  core.metrics: ["percentages"]  # The other available option is ticks.

  # A list of filesystem types to ignore. The filesystem metricset will not
  # collect data from filesystems matching any of the specified types, and
  # fsstats will not include data from these filesystems in its summary stats.
  # If not set, types associated with virtual filesystems are automatically
  # added when this information is available in the system (e.g. the list of
  # `nodev` types in `/proc/filesystems`).
  #filesystem.ignore_types: []

  # These options allow you to filter out all processes that are not
  # in the top N by CPU or memory, in order to reduce the number of documents created.
  # If both the `by_cpu` and `by_memory` options are used, the union of the two sets
  # is included.
  #process.include_top_n:

    # Set to false to disable this feature and include all processes
    #enabled: true

    # How many processes to include from the top by CPU. The processes are sorted
    # by the `system.process.cpu.total.pct` field.
    #by_cpu: 0

    # How many processes to include from the top by memory. The processes are sorted
    # by the `system.process.memory.rss.bytes` field.
    #by_memory: 0

  # If false, cmdline of a process is not cached.
  #process.cmdline.cache.enabled: true

  # Enable collection of cgroup metrics from processes on Linux.
  #process.cgroups.enabled: true

  # A list of regular expressions used to whitelist environment variables
  # reported with the process metricset's events. Defaults to empty.
  #process.env.whitelist: []
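
  # For example, to report only PATH and locale-related variables
  # (illustrative patterns, not defaults):
  #process.env.whitelist: ['^PATH$', '^LANG$', '^LC_.*']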

  # Include the cumulative CPU tick values with the process metrics. Defaults
  # to false.
  #process.include_cpu_ticks: false

  # Raid mount point to monitor
  #raid.mount_point: '/'

  # Configure reverse DNS lookup on remote IP addresses in the socket metricset.
  #socket.reverse_lookup.enabled: false
  #socket.reverse_lookup.success_ttl: 60s
  #socket.reverse_lookup.failure_ttl: 60s

  # Diskio configurations
  #diskio.include_devices: []

  # Filter systemd services by status or sub-status
  #service.state_filter: ["active"]

  # Filter systemd services based on a name pattern
  #service.pattern_filter: ["ssh*", "nfs*"]

#------------------------------ Aerospike Module ------------------------------
- module: aerospike
  metricsets: ["namespace"]
  enabled: true
  period: 10s
  hosts: ["localhost:3000"]

#-------------------------------- Apache Module --------------------------------
- module: apache
  metricsets: ["status"]
  period: 10s
  enabled: true

  # Apache hosts
  hosts: ["http://127.0.0.1"]

  # Path to server status. Default server-status
  #server_status_path: "server-status"

  # Username of hosts.  Empty by default
  #username: username

  # Password of hosts. Empty by default
  #password: password

#--------------------------------- Beat Module ---------------------------------
- module: beat
  metricsets:
    - stats
    - state
  period: 10s
  hosts: ["http://localhost:5066"]
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Set to true to send data collected by module to X-Pack
  # Monitoring instead of metricbeat-* indices.
  #xpack.enabled: false

#--------------------------------- Ceph Module ---------------------------------
# Metricsets depending on the Ceph REST API (default port: 5000)
- module: ceph
  metricsets: ["cluster_disk", "cluster_health", "monitor_health", "pool_disk", "osd_tree"]
  period: 10s
  hosts: ["localhost:5000"]
  enabled: true

# Metricsets depending on the Ceph Manager Daemon (default port: 8003)
- module: ceph
  metricsets:
    - mgr_cluster_disk
    - mgr_osd_perf
    - mgr_pool_disk
    - mgr_osd_pool_stats
    - mgr_osd_tree
  period: 1m
  hosts: [ "https://localhost:8003" ]
  #username: "user"
  #password: "secret"

#-------------------------------- Consul Module --------------------------------
- module: consul
  metricsets:
  - agent
  enabled: true
  period: 10s
  hosts: ["localhost:8500"]


#------------------------------ Couchbase Module ------------------------------
- module: couchbase
  metricsets: ["bucket", "cluster", "node"]
  period: 10s
  hosts: ["localhost:8091"]
  enabled: true

#------------------------------- CouchDB Module -------------------------------
- module: couchdb
  metricsets: ["server"]
  period: 10s
  hosts: ["localhost:5984"]

#-------------------------------- Docker Module --------------------------------
- module: docker
  metricsets:
    - "container"
    - "cpu"
    - "diskio"
    - "event"
    - "healthcheck"
    - "info"
    #- "image"
    - "memory"
    - "network"
    #- "network_summary"
  hosts: ["unix:///var/run/docker.sock"]
  period: 10s
  enabled: true

  # If set to true, replace dots in labels with `_`.
  #labels.dedot: false

  # If set to true, collects metrics per core.
  #cpu.cores: true

  # To connect to Docker over TLS you must specify a client and CA certificate.
  #ssl:
    #certificate_authority: "/etc/pki/root/ca.pem"
    #certificate:           "/etc/pki/client/cert.pem"
    #key:                   "/etc/pki/client/cert.key"

#------------------------------ Dropwizard Module ------------------------------
- module: dropwizard
  metricsets: ["collector"]
  period: 10s
  hosts: ["localhost:8080"]
  metrics_path: /metrics/metrics
  namespace: example
  enabled: true

#---------------------------- Elasticsearch Module ----------------------------
- module: elasticsearch
  metricsets:
    - node
    - node_stats
    #- index
    #- index_recovery
    #- index_summary
    #- shard
    #- ml_job
  period: 10s
  hosts: ["http://localhost:9200"]
  #username: "elastic"
  #password: "changeme"
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  #index_recovery.active_only: true
  #xpack.enabled: false
  #scope: node

#------------------------------ Envoyproxy Module ------------------------------
- module: envoyproxy
  metricsets: ["server"]
  period: 10s
  hosts: ["localhost:9901"]

#--------------------------------- Etcd Module ---------------------------------
- module: etcd
  metricsets: ["leader", "self", "store"]
  period: 10s
  hosts: ["localhost:2379"]

#-------------------------------- Golang Module --------------------------------
- module: golang
  #metricsets:
  #  - expvar
  #  - heap
  period: 10s
  hosts: ["localhost:6060"]
  heap.path: "/debug/vars"
  expvar:
    namespace: "example"
    path: "/debug/vars"

#------------------------------- Graphite Module -------------------------------
- module: graphite
  metricsets: ["server"]
  enabled: true

  # Host address to listen on. Default localhost.
  #host: localhost

  # Listening port. Default 2003.
  #port: 2003

  # Protocol to listen on. This can be udp or tcp. Default udp.
  #protocol: "udp"

  # Receive buffer size in bytes
  #receive_buffer_size: 1024

  #templates:
  #  - filter: "test.*.bash.*" # This would match metrics like test.localhost.bash.stats
  #    namespace: "test"
  #    template: ".host.shell.metric*" # test.localhost.bash.stats would become metric=stats and tags host=localhost,shell=bash
  #    delimiter: "_"


#------------------------------- HAProxy Module -------------------------------
- module: haproxy
  metricsets: ["info", "stat"]
  period: 10s
  # TCP socket, UNIX socket, or HTTP address where HAProxy stats are reported
  # TCP socket
  hosts: ["tcp://127.0.0.1:14567"]
  # UNIX socket
  #hosts: ["unix:///path/to/haproxy.sock"]
  # Stats page
  #hosts: ["http://127.0.0.1:14567"]
  username : "admin"
  password : "admin"
  enabled: true

#--------------------------------- HTTP Module ---------------------------------
- module: http
  #metricsets:
  #  - json
  period: 10s
  hosts: ["localhost:80"]
  namespace: "json_namespace"
  path: "/"
  #body: ""
  #method: "GET"
  #username: "user"
  #password: "secret"
  #request.enabled: false
  #response.enabled: false
  #json.is_array: false
  #dedot.enabled: false

- module: http
  #metricsets:
  #  - server
  host: "localhost"
  port: "8080"
  enabled: false
  #paths:
  #  - path: "/foo"
  #    namespace: "foo"
  #    fields: # added to the response root; overwrites existing fields
  #      key: "value"

#------------------------------- Jolokia Module -------------------------------
- module: jolokia
  #metricsets: ["jmx"]
  period: 10s
  hosts: ["localhost"]
  namespace: "metrics"
  #path: "/jolokia/?ignoreErrors=true&canonicalNaming=false"
  #username: "user"
  #password: "secret"
  jmx.mappings:
    #- mbean: 'java.lang:type=Runtime'
    #  attributes:
    #    - attr: Uptime
    #      field: uptime
    #- mbean: 'java.lang:type=Memory'
    #  attributes:
    #    - attr: HeapMemoryUsage
    #      field: memory.heap_usage
    #    - attr: NonHeapMemoryUsage
    #      field: memory.non_heap_usage
    # GC Metrics - this depends on what is available on your JVM
    #- mbean: 'java.lang:type=GarbageCollector,name=ConcurrentMarkSweep'
    #  attributes:
    #    - attr: CollectionTime
    #      field: gc.cms_collection_time
    #    - attr: CollectionCount
    #      field: gc.cms_collection_count

  jmx.application:
  jmx.instance:

#-------------------------------- Kafka Module --------------------------------
# Kafka metrics collected using the Kafka protocol
- module: kafka
  #metricsets:
  #  - partition
  #  - consumergroup
  period: 10s
  hosts: ["localhost:9092"]

  #client_id: metricbeat
  #retries: 3
  #backoff: 250ms

  # List of Topics to query metadata for. If empty, all topics will be queried.
  #topics: []

  # Optional SSL. By default it is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Client Certificate Passphrase (in case your Client Certificate Key is encrypted)
  #ssl.key_passphrase: "yourKeyPassphrase"

  # SASL authentication
  #username: ""
  #password: ""

  # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
  # Defaults to PLAIN when `username` and `password` are configured.
  #sasl.mechanism: ''

# Metrics collected from a Kafka broker using Jolokia
#- module: kafka
#  metricsets:
#    - broker
#  period: 10s
#  hosts: ["localhost:8779"]

# Metrics collected from a Java Kafka consumer using Jolokia
#- module: kafka
#  metricsets:
#    - consumer
#  period: 10s
#  hosts: ["localhost:8774"]

# Metrics collected from a Java Kafka producer using Jolokia
#- module: kafka
#  metricsets:
#    - producer
#  period: 10s
#  hosts: ["localhost:8775"]

#-------------------------------- Kibana Module --------------------------------
- module: kibana
  metricsets: ["status"]
  period: 10s
  hosts: ["localhost:5601"]
  basepath: ""
  enabled: true

  # Set to true to send data collected by module to X-Pack
  # Monitoring instead of metricbeat-* indices.
  #xpack.enabled: false

#------------------------------ Kubernetes Module ------------------------------
# Node metrics, from kubelet:
- module: kubernetes
  metricsets:
    - container
    - node
    - pod
    - system
    - volume
  period: 10s
  enabled: true
  hosts: ["https://${NODE_NAME}:10250"]
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  ssl.verification_mode: "none"
  #ssl.certificate_authorities:
  #  - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
  #ssl.certificate: "/etc/pki/client/cert.pem"
  #ssl.key: "/etc/pki/client/cert.key"

  # Enriching parameters:
  add_metadata: true
  # When used outside the cluster:
  #host: node_name
  # If kube_config is not set, the KUBECONFIG environment variable will be
  # checked; if not present either, it will fall back to InCluster
  #kube_config: ~/.kube/config

# State metrics from kube-state-metrics service:
- module: kubernetes
  enabled: true
  metricsets:
    - state_node
    - state_daemonset
    - state_deployment
    - state_replicaset
    - state_statefulset
    - state_pod
    - state_container
    - state_job
    - state_cronjob
    - state_resourcequota
    - state_service
    - state_persistentvolume
    - state_persistentvolumeclaim
    - state_storageclass
    # Uncomment this to get k8s events:
    #- event
  period: 10s
  hosts: ["kube-state-metrics:8080"]

  # Enriching parameters:
  add_metadata: true
  # When used outside the cluster:
  #host: node_name
  # If kube_config is not set, the KUBECONFIG environment variable will be
  # checked; if not present either, it will fall back to InCluster
  #kube_config: ~/.kube/config

# Kubernetes API server
# (when running metricbeat as a deployment)
- module: kubernetes
  enabled: true
  metricsets:
    - apiserver
  hosts: ["https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}"]
  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  ssl.certificate_authorities:
    - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  period: 30s



# Kubernetes proxy server
# (when running metricbeat locally at hosts or as a daemonset + host network)
- module: kubernetes
  enabled: true
  metricsets:
    - proxy
  hosts: ["localhost:10249"]
  period: 10s

# Kubernetes controller manager
# (URL and deployment method should be adapted to match the controller manager deployment / service / endpoint)
- module: kubernetes
  enabled: true
  metricsets:
    - controllermanager
  hosts: ["http://localhost:10252"]
  period: 10s

# Kubernetes scheduler
# (URL and deployment method should be adapted to match scheduler deployment / service / endpoint)
- module: kubernetes
  enabled: true
  metricsets:
    - scheduler
  hosts: ["localhost:10251"]
  period: 10s

#--------------------------------- KVM Module ---------------------------------
- module: kvm
  metricsets: ["dommemstat", "status"]
  enabled: true
  period: 10s
  hosts: ["unix:///var/run/libvirt/libvirt-sock"]
  # For remote hosts, set up network access in libvirtd.conf
  # and use the tcp scheme:
  # hosts: [ "tcp://<host>:16509" ]

  # Timeout to connect to Libvirt server
  #timeout: 1s

#-------------------------------- Linux Module --------------------------------
- module: linux
  period: 10s
  metricsets:
    - "pageinfo"
    - "memory"
    # - ksm
    # - conntrack
    # - iostat
    # - pressure
  enabled: true
  #hostfs: /hostfs


#------------------------------- Logstash Module -------------------------------
- module: logstash
  metricsets: ["node", "node_stats"]
  enabled: true
  period: 10s
  hosts: ["localhost:9600"]

#------------------------------ Memcached Module ------------------------------
- module: memcached
  metricsets: ["stats"]
  period: 10s
  hosts: ["localhost:11211"]
  enabled: true

#------------------------------- MongoDB Module -------------------------------
- module: mongodb
  metricsets: ["dbstats", "status", "collstats", "metrics", "replstatus"]
  period: 10s
  enabled: true

  # The hosts must be passed as MongoDB URLs in the format:
  # [mongodb://][user:pass@]host[:port].
  # The username and password can also be set using the respective configuration
  # options. The credentials in the URL take precedence over the username and
  # password configuration options.
  hosts: ["localhost:27017"]

  # Optional SSL. By default it is off.
  #ssl.enabled: true

  # Mode of verification of server certificate ('none' or 'full')
  #ssl.verification_mode: 'full'

  # List of root certificates for TLS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Username to use when connecting to MongoDB. Empty by default.
  #username: user

  # Password to use when connecting to MongoDB. Empty by default.
  #password: pass

#-------------------------------- Munin Module --------------------------------
- module: munin
  metricsets: ["node"]
  enabled: true
  period: 10s
  hosts: ["localhost:4949"]

  # List of plugins to collect metrics from. By default, metrics are collected
  # from all available plugins.
  #munin.plugins: []

  # If set to true, field names are sanitized in accordance with the Munin
  # implementation (all characters that are not alphanumeric or underscores
  # are replaced by underscores).
  #munin.sanitize: false

#-------------------------------- MySQL Module --------------------------------
- module: mysql
  metricsets:
    - status
  #  - galera_status
  #  - performance
  #  - query
  period: 10s

  # Host DSN should be defined as "user:pass@tcp(127.0.0.1:3306)/"
  # or "unix(/var/lib/mysql/mysql.sock)/",
  # or another DSN format supported by <https://github.com/Go-SQL-Driver/MySQL/>.
  # The username and password can either be set in the DSN or using the username
  # and password config options. Those specified in the DSN take precedence.
  hosts: ["root:secret@tcp(127.0.0.1:3306)/"]

  # Username of hosts. Empty by default.
  #username: root

  # Password of hosts. Empty by default.
  #password: secret

  # By setting raw to true, all raw fields from the status metricset will be added to the event.
  #raw: false

#--------------------------------- NATS Module ---------------------------------
- module: nats
  metricsets:
    - "connections"
    - "routes"
    - "stats"
    - "subscriptions"
    #- "connection"
    #- "route"
  period: 10s
  hosts: ["localhost:8222"]
  #stats.metrics_path: "/varz"
  #connections.metrics_path: "/connz"
  #routes.metrics_path: "/routez"
  #subscriptions.metrics_path: "/subsz"
  #connection.metrics_path: "/connz"
  #route.metrics_path: "/routez"

#-------------------------------- Nginx Module --------------------------------
- module: nginx
  metricsets: ["stubstatus"]
  enabled: true
  period: 10s

  # Nginx hosts
  hosts: ["http://127.0.0.1"]

  # Path to server status. Default nginx_status
  server_status_path: "nginx_status"

#----------------------------- Openmetrics Module -----------------------------
- module: openmetrics
  metricsets: ['collector']
  period: 10s
  hosts: ['localhost:9090']

  # This module uses the Prometheus collector metricset, so all
  # the options for that metricset are also available here.
  metrics_path: /metrics
  metrics_filters:
    include: []
    exclude: []

#------------------------------- PHP_FPM Module -------------------------------
- module: php_fpm
  metricsets:
  - pool
  #- process
  enabled: true
  period: 10s
  status_path: "/status"
  hosts: ["localhost:8080"]

#------------------------------ PostgreSQL Module ------------------------------
- module: postgresql
  enabled: true
  metricsets:
    # Stats about every PostgreSQL database
    - database

    # Stats about the background writer process's activity
    - bgwriter

    # Stats about every PostgreSQL process
    - activity

    # Stats about every statement executed in the server. It requires the
    # `pg_stats_statement` library to be configured in the server.
    #- statement

  period: 10s

  # The host must be passed as a PostgreSQL URL. Example:
  # postgres://localhost:5432?sslmode=disable
  # The available parameters are documented here:
  # https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters
  hosts: ["postgres://localhost:5432"]

  # Username to use when connecting to PostgreSQL. Empty by default.
  #username: user

  # Password to use when connecting to PostgreSQL. Empty by default.
  #password: pass

#------------------------------ Prometheus Module ------------------------------
# Metrics collected from a Prometheus endpoint
- module: prometheus
  period: 10s
  metricsets: ["collector"]
  hosts: ["localhost:9090"]
  metrics_path: /metrics
  #metrics_filters:
  #  include: []
  #  exclude: []
  #username: "user"
  #password: "secret"

  # This can be used for service account based authorization:
  #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #ssl.certificate_authorities:
  #  - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt


# Metrics sent by a Prometheus server using the remote_write option
#- module: prometheus
#  metricsets: ["remote_write"]
#  host: "localhost"
#  port: "9201"

  # Secure settings for the server using TLS/SSL:
  #ssl.certificate: "/etc/pki/server/cert.pem"
  #ssl.key: "/etc/pki/server/cert.key"

# Metrics that will be collected using PromQL queries
#- module: prometheus
#  metricsets: ["query"]
#  hosts: ["localhost:9090"]
#  period: 10s
#  queries:
#  - name: "instant_vector"
#    path: "/api/v1/query"
#    params:
#      query: "sum(rate(prometheus_http_requests_total[1m]))"
#  - name: "range_vector"
#    path: "/api/v1/query_range"
#    params:
#      query: "up"
#      start: "2019-12-20T00:00:00.000Z"
#      end:  "2019-12-21T00:00:00.000Z"
#      step: 1h
#  - name: "scalar"
#    path: "/api/v1/query"
#    params:
#      query: "100"
#  - name: "string"
#    path: "/api/v1/query"
#    params:
#      query: "some_value"

#------------------------------- RabbitMQ Module -------------------------------
- module: rabbitmq
  metricsets: ["node", "queue", "connection"]
  enabled: true
  period: 10s
  hosts: ["localhost:15672"]

  # Management path prefix. If `management.path_prefix` is set in the RabbitMQ
  # configuration, this setting has to be set to the same value.
  #management_path_prefix: ""

  #username: guest
  #password: guest

#-------------------------------- Redis Module --------------------------------
- module: redis
  metricsets: ["info", "keyspace"]
  enabled: true
  period: 10s

  # Redis hosts
  hosts: ["127.0.0.1:6379"]

  # Timeout after which a metricset should return an error.
  # By default the timeout equals the period, as a fetch of a metricset
  # should never take longer than the period; otherwise calls can pile up.
  #timeout: 1s

  # Optional fields to be added to each event
  #fields:
  #  datacenter: west

  # Network type to be used for redis connection. Default: tcp
  #network: tcp

  # Max number of concurrent connections. Default: 10
  #maxconn: 10

  # Filters can be used to reduce the number of fields sent.
  #processors:
  #  - include_fields:
  #      fields: ["beat", "metricset", "redis.info.stats"]

  # Redis AUTH password. Empty by default.
  #password: foobared

#------------------------------- Traefik Module -------------------------------
- module: traefik
  metricsets: ["health"]
  period: 10s
  hosts: ["localhost:8080"]

#-------------------------------- UWSGI Module --------------------------------
- module: uwsgi
  metricsets: ["status"]
  enabled: true
  period: 10s
  hosts: ["tcp://127.0.0.1:9191"]

#------------------------------- VSphere Module -------------------------------
- module: vsphere
  enabled: true
  metricsets: ["datastore", "host", "virtualmachine"]
  period: 10s
  hosts: ["https://localhost/sdk"]

  username: "user"
  password: "password"
  # If insecure is true, don't verify the server's certificate chain
  insecure: false
  # Get custom fields when using virtualmachine metric set. Default false.
  # get_custom_fields: false

#------------------------------- Windows Module -------------------------------
- module: windows
  metricsets: ["perfmon"]
  enabled: true
  period: 10s
  perfmon.ignore_non_existent_counters: false
  perfmon.group_measurements_by_instance: false
  perfmon.queries:
#  - object: 'Process'
#    instance: ["*"]
#    counters:
#    - name: '% Processor Time'
#      field: cpu_usage
#      format: "float"
#    - name: "Thread Count"

- module: windows
  metricsets: ["service"]
  enabled: true
  period: 60s

#------------------------------ ZooKeeper Module ------------------------------
- module: zookeeper
  enabled: true
  metricsets: ["mntr", "server"]
  period: 10s
  hosts: ["localhost:2181"]




# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue configuration for buffering events to be published.
#queue:
  # Queue type by name (default 'mem')
  # The memory queue will present all available events (up to the outputs
  # bulk_max_size) to the output, the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
    #events: 4096

    # Hints the minimum number of events stored in the queue,
    # before providing a batch of events to the outputs.
    # The default value is set to 2048.
    # A value of 0 ensures events are immediately available
    # to be sent to the outputs.
    #flush.min_events: 2048

    # Maximum duration after which events are available to the outputs,
    # if the number of events stored in the queue is < `flush.min_events`.
    #flush.timeout: 1s

  # The disk queue stores incoming events on disk until the output is
  # ready for them. This allows a higher event limit than the memory-only
  # queue and lets pending events persist through a restart.
  #disk:
    # The directory path to store the queue's data.
    #path: "${path.data}/diskqueue"

    # The maximum space the queue should occupy on disk. Depending on
    # input settings, events that exceed this limit are delayed or discarded.
    #max_size: 10GB

    # The maximum size of a single queue data file. Data in the queue is
    # stored in smaller segments that are deleted after all their events
    # have been processed.
    #segment_size: 1GB

    # The number of events to read from disk to memory while waiting for
    # the output to request them.
    #read_ahead: 512

    # The number of events to accept from inputs while waiting for them
    # to be written to disk. If event data arrives faster than it
    # can be written to disk, this setting prevents it from overflowing
    # main memory.
    #write_ahead: 2048

    # The duration to wait before retrying when the queue encounters a disk
    # write error.
    #retry_interval: 1s

    # The maximum length of time to wait before retrying on a disk write
    # error. If the queue encounters repeated errors, it will double the
    # length of its retry interval each time, up to this maximum.
    #max_retry_interval: 30s

  # The spool queue will store events in a local spool file, before
  # forwarding the events to the outputs.
  # Note: the spool queue is deprecated and will be removed in the future.
  # Use the disk queue instead.
  #
  # The spool file is a circular buffer, which blocks once the file/buffer is full.
  # Events are put into a write buffer and flushed once the write buffer
  # is full or the flush_timeout is triggered.
  # Once ACKed by the output, events are removed immediately from the queue,
  # making space for new events to be persisted.
  #spool:
    # The file namespace configures the file path and the file creation settings.
    # Once the file exists, the `size`, `page_size` and `prealloc` settings
    # will no longer have any effect.
    #file:
      # Location of spool file. The default value is ${path.data}/spool.dat.
      #path: "${path.data}/spool.dat"

      # Configure file permissions if file is created. The default value is 0600.
      #permissions: 0600

      # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
      #size: 100MiB

      # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
      #page_size: 4KiB

      # If prealloc is set, the required space for the file is reserved using
      # truncate. The default value is true.
      #prealloc: true

    # Spool writer settings
    # Events are serialized into a write buffer. The write buffer is flushed if:
    # - The buffer limit has been reached.
    # - The configured limit of buffered events is reached.
    # - The flush timeout is triggered.
    #write:
      # Sets the write buffer size.
      #buffer_size: 1MiB

      # Maximum duration after which events are flushed if the write buffer
      # is not full yet. The default value is 1s.
      #flush.timeout: 1s

      # Number of maximum buffered events. The write buffer is flushed once the
      # limit is reached.
      #flush.events: 16384

      # Configure the on-disk event encoding. The encoding can be changed
      # between restarts.
      # Valid encodings are: json, ubjson, and cbor.
      #codec: cbor
    #read:
      # Reader flush timeout: wait for more events to become available so that
      # a complete batch can be filled as required by the outputs.
      # If flush_timeout is 0, all available events are forwarded to the
      # outputs immediately.
      # The default value is 0s.
      #flush.timeout: 0s

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

# ================================= Processors =================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#  - include_fields:
#      fields: ["cpu"]
#  - drop_fields:
#      fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#  - drop_event:
#      when:
#        equals:
#          http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#  - rename:
#      fields:
#        - from: "a"
#          to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#  - dissect:
#      tokenizer: "%{key1} - %{key2}"
#      field: "message"
#      target_prefix: "dissect"
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#  - add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#  - add_locale:
#      format: offset
#
# The following example enriches each event with docker metadata. It matches
# the given fields to an existing container id and adds info from that container:
#
#processors:
#  - add_docker_metadata:
#      host: "unix:///var/run/docker.sock"
#      match_fields: ["system.process.cgroup.id"]
#      match_pids: ["process.pid", "process.ppid"]
#      match_source: true
#      match_source_index: 4
#      match_short_id: false
#      cleanup_timeout: 60
#      labels.dedot: false
#      # To connect to Docker over TLS you must specify a client and CA certificate.
#      #ssl:
#      #  certificate_authority: "/etc/pki/root/ca.pem"
#      #  certificate:           "/etc/pki/client/cert.pem"
#      #  key:                   "/etc/pki/client/cert.key"
#
# The following example enriches each event with docker metadata. It matches
# the container id from the log path available in the `source` field (by default
# it expects the path to be /var/lib/docker/containers/*/*.log).
#
#processors:
#  - add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#  - add_host_metadata: ~
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#  - add_process_metadata:
#      match_pids: ["system.process.ppid"]
#      target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#  - decode_json_fields:
#      fields: ["field1", "field2", ...]
#      process_array: false
#      max_depth: 1
#      target: ""
#      overwrite_keys: false
#
#processors:
#  - decompress_gzip_field:
#      from: "field1"
#      to: "field2"
#      ignore_missing: false
#      fail_on_error: true
#
# The following example copies the value of message to message_copied
#
#processors:
#  - copy_fields:
#      fields:
#        - from: message
#          to: message_copied
#      fail_on_error: true
#      ignore_missing: false
#
# The following example truncates the value of message to 1024 bytes
#
#processors:
#  - truncate_fields:
#      fields:
#        - message
#      max_bytes: 1024
#      fail_on_error: false
#      ignore_missing: true
#
# The following example preserves the raw message under event.original
#
#processors:
#  - copy_fields:
#      fields:
#        - from: message
#          to: event.original
#      fail_on_error: false
#      ignore_missing: true
#  - truncate_fields:
#      fields:
#        - event.original
#      max_bytes: 1024
#      fail_on_error: false
#      ignore_missing: true
#
# The following example URL-decodes the value of field1 to field2
#
#processors:
#  - urldecode:
#      fields:
#        - from: "field1"
#          to: "field2"
#      ignore_missing: false
#      fail_on_error: true

# =============================== Elastic Cloud ================================

# These settings simplify using Metricbeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
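
# For example, with placeholder values (copy the real cloud.id from the
# Elastic Cloud web UI):
#cloud.id: "my-deployment:BASE64_DATA_FROM_CLOUD_UI"
#cloud.auth: "elastic:YOUR_PASSWORD"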

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Configure escaping HTML symbols in strings.
  #escape_html: false

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the URL with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "metricbeat" plus date
  # and generates [metricbeat-]YYYY.MM.DD keys.
  # If you modify this pattern, you must update setup.template.name and setup.template.pattern accordingly.
  #index: "metricbeat-%{[agent.version]}-%{+yyyy.MM.dd}"

  # Optional ingest pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP path
  #path: "/elasticsearch"

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server URL
  #proxy_url: http://proxy:3128

  # Whether to disable proxy settings for outgoing connections. If true, this
  # takes precedence over both the proxy_url field and any environment settings
  # (HTTP_PROXY, HTTPS_PROXY). The default is false.
  #proxy_disable: false

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # The number of seconds to wait before trying to reconnect to Elasticsearch
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

  # Configure HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified certificate chain,
  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
  #kerberos.enabled: true

  # Authentication type to use with Kerberos. Available options: keytab, password.
  #kerberos.auth_type: password

  # Path to the keytab file. It is used when auth_type is set to keytab.
  #kerberos.keytab: /etc/elastic.keytab

  # Path to the Kerberos configuration.
  #kerberos.config_path: /etc/krb5.conf

  # Name of the Kerberos user.
  #kerberos.username: elastic

  # Password of the Kerberos user. It is used when auth_type is set to password.
  #kerberos.password: changeme

  # Kerberos realm.
  #kerberos.realm: ELASTIC


# ------------------------------ Logstash Output -------------------------------
#output.logstash:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Configure escaping HTML symbols in strings.
  #escape_html: false

  # Optional maximum time to live for a connection to Logstash, after which the
  # connection will be re-established.  A value of `0s` (the default) will
  # disable this feature.
  #
  # Not yet supported for async connections (i.e. with the "pipelining" option set)
  #ttl: 30s

  # Optionally load-balance events between Logstash hosts. Default is false.
  #loadbalance: false

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 2

  # If enabled, only a subset of events in a batch of events is transferred per
  # transaction. The number of events to be sent increases up to `bulk_max_size`
  # if no error is encountered.
  #slow_start: false

  # The number of seconds to wait before trying to reconnect to Logstash
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Logstash after a network error. The default is 60s.
  #backoff.max: 60s

  # Optional index name. The default index name is set to metricbeat
  # in all lowercase.
  #index: 'metricbeat'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified certificate chain,
  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
  # and retry until all events are published.  Set max_retries to a value less
  # than 0 to retry until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Logstash request. The
  # default is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Logstash server before
  # timing out. The default is 30s.
  #timeout: 30s

# -------------------------------- Kafka Output --------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from which to fetch the cluster metadata.
  # The cluster metadata contains the actual Kafka brokers that events are
  # published to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  #topic: beats
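
  # For example, a topic per event dataset (illustrative; any event field can
  # be used in the format string):
  #topic: '%{[event.dataset]}'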

  # The Kafka event key setting. Use format string to create a unique event key.
  # By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
  # using the `output.kafka.key` setting or randomly distributes events if
  # `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty `output.kafka.key` setting will be used.
    # Default value is empty list.
    #hash: []

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # SASL authentication mechanism used. Can be one of PLAIN, SCRAM-SHA-256 or SCRAM-SHA-512.
  # Defaults to PLAIN when `username` and `password` are configured.
  #sasl.mechanism: ''

  # Kafka version Metricbeat is assumed to run against. Defaults to "1.0.0".
  #version: '1.0.0'

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # Metadata update configuration. Metadata contains leader information
  # used to decide which broker to use when publishing.
  #metadata:
    # Max metadata request retry attempts when cluster is in middle of leader
    # election. Defaults to 3 retries.
    #retry.max: 3

    # Wait time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

    # Strategy for fetching the topics metadata from the broker. Default is false.
    #full: false

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published.  Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The number of seconds to wait before trying to republish to Kafka
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to republish. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful publish, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to republish to
  # Kafka after a network error. The default is 60s.
  #backoff.max: 60s

  # The maximum number of events to bulk in a single Kafka request. The default
  # is 2048.
  #bulk_max_size: 2048

  # Duration to wait before sending bulk Kafka request. 0 is no delay. The default
  # is 0.
  #bulk_flush_frequency: 0s

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for number of required ACKs. The
  # default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy, or gzip. The
  # default is gzip.
  #compression: gzip

  # Set the compression level. Currently only gzip provides a compression level
  # between 0 and 9. The default value is chosen by the compression algorithm.
  #compression_level: 4

  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  # dropped. The default value is 1000000 (bytes). This value should be equal to
  # or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from broker. 0=no response, 1=wait for
  # local commit, -1=wait for all replicas to commit. The default is 1.  Note:
  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  # on error.
  #required_acks: 1

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes.  The default is "beats".
  #client_id: beats

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified certificate chain,
  # this allows you to ensure that a specific certificate is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
  #kerberos.enabled: true

  # Authentication type to use with Kerberos. Available options: keytab, password.
  #kerberos.auth_type: password

  # Path to the keytab file. It is used when auth_type is set to keytab.
  #kerberos.keytab: /etc/security/keytabs/kafka.keytab

  # Path to the Kerberos configuration.
  #kerberos.config_path: /etc/krb5.conf

  # The service name. The service principal name is constructed from
  # service_name/hostname@realm.
  #kerberos.service_name: kafka

  # Name of the Kerberos user.
  #kerberos.username: elastic

  # Password of the Kerberos user. It is used when auth_type is set to password.
  #kerberos.password: changeme

  # Kerberos realm.
  #kerberos.realm: ELASTIC

  # Enables Kerberos FAST authentication. This may
  # conflict with certain Active Directory configurations.
  #kerberos.enable_krb5_fast: false
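
  # For example, keytab-based authentication (illustrative principal, realm,
  # and paths) would combine the Kerberos settings above as follows:
  #kerberos.auth_type: keytab
  #kerberos.username: metricbeat
  #kerberos.keytab: /etc/security/keytabs/metricbeat.keytab
  #kerberos.config_path: /etc/krb5.conf
  #kerberos.service_name: kafka
  #kerberos.realm: EXAMPLE.COM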

# -------------------------------- Redis Output --------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # The list of Redis servers to connect to. If load-balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  # The hosts setting supports redis and rediss URLs with a custom password,
  # for example redis://:password@localhost:6379.
  #hosts: ["localhost:6379"]

  # The name of the Redis list or channel the events are published to. The
  # default is metricbeat.
  #key: metricbeat

  # The password to authenticate to Redis with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is list,
  # the Redis RPUSH command is used. If the data type is channel, the Redis
  # PUBLISH command is used. The default value is list.
  #datatype: list

  # The number of workers to use for each host configured to publish events to
  # Redis. Use this setting along with the loadbalance option. For example, if
  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  # host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to false,
  # the output plugin sends all events to only one host (determined at random)
  # and will switch to another host if the currently selected one becomes
  # unreachable. The default value is true.
  #loadbalance: true
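
  # For example, the two-hosts/three-workers case described above would be
  # configured as follows (hypothetical hosts):
  #hosts: ["redis1.example.com:6379", "redis2.example.com:6379"]
  #worker: 3
  #loadbalance: true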

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The number of seconds to wait before trying to reconnect to Redis
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Redis after a network error. The default is 60s.
  #backoff.max: 60s

  # The maximum number of events to bulk in a single Redis request or pipeline.
  # The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  # value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name resolution
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL/TLS support for the connections to the Redis servers.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates used to verify server certificates
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified
  # certificate chain. This allows you to ensure that a specific certificate
  # is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""


# -------------------------------- File Output ---------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/metricbeat"

  # Name of the generated files. The default is `metricbeat` and it generates
  # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc.
  #filename: metricbeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every Metricbeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10240

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7

  # Permissions to use for file creation. The default is 0600.
  #permissions: 0600
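
  # As a worked example, a file output that keeps at most 7 files of roughly
  # 10 MB each (about 70 MB in total) under a hypothetical directory:
  #path: "/var/log/metricbeat-out"
  #filename: metricbeat
  #rotate_every_kb: 10240
  #number_of_files: 7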

# ------------------------------- Console Output -------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false
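
  # For quick local debugging, a pretty-printed console output (not suited
  # to production throughput) might look like this:
  #codec.json:
    #pretty: true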

# =================================== Paths ====================================

# The home path for the Metricbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the Metricbeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the Metricbeat installation. This is the default base path
# for all the files in which Metricbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for a Metricbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs
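
# As an illustration, a layout that keeps configuration, state, and logs
# outside the binary location (hypothetical directories) could look like:
#path.config: /etc/metricbeat
#path.data: /var/lib/metricbeat
#path.logs: /var/log/metricbeat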

# ================================== Keystore ==================================

# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"
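
# Keys added with the `metricbeat keystore add` command can be referenced in
# this configuration like environment variables. For example, assuming a key
# named ES_PWD exists in the keystore:
#output.elasticsearch.password: "${ES_PWD}"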

# ================================= Dashboards =================================

# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The directory to read the dashboards from. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana

# The URL to download the dashboards archive from. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:

# The file archive (zip file) to read the dashboards from. It is used instead
# of the directory when it has a value.
#setup.dashboards.file:

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#setup.dashboards.beat: metricbeat

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#setup.dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#setup.dashboards.index:

# Always use the Kibana API for loading the dashboards instead of autodetecting
# how to install the dashboards by first querying Elasticsearch.
#setup.dashboards.always_kibana: false

# If true and Kibana is not reachable at the time when dashboards are loaded,
# Metricbeat retries the connection to Kibana instead of exiting with an error.
#setup.dashboards.retry.enabled: false

# Duration interval between Kibana connection retries.
#setup.dashboards.retry.interval: 1s

# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
#setup.dashboards.retry.maximum: 0
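
# For example, to retry the Kibana connection every 5 seconds for up to 10
# attempts (illustrative values):
#setup.dashboards.retry.enabled: true
#setup.dashboards.retry.interval: 5s
#setup.dashboards.retry.maximum: 10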

# ================================== Template ==================================

# A template is used to set the mapping in Elasticsearch.
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.

# Set to false to disable template loading.
#setup.template.enabled: true

# Select the kind of index template. From Elasticsearch 7.8, it is possible to
# use component templates. Available options: legacy, component, index.
# By default Metricbeat uses index templates.
#setup.template.type: index

# Template name. By default the template name is "metricbeat-%{[agent.version]}"
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
#setup.template.name: "metricbeat-%{[agent.version]}"

# Template pattern. By default the template pattern is "metricbeat-%{[agent.version]}-*" to apply to the default index settings.
# The first part is the Beat name and version, and -* is used to match all daily indices.
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
#setup.template.pattern: "metricbeat-%{[agent.version]}-*"
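
# For example, with a custom Elasticsearch output index such as
# "custom-%{[agent.version]}-%{+yyyy.MM.dd}" (hypothetical), a matching
# template configuration would be:
#setup.template.name: "custom-%{[agent.version]}"
#setup.template.pattern: "custom-%{[agent.version]}-*"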

# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"

# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
#setup.template.append_fields:
#- name: field_name
#  type: field_type

# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false

# Path to the JSON template file
#setup.template.json.path: "${path.config}/template.json"

# Name under which the template is stored in Elasticsearch
#setup.template.json.name: ""

# Overwrite existing template
# Do not enable this option for more than one instance of Metricbeat as it might
# overload your Elasticsearch with too many update requests.
#setup.template.overwrite: false

# Elasticsearch template settings
setup.template.settings:

  # A dictionary of settings to place into the settings.index dictionary
  # of the Elasticsearch template. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
  #index:
    #number_of_shards: 1
    #codec: best_compression

  # A dictionary of settings for the _source field. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
  #_source:
    #enabled: false

# ====================== Index Lifecycle Management (ILM) ======================

# Configure index lifecycle management (ILM). These settings create a write
# alias and add additional settings to the index template. When ILM is enabled,
# output.elasticsearch.index is ignored, and the write alias is used to set the
# index name.

# Enable ILM support. Valid values are true, false, and auto. When set to auto
# (the default), the Beat uses index lifecycle management when it connects to a
# cluster that supports ILM; otherwise, it creates daily indices.
#setup.ilm.enabled: auto

# Set the prefix used in the index lifecycle write alias name. The default alias
# name is 'metricbeat-%{[agent.version]}'.
#setup.ilm.rollover_alias: 'metricbeat'

# Set the rollover index pattern. The default is "%{now/d}-000001".
#setup.ilm.pattern: "%{now/d}-000001"

# Set the lifecycle policy name. The default policy name is
# 'metricbeat'.
#setup.ilm.policy_name: "mypolicy"

# The path to a JSON file that contains a lifecycle policy configuration. Used
# to load your own lifecycle policy.
#setup.ilm.policy_file:

# Disable the check for an existing lifecycle policy. The default is true. If
# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
# can be installed.
#setup.ilm.check_exists: true

# Overwrite the lifecycle policy at startup. The default is false.
#setup.ilm.overwrite: false
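
# As an illustration, loading a custom policy from a JSON file (hypothetical
# name and path) and replacing any existing policy at startup:
#setup.ilm.policy_name: "metricbeat-custom"
#setup.ilm.policy_file: "/etc/metricbeat/ilm-policy.json"
#setup.ilm.overwrite: true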

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Optional HTTP path
  #path: ""

  # Optional Kibana space ID.
  #space.id: ""
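
  # For example, Kibana served behind a reverse proxy at
  # https://kibana.example.com/kibana (hypothetical) could be configured as
  # follows, with the password read from an assumed KIBANA_PWD keystore key:
  #host: "https://kibana.example.com/kibana"
  #username: "elastic"
  #password: "${KIBANA_PWD}"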

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verification
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified
  # certificate chain. This allows you to ensure that a specific certificate
  # is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""


# ================================== Logging ===================================

# There are four options for the log output: file, stderr, syslog, eventlog
# The file output is the default.

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publisher", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
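
# For example, to debug only the publisher component (illustrative):
#logging.level: debug
#logging.selectors: ["publisher"]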

# Send all logging output to stderr. The default is false.
#logging.to_stderr: false

# Send all logging output to syslog. The default is false.
#logging.to_syslog: false

# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false

# If enabled, Metricbeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# A list of metrics namespaces to report in the logs. Defaults to [stats].
# `stats` contains general Beat metrics. `dataset` may be present in some
# Beats and contains module or input metrics.
#logging.metrics.namespaces: [stats]

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/metricbeat

  # The name of the files that the logs are written to.
  #name: metricbeat

  # Configure the log file size limit. If the limit is reached, the log file
  # is automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600

  # Enable log file rotation on time intervals in addition to size-based rotation.
  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
  # reported by the local system clock. All other intervals are calculated from the
  # Unix epoch. Defaults to disabled.
  #interval: 0

  # Rotate existing logs on startup rather than appending to the existing
  # file. Defaults to true.
  #rotateonstartup: true

  # Suffix scheme for rotated files. When set to count (the default), rotated
  # files are renamed with a numeric suffix, e.g. metricbeat.1. When set to
  # date, the date is appended to the file name instead. On rotation a new
  # file is created and older files are left untouched.
  #suffix: count
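
  # For example, daily date-suffixed rotation in addition to the size-based
  # rotation (illustrative values):
  #interval: 24h
  #suffix: date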

# Set to true to log messages in JSON format.
# Deprecated: As of 7.16.0 this setting is deprecated and will be removed in an upcoming release.
#logging.json: false

# Set to true to log messages with the minimal required Elastic Common Schema
# (ECS) information. Recommended for use in combination with `logging.json: true`.
# Defaults to false.
# Deprecated: As of 7.16.0 this setting is deprecated and will be removed in an upcoming release.
#logging.ecs: false

# ============================= X-Pack Monitoring ==============================
# Metricbeat can export internal metrics to a central Elasticsearch monitoring
# cluster.  This requires X-Pack monitoring to be enabled in Elasticsearch.  The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Metricbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "beats_system"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the URL with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server URL
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # The number of seconds to wait before trying to reconnect to Elasticsearch
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

  # Configure the HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Controls the verification of certificates. Valid values are:
  # * full, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate.
  # * strict, which verifies that the provided certificate is signed by a trusted
  # authority (CA) and also verifies that the server's hostname (or IP address)
  # matches the names identified within the certificate. If the Subject Alternative
  # Name is empty, it returns an error.
  # * certificate, which verifies that the provided certificate is signed by a
  # trusted authority (CA), but does not perform any hostname verification.
  # * none, which performs no verification of the server's certificate. This
  # mode disables many of the security benefits of SSL/TLS and should only be used
  # after very careful consideration. It is primarily intended as a temporary
  # diagnostic mechanism when attempting to resolve TLS errors; its use in
  # production environments is strongly discouraged.
  # The default value is full.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.1
  # up to 1.3 are enabled.
  #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]

  # List of root certificates for HTTPS server verification
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # Configure a pin that can be used to do extra validation of the verified
  # certificate chain. This allows you to ensure that a specific certificate
  # is used to validate the chain of trust.
  #
  # The pin is a base64 encoded string of the SHA-256 fingerprint.
  #ssl.ca_sha256: ""

  # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set.
  #kerberos.enabled: true

  # Authentication type to use with Kerberos. Available options: keytab, password.
  #kerberos.auth_type: password

  # Path to the keytab file. It is used when auth_type is set to keytab.
  #kerberos.keytab: /etc/elastic.keytab

  # Path to the Kerberos configuration.
  #kerberos.config_path: /etc/krb5.conf

  # Name of the Kerberos user.
  #kerberos.username: elastic

  # Password of the Kerberos user. It is used when auth_type is set to password.
  #kerberos.password: changeme

  # Kerberos realm.
  #kerberos.realm: ELASTIC

  # The periods at which the internal metrics and the internal state are
  # reported.
  #metrics.period: 10s
  #state.period: 1m

# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts`
# setting. You can find the value for this setting in the Elastic Cloud web UI.
#monitoring.cloud.id:

# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username`
# and `monitoring.elasticsearch.password` settings. The format is `<user>:<pass>`.
#monitoring.cloud.auth:
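
# As an illustration, shipping monitoring data to a dedicated cluster
# (hypothetical host, with the password read from an assumed MONITORING_PWD
# keystore key) might look like this:
#monitoring.enabled: true
#monitoring.elasticsearch:
  #hosts: ["https://monitoring.example.com:9200"]
  #username: "beats_system"
  #password: "${MONITORING_PWD}"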

# =============================== HTTP Endpoint ================================

# Each Beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed at http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066
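
# With the endpoint enabled, the stats can then be fetched locally, for
# example with: curl -s "http://localhost:5066/stats?pretty"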

# Define the user that should own the named pipe.
#http.named_pipe.user:

# Define the permissions that should be applied to the named pipe, using the
# Security Descriptor Definition Language (SDDL). This option cannot be used
# with `http.user`.
#http.named_pipe.security_descriptor:

# Defines if the HTTP pprof endpoints are enabled.
# It is recommended that this is only enabled on localhost as these endpoints may leak data.
#http.pprof.enabled: false

# ============================== Process Security ==============================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true

# ============================== Instrumentation ===============================

# Instrumentation support for Metricbeat.
#instrumentation:
    # Set to true to enable instrumentation of Metricbeat.
    #enabled: false

    # Environment in which Metricbeat is running (e.g. staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:

    # Enable profiling of the server, recording profile samples as events.
    #
    # This feature is experimental.
    #profiling:
        #cpu:
            # Set to true to enable CPU profiling.
            #enabled: false
            #interval: 60s
            #duration: 10s
        #heap:
            # Set to true to enable heap profiling.
            #enabled: false
            #interval: 60s
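
        # Assuming the usual semantics of interval and duration, the
        # illustrative values above would record a 10s CPU profile every
        # 60s, and a heap profile every 60s.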

# ================================= Migration ==================================

# This allows you to enable the 6.7 migration aliases
#migration.6_to_7.enabled: false