diff --git a/Dockerfile b/Dockerfile
index db2c5f0..d79c9fd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,8 +4,6 @@ WORKDIR /app
 COPY requirements.txt /app/requirements.txt
 COPY app.py /app/app.py
-COPY read.py /app/read.py
-COPY save.py /app/save.py
 COPY config.py /app/config.py
 COPY templates /app/templates
 COPY static /app/static
diff --git a/app.py b/app.py
index 6ce90da..c33419f 100644
--- a/app.py
+++ b/app.py
@@ -1,13 +1,25 @@
 from flask import Flask, render_template, request, jsonify, redirect, url_for, session
 from time import time
+import toml

-from read import read_topics, read_server_addr, read_qos
-from save import save_topics, save_server_addr, save_qos
-from config import config, LOGIN_ATTEMPTS, MAX_ATTEMPTS, BLOCK_TIME, LOGIN_REQUIRED, LOGIN_PASSWORD
+#from read import read_topics, read_server_addr, read_qos, read_connection_timeout
+#from save import save_topics, save_server_addr, save_qos, save_connection_timeout
+from config import config, LOGIN_ATTEMPTS, MAX_ATTEMPTS, BLOCK_TIME, LOGIN_REQUIRED, LOGIN_PASSWORD, TELEGRAF_CONFIG, TELEGRAF_CONFIG_PATH

 app = Flask(__name__)
 app.secret_key = config["auth"]["secret_key"]

+def update_toml(input_data, target=None):
+    # Recursively merge submitted values into the loaded Telegraf config
+    if target is None:
+        target = TELEGRAF_CONFIG
+    for key, value in input_data.items():
+        if isinstance(value, dict) and isinstance(target.get(key), dict):
+            update_toml(value, target[key])
+        else:
+            target[key] = value
+    return TELEGRAF_CONFIG
+
 # Checks if an IP is blocked
 def is_blocked(ip):
     if ip in LOGIN_ATTEMPTS:
@@ -73,11 +85,9 @@ def get_config():
     if LOGIN_REQUIRED and not session.get("logged_in"):
         return jsonify({"error": "Nicht autorisiert."}), 403

-    topics = read_topics()
-    server_addr = read_server_addr()
-    qos = read_qos()
+    conf = TELEGRAF_CONFIG["inputs"]["mqtt_consumer"]

-    return jsonify({"topics": topics, "server_addr": server_addr, "qos": qos})
+    return jsonify(conf)

 @app.route('/api/save-config', methods=['POST'])
 def save_config():
@@ -85,22 +95,10 @@ def save_config():
     if LOGIN_REQUIRED and not session.get("logged_in"):
         return jsonify({"error": "Not authorized."}), 403

     data = request.get_json()
+    new_config = update_toml(data)

-    topics = data.get('topics', [])
-    server_addr = data.get('server_addr', [])
-    qos = data.get('qos', None)
-
-    if topics:
-        if not save_topics(topics):
-            return jsonify({"error": "Failed to save topics."}), 500
-
-    if server_addr:
-        if not save_server_addr(server_addr):
-            return jsonify({"error": "Failed to save Server adress."}), 500
-
-    if qos:
-        if not save_qos(qos):
-            return jsonify({"error": "Failed to save qos value."}), 500
+    with open(TELEGRAF_CONFIG_PATH, 'w') as f:
+        toml.dump(new_config, f)

-    return jsonify({"status": "success", "message": "config saved succesfully!"})
+    return jsonify({"status": "success", "message": "Config saved successfully!"})
diff --git a/config.py b/config.py
index 3d4c22a..e5d3281 100644
--- a/config.py
+++ b/config.py
@@ -1,9 +1,11 @@
 import configparser
+import toml

 config = configparser.ConfigParser()
 config.read('app.conf')

-TELEGRAF_CONFIG_PATH = "telegraf.conf"
+TELEGRAF_CONFIG = toml.load(config["config"]["telegraf_config_path"])
+TELEGRAF_CONFIG_PATH = config["config"]["telegraf_config_path"]

 LOGIN_REQUIRED = config.getboolean("auth", "login_required", fallback=False)
 LOGIN_PASSWORD = config.get("auth", "password", fallback="")
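The update_toml() merge plus toml.dump() replaces the old line-oriented regex editing with a structured load/merge/write round trip. A minimal sketch of that round trip, assuming the toml package pinned in requirements.txt (the path and values below are illustrative, not from the repo). Two caveats worth knowing: a [[inputs.mqtt_consumer]] array of tables loads as a Python list, and toml.dump() serializes only the data structure, so any comments in the original telegraf.conf are lost on the first save.

import toml

# Load the full Telegraf config into nested dicts/lists (path is illustrative)
conf = toml.load("telegraf.conf")

# [[inputs.mqtt_consumer]] is a TOML array of tables, hence the [0] index
consumer = conf["inputs"]["mqtt_consumer"][0]
consumer["topics"] = ["home/+/temperature"]  # example values, not from the repo
consumer["qos"] = 1

# Write the merged structure back; note toml.dump() drops all comments
with open("telegraf.conf", "w") as f:
    toml.dump(conf, f)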
"r") as file: - config_data = file.read() - - # searches for [[inputs.mqtt_consumer]] and extracts topics - mqtt_consumer_match = re.search(r'\[\[inputs\.mqtt_consumer\]\](.*?)\n\s*topics\s*=\s*\[(.*?)\]', config_data, re.DOTALL) - - if mqtt_consumer_match: - topics_string = mqtt_consumer_match.group(2).strip() - topics = re.findall(r'"([^"]*)"', topics_string) - return topics - return [] # In case no Topics are found - -# reads server address from telegraf.conf -def read_server_addr(): - with open(TELEGRAF_CONFIG_PATH, "r") as file: - config_data = file.read() - - # searches for [[inputs.mqtt_consumer]] and extracts topics - mqtt_consumer_match = re.search(r'\[\[inputs\.mqtt_consumer\]\](.*?)\n\s*servers\s*=\s*\[(.*?)\]', config_data, re.DOTALL) - - if mqtt_consumer_match: - servers_string = mqtt_consumer_match.group(2).strip() - servers = re.findall(r'"([^"]*)"', servers_string) - return servers - return [] # In case no Topics are found - -# reads QoS from telegraf.conf -def read_qos(): - with open(TELEGRAF_CONFIG_PATH, "r") as file: - config_data = file.read() - - mqtt_consumer_match = mqtt_consumer_match = re.search(r'\[\[inputs\.mqtt_consumer\]\](.*?)^[ \t]*qos\s*=\s*(\d+)', config_data, re.MULTILINE | re.DOTALL) - - if mqtt_consumer_match: - return mqtt_consumer_match.group(2) - return [] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 5cbb75e..31a20ba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ Flask==3.0.3 -configparser==7.1.0 \ No newline at end of file +configparser==7.1.0 +toml==0.10.2 \ No newline at end of file diff --git a/save.py b/save.py deleted file mode 100644 index 9731531..0000000 --- a/save.py +++ /dev/null @@ -1,68 +0,0 @@ -import re - -from config import TELEGRAF_CONFIG_PATH - -# Saves topics to telegraf.conf -def save_topics(topics): - with open(TELEGRAF_CONFIG_PATH, 'r') as file: - config_data = file.read() - - # RegEx to Find topics in telegraf.conf - topics_match = re.search(r'topics\s*=\s*\[(.*?)\]', config_data, re.DOTALL) - if topics_match: - new_topics_string = "\n ".join([f'"{topic}"' for topic in topics]) - config_data = re.sub(r'topics\s*=\s*\[([^\]]+)\]', f'topics = [\n {new_topics_string},\n ]', config_data) - - with open(TELEGRAF_CONFIG_PATH, 'w') as file: - file.write(config_data) - return True - - return False - -# Saves server address to telegraf.conf -def save_server_addr(server_addr): - with open(TELEGRAF_CONFIG_PATH, 'r') as file: - config_data = file.read() - # RegEx, um die servers-Zeile zu finden - mqtt_section_match = re.search(r'\[\[inputs\.mqtt_consumer\]\](.*?)(?=\n\[|\Z)', config_data, re.DOTALL) - - if mqtt_section_match: - section_content = mqtt_section_match.group(0) - # Nur aktive servers-Zeilen bearbeiten (keine auskommentierten) - active_servers_match = re.search(r'^\s*servers\s*=\s*\[([^\]]+)\]', section_content, re.MULTILINE) - - if active_servers_match: - # Neues Server-Format erstellen - new_servers_string = f'{server_addr}' - # Ersetzen der aktiven servers-Zeile - updated_section = re.sub(r'^\s*servers\s*=\s*\[([^\]]+)\]', f' servers = {new_servers_string}', section_content, flags=re.MULTILINE) - - # Abschnitt in der Konfiguration aktualisieren - config_data = config_data.replace(section_content, updated_section) - - # Datei speichern - with open(TELEGRAF_CONFIG_PATH, 'w') as file: - file.write(config_data) - return True - - return False - -# Saves QoS to telegraf.conf -def save_qos(qos): - if qos not in (0, 1, 2): - return False - - with open(TELEGRAF_CONFIG_PATH, 
"r") as file: - config_data = file.read() - - # Ersetzt den vorhandenen QoS-Wert durch den neuen Wert - updated_config_data = re.sub( - r'(^[ \t]*qos\s*=\s*)(\d+)', # Findet die `qos =`-Zeile mit dem aktuellen Wert - rf'\1{qos}', # Ersetzt den Wert durch `new_qos`, behält das Präfix bei - config_data, - flags=re.MULTILINE - ) - - with open(TELEGRAF_CONFIG_PATH, "w") as file: - file.write(updated_config_data) - return True \ No newline at end of file diff --git a/static/dashboard.js b/static/dashboard.js index 874fafa..235a639 100644 --- a/static/dashboard.js +++ b/static/dashboard.js @@ -1,29 +1,29 @@ -// Funktion, um Nachrichten anzuzeigen +// Shows Messages function showMessage(message, type = "success") { const messageBox = document.getElementById("message-box"); messageBox.textContent = message; messageBox.className = `message-box ${type}`; // Typ (success, error) hinzufügen - // Nachricht nach 5 Sekunden ausblenden + // Removes message after 5 seconds setTimeout(() => { messageBox.textContent = ""; messageBox.className = "message-box"; }, 5000); } -// Funktion, um die Topics dynamisch hinzuzufügen +// Adds Topics dynamically function loadTopics() { - fetch('/api/config') // API-Request für Topics + fetch('/api/config') // API-Request for Topics .then(response => response.json()) .then(data => { - const topics = data.topics || []; // Falls keine Topics vorhanden sind, leere Liste + const topics = data.topics || []; // When no topics are available, an empty array is returned const topicsList = document.getElementById('topics-list'); - // Entfernen aller vorhandenen Topics im DOM, bevor wir die neuen hinzufügen + // Remove all existing topics topicsList.innerHTML = ''; topics.forEach(topic => { - // Neues Topic-Element erstellen + // Create new topic item const topicItem = document.createElement('div'); topicItem.classList.add('topic-item'); topicItem.innerHTML = ` @@ -37,7 +37,7 @@ function loadTopics() { .catch(error => showMessage("Fehler beim Laden der Topics!", "error")); } -// Hinzufügen eines neuen Topic-Feldes +// Create new topic field document.getElementById('add-topic-btn').addEventListener('click', function() { const topicItem = document.createElement('div'); topicItem.classList.add('topic-item'); @@ -45,18 +45,18 @@ document.getElementById('add-topic-btn').addEventListener('click', function() { document.getElementById('topics-list').appendChild(topicItem); }); -// Entfernen eines Topics +// Remove topic document.addEventListener('click', function(event) { if (event.target.classList.contains('remove-topic-btn')) { event.target.parentElement.remove(); } }); -// Formular absenden +// Send formular data document.getElementById('topics-form').addEventListener('submit', function(event) { - event.preventDefault(); // Verhindert das Standard-Submit-Verhalten + event.preventDefault(); // prevents the default form submission - // Alle aktuellen Topics aus den Eingabefeldern sammeln + // Collect all topics const topics = []; document.querySelectorAll('input[name="topics"]').forEach(input => { if (input.value.trim()) { @@ -64,7 +64,7 @@ document.getElementById('topics-form').addEventListener('submit', function(event } }); - // Die geänderten Topics an die API senden + // Send changed topics to api fetch('/api/save-config', { method: 'POST', headers: { @@ -75,10 +75,10 @@ document.getElementById('topics-form').addEventListener('submit', function(event .then(response => response.json()) .then(data => { showMessage(data.message, "success"); - loadTopics(); // Aktualisieren der angezeigten 
diff --git a/static/login.js b/static/login.js
index 1722ebe..ef070b4 100644
--- a/static/login.js
+++ b/static/login.js
@@ -1,4 +1,4 @@
-// Funktion zum Umschalten der Passwortsichtbarkeit
+// Function to toggle password visibility
 function togglePasswordVisibility() {
    const passwordInput = document.getElementById('password');
    const type = passwordInput.getAttribute('type') === 'password' ? 'text' : 'password';
diff --git a/static/styles.css b/static/styles.css
index da2f817..6a9e673 100644
--- a/static/styles.css
+++ b/static/styles.css
@@ -1,4 +1,4 @@
-/* Allgemeine Body-Stile */
+/* General body styles */
 body {
    font-family: 'Roboto', sans-serif;
    background-color: #f4f7f6;
@@ -10,7 +10,7 @@ body {
    height: 100vh;
 }

-/* Allgemeiner Container */
+/* General container */
 .container {
    max-width: 400px;
    width: 100%;
@@ -22,14 +22,14 @@
    box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
 }

-/* Überschriften */
+/* Headings */
 h1 {
    text-align: center;
    color: #333;
    margin-bottom: 20px;
 }

-/* Nachricht-Box */
+/* Message box */
 .message-box {
    margin: 15px 0;
    padding: 10px;
@@ -51,7 +51,7 @@ h1 {
    border: 1px solid #f5c6cb;
 }

-/* Fehlernachrichten */
+/* Error messages */
 .error {
    color: #721c24;
    background-color: #f8d7da;
@@ -74,7 +74,7 @@ form {
    gap: 5px;
 }

-/* Eingabefelder */
+/* Input fields */
 input[type="text"],
 input[type="password"] {
    padding: 10px;
@@ -85,7 +85,7 @@ input[type="password"] {
    width: 100%;
 }

-/* Passwort-Eingabe mit Button */
+/* Password input with toggle button */
 .input-wrapper {
    display: flex;
    align-items: center;
@@ -93,7 +93,7 @@ input[type="password"] {
 }

 .input-wrapper input {
-    padding-right: 40px; /* Platz für den Button */
+    padding-right: 40px; /* Space for the button */
 }

 .input-wrapper button.toggle-password {
@@ -126,19 +126,19 @@ button:hover {
    filter: brightness(0.9);
 }

-/* Hinzufügen-Button */
+/* Add button */
 #add-topic-btn {
    background-color: #28a745;
    margin: 20px auto;
 }

-/* Speichern-Button */
+/* Save button */
 .save-btn {
    background-color: #007bff;
    margin-top: 20px;
 }

-/* Löschen-Button */
+/* Delete button */
 .topic-item button {
    background-color: #ff4d4d;
    padding: 5px 10px;
@@ -154,7 +154,7 @@ button:hover {
    background-color: #007bff;
 }

-/* Topics-Liste */
+/* Topics list */
 .topics-list {
    margin: 20px 0;
    display: flex;
diff --git a/telegraf.conf b/telegraf.conf
index 030bd36..26e3125 100644
--- a/telegraf.conf
+++ b/telegraf.conf
@@ -1,13282 +1,35 @@
-# Telegraf Configuration
-#
-# Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs, and sent to the declared outputs.
-#
-# Plugins must be declared in here to be active.
-# To deactivate a plugin, comment out the name and any variables.
-#
-# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
-# file would generate.
-#
-# Environment variables can be used anywhere in this config file, simply surround
-# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
-# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
-
-
-# Global tags can be specified here in key="value" format.
[global_tags] - # dc = "us-east-1" # will tag all metrics with dc=us-east-1 - # rack = "1a" - ## Environment variables can be used as tags, and throughout the config file - # user = "$USER" -# Configuration for telegraf agent [agent] - ## Default data collection interval for all inputs - interval = "1s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Collection offset is used to shift the collection by the given amount. - ## This can be be used to avoid many plugins querying constraint devices - ## at the same time by manually scheduling them in time. - # collection_offset = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "1s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Collected metrics are rounded to the precision specified. Precision is - ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - ## - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s: - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - precision = "0s" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0h" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. 
- # logfile_rotation_max_archives = 5 - - ## Pick a timezone to use when logging or type 'local' for local time. - ## Example: America/Chicago - # log_with_timezone = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false - - ## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which - ## translates by calling external programs snmptranslate and snmptable, - ## or "gosmi" which translates using the built-in gosmi library. - # snmp_translator = "netsnmp" - - ## Name of the file to load the state of plugins from and store the state to. - ## If uncommented and not empty, this file will be used to save the state of - ## stateful plugins on termination of Telegraf. If the file exists on start, - ## the state in the file will be restored for the plugins. - # statefile = "" - - ## Flag to skip running processors after aggregators - ## By default, processors are run a second time after aggregators. Changing - ## this setting to true will skip the second run of processors. - # skip_processors_after_aggregators = false - -############################################################################### -# SECRETSTORE PLUGINS # -############################################################################### - - -# # Secret-store to access Docker Secrets -# [[secretstores.docker]] -# ## Unique identifier for the secretstore. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "docker_secretstore" -# -# ## Default Path to directory where docker stores the secrets file -# ## Current implementation in docker compose v2 only allows the following -# ## value for the path where the secrets are mounted at runtime -# # path = "/run/secrets" -# -# ## Allow dynamic secrets that are updated during runtime of telegraf -# ## Dynamic Secrets work only with `file` or `external` configuration -# ## in `secrets` section of the `docker-compose.yml` file -# # dynamic = false - - -# # Read secrets from a HTTP endpoint -# [[secretstores.http]] -# ## Unique identifier for the secret-store. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "secretstore" -# -# ## URLs from which to read the secrets -# url = "http://localhost/secrets" -# -# ## Optional HTTP headers -# # headers = {"X-Special-Header" = "Special-Value"} -# -# ## Optional Token for Bearer Authentication via -# ## "Authorization: Bearer " header -# # token = "your-token" -# -# ## Optional Credentials for HTTP Basic Authentication -# # username = "username" -# # password = "pa$$word" -# -# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. 
-# # client_id = "clientid" -# # client_secret = "secret" -# # token_url = "https://indentityprovider/oauth2/v1/token" -# # scopes = ["urn:opc:idm:__myscopes__"] -# -# ## HTTP Proxy support -# # use_system_proxy = false -# # http_proxy_url = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Minimal TLS version to accept by the client -# # tls_min_version = "TLS12" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional Cookie authentication -# # cookie_auth_url = "https://localhost/authMe" -# # cookie_auth_method = "POST" -# # cookie_auth_username = "username" -# # cookie_auth_password = "pa$$word" -# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } -# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' -# ## When unset or set to zero the authentication will only happen once -# ## and will never renew the cookie. Set to a suitable duration if you -# ## require cookie renewal! -# # cookie_auth_renewal = "0s" -# -# ## Amount of time allowed to complete the HTTP request -# # timeout = "5s" -# -# ## List of success status codes -# # success_status_codes = [200] -# -# ## JSONata expression to transform the server response into a -# ## { "secret name": "secret value", ... } -# ## form. See https://jsonata.org for more information and a playground. -# # transformation = '' -# -# ## Cipher used to decrypt the secrets. -# ## In case your secrets are transmitted in an encrypted form, you need -# ## to specify the cipher used and provide the corresponding configuration. -# ## Please refer to https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/http/README.md -# ## for supported values. -# # cipher = "none" -# -# ## AES cipher parameters -# # [secretstores.http.aes] -# # ## Key (hex-encoded) and initialization-vector (IV) for the decryption. -# # ## In case the key (and IV) is derived from a password, the values can -# # ## be omitted. -# # key = "" -# # init_vector = "" -# # -# # ## Parameters for password-based-key derivation. -# # ## These parameters must match the encryption side to derive the same -# # ## key on both sides! -# # # kdf_algorithm = "PBKDF2-HMAC-SHA256" -# # # password = "" -# # # salt = "" -# # # iterations = 0 - - -# # File based Javascript Object Signing and Encryption based secret-store -# [[secretstores.jose]] -# ## Unique identifier for the secret-store. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "secretstore" -# -# ## Directory for storing the secrets -# path = "/etc/telegraf/secrets" -# -# ## Password to access the secrets. -# ## If no password is specified here, Telegraf will prompt for it at startup time. -# # password = "" - - -# # Secret-store to retrieve and maintain tokens from various OAuth2 services -# [[secretstores.oauth2]] -# ## Unique identifier for the secret-store. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "secretstore" -# -# ## Service to retrieve the token(s) from -# ## Currently supported services are "custom", "auth0" and "AzureAD" -# # service = "custom" -# -# ## Setting to overwrite the queried token-endpoint -# ## This setting is optional for some services but mandatory for others such -# ## as "custom" or "auth0". 
Please check the documentation at -# ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md -# # token_endpoint = "" -# -# ## Tenant ID for the AzureAD service -# # tenant_id = "" -# -# ## Minimal remaining time until the token expires -# ## If a token expires less than the set duration in the future, the token is -# ## renewed. This is useful to avoid race-condition issues where a token is -# ## still valid, but isn't when the request reaches the API endpoint of -# ## your service using the token. -# # token_expiry_margin = "1s" -# -# ## Section for defining a token secret -# [[secretstores.oauth2.token]] -# ## Unique secret-key used for referencing the token via @{:} -# key = "" -# ## Client-ID and secret for the 2-legged OAuth flow -# client_id = "" -# client_secret = "" -# ## Scopes to send in the request -# # scopes = [] -# -# ## Additional (optional) parameters to include in the token request -# ## This might for example include the "audience" parameter required for -# ## auth0. -# # [secretstores.oauth2.token.parameters] -# # audience = "" - - -# # Operating System native secret-store -# [[secretstores.os]] -# ## Unique identifier for the secret-store. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "secretstore" -# -# ## Keyring Name & Collection -# ## * Linux: keyring name used for the secrets, collection is unused -# ## * macOS: keyring specifies the macOS' Keychain name and collection is an -# ## optional Keychain service name -# ## * Windows: keys follow a fixed pattern in the form -# ## `::`. Please keep this in mind when -# ## creating secrets with the Windows credential tool. -# # keyring = "telegraf" -# # collection = "" -# -# ## macOS Keychain password -# ## If no password is specified here, Telegraf will prompt for it at startup -# ## time. -# # password = "" -# -# ## Allow dynamic secrets that are updated during runtime of telegraf -# # dynamic = false - - -# # Secret-store to access systemd secrets -# [[secretstores.systemd]] -# ## Unique identifier for the secretstore. -# ## This id can later be used in plugins to reference the secrets -# ## in this secret-store via @{:} (mandatory) -# id = "systemd" -# -# ## Path to systemd credentials directory -# ## This should not be required as systemd indicates this directory -# ## via the CREDENTIALS_DIRECTORY environment variable. -# # path = "${CREDENTIALS_DIRECTORY}" -# -# ## Prefix to remove from systemd credential-filenames to derive secret names -# # prefix = "telegraf." -# - - -############################################################################### -# OUTPUT PLUGINS # -############################################################################### - - -# # Configuration for sending metrics to InfluxDB 2.0 - [[outputs.influxdb_v2]] -# ## The URLs of the InfluxDB cluster nodes. -# ## -# ## Multiple URLs can be specified for a single cluster, only ONE of the -# ## urls will be written to each interval. -# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["https://127.0.0.1"] -# -# ## Token for authentication. - token = "e5gn-7I9LLZ6ZZxcytI_P2FCTStePJ5dYeG9YutRdgsgkezsaZBfyMIMB_FkoLOxzl_s9puPiapPQRzRj9BCfg==" -# -# ## Organization is the name of the organization you wish to write to. - organization = "Zuhause" -# -# ## Destination bucket to write into. - bucket = "Woche" -# -# ## The value of this tag will be used to determine the bucket. 
If this -# ## tag is not set the 'bucket' option is used as the default. -# # bucket_tag = "" -# -# ## If true, the bucket tag will not be added to the metric. -# # exclude_bucket_tag = false -# -# ## Timeout for HTTP messages. -# # timeout = "5s" -# -# ## Additional HTTP headers -# # http_headers = {"X-Special-Header" = "Special-Value"} -# -# ## HTTP Proxy override, if unset values the standard proxy environment -# ## variables are consulted to determine which proxy, if any, should be used. -# # http_proxy = "http://corporate.proxy:3128" -# -# ## HTTP User-Agent -# # user_agent = "telegraf" -# -# ## Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "gzip" -# -# ## Enable or disable uint support for writing uints influxdb 2.0. -# # influx_uint_support = false -# -# ## HTTP/2 Timeouts -# ## The following values control the HTTP/2 client's timeouts. These settings -# ## are generally not required unless a user is seeing issues with client -# ## disconnects. If a user does see issues, then it is suggested to set these -# ## values to "15s" for ping timeout and "30s" for read idle timeout and -# ## retry. -# ## -# ## Note that the timer for read_idle_timeout begins at the end of the last -# ## successful write and not at the beginning of the next write. -# # ping_timeout = "0s" -# # read_idle_timeout = "0s" -# -# ## Optional TLS Config for use on HTTP connections. -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification - insecure_skip_verify = true - - -# # Configuration for Amon Server to send metrics to. -# [[outputs.amon]] -# ## Amon Server Key -# server_key = "my-server-key" # required. -# -# ## Amon Instance URL -# amon_instance = "https://youramoninstance" # required -# -# ## Connection timeout. -# # timeout = "5s" - - -# # Publishes metrics to an AMQP broker -# [[outputs.amqp]] -# ## Broker to publish to. -# ## deprecated in 1.7; use the brokers option -# # url = "amqp://localhost:5672/influxdb" -# -# ## Brokers to publish to. If multiple brokers are specified a random broker -# ## will be selected anytime a connection is established. This can be -# ## helpful for load balancing when not using a dedicated load balancer. -# brokers = ["amqp://localhost:5672/influxdb"] -# -# ## Maximum messages to send over a connection. Once this is reached, the -# ## connection is closed and a new connection is made. This can be helpful for -# ## load balancing when not using a dedicated load balancer. -# # max_messages = 0 -# -# ## Exchange to declare and publish to. -# exchange = "telegraf" -# -# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". -# # exchange_type = "topic" -# -# ## If true, exchange will be passively declared. -# # exchange_passive = false -# -# ## Exchange durability can be either "transient" or "durable". -# # exchange_durability = "durable" -# -# ## Additional exchange arguments. -# # exchange_arguments = { } -# # exchange_arguments = {"hash_property" = "timestamp"} -# -# ## Authentication credentials for the PLAIN auth_method. -# # username = "" -# # password = "" -# -# ## Auth method. PLAIN and EXTERNAL are supported -# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as -# ## described here: https://www.rabbitmq.com/plugins.html -# # auth_method = "PLAIN" -# -# ## Metric tag to use as a routing key. 
-# ## ie, if this tag exists, its value will be used as the routing key -# # routing_tag = "host" -# -# ## Static routing key. Used when no routing_tag is set or as a fallback -# ## when the tag specified in routing tag is not found. -# # routing_key = "" -# # routing_key = "telegraf" -# -# ## Delivery Mode controls if a published message is persistent. -# ## One of "transient" or "persistent". -# # delivery_mode = "transient" -# -# ## InfluxDB database added as a message header. -# ## deprecated in 1.7; use the headers option -# # database = "telegraf" -# -# ## InfluxDB retention policy added as a message header -# ## deprecated in 1.7; use the headers option -# # retention_policy = "default" -# -# ## Static headers added to each published message. -# # headers = { } -# # headers = {"database" = "telegraf", "retention_policy" = "default"} -# -# ## Connection timeout. If not provided, will default to 5s. 0s means no -# ## timeout (not recommended). -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional Proxy Configuration -# # use_proxy = false -# # proxy_url = "localhost:8888" -# -# ## If true use batch serialization format instead of line based delimiting. -# ## Only applies to data formats which are not line based such as JSON. -# ## Recommended to set to true. -# # use_batch_format = false -# -# ## Content encoding for message payloads, can be set to "gzip" to or -# ## "identity" to apply no encoding. -# ## -# ## Please note that when use_batch_format = false each amqp message contains only -# ## a single metric, it is recommended to use compression with batch format -# ## for best results. -# # content_encoding = "identity" -# -# ## Data format to output. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" - - -# # Send metrics to Azure Application Insights -# [[outputs.application_insights]] -# ## Instrumentation key of the Application Insights resource. -# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" -# -# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints -# # endpoint_url = "https://dc.services.visualstudio.com/v2/track" -# -# ## Timeout for closing (default: 5s). -# # timeout = "5s" -# -# ## Enable additional diagnostic logging. -# # enable_diagnostic_logging = false -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Context Tag Sources add Application Insights context tags to a tag value. 
-# ## -# ## For list of allowed context tag keys see: -# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go -# # [outputs.application_insights.context_tag_sources] -# # "ai.cloud.role" = "kubernetes_container_name" -# # "ai.cloud.roleInstance" = "kubernetes_pod_name" - - -# # Sends metrics to Azure Data Explorer -# [[outputs.azure_data_explorer]] -# ## The URI property of the Azure Data Explorer resource on Azure -# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net -# endpoint_url = "" -# -# ## The Azure Data Explorer database that the metrics will be ingested into. -# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. -# ## ex: "exampledatabase" -# database = "" -# -# ## Timeout for Azure Data Explorer operations -# # timeout = "20s" -# -# ## Type of metrics grouping used when pushing to Azure Data Explorer. -# ## Default is "TablePerMetric" for one table per different metric. -# ## For more information, please check the plugin README. -# # metrics_grouping_type = "TablePerMetric" -# -# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). -# # table_name = "" -# -# ## Creates tables and relevant mapping if set to true(default). -# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. -# # create_tables = true -# -# ## Ingestion method to use. -# ## Available options are -# ## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below -# ## - queued -- queue up metrics data and process sequentially -# # ingestion_type = "queued" - - -# # Send aggregate metrics to Azure Monitor -# [[outputs.azure_monitor]] -# ## Timeout for HTTP writes. -# # timeout = "20s" -# -# ## Set the namespace prefix, defaults to "Telegraf/". -# # namespace_prefix = "Telegraf/" -# -# ## Azure Monitor doesn't have a string value type, so convert string -# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows -# ## a maximum of 10 dimensions so Telegraf will only send the first 10 -# ## alphanumeric dimensions. -# # strings_as_dimensions = false -# -# ## Both region and resource_id must be set or be available via the -# ## Instance Metadata service on Azure Virtual Machines. -# # -# ## Azure Region to publish metrics against. -# ## ex: region = "southcentralus" -# # region = "" -# # -# ## The Azure Resource ID against which metric will be logged, e.g. -# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" -# # resource_id = "" -# -# ## Optionally, if in Azure US Government, China, or other sovereign -# ## cloud environment, set the appropriate REST endpoint for receiving -# ## metrics. (Note: region may be unused in this context) -# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" - - -# # Configuration for Google Cloud BigQuery to send entries -# [[outputs.bigquery]] -# ## Credentials File -# credentials_file = "/path/to/service/account/key.json" -# -# ## Google Cloud Platform Project -# # project = "" -# -# ## The namespace for the metric descriptor -# dataset = "telegraf" -# -# ## Timeout for BigQuery operations. 
-# # timeout = "5s" -# -# ## Character to replace hyphens on Metric name -# # replace_hyphen_to = "_" -# -# ## Write all metrics in a single compact table -# # compact_table = "" - - -# ## Configuration to publish Telegraf metrics to Clarify -# [[outputs.clarify]] -# ## Credentials File (Oauth 2.0 from Clarify integration) -# credentials_file = "/path/to/clarify/credentials.json" -# -# ## Clarify username password (Basic Auth from Clarify integration) -# username = "i-am-bob" -# password = "secret-password" -# -# ## Timeout for Clarify operations -# # timeout = "20s" -# -# ## Optional tags to be included when generating the unique ID for a signal in Clarify -# # id_tags = [] -# # clarify_id_tag = 'clarify_input_id' - - -# # Publish Telegraf metrics to a Google Cloud PubSub topic -# [[outputs.cloud_pubsub]] -# ## Required. Name of Google Cloud Platform (GCP) Project that owns -# ## the given PubSub topic. -# project = "my-project" -# -# ## Required. Name of PubSub topic to publish metrics to. -# topic = "my-topic" -# -# ## Content encoding for message payloads, can be set to "gzip" or -# ## "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## Required. Data format to consume. -# ## Each data format has its own unique set of configuration options. -# ## Read more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# -# ## Optional. Filepath for GCP credentials JSON file to authorize calls to -# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use -# ## Application Default Credentials, which is preferred. -# # credentials_file = "path/to/my/creds.json" -# -# ## Optional. If true, will send all metrics per write in one PubSub message. -# # send_batched = true -# -# ## The following publish_* parameters specifically configures batching -# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read -# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings -# -# ## Optional. Send a request to PubSub (i.e. actually publish a batch) -# ## when it has this many PubSub messages. If send_batched is true, -# ## this is ignored and treated as if it were 1. -# # publish_count_threshold = 1000 -# -# ## Optional. Send a request to PubSub (i.e. actually publish a batch) -# ## when it has this many PubSub messages. If send_batched is true, -# ## this is ignored and treated as if it were 1 -# # publish_byte_threshold = 1000000 -# -# ## Optional. Specifically configures requests made to the PubSub API. -# # publish_num_go_routines = 2 -# -# ## Optional. Specifies a timeout for requests to the PubSub API. -# # publish_timeout = "30s" -# -# ## Optional. If true, published PubSub message data will be base64-encoded. -# # base64_data = false -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Optional. PubSub attributes to add to metrics. -# # [outputs.cloud_pubsub.attributes] -# # my_attr = "tag_value" - - -# # Configuration for AWS CloudWatch output. 
-# [[outputs.cloudwatch]] -# ## Amazon REGION -# region = "us-east-1" -# -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #web_identity_token_file = "" -# #role_session_name = "" -# #profile = "" -# #shared_credential_file = "" -# -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# -# ## Set http_proxy -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## Namespace for the CloudWatch MetricDatums -# namespace = "InfluxData/Telegraf" -# -# ## If you have a large amount of metrics, you should consider to send statistic -# ## values instead of raw metrics which could not only improve performance but -# ## also save AWS API cost. If enable this flag, this plugin would parse the required -# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. -# ## You could use basicstats aggregator to calculate those fields. If not all statistic -# ## fields are available, all fields would still be sent as raw metrics. -# # write_statistics = false -# -# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) -# # high_resolution_metrics = false - - -# # Configuration for AWS CloudWatchLogs output. -# [[outputs.cloudwatch_logs]] -# ## The region is the Amazon region that you wish to connect to. -# ## Examples include but are not limited to: -# ## - us-west-1 -# ## - us-west-2 -# ## - us-east-1 -# ## - ap-southeast-1 -# ## - ap-southeast-2 -# ## ... -# region = "us-east-1" -# -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #web_identity_token_file = "" -# #role_session_name = "" -# #profile = "" -# #shared_credential_file = "" -# -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# -# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! -# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place -# log_group = "my-group-name" -# -# ## Log stream in log group -# ## Either log group name or reference to metric attribute, from which it can be parsed: -# ## tag: or field:. If log stream is not exist, it will be created. -# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. 
empty log stream) -# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) -# log_stream = "tag:location" -# -# ## Source of log data - metric name -# ## specify the name of the metric, from which the log data should be retrieved. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_metric_name = "docker_log" -# log_data_metric_name = "docker_log" -# -# ## Specify from which metric attribute the log data should be retrieved: -# ## tag: or field:. -# ## I.e., if you are using docker_log plugin to stream logs from container, then -# ## specify log_data_source = "field:message" -# log_data_source = "field:message" - - -# # Configuration for CrateDB to send metrics to. -# [[outputs.cratedb]] -# # A github.com/jackc/pgx/v4 connection string. -# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig -# url = "postgres://user:password@localhost/schema?sslmode=disable" -# # Timeout for all CrateDB queries. -# timeout = "5s" -# # Name of the table to store metrics in. -# table = "metrics" -# # If true, and the metrics table does not exist, create it automatically. -# table_create = true -# # The character(s) to replace any '.' in an object key with -# key_separator = "_" - - -# # Configuration for DataDog API to send metrics to. -# [[outputs.datadog]] -# ## Datadog API key -# apikey = "my-secret-key" -# -# ## Connection timeout. -# # timeout = "5s" -# -# ## Write URL override; useful for debugging. -# # url = "https://app.datadoghq.com/api/v1/series" -# -# ## Set http_proxy -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## Override the default (none) compression used to send data. -# ## Supports: "zlib", "none" -# # compression = "none" - - -# # Send metrics to nowhere at all -# [[outputs.discard]] -# # no configuration - - -# # Send telegraf metrics to a Dynatrace environment -# [[outputs.dynatrace]] -# ## For usage with the Dynatrace OneAgent you can omit any configuration, -# ## the only requirement is that the OneAgent is running on the same host. -# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present. -# ## -# ## Your Dynatrace environment URL. -# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default) -# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest" -# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest" -# url = "" -# -# ## Your Dynatrace API token. -# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API -# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. -# api_token = "" -# -# ## Optional prefix for metric names (e.g.: "telegraf") -# prefix = "telegraf" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Optional flag for ignoring tls certificate check -# # insecure_skip_verify = false -# -# ## Connection timeout, defaults to "5s" if not set. 
-# timeout = "5s" -# -# ## If you want metrics to be treated and reported as delta counters, add the metric names here -# additional_counters = [ ] -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Optional dimensions to be added to every metric -# # [outputs.dynatrace.default_dimensions] -# # default_key = "default value" - - -# # Configuration for Elasticsearch to send metrics to. -# [[outputs.elasticsearch]] -# ## The full HTTP endpoint URL for your Elasticsearch instance -# ## Multiple urls can be specified as part of the same cluster, -# ## this means that only ONE of the urls will be written to each interval -# urls = [ "http://node1.es.example.com:9200" ] # required. -# ## Elasticsearch client timeout, defaults to "5s" if not set. -# timeout = "5s" -# ## Set to true to ask Elasticsearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option -# enable_sniffer = false -# ## Set to true to enable gzip compression -# enable_gzip = false -# ## Set the interval to check if the Elasticsearch nodes are available -# ## Setting to "0s" will disable the health check (not recommended in production) -# health_check_interval = "10s" -# ## Set the timeout for periodic health checks. -# # health_check_timeout = "1s" -# ## HTTP basic authentication details. -# ## HTTP basic authentication details -# # username = "telegraf" -# # password = "mypassword" -# ## HTTP bearer token authentication details -# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" -# -# ## Index Config -# ## The target index for metrics (Elasticsearch will create if it not exists). -# ## You can use the date specifiers below to create indexes per time frame. -# ## The metric timestamp will be used to decide the destination index name -# # %Y - year (2016) -# # %y - last two digits of year (00..99) -# # %m - month (01..12) -# # %d - day of month (e.g., 01) -# # %H - hour (00..23) -# # %V - week of the year (ISO week) (01..53) -# ## Additionally, you can specify a tag name using the notation {{tag_name}} -# ## which will be used as part of the index name. If the tag does not exist, -# ## the default tag value will be used. -# # index_name = "telegraf-{{host}}-%Y.%m.%d" -# # default_tag_value = "none" -# index_name = "telegraf-%Y.%m.%d" # required. -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Template Config -# ## Set to true if you want telegraf to manage its index template. -# ## If enabled it will create a recommended index template for telegraf indexes -# manage_template = true -# ## The template name used for telegraf indexes -# template_name = "telegraf" -# ## Set to true if you want telegraf to overwrite an existing template -# overwrite_template = false -# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string -# ## it will enable data resend and update metric points avoiding duplicated metrics with different id's -# force_document_id = false -# -# ## Specifies the handling of NaN and Inf values. 
-# ## This option can have the following values: -# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered -# ## drop -- drop fields containing NaNs or infs -# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) -# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number -# # float_handling = "none" -# # float_replacement_value = 0.0 -# -# ## Pipeline Config -# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. -# # use_pipeline = "my_pipeline" -# ## Additionally, you can specify a tag name using the notation {{tag_name}} -# ## which will be used as part of the pipeline name. If the tag does not exist, -# ## the default pipeline will be used as the pipeline. If no default pipeline is set, -# ## no pipeline is used for the metric. -# # use_pipeline = "{{es_pipeline}}" -# # default_pipeline = "my_pipeline" - - -# # Configuration for Event Hubs output plugin -# [[outputs.event_hubs]] -# ## The full connection string to the Event Hub (required) -# ## The shared access key must have "Send" permissions on the target Event Hub. -# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" -# -# ## Client timeout (defaults to 30s) -# # timeout = "30s" -# -# ## Partition key -# ## Metric tag or field name to use for the event partition key. The value of -# ## this tag or field is set as the key for events if it exists. If both, tag -# ## and field, exist the tag is preferred. -# # partition_key = "" -# -# ## Set the maximum batch message size in bytes -# ## The allowable size depends on the Event Hub tier -# ## See: https://learn.microsoft.com/azure/event-hubs/event-hubs-quotas#basic-vs-standard-vs-premium-vs-dedicated-tiers -# ## Setting this to 0 means using the default size from the Azure Event Hubs Client library (1000000 bytes) -# # max_message_size = 1000000 -# -# ## Data format to output. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# data_format = "json" - - -# # Send metrics to command as input over stdin -# [[outputs.exec]] -# ## Command to ingest metrics via stdin. -# command = ["tee", "-a", "/dev/null"] -# -# ## Environment variables -# ## Array of "key=value" pairs to pass as environment variables -# ## e.g. "KEY=value", "USERNAME=John Doe", -# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" -# # environment = [] -# -# ## Timeout for command to complete. -# # timeout = "5s" -# -# ## Whether the command gets executed once per metric, or once per metric batch -# ## The serializer will also run in batch mode when this is true. -# # use_batch_format = true -# -# ## Data format to output. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" - - -# # Run executable as long-running output plugin -# [[outputs.execd]] -# ## One program to run as daemon. -# ## NOTE: process and each argument should each be their own string -# command = ["my-telegraf-output", "--some-flag", "value"] -# -# ## Environment variables -# ## Array of "key=value" pairs to pass as environment variables -# ## e.g. 
"KEY=value", "USERNAME=John Doe", -# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" -# # environment = [] -# -# ## Delay before the process is restarted after an unexpected termination -# restart_delay = "10s" -# -# ## Flag to determine whether execd should throw error when part of metrics is unserializable -# ## Setting this to true will skip the unserializable metrics and process the rest of metrics -# ## Setting this to false will throw error when encountering unserializable metrics and none will be processed -# ## This setting does not apply when use_batch_format is set. -# # ignore_serialization_error = false -# -# ## Use batch serialization instead of per metric. The batch format allows for the -# ## production of batch output formats and may more efficiently encode and write metrics. -# # use_batch_format = false -# -# ## Data format to export. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# data_format = "influx" - - -# # Send telegraf metrics to file(s) -# [[outputs.file]] -# ## Files to write to, "stdout" is a specially handled file. -# files = ["stdout", "/tmp/metrics.out"] -# -# ## Use batch serialization format instead of line based delimiting. The -# ## batch format allows for the production of non line based output formats and -# ## may more efficiently encode and write metrics. -# # use_batch_format = false -# -# ## The file will be rotated after the time interval specified. When set -# ## to 0 no time based rotation is performed. -# # rotation_interval = "0h" -# -# ## The logfile will be rotated when it becomes larger than the specified -# ## size. When set to 0 no size based rotation is performed. -# # rotation_max_size = "0MB" -# -# ## Maximum number of rotated archives to keep, any older logs are deleted. -# ## If set to -1, no archives are removed. -# # rotation_max_archives = 5 -# -# ## Data format to output. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# data_format = "influx" -# -# ## Compress output data with the specified algorithm. -# ## If empty, compression will be disabled and files will be plain text. -# ## Supported algorithms are "zstd", "gzip" and "zlib". -# # compression_algorithm = "" -# -# ## Compression level for the algorithm above. -# ## Please note that different algorithms support different levels: -# ## zstd -- supports levels 1, 3, 7 and 11. -# ## gzip -- supports levels 0, 1 and 9. -# ## zlib -- supports levels 0, 1, and 9. -# ## By default the default compression level for each algorithm is used. -# # compression_level = -1 - - -# # Configuration for Graphite server to send metrics to -# [[outputs.graphite]] -# ## TCP endpoint for your graphite instance. -# ## If multiple endpoints are configured, the output will be load balanced. -# ## Only one of the endpoints will be written to with each iteration. -# servers = ["localhost:2003"] -# -# ## Local address to bind when connecting to the server -# ## If empty or not set, the local address is automatically chosen. 
-# # local_address = "" -# -# ## Prefix metrics name -# prefix = "" -# -# ## Graphite output template -# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# template = "host.tags.measurement.field" -# -# ## Strict sanitization regex -# ## This is the default sanitization regex that is used on data passed to the -# ## graphite serializer. Users can add additional characters here if required. -# ## Be aware that the characters, '/' '@' '*' are always replaced with '_', -# ## '..' is replaced with '.', and '\' is removed even if added to the -# ## following regex. -# # graphite_strict_sanitize_regex = '[^a-zA-Z0-9-:._=\p{L}]' -# -# ## Enable Graphite tags support -# # graphite_tag_support = false -# -# ## Applied sanitization mode when graphite tag support is enabled. -# ## * strict - uses the regex specified above -# ## * compatible - allows for greater number of characters -# # graphite_tag_sanitize_mode = "strict" -# -# ## Character for separating metric name and field for Graphite tags -# # graphite_separator = "." -# -# ## Graphite templates patterns -# ## 1. Template for cpu -# ## 2. Template for disk* -# ## 3. Default template -# # templates = [ -# # "cpu tags.measurement.host.field", -# # "disk* measurement.field", -# # "host.measurement.tags.field" -# #] -# -# ## timeout in seconds for the write connection to graphite -# # timeout = "2s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Send telegraf metrics to graylog -# [[outputs.graylog]] -# ## Endpoints for your graylog instances. -# servers = ["udp://127.0.0.1:12201"] -# -# ## Connection timeout. -# # timeout = "5s" -# -# ## The field to use as the GELF short_message, if unset the static string -# ## "telegraf" will be used. -# ## example: short_message_field = "message" -# # short_message_field = "" -# -# ## According to GELF payload specification, additional fields names must be prefixed -# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. -# ## Set to true for backward compatibility. -# # name_field_no_prefix = false -# -# ## Connection retry options -# ## Attempt to connect to the endpoints if the initial connection fails. -# ## If 'false', Telegraf will give up after 3 connection attempt and will -# ## exit with an error. If set to 'true', the plugin will retry to connect -# ## to the unconnected endpoints infinitely. -# # connection_retry = false -# ## Time to wait between connection retry attempts. -# # connection_retry_wait_time = "15s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Send telegraf metrics to GroundWork Monitor -# [[outputs.groundwork]] -# ## URL of your groundwork instance. -# url = "https://groundwork.example.com" -# -# ## Agent uuid for GroundWork API Server. -# agent_id = "" -# -# ## Username and password to access GroundWork API. -# username = "" -# password = "" -# -# ## Default application type to use in GroundWork client -# # default_app_type = "TELEGRAF" -# -# ## Default display name for the host with services(metrics). -# # default_host = "telegraf" -# -# ## Default service state. 
-# # default_service_state = "SERVICE_OK" -# -# ## The name of the tag that contains the hostname. -# # resource_tag = "host" -# -# ## The name of the tag that contains the host group name. -# # group_tag = "group" - - -# # Configurable HTTP health check resource based on metrics -# [[outputs.health]] -# ## Address and port to listen on. -# ## ex: service_address = "http://localhost:8080" -# ## service_address = "unix:///var/run/telegraf-health.sock" -# # service_address = "http://:8080" -# -# ## The maximum duration for reading the entire request. -# # read_timeout = "5s" -# ## The maximum duration for writing the entire response. -# # write_timeout = "5s" -# -# ## Username and password to accept for HTTP basic authentication. -# # basic_username = "user1" -# # basic_password = "secret" -# -# ## Allowed CA certificates for client certificates. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## TLS server certificate and private key. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## One or more check sub-tables should be defined, it is also recommended to -# ## use metric filtering to limit the metrics that flow into this output. -# ## -# ## When using the default buffer sizes, this example will fail when the -# ## metric buffer is half full. -# ## -# ## namepass = ["internal_write"] -# ## tagpass = { output = ["influxdb"] } -# ## -# ## [[outputs.health.compares]] -# ## field = "buffer_size" -# ## lt = 5000.0 -# ## -# ## [[outputs.health.contains]] -# ## field = "buffer_size" - - -# # A plugin that can transmit metrics over HTTP -# [[outputs.http]] -# ## URL is the address to send metrics to -# url = "http://127.0.0.1:8080/telegraf" -# -# ## Timeout for HTTP message -# # timeout = "5s" -# -# ## HTTP method, one of: "POST" or "PUT" or "PATCH" -# # method = "POST" -# -# ## HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## OAuth2 Client Credentials Grant -# # client_id = "clientid" -# # client_secret = "secret" -# # token_url = "https://indentityprovider/oauth2/v1/token" -# # audience = "" -# # scopes = ["urn:opc:idm:__myscopes__"] -# -# ## Goole API Auth -# # google_application_credentials = "/etc/telegraf/example_secret.json" -# -# ## HTTP Proxy support -# # use_system_proxy = false -# # http_proxy_url = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional Cookie authentication -# # cookie_auth_url = "https://localhost/authMe" -# # cookie_auth_method = "POST" -# # cookie_auth_username = "username" -# # cookie_auth_password = "pa$$word" -# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' -# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' -# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie -# # cookie_auth_renewal = "5m" -# -# ## Data format to output. 
-# ## Each data format has it's own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" -# -# ## Use batch serialization format (default) instead of line based format. -# ## Batch format is more efficient and should be used unless line based -# ## format is really needed. -# # use_batch_format = true -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## MaxIdleConns controls the maximum number of idle (keep-alive) -# ## connections across all hosts. Zero means no limit. -# # max_idle_conn = 0 -# -# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle -# ## (keep-alive) connections to keep per-host. If zero, -# ## DefaultMaxIdleConnsPerHost is used(2). -# # max_idle_conn_per_host = 2 -# -# ## Idle (keep-alive) connection timeout. -# ## Maximum amount of time before idle connection is closed. -# ## Zero means no limit. -# # idle_conn_timeout = 0 -# -# ## Amazon Region -# #region = "us-east-1" -# -# ## Amazon Credentials -# ## Amazon Credentials are not built unless the following aws_service -# ## setting is set to a non-empty string. It may need to match the name of -# ## the service output to as well -# #aws_service = "execute-api" -# -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #web_identity_token_file = "" -# #role_session_name = "" -# #profile = "" -# #shared_credential_file = "" -# -# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried -# # non_retryable_statuscodes = [409, 413] -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Additional HTTP headers -# # [outputs.http.headers] -# # ## Should be set manually to "application/json" for json data_format -# # Content-Type = "text/plain; charset=utf-8" - - -# # Configuration for sending metrics to InfluxDB -# [[outputs.influxdb]] -# ## The full HTTP or UDP URL for your InfluxDB instance. -# ## -# ## Multiple URLs can be specified for a single cluster, only ONE of the -# ## urls will be written to each interval. -# # urls = ["unix:///var/run/influxdb.sock"] -# # urls = ["udp://127.0.0.1:8089"] -# # urls = ["http://127.0.0.1:8086"] -# -# ## The target database for metrics; will be created as needed. -# ## For UDP url endpoint database needs to be configured on server side. -# # database = "telegraf" -# -# ## The value of this tag will be used to determine the database. If this -# ## tag is not set the 'database' option is used as the default. -# # database_tag = "" -# -# ## If true, the 'database_tag' will not be included in the written metric. -# # exclude_database_tag = false -# -# ## If true, no CREATE DATABASE queries will be sent. Set to true when using -# ## Telegraf with a user without permissions to create databases or when the -# ## database already exists. 
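-# ## (Illustrative scenario: a managed InfluxDB where the "telegraf" database
-# ## was pre-created and the write-only user may not run CREATE DATABASE;
-# ## set skip_database_creation = true in that case.)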
-# # skip_database_creation = false -# -# ## Name of existing retention policy to write to. Empty string writes to -# ## the default retention policy. Only takes effect when using HTTP. -# # retention_policy = "" -# -# ## The value of this tag will be used to determine the retention policy. If this -# ## tag is not set the 'retention_policy' option is used as the default. -# # retention_policy_tag = "" -# -# ## If true, the 'retention_policy_tag' will not be included in the written metric. -# # exclude_retention_policy_tag = false -# -# ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". -# ## Only takes effect when using HTTP. -# # write_consistency = "any" -# -# ## Timeout for HTTP messages. -# # timeout = "5s" -# -# ## HTTP Basic Auth -# # username = "telegraf" -# # password = "metricsmetricsmetricsmetrics" -# -# ## HTTP User-Agent -# # user_agent = "telegraf" -# -# ## UDP payload size is the maximum packet size to send. -# # udp_payload = "512B" -# -# ## Optional TLS Config for use on HTTP connections. -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## HTTP Proxy override, if unset values the standard proxy environment -# ## variables are consulted to determine which proxy, if any, should be used. -# # http_proxy = "http://corporate.proxy:3128" -# -# ## Additional HTTP headers -# # http_headers = {"X-Special-Header" = "Special-Value"} -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "gzip" -# -# ## When true, Telegraf will output unsigned integers as unsigned values, -# ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned -# ## integer values. Enabling this option will result in field type errors if -# ## existing data has been written. -# # influx_uint_support = false - - -# # Configuration for sending metrics to an Instrumental project -# [[outputs.instrumental]] -# ## Project API Token (required) -# api_token = "API Token" # required -# ## Prefix the metrics with a given name -# prefix = "" -# ## Stats output template (Graphite formatting) -# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite -# template = "host.tags.measurement.field" -# ## Timeout in seconds to connect -# timeout = "2s" -# ## Debug true - Print communication to Instrumental -# debug = false - - -# # Save metrics to an IoTDB Database -# [[outputs.iotdb]] -# ## Configuration of IoTDB server connection -# host = "127.0.0.1" -# # port = "6667" -# -# ## Configuration of authentication -# # user = "root" -# # password = "root" -# -# ## Timeout to open a new session. -# ## A value of zero means no timeout. -# # timeout = "5s" -# -# ## Configuration of type conversion for 64-bit unsigned int -# ## IoTDB currently DOES NOT support unsigned integers (version 13.x). -# ## 32-bit unsigned integers are safely converted into 64-bit signed integers by the plugin, -# ## however, this is not true for 64-bit values in general as overflows may occur. -# ## The following setting allows to specify the handling of 64-bit unsigned integers. 
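-# ## (Worked example: the uint64 value 2^63 = 9,223,372,036,854,775,808 exceeds
-# ## the int64 maximum of 9,223,372,036,854,775,807 by one, so "int64" would
-# ## overflow while "int64_clip" pins it to that maximum.)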
-# ## Available values are:
-# ## - "int64" -- convert to 64-bit signed integers and accept overflows
-# ## - "int64_clip" -- convert to 64-bit signed integers and clip the values on overflow to 9,223,372,036,854,775,807
-# ## - "text" -- convert to the string representation of the value
-# # uint64_conversion = "int64_clip"
-#
-# ## Configuration of TimeStamp
-# ## TimeStamp is always saved as a 64-bit int. timestamp_precision specifies the unit of the timestamp.
-# ## Available values:
-# ## "second", "millisecond", "microsecond", "nanosecond" (default)
-# # timestamp_precision = "nanosecond"
-#
-# ## Handling of tags
-# ## Tags are not fully supported by IoTDB.
-# ## A guide with suggestions on how to handle tags can be found here:
-# ## https://iotdb.apache.org/UserGuide/Master/API/InfluxDB-Protocol.html
-# ##
-# ## Available values are:
-# ## - "fields" -- convert tags to fields in the measurement
-# ## - "device_id" -- attach tags to the device ID
-# ##
-# ## For example, a metric named "root.sg.device" with the tags `tag1: "private"` and `tag2: "working"` and
-# ## fields `s1: 100` and `s2: "hello"` will result in the following representations in IoTDB
-# ## - "fields" -- root.sg.device, s1=100, s2="hello", tag1="private", tag2="working"
-# ## - "device_id" -- root.sg.device.private.working, s1=100, s2="hello"
-# # convert_tags_to = "device_id"
-#
-# ## Handling of unsupported characters
-# ## Some characters in different versions of IoTDB are not supported in path names
-# ## A guide with suggestions on valid paths can be found here:
-# ## for iotdb 0.13.x -> https://iotdb.apache.org/UserGuide/V0.13.x/Reference/Syntax-Conventions.html#identifiers
-# ## for iotdb 1.x.x and above -> https://iotdb.apache.org/UserGuide/V1.3.x/User-Manual/Syntax-Rule.html#identifier
-# ##
-# ## Available values are:
-# ## - "1.0", "1.1", "1.2", "1.3" -- enclose in `` the word having forbidden characters
-# ## such as @ $ # : [ ] { } ( ) space
-# ## - "0.13" -- enclose in `` the word having forbidden characters
-# ## such as space
-# ##
-# ## Keep this section commented if you don't want to sanitize the path


-# # Configuration for the Kafka server to send metrics to
-# [[outputs.kafka]]
-# ## URLs of kafka brokers
-# ## The brokers listed here are used to connect to collect metadata about a
-# ## cluster. However, once the initial metadata collection is completed, telegraf
-# ## will communicate solely with the kafka leader and not all defined brokers.
-# brokers = ["localhost:9092"]
-#
-# ## Kafka topic for producer messages
-# topic = "telegraf"
-#
-# ## The value of this tag will be used as the topic. If not set the 'topic'
-# ## option is used.
-# # topic_tag = ""
-#
-# ## If true, the 'topic_tag' will be removed from the metric.
-# # exclude_topic_tag = false
-#
-# ## Optional Client id
-# # client_id = "Telegraf"
-#
-# ## Set the minimal supported Kafka version. Setting this enables the use of new
-# ## Kafka features and APIs. Of particular interest, lz4 compression
-# ## requires at least version 0.10.0.0.
-# ## ex: version = "1.1.0"
-# # version = ""
-#
-# ## The routing tag specifies a tag key on the metric whose value is used as
-# ## the message key. The message key is used to determine which partition to
-# ## send the message to. This tag is preferred over the routing_key option.
-# routing_tag = "host"
-#
-# ## The routing key is set as the message key and used to determine which
-# ## partition to send the message to.
This value is only used when no -# ## routing_tag is set or as a fallback when the tag specified in routing tag -# ## is not found. -# ## -# ## If set to "random", a random value will be generated for each message. -# ## -# ## When unset, no message key is added and each message is routed to a random -# ## partition. -# ## -# ## ex: routing_key = "random" -# ## routing_key = "telegraf" -# # routing_key = "" -# -# ## Compression codec represents the various compression codecs recognized by -# ## Kafka in messages. -# ## 0 : None -# ## 1 : Gzip -# ## 2 : Snappy -# ## 3 : LZ4 -# ## 4 : ZSTD -# # compression_codec = 0 -# -# ## Idempotent Writes -# ## If enabled, exactly one copy of each message is written. -# # idempotent_writes = false -# -# ## RequiredAcks is used in Produce Requests to tell the broker how many -# ## replica acknowledgements it must see before responding -# ## 0 : the producer never waits for an acknowledgement from the broker. -# ## This option provides the lowest latency but the weakest durability -# ## guarantees (some data will be lost when a server fails). -# ## 1 : the producer gets an acknowledgement after the leader replica has -# ## received the data. This option provides better durability as the -# ## client waits until the server acknowledges the request as successful -# ## (only messages that were written to the now-dead leader but not yet -# ## replicated will be lost). -# ## -1: the producer gets an acknowledgement after all in-sync replicas have -# ## received the data. This option provides the best durability, we -# ## guarantee that no messages will be lost as long as at least one in -# ## sync replica remains. -# # required_acks = -1 -# -# ## The maximum number of times to retry sending a metric before failing -# ## until the next flush. -# # max_retry = 3 -# -# ## The maximum permitted size of a message. Should be set equal to or -# ## smaller than the broker's 'message.max.bytes'. -# # max_message_bytes = 1000000 -# -# ## Optional TLS Config -# # enable_tls = false -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Period between keep alive probes. -# ## Defaults to the OS configuration if not specified or zero. -# # keep_alive_period = "15s" -# -# ## Optional SOCKS5 proxy to use when connecting to brokers -# # socks5_enabled = true -# # socks5_address = "127.0.0.1:1080" -# # socks5_username = "alice" -# # socks5_password = "pass123" -# -# ## Optional SASL Config -# # sasl_username = "kafka" -# # sasl_password = "secret" -# -# ## Optional SASL: -# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI -# ## (defaults to PLAIN) -# # sasl_mechanism = "" -# -# ## used if sasl_mechanism is GSSAPI -# # sasl_gssapi_service_name = "" -# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH -# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" -# # sasl_gssapi_kerberos_config_path = "/" -# # sasl_gssapi_realm = "realm" -# # sasl_gssapi_key_tab_path = "" -# # sasl_gssapi_disable_pafxfast = false -# -# ## Access token used if sasl_mechanism is OAUTHBEARER -# # sasl_access_token = "" -# -# ## Arbitrary key value string pairs to pass as a TOML table. For example: -# # {logicalCluster = "cluster-042", poolId = "pool-027"} -# # sasl_extensions = {} -# -# ## SASL protocol version. When connecting to Azure EventHub set to 0. 
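-# ## ex: sasl_version = 0 (per the note above, for Azure Event Hubs)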
-# # sasl_version = 1
-#
-# # Disable Kafka metadata full fetch
-# # metadata_full = false
-#
-# ## Data format to output.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# # data_format = "influx"
-#
-# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
-# ## plugin definition, otherwise additional config options are read as part of
-# ## the table
-#
-# ## Optional topic suffix configuration.
-# ## If the section is omitted, no suffix is used.
-# ## The following topic suffix methods are supported:
-# ## measurement - suffix equals separator + measurement's name
-# ## tags - suffix equals separator + specified tags' values
-# ## interleaved with separator
-#
-# ## Suffix equals "_" + measurement name
-# # [outputs.kafka.topic_suffix]
-# # method = "measurement"
-# # separator = "_"
-#
-# ## Suffix equals "__" + measurement's "foo" tag value.
-# ## If there is no such tag, the suffix is an empty string
-# # [outputs.kafka.topic_suffix]
-# # method = "tags"
-# # keys = ["foo"]
-# # separator = "__"
-#
-# ## Suffix equals "_" + measurement's "foo" and "bar"
-# ## tag values, separated by "_". If there are no such tags,
-# ## their values are treated as empty strings.
-# # [outputs.kafka.topic_suffix]
-# # method = "tags"
-# # keys = ["foo", "bar"]
-# # separator = "_"


-# # Configuration for the AWS Kinesis output.
-# [[outputs.kinesis]]
-# ## Amazon REGION of kinesis endpoint.
-# region = "ap-southeast-2"
-#
-# ## Amazon Credentials
-# ## Credentials are loaded in the following order
-# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
-# ## 2) Assumed credentials via STS if role_arn is specified
-# ## 3) explicit credentials from 'access_key' and 'secret_key'
-# ## 4) shared profile from 'profile'
-# ## 5) environment variables
-# ## 6) shared credentials file
-# ## 7) EC2 Instance Profile
-# #access_key = ""
-# #secret_key = ""
-# #token = ""
-# #role_arn = ""
-# #web_identity_token_file = ""
-# #role_session_name = ""
-# #profile = ""
-# #shared_credential_file = ""
-#
-# ## Endpoint to make requests against; the correct endpoint is automatically
-# ## determined and this option should only be set if you wish to override the
-# ## default.
-# ## ex: endpoint_url = "http://localhost:8000"
-# # endpoint_url = ""
-#
-# ## Kinesis StreamName must exist prior to starting telegraf.
-# streamname = "StreamName"
-#
-# ## Data format to output.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# data_format = "influx"
-#
-# ## debug will show upstream aws messages.
-# debug = false -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## The partition key can be calculated using one of several methods: -# ## -# ## Use a static value for all writes: -# # [outputs.kinesis.partition] -# # method = "static" -# # key = "howdy" -# # -# ## Use a random partition key on each write: -# # [outputs.kinesis.partition] -# # method = "random" -# # -# ## Use the measurement name as the partition key: -# # [outputs.kinesis.partition] -# # method = "measurement" -# # -# ## Use the value of a tag for all writes, if the tag is not set the empty -# ## default option will be used. When no default, defaults to "telegraf" -# # [outputs.kinesis.partition] -# # method = "tag" -# # key = "host" -# # default = "mykey" - - -# # Configuration for Librato API to send metrics to. -# [[outputs.librato]] -# ## Librato API Docs -# ## http://dev.librato.com/v1/metrics-authentication -# ## Librato API user -# api_user = "telegraf@influxdb.com" # required. -# ## Librato API token -# api_token = "my-secret-token" # required. -# ## Debug -# # debug = false -# ## Connection timeout. -# # timeout = "5s" -# ## Output source Template (same as graphite buckets) -# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite -# ## This template is used in librato's source (not metric's name) -# template = "host" - - -# # A plugin that can send metrics over HTTPs to Logz.io -# [[outputs.logzio]] -# ## Connection timeout, defaults to "5s" if not set. -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Logz.io account token -# token = "your logz.io token" # required -# -# ## Use your listener URL for your Logz.io account region. -# # url = "https://listener.logz.io:8071" - - -# # A plugin that can transmit logs to Loki -# [[outputs.loki]] -# ## The domain of Loki -# domain = "https://loki.domain.tld" -# -# ## Endpoint to write api -# # endpoint = "/loki/api/v1/push" -# -# ## Connection timeout, defaults to "5s" if not set. -# # timeout = "5s" -# -# ## Basic auth credential -# # username = "loki" -# # password = "pass" -# -# ## Additional HTTP headers -# # http_headers = {"X-Scope-OrgID" = "1"} -# -# ## If the request must be gzip encoded -# # gzip_request = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Metric Name Label -# ## Label to use for the metric name to when sending metrics. If set to an -# ## empty string, this will not add the label. This is NOT suggested as there -# ## is no way to differentiate between multiple metrics. 
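-# ## (For example, a "cpu" and a "mem" metric sharing the same tag set would
-# ## otherwise map to the same Loki stream and become indistinguishable.)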
-# # metric_name_label = "__name" - - -# # A plugin that can transmit logs to mongodb -# [[outputs.mongodb]] -# # connection string examples for mongodb -# dsn = "mongodb://localhost:27017" -# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" -# -# # overrides serverSelectionTimeoutMS in dsn if set -# # timeout = "30s" -# -# # default authentication, optional -# # authentication = "NONE" -# -# # for SCRAM-SHA-256 authentication -# # authentication = "SCRAM" -# # username = "root" -# # password = "***" -# -# # for x509 certificate authentication -# # authentication = "X509" -# # tls_ca = "ca.pem" -# # tls_key = "client.pem" -# # # tls_key_pwd = "changeme" # required for encrypted tls_key -# # insecure_skip_verify = false -# -# # database to store measurements and time series collections -# # database = "telegraf" -# -# # granularity can be seconds, minutes, or hours. -# # configuring this value will be based on your input collection frequency. -# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection -# # granularity = "seconds" -# -# # optionally set a TTL to automatically expire documents from the measurement collections. -# # ttl = "360h" - - -# # Configuration for MQTT server to send metrics to -# [[outputs.mqtt]] -# ## MQTT Brokers -# ## The list of brokers should only include the hostname or IP address and the -# ## port to the broker. This should follow the format `[{scheme}://]{host}:{port}`. For -# ## example, `localhost:1883` or `mqtt://localhost:1883`. -# ## Scheme can be any of the following: tcp://, mqtt://, tls://, mqtts:// -# ## non-TLS and TLS servers can not be mix-and-matched. -# servers = ["localhost:1883", ] # or ["mqtts://tls.example.com:1883"] -# -# ## Protocol can be `3.1.1` or `5`. Default is `3.1.1` -# # protocol = "3.1.1" -# -# ## MQTT Topic for Producer Messages -# ## MQTT outputs send metrics to this topic format: -# ## {{ .TopicPrefix }}/{{ .Hostname }}/{{ .PluginName }}/{{ .Tag "tag_key" }} -# ## (e.g. prefix/web01.example.com/mem/some_tag_value) -# ## Each path segment accepts either a template placeholder, an environment variable, or a tag key -# ## of the form `{{.Tag "tag_key_name"}}`. Empty path elements as well as special MQTT characters -# ## (such as `+` or `#`) are invalid to form the topic name and will lead to an error. -# ## In case a tag is missing in the metric, that path segment omitted for the final topic. -# topic = "telegraf/{{ .Hostname }}/{{ .PluginName }}" -# -# ## QoS policy for messages -# ## The mqtt QoS policy for sending messages. -# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm -# ## 0 = at most once -# ## 1 = at least once -# ## 2 = exactly once -# # qos = 2 -# -# ## Keep Alive -# ## Defines the maximum length of time that the broker and client may not -# ## communicate. Defaults to 0 which turns the feature off. -# ## -# ## For version v2.0.12 and later mosquitto there is a bug -# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires -# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. -# # keep_alive = 0 -# -# ## username and password to connect MQTT server. -# # username = "telegraf" -# # password = "metricsmetricsmetricsmetrics" -# -# ## client ID -# ## The unique client id to connect MQTT server. If this parameter is not set -# ## then a random ID is generated. -# # client_id = "" -# -# ## Timeout for write operations. 
default: 5s
-# # timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-#
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
-# ## metrics are written one metric per MQTT message.
-# ## DEPRECATED: Use layout option instead
-# # batch = false
-#
-# ## When true, the metric will have the RETAIN flag set, making the broker
-# ## cache the entry until someone actually reads it
-# # retain = false
-#
-# ## Layout of the topics published.
-# ## The following choices are available:
-# ## non-batch -- send individual messages, one for each metric
-# ## batch -- send all metrics as a single message per MQTT topic
-# ## NOTE: The following options will ignore the 'data_format' option and send single values
-# ## field -- send individual messages for each field, appending its name to the metric topic
-# ## homie-v4 -- send metrics with fields and tags according to the 4.0.0 specs
-# ## see https://homieiot.github.io/specification/
-# # layout = "non-batch"
-#
-# ## HOMIE specific settings
-# ## The following options provide templates for setting the device name
-# ## and the node-ID for the topics. Both options are MANDATORY and can contain
-# ## {{ .PluginName }} (metric name), {{ .Tag "key"}} (tag reference to 'key')
-# ## or constant strings. The templates MAY NOT contain slashes!
-# # homie_device_name = ""
-# # homie_node_id = ""
-#
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# data_format = "influx"
-#
-# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
-# ## plugin definition, otherwise additional config options are read as part of
-# ## the table
-#
-# ## Optional MQTT 5 publish properties
-# ## These settings only apply if the "protocol" property is set to 5. This must
-# ## be defined at the end of the plugin settings, otherwise TOML will assume
-# ## anything else is part of this table. For more details on publish properties
-# ## see the spec:
-# ## https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901109
-# # [outputs.mqtt.v5]
-# # content_type = ""
-# # response_topic = ""
-# # message_expiry = "0s"
-# # topic_alias = 0
-# # [outputs.mqtt.v5.user_properties]
-# # "key1" = "value 1"
-# # "key2" = "value 2"


-# # Send telegraf measurements to NATS
-# [[outputs.nats]]
-# ## URLs of NATS servers
-# servers = ["nats://localhost:4222"]
-#
-# ## Optional client name
-# # name = ""
-#
-# ## Optional credentials
-# # username = ""
-# # password = ""
-#
-# ## Optional NATS 2.0 and NATS NGS compatible user credentials
-# # credentials = "/etc/telegraf/nats.creds"
-#
-# ## NATS subject for producer messages
-# ## For jetstream this is also the subject where messages will be published
-# subject = "telegraf"
-#
-# ## Use Transport Layer Security
-# # secure = false
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Data format to output.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# data_format = "influx"
-#
-# ## Jetstream specific configuration. If set, a Jetstream context is assumed.
-# ## Since this is a table, it should be present at the end of the plugin section. Alternatively, you can use the inline table format.
-# # [outputs.nats.jetstream]
-# ## Name of the stream, required when using jetstream. Telegraf will
-# ## use the union of the above subject and the subjects array below.
-# # name = ""
-# # subjects = []
-#
-# ## Full jetstream create stream config, refer: https://docs.nats.io/nats-concepts/jetstream/streams
-# # retention = "limits"
-# # max_consumers = -1
-# # max_msgs_per_subject = -1
-# # max_msgs = -1
-# # max_bytes = -1
-# # max_age = 0
-# # max_msg_size = -1
-# # storage = "file"
-# # discard = "old"
-# # num_replicas = 1
-# # duplicate_window = 120000000000
-# # sealed = false
-# # deny_delete = false
-# # deny_purge = false
-# # allow_rollup_hdrs = false
-# # allow_direct = true
-# # mirror_direct = false


-# # Send aggregated metrics to Nebius.Cloud Monitoring
-# [[outputs.nebius_cloud_monitoring]]
-# ## Timeout for HTTP writes.
-# # timeout = "20s"
-#
-# ## Nebius.Cloud monitoring API endpoint. Normally should not be changed


-# # Send metrics to New Relic metrics endpoint
-# [[outputs.newrelic]]
-# ## The 'insights_key' parameter requires a NR license key.
-# ## New Relic recommends you create one
-# ## with a convenient name such as TELEGRAF_INSERT_KEY.
-# ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
-# # insights_key = "New Relic License Key Here"
-#
-# ## Prefix to add to the metric name for easy identification.
-# ## This is very useful if your metric names are ambiguous.
-# # metric_prefix = ""
-#
-# ## Timeout for writes to the New Relic API.
-# # timeout = "15s"
-#
-# ## HTTP Proxy override. If unset, the standard proxy environment variables
-# ## are consulted to determine the proxy, if any.
-# # http_proxy = "http://corporate.proxy:3128"
-#
-# ## Metric URL override to enable geographic location endpoints.
-# ## If not set, the standard endpoint below is used.
-# # metric_url = "https://metric-api.newrelic.com/metric/v1"


-# # Send telegraf measurements to NSQD
-# [[outputs.nsq]]
-# ## Location of nsqd instance listening on TCP
-# server = "localhost:4150"
-# ## NSQ topic for producer messages
-# topic = "telegraf"
-#
-# ## Data format to output.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
-# data_format = "influx"


-# # Configuration for OpenSearch to send metrics to.
-# [[outputs.opensearch]]
-# ## URLs
-# ## The full HTTP endpoint URL for your OpenSearch instance. Multiple URLs can
-# ## be specified as part of the same cluster, but only one URL is used to
-# ## write during each interval.
-# urls = ["http://node1.os.example.com:9200"]
-#
-# ## Index Name
-# ## Target index name for metrics (OpenSearch will create it if it does not exist).
-# ## This is a Golang template (see https://pkg.go.dev/text/template)
-# ## You can also specify
-# ## metric name (`{{.Name}}`), tag value (`{{.Tag "tag_name"}}`), field value (`{{.Field "field_name"}}`)
-# ## If the tag does not exist, the default tag value will be empty string "".
-# ## the timestamp (`{{.Time.Format "xxxxxxxxx"}}`). -# ## For example: "telegraf-{{.Time.Format \"2006-01-02\"}}-{{.Tag \"host\"}}" would set it to telegraf-2023-07-27-HostName -# index_name = "" -# -# ## Timeout -# ## OpenSearch client timeout -# # timeout = "5s" -# -# ## Sniffer -# ## Set to true to ask OpenSearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option -# # enable_sniffer = false -# -# ## GZIP Compression -# ## Set to true to enable gzip compression -# # enable_gzip = false -# -# ## Health Check Interval -# ## Set the interval to check if the OpenSearch nodes are available -# ## Setting to "0s" will disable the health check (not recommended in production) -# # health_check_interval = "10s" -# -# ## Set the timeout for periodic health checks. -# # health_check_timeout = "1s" -# ## HTTP basic authentication details. -# # username = "" -# # password = "" -# ## HTTP bearer token authentication details -# # auth_bearer_token = "" -# -# ## Optional TLS Config -# ## Set to true/false to enforce TLS being enabled/disabled. If not set, -# ## enable TLS only if any of the other options are specified. -# # tls_enable = -# ## Trusted root certificates for server -# # tls_ca = "/path/to/cafile" -# ## Used for TLS client certificate authentication -# # tls_cert = "/path/to/certfile" -# ## Used for TLS client certificate authentication -# # tls_key = "/path/to/keyfile" -# ## Send the specified TLS server name via SNI -# # tls_server_name = "kubernetes.example.com" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Template Config -# ## Manage templates -# ## Set to true if you want telegraf to manage its index template. -# ## If enabled it will create a recommended index template for telegraf indexes -# # manage_template = true -# -# ## Template Name -# ## The template name used for telegraf indexes -# # template_name = "telegraf" -# -# ## Overwrite Templates -# ## Set to true if you want telegraf to overwrite an existing template -# # overwrite_template = false -# -# ## Document ID -# ## If set to true a unique ID hash will be sent as -# ## sha256(concat(timestamp,measurement,series-hash)) string. It will enable -# ## data resend and update metric points avoiding duplicated metrics with -# ## different id's -# # force_document_id = false -# -# ## Value Handling -# ## Specifies the handling of NaN and Inf values. -# ## This option can have the following values: -# ## none -- do not modify field-values (default); will produce an error -# ## if NaNs or infs are encountered -# ## drop -- drop fields containing NaNs or infs -# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) -# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number -# # float_handling = "none" -# # float_replacement_value = 0.0 -# -# ## Pipeline Config -# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. -# # use_pipeline = "my_pipeline" -# -# ## Pipeline Name -# ## Additionally, you can specify a tag name using the notation (`{{.Tag "tag_name"}}`) -# ## which will be used as the pipeline name (e.g. "{{.Tag "os_pipeline"}}"). -# ## If the tag does not exist, the default pipeline will be used as the pipeline. -# ## If no default pipeline is set, no pipeline is used for the metric. 
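-# ## (Illustrative resolution order: a metric tagged os_pipeline="enrich" uses
-# ## the "enrich" pipeline; an untagged metric falls back to default_pipeline;
-# ## with an empty default it is indexed without any pipeline.)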
-# # default_pipeline = "" - - -# # Send OpenTelemetry metrics over gRPC -# [[outputs.opentelemetry]] -# ## Override the default (localhost:4317) OpenTelemetry gRPC service -# ## address:port -# # service_address = "localhost:4317" -# -# ## Override the default (5s) request timeout -# # timeout = "5s" -# -# ## Optional TLS Config. -# ## -# ## Root certificates for verifying server certificates encoded in PEM format. -# # tls_ca = "/etc/telegraf/ca.pem" -# ## The public and private key pairs for the client encoded in PEM format. -# ## May contain intermediate certificates. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS, but skip TLS chain and host verification. -# # insecure_skip_verify = false -# ## Send the specified TLS server name via SNI. -# # tls_server_name = "foo.example.com" -# -# ## Override the default (gzip) compression used to send data. -# ## Supports: "gzip", "none" -# # compression = "gzip" -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Configuration options for the Coralogix dialect -# ## Enable the following section of you use this plugin with a Coralogix endpoint -# # [outputs.opentelemetry.coralogix] -# # ## Your Coralogix private key (required). -# # ## Please note that this is sensitive data! -# # private_key = "your_coralogix_key" -# # -# # ## Application and subsystem names for the metrics (required) -# # application = "$NAMESPACE" -# # subsystem = "$HOSTNAME" -# -# ## Additional OpenTelemetry resource attributes -# # [outputs.opentelemetry.attributes] -# # "service.name" = "demo" -# -# ## Additional gRPC request metadata -# # [outputs.opentelemetry.headers] -# # key1 = "value1" - - -# # Configuration for OpenTSDB server to send metrics to -# [[outputs.opentsdb]] -# ## prefix for metrics keys -# prefix = "my.specific.prefix." -# -# ## DNS name of the OpenTSDB server -# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the -# ## telnet API. "http://opentsdb.example.com" will use the Http API. -# host = "opentsdb.example.com" -# -# ## Port of the OpenTSDB server -# port = 4242 -# -# ## Number of data points to send to OpenTSDB in Http requests. -# ## Not used with telnet API. -# http_batch_size = 50 -# -# ## URI Path for Http requests to OpenTSDB. -# ## Used in cases where OpenTSDB is located behind a reverse proxy. -# http_path = "/api/put" -# -# ## Debug true - Prints OpenTSDB communication -# debug = false -# -# ## Separator separates measurement name from field -# separator = "_" - - -# # Publishes metrics to a postgresql database -# [[outputs.postgresql]] -# ## Specify connection address via the standard libpq connection string: -# ## host=... user=... password=... sslmode=... dbname=... -# ## Or a URL: -# ## postgres://[user[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] -# ## See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING -# ## -# ## All connection parameters are optional. Environment vars are also supported. -# ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE -# ## All supported vars can be found here: -# ## https://www.postgresql.org/docs/current/libpq-envars.html -# ## -# ## Non-standard parameters: -# ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. -# ## pool_min_conns (default: 0) - Minimum size of connection pool. 
-# ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. -# ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. -# ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. -# # connection = "" -# -# ## Postgres schema to use. -# # schema = "public" -# -# ## Store tags as foreign keys in the metrics table. Default is false. -# # tags_as_foreign_keys = false -# -# ## Suffix to append to table name (measurement name) for the foreign tag table. -# # tag_table_suffix = "_tag" -# -# ## Deny inserting metrics if the foreign tag can't be inserted. -# # foreign_tag_constraint = false -# -# ## Store all tags as a JSONB object in a single 'tags' column. -# # tags_as_jsonb = false -# -# ## Store all fields as a JSONB object in a single 'fields' column. -# # fields_as_jsonb = false -# -# ## Name of the timestamp column -# ## NOTE: Some tools (e.g. Grafana) require the default name so be careful! -# # timestamp_column_name = "time" -# -# ## Type of the timestamp column -# ## Currently, "timestamp without time zone" and "timestamp with time zone" -# ## are supported -# # timestamp_column_type = "timestamp without time zone" -# -# ## Templated statements to execute when creating a new table. -# # create_templates = [ -# # '''CREATE TABLE {{ .table }} ({{ .columns }})''', -# # ] -# -# ## Templated statements to execute when adding columns to a table. -# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points -# ## containing fields for which there is no column will have the field omitted. -# # add_column_templates = [ -# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', -# # ] -# -# ## Templated statements to execute when creating a new tag table. -# # tag_table_create_templates = [ -# # '''CREATE TABLE {{ .table }} ({{ .columns }}, PRIMARY KEY (tag_id))''', -# # ] -# -# ## Templated statements to execute when adding columns to a tag table. -# ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. -# # tag_table_add_column_templates = [ -# # '''ALTER TABLE {{ .table }} ADD COLUMN IF NOT EXISTS {{ .columns|join ", ADD COLUMN IF NOT EXISTS " }}''', -# # ] -# -# ## The postgres data type to use for storing unsigned 64-bit integer values (Postgres does not have a native -# ## unsigned 64-bit integer type). -# ## The value can be one of: -# ## numeric - Uses the PostgreSQL "numeric" data type. -# ## uint8 - Requires pguint extension (https://github.com/petere/pguint) -# # uint64_type = "numeric" -# -# ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This -# ## controls the maximum backoff duration. -# # retry_max_backoff = "15s" -# -# ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). -# ## This is an optimization to skip inserting known tag IDs. -# ## Each entry consumes approximately 34 bytes of memory. -# # tag_cache_size = 100000 -# -# ## Enable & set the log level for the Postgres driver. -# # log_level = "warn" # trace, debug, info, warn, error, none - - -# # Configuration for the Prometheus client to spawn -# [[outputs.prometheus_client]] -# ## Address to listen on. 
-# ## ex: -# ## listen = ":9273" -# ## listen = "vsock://:9273" -# listen = ":9273" -# -# ## Maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## Maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. -# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. -# ## Valid options: 1, 2 -# # metric_version = 1 -# -# ## Use HTTP Basic Authentication. -# # basic_username = "Foo" -# # basic_password = "Bar" -# -# ## If set, the IP Ranges which are allowed to access metrics. -# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] -# # ip_range = [] -# -# ## Path to publish the metrics on. -# # path = "/metrics" -# -# ## Expiration interval for each metric. 0 == no expiration -# # expiration_interval = "60s" -# -# ## Collectors to enable, valid entries are "gocollector" and "process". -# ## If unset, both are enabled. -# # collectors_exclude = ["gocollector", "process"] -# -# ## Send string metrics as Prometheus labels. -# ## Unless set to false all string metrics will be sent as labels. -# # string_as_label = true -# -# ## If set, enable TLS with the given certificate. -# # tls_cert = "/etc/ssl/telegraf.crt" -# # tls_key = "/etc/ssl/telegraf.key" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Export metric collection time. -# # export_timestamp = false -# -# ## Specify the metric type explicitly. -# ## This overrides the metric-type of the Telegraf metric. Globbing is allowed. -# # [outputs.prometheus_client.metric_types] -# # counter = [] -# # gauge = [] - - -# # Publishes metrics to a redis timeseries server -# [[outputs.redistimeseries]] -# ## The address of the RedisTimeSeries server. -# address = "127.0.0.1:6379" -# -# ## Redis ACL credentials -# # username = "" -# # password = "" -# # database = 0 -# -# ## Timeout for operations such as ping or sending metrics -# # timeout = "10s" -# -# ## Enable attempt to convert string fields to numeric values -# ## If "false" or in case the string value cannot be converted the string -# ## field will be dropped. -# # convert_string_fields = true -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# # insecure_skip_verify = false - - -# # Configuration for Riemann to send metrics to -# [[outputs.riemann]] -# ## The full TCP or UDP URL of the Riemann server -# url = "tcp://localhost:5555" -# -# ## Riemann event TTL, floating-point time in seconds. -# ## Defines how long that an event is considered valid for in Riemann -# # ttl = 30.0 -# -# ## Separator to use between measurement and field name in Riemann service name -# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' -# separator = "/" -# -# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name -# # measurement_as_attribute = false -# -# ## Send string metrics as Riemann event states. -# ## Unless enabled all string metrics will be ignored -# # string_as_state = false -# -# ## A list of tag keys whose values get sent as Riemann tags. -# ## If empty, all Telegraf tag values will be sent as tags -# # tag_keys = ["telegraf","custom_tag"] -# -# ## Additional Riemann tags to send. 
-# # tags = ["telegraf-output"] -# -# ## Description for Riemann event -# # description_text = "metrics collected from telegraf" -# -# ## Riemann client write timeout, defaults to "5s" if not set. -# # timeout = "5s" - - -# # Send aggregate metrics to Sensu Monitor -# [[outputs.sensu]] -# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to -# ## (protocol, host, and port only). The output plugin will automatically -# ## append the corresponding backend API path -# ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). -# ## -# ## Backend Events API reference: -# ## https://docs.sensu.io/sensu-go/latest/api/events/ -# ## -# ## AGENT API URL is the Sensu Agent API root URL to send metrics to -# ## (protocol, host, and port only). The output plugin will automatically -# ## append the correspeonding agent API path (/events). -# ## -# ## Agent API Events API reference: -# ## https://docs.sensu.io/sensu-go/latest/api/events/ -# ## -# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output -# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are -# ## not provided, the output plugin will default to use an agent_api_url of -# ## http://127.0.0.1:3031 -# ## -# # backend_api_url = "http://127.0.0.1:8080" -# # agent_api_url = "http://127.0.0.1:3031" -# -# ## API KEY is the Sensu Backend API token -# ## Generate a new API token via: -# ## -# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities -# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf -# ## $ sensuctl user create telegraf --group telegraf --password REDACTED -# ## $ sensuctl api-key grant telegraf -# ## -# ## For more information on Sensu RBAC profiles & API tokens, please visit: -# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ -# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ -# ## -# # api_key = "${SENSU_API_KEY}" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Timeout for HTTP message -# # timeout = "5s" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Sensu Event details -# ## -# ## Below are the event details to be sent to Sensu. The main portions of the -# ## event are the check, entity, and metrics specifications. For more information -# ## on Sensu events and its components, please visit: -# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events -# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks -# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities -# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics -# ## -# ## Check specification -# ## The check name is the name to give the Sensu check associated with the event -# ## created. This maps to check.metatadata.name in the event. -# [outputs.sensu.check] -# name = "telegraf" -# -# ## Entity specification -# ## Configure the entity name and namespace, if necessary. This will be part of -# ## the entity.metadata in the event. 
-# ## -# ## NOTE: if the output plugin is configured to send events to a -# ## backend_api_url and entity_name is not set, the value returned by -# ## os.Hostname() will be used; if the output plugin is configured to send -# ## events to an agent_api_url, entity_name and entity_namespace are not used. -# # [outputs.sensu.entity] -# # name = "server-01" -# # namespace = "default" -# -# ## Metrics specification -# ## Configure the tags for the metrics that are sent as part of the Sensu event -# # [outputs.sensu.tags] -# # source = "telegraf" -# -# ## Configure the handler(s) for processing the provided metrics -# # [outputs.sensu.metrics] -# # handlers = ["influxdb","elasticsearch"] - - -# # Send metrics and events to SignalFx -# [[outputs.signalfx]] -# ## SignalFx Org Access Token -# access_token = "my-secret-token" -# -# ## The SignalFx realm that your organization resides in -# signalfx_realm = "us9" # Required if ingest_url is not set -# -# ## You can optionally provide a custom ingest url instead of the -# ## signalfx_realm option above if you are using a gateway or proxy -# ## instance. This option takes precedence over signalfx_realm. -# ingest_url = "https://my-custom-ingest/" -# -# ## Event typed metrics are omitted by default, -# ## If you require an event typed metric you must specify the -# ## metric name in the following list. -# included_event_names = ["plugin.metric_name"] - - -# # Generic socket writer capable of handling multiple socket types. -# [[outputs.socket_writer]] -# ## URL to connect to -# # address = "tcp://127.0.0.1:8094" -# # address = "tcp://example.com:http" -# # address = "tcp4://127.0.0.1:8094" -# # address = "tcp6://127.0.0.1:8094" -# # address = "tcp6://[2001:db8::1]:8094" -# # address = "udp://127.0.0.1:8094" -# # address = "udp4://127.0.0.1:8094" -# # address = "udp6://127.0.0.1:8094" -# # address = "unix:///tmp/telegraf.sock" -# # address = "unixgram:///tmp/telegraf.sock" -# # address = "vsock://cid:port" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Period between keep alive probes. -# ## Only applies to TCP sockets. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# # keep_alive_period = "5m" -# -# ## Content encoding for message payloads, can be set to "gzip" or to -# ## "identity" to apply no encoding. -# ## -# # content_encoding = "identity" -# -# ## Data format to generate. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" - - -# # Save metrics to an SQL Database -# [[outputs.sql]] -# ## Database driver -# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres), -# ## sqlite (SQLite3), snowflake (snowflake.com) clickhouse (ClickHouse) -# # driver = "" -# -# ## Data source name -# ## The format of the data source name is different for each database driver. -# ## See the plugin readme for details. 
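-# ## (Illustrative only; check the readme for your driver. A typical
-# ## go-sql-driver/mysql DSN looks like "user:password@tcp(localhost:3306)/telegraf".)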
-# # data_source_name = "" -# -# ## Timestamp column name -# # timestamp_column = "timestamp" -# -# ## Table creation template -# ## Available template variables: -# ## {TABLE} - table name as a quoted identifier -# ## {TABLELITERAL} - table name as a quoted string literal -# ## {COLUMNS} - column definitions (list of quoted identifiers and types) -# # table_template = "CREATE TABLE {TABLE}({COLUMNS})" -# -# ## Table existence check template -# ## Available template variables: -# ## {TABLE} - tablename as a quoted identifier -# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1" -# -# ## Initialization SQL -# # init_sql = "" -# -# ## Maximum amount of time a connection may be idle. "0s" means connections are -# ## never closed due to idle time. -# # connection_max_idle_time = "0s" -# -# ## Maximum amount of time a connection may be reused. "0s" means connections -# ## are never closed due to age. -# # connection_max_lifetime = "0s" -# -# ## Maximum number of connections in the idle connection pool. 0 means unlimited. -# # connection_max_idle = 2 -# -# ## Maximum number of open connections to the database. 0 means unlimited. -# # connection_max_open = 0 -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Metric type to SQL type conversion -# ## The values on the left are the data types Telegraf has and the values on -# ## the right are the data types Telegraf will use when sending to a database. -# ## -# ## The database values used must be data types the destination database -# ## understands. It is up to the user to ensure that the selected data type is -# ## available in the database they are using. Refer to your database -# ## documentation for what data types are available and supported. -# #[outputs.sql.convert] -# # integer = "INT" -# # real = "DOUBLE" -# # text = "TEXT" -# # timestamp = "TIMESTAMP" -# # defaultvalue = "TEXT" -# # unsigned = "UNSIGNED" -# # bool = "BOOL" -# # ## This setting controls the behavior of the unsigned value. By default the -# # ## setting will take the integer value and append the unsigned value to it. The other -# # ## option is "literal", which will use the actual value the user provides to -# # ## the unsigned option. This is useful for a database like ClickHouse where -# # ## the unsigned value should use a value like "uint64". -# # # conversion_style = "unsigned_suffix" - - -# # Configuration for Google Cloud Stackdriver to send metrics to -# [[outputs.stackdriver]] -# ## GCP Project -# project = "erudite-bloom-151019" -# -# ## The namespace for the metric descriptor -# ## This is optional and users are encouraged to set the namespace as a -# ## resource label instead. If omitted it is not included in the metric name. -# namespace = "telegraf" -# -# ## Metric Type Prefix -# ## The DNS name used with the metric type as a prefix. -# # metric_type_prefix = "custom.googleapis.com" -# -# ## Metric Name Format -# ## Specifies the layout of the metric name, choose from: -# ## * path: 'metric_type_prefix_namespace_name_key' -# ## * official: 'metric_type_prefix/namespace_name_key/kind' -# # metric_name_format = "path" -# -# ## Metric Data Type -# ## By default, telegraf will use whatever type the metric comes in as. -# ## However, for some use cases, forcing int64, may be preferred for values: -# ## * source: use whatever was passed in -# ## * double: preferred datatype to allow queries by PromQL. 
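-# ## Illustrative example: metric_data_type = "double" sends integer fields
-# ## as doubles so they remain queryable via PromQL.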
-# # metric_data_type = "source" -# -# ## Tags as resource labels -# ## Tags defined in this option, when they exist, are added as a resource -# ## label and not included as a metric label. The values from tags override -# ## the values defined under the resource_labels config options. -# # tags_as_resource_label = [] -# -# ## Custom resource type -# # resource_type = "generic_node" -# -# ## Override metric type by metric name -# ## Metric names matching the values here, globbing supported, will have the -# ## metric type set to the corresponding type. -# # metric_counter = [] -# # metric_gauge = [] -# # metric_histogram = [] -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Additional resource labels -# # [outputs.stackdriver.resource_labels] -# # node_id = "$HOSTNAME" -# # namespace = "myapp" -# # location = "eu-north0" - - -# # Configuration for active mq with stomp protocol to send metrics to -# [[outputs.stomp]] -# host = "localhost:61613" -# -# ## Queue name for producer messages -# queueName = "telegraf" -# -# ## Username and password if required by the Active MQ server. -# # username = "" -# # password = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Data format to output. -# data_format = "json" - - -# # A plugin that can send metrics to Sumo Logic HTTP metric collector. -# [[outputs.sumologic]] -# ## Unique URL generated for your HTTP Metrics Source. -# ## This is the address to send metrics to. -# # url = "https://events.sumologic.net/receiver/v1/http/" -# -# ## Data format to be used for sending metrics. -# ## This will set the "Content-Type" header accordingly. -# ## Currently supported formats: -# ## * graphite - for Content-Type of application/vnd.sumologic.graphite -# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 -# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus -# ## -# ## More information can be found at: -# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics -# ## -# ## NOTE: -# ## When unset, telegraf will by default use the influx serializer which is currently unsupported -# ## in HTTP Source. -# data_format = "carbon2" -# -# ## Timeout used for HTTP request -# # timeout = "5s" -# -# ## Max HTTP request body size in bytes before compression (if applied). -# ## By default 1MB is recommended. -# ## NOTE: -# ## Bear in mind that in some serializer a metric even though serialized to multiple -# ## lines cannot be split any further so setting this very low might not work -# ## as expected. -# # max_request_body_size = 1000000 -# -# ## Additional, Sumo specific options. -# ## Full list can be found here: -# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers -# -# ## Desired source name. -# ## Useful if you want to override the source name configured for the source. -# # source_name = "" -# -# ## Desired host name. -# ## Useful if you want to override the source host configured for the source. -# # source_host = "" -# -# ## Desired source category. -# ## Useful if you want to override the source category configured for the source. 
-# # source_category = ""
-#
-# ## Comma-separated key=value list of dimensions to apply to every metric.
-# ## Custom dimensions will allow you to query your metrics at a more granular level.
-# # dimensions = ""
-
-
-# # Configuration for Syslog server to send metrics to
-# [[outputs.syslog]]
-# ## URL to connect to
-# ## ex: address = "tcp://127.0.0.1:8094"
-# ## ex: address = "tcp4://127.0.0.1:8094"
-# ## ex: address = "tcp6://127.0.0.1:8094"
-# ## ex: address = "tcp6://[2001:db8::1]:8094"
-# ## ex: address = "udp://127.0.0.1:8094"
-# ## ex: address = "udp4://127.0.0.1:8094"
-# ## ex: address = "udp6://127.0.0.1:8094"
-# address = "tcp://127.0.0.1:8094"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Period between keep alive probes.
-# ## Only applies to TCP sockets.
-# ## 0 disables keep alive probes.
-# ## Defaults to the OS configuration.
-# # keep_alive_period = "5m"
-#
-# ## The framing technique with which it is expected that messages are
-# ## transported (default = "octet-counting"). Whether the messages come
-# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
-# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
-# ## be one of "octet-counting", "non-transparent".
-# # framing = "octet-counting"
-#
-# ## The trailer to be expected in case of non-transparent framing (default = "LF").
-# ## Must be one of "LF", or "NUL".
-# # trailer = "LF"
-#
-# ## SD-PARAMs settings
-# ## Syslog messages can contain key/value pairs within zero or more
-# ## structured data sections. For each unrecognized metric tag/field an
-# ## SD-PARAM is created.
-# ##
-# ## Example:
-# ## [[outputs.syslog]]
-# ## sdparam_separator = "_"
-# ## default_sdid = "default@32473"
-# ## sdids = ["foo@123", "bar@456"]
-# ##
-# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
-# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
-#
-# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
-# # sdparam_separator = "_"
-#
-# ## Default sdid used for tags/fields that don't contain a prefix defined in
-# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
-# ## will be used for unrecognized fields.
-# # default_sdid = "default@32473"
-#
-# ## List of explicit prefixes to extract from tag/field keys and use as the
-# ## SDID, if they match (see above example for more details):
-# # sdids = ["foo@123", "bar@456"]
-#
-# ## Default severity value. Severity and Facility are used to calculate the
-# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
-# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
-# # default_severity_code = 5
-#
-# ## Default facility value. Facility and Severity are used to calculate the
-# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
-# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
-# # default_facility_code = 1
-#
-# ## Default APP-NAME value (RFC5424#section-6.2.5)
-# ## Used when no metric tag with key "appname" is defined.
-# ## If unset, "Telegraf" is the default
-# # default_appname = "Telegraf"
-
-
-# # Configuration for sending metrics to Amazon Timestream.
-# [[outputs.timestream]]
-# ## Amazon Region
-# region = "us-east-1"
-#
-# ## Amazon Credentials
-# ## Credentials are loaded in the following order:
-# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
-# ## 2) Assumed credentials via STS if role_arn is specified
-# ## 3) explicit credentials from 'access_key' and 'secret_key'
-# ## 4) shared profile from 'profile'
-# ## 5) environment variables
-# ## 6) shared credentials file
-# ## 7) EC2 Instance Profile
-# #access_key = ""
-# #secret_key = ""
-# #token = ""
-# #role_arn = ""
-# #web_identity_token_file = ""
-# #role_session_name = ""
-# #profile = ""
-# #shared_credential_file = ""
-#
-# ## Endpoint to make request against, the correct endpoint is automatically
-# ## determined and this option should only be set if you wish to override the
-# ## default.
-# ## ex: endpoint_url = "http://localhost:8000"
-# # endpoint_url = ""
-#
-# ## Timestream database where the metrics will be inserted.
-# ## The database must exist prior to starting Telegraf.
-# database_name = "yourDatabaseNameHere"
-#
-# ## Specifies if the plugin should describe the Timestream database upon starting
-# ## to validate that it has access and the necessary permissions, connection, etc.,
-# ## as a safety check.
-# ## If the describe operation fails, the plugin will not start
-# ## and therefore the Telegraf agent will not start.
-# describe_database_on_start = false
-#
-# ## Specifies how the data is organized in Timestream.
-# ## Valid values are: single-table, multi-table.
-# ## When mapping_mode is set to single-table, all of the data is stored in a single table.
-# ## When mapping_mode is set to multi-table, the data is organized and stored in multiple tables.
-# ## The default is multi-table.
-# mapping_mode = "multi-table"
-#
-# ## Specifies if the plugin should create the table, if the table does not exist.
-# create_table_if_not_exists = true
-#
-# ## Specifies the Timestream table magnetic store retention period in days.
-# ## Check Timestream documentation for more details.
-# ## NOTE: This property is valid when create_table_if_not_exists = true.
-# create_table_magnetic_store_retention_period_in_days = 365
-#
-# ## Specifies the Timestream table memory store retention period in hours.
-# ## Check Timestream documentation for more details.
-# ## NOTE: This property is valid when create_table_if_not_exists = true.
-# create_table_memory_store_retention_period_in_hours = 24
-#
-# ## Specifies how the data is written into Timestream.
-# ## Valid values are: true, false
-# ## When use_multi_measure_records is set to true, all of the tags and fields are stored
-# ## as a single row in a Timestream table.
-# ## When use_multi_measure_records is set to false, Timestream stores each field in a
-# ## separate table row, thereby storing the tags multiple times (once for each field).
-# ## The recommended setting is true.
-# ## The default is false.
-# use_multi_measure_records = false
-#
-# ## Specifies the measure_name to use when sending multi-measure records.
-# ## NOTE: This property is valid when use_multi_measure_records=true and mapping_mode=multi-table
-# measure_name_for_multi_measure_records = "telegraf_measure"
-#
-# ## Specifies the name of the table to write data into
-# ## NOTE: This property is valid when mapping_mode=single-table.
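-# ## Illustrative example (placeholder table name): with
-# ## mapping_mode = "single-table" and single_table_name = "telegraf_metrics",
-# ## every measurement is written into that one table instead of one table
-# ## per measurement.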
-# # single_table_name = "" -# -# ## Specifies the name of dimension when all of the data is being stored in a single table -# ## and the measurement name is transformed into the dimension value -# ## (see Mapping data from Influx to Timestream for details) -# ## NOTE: This property is valid when mapping_mode=single-table. -# # single_table_dimension_name_for_telegraf_measurement_name = "namespace" -# -# ## Only valid and optional if create_table_if_not_exists = true -# ## Specifies the Timestream table tags. -# ## Check Timestream documentation for more details -# # create_table_tags = { "foo" = "bar", "environment" = "dev"} -# -# ## Specify the maximum number of parallel go routines to ingest/write data -# ## If not specified, defaulted to 1 go routines -# max_write_go_routines = 25 -# -# ## Please see README.md to know how line protocol data is mapped to Timestream -# ## - - -# # Write metrics to Warp 10 -# [[outputs.warp10]] -# # Prefix to add to the measurement. -# prefix = "telegraf." -# -# # URL of the Warp 10 server -# warp_url = "http://localhost:8080" -# -# # Write token to access your app on warp 10 -# token = "Token" -# -# # Warp 10 query timeout -# # timeout = "15s" -# -# ## Print Warp 10 error body -# # print_error_body = false -# -# ## Max string error size -# # max_string_error_size = 511 -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# [[outputs.wavefront]] -# ## Url for Wavefront API or Wavefront proxy instance. -# ## Direct Ingestion via Wavefront API requires authentication. See below. -# url = "https://metrics.wavefront.com" -# -# ## Maximum number of metrics to send per HTTP request. This value should be higher than the `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not recommended. -# # http_maximum_batch_size = 10000 -# -# ## prefix for metrics keys -# # prefix = "my.specific.prefix." -# -# ## whether to use "value" for name of simple fields. default is false -# # simple_fields = false -# -# ## character to use between metric and field name. default is . (dot) -# # metric_separator = "." -# -# ## Convert metric name paths to use metricSeparator character -# ## When true will convert all _ (underscore) characters in final metric name. default is true -# # convert_paths = true -# -# ## Use Strict rules to sanitize metric and tag names from invalid characters -# ## When enabled forward slash (/) and comma (,) will be accepted -# # use_strict = false -# -# ## Use Regex to sanitize metric and tag names from invalid characters -# ## Regex is more thorough, but significantly slower. default is false -# # use_regex = false -# -# ## point tags to use as the source name for Wavefront (if none found, host will be used) -# # source_override = ["hostname", "address", "agent_host", "node_host"] -# -# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true -# # convert_bool = true -# -# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any -# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. -# # truncate_tags = false -# -# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics -# ## normally done by the Wavefront SDK. 
This can be used if you are experiencing buffer overruns. The sending -# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in -# ## Telegraf. -# # immediate_flush = true -# -# ## Send internal metrics (starting with `~sdk.go`) for valid, invalid, and dropped metrics. default is true. -# # send_internal_metrics = true -# -# ## Optional TLS Config -# ## Set to true/false to enforce TLS being enabled/disabled. If not set, -# ## enable TLS only if any of the other options are specified. -# # tls_enable = -# ## Trusted root certificates for server -# # tls_ca = "/path/to/cafile" -# ## Used for TLS client certificate authentication -# # tls_cert = "/path/to/certfile" -# ## Used for TLS client certificate authentication -# # tls_key = "/path/to/keyfile" -# ## Send the specified TLS server name via SNI -# # tls_server_name = "kubernetes.example.com" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## HTTP Timeout -# # timeout="10s" -# -# ## MaxIdleConns controls the maximum number of idle (keep-alive) -# ## connections across all hosts. Zero means no limit. -# # max_idle_conn = 0 -# -# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle -# ## (keep-alive) connections to keep per-host. If zero, -# ## DefaultMaxIdleConnsPerHost is used(2). -# # max_idle_conn_per_host = 2 -# -# ## Idle (keep-alive) connection timeout. -# ## Maximum amount of time before idle connection is closed. -# ## Zero means no limit. -# # idle_conn_timeout = 0 -# -# ## Authentication for Direct Ingestion. -# ## Direct Ingestion requires one of: `token`,`auth_csp_api_token`, or `auth_csp_client_credentials` -# ## See https://docs.wavefront.com/csp_getting_started.html to learn more about using CSP credentials with Wavefront. -# ## Not required if using a Wavefront proxy. -# -# ## Wavefront API Token Authentication. Ignored if using a Wavefront proxy. -# ## 1. Click the gear icon at the top right in the Wavefront UI. -# ## 2. Click your account name (usually your email) -# ## 3. Click *API access*. -# # token = "YOUR_TOKEN" -# -# ## Optional. defaults to "https://console.cloud.vmware.com/" -# ## Ignored if using a Wavefront proxy or a Wavefront API token. -# # auth_csp_base_url=https://console.cloud.vmware.com -# -# ## CSP API Token Authentication for Wavefront. Ignored if using a Wavefront proxy. -# # auth_csp_api_token=CSP_API_TOKEN_HERE -# -# ## CSP Client Credentials Authentication Information for Wavefront. Ignored if using a Wavefront proxy. -# ## See also: https://docs.wavefront.com/csp_getting_started.html#whats-a-server-to-server-app -# # [outputs.wavefront.auth_csp_client_credentials] -# # app_id=CSP_APP_ID_HERE -# # app_secret=CSP_APP_SECRET_HERE -# # org_id=CSP_ORG_ID_HERE - - -# # A plugin that can transmit metrics over WebSocket. -# [[outputs.websocket]] -# ## URL is the address to send metrics to. Make sure ws or wss scheme is used. -# url = "ws://127.0.0.1:3000/telegraf" -# -# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero). -# # connect_timeout = "30s" -# # write_timeout = "30s" -# # read_timeout = "30s" -# -# ## Optionally turn on using text data frames (binary by default). 
-# # use_text_frames = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional SOCKS5 proxy to use -# # socks5_enabled = true -# # socks5_address = "127.0.0.1:1080" -# # socks5_username = "alice" -# # socks5_password = "pass123" -# -# ## Optional HTTP proxy to use -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## Data format to output. -# ## Each data format has it's own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -# # data_format = "influx" -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Additional HTTP Upgrade headers -# # [outputs.websocket.headers] -# # Authorization = "Bearer " - - -# # Send aggregated metrics to Yandex.Cloud Monitoring -# [[outputs.yandex_cloud_monitoring]] -# ## Timeout for HTTP writes. -# # timeout = "20s" -# -# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed -# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write" -# -# ## All user metrics should be sent with "custom" service specified. Normally should not be changed -# # service = "custom" - - -# # Send metrics to Zabbix -# [[outputs.zabbix]] -# ## Address and (optional) port of the Zabbix server -# address = "zabbix.example.com:10051" -# -# ## Send metrics as type "Zabbix agent (active)" -# # agent_active = false -# -# ## Add prefix to all keys sent to Zabbix. -# # key_prefix = "telegraf." -# -# ## Name of the tag that contains the host name. Used to set the host in Zabbix. -# ## If the tag is not found, use the hostname of the system running Telegraf. -# # host_tag = "host" -# -# ## Skip measurement prefix to all keys sent to Zabbix. -# # skip_measurement_prefix = false -# -# ## This field will be sent as HostMetadata to Zabbix Server to autoregister the host. -# ## To enable this feature, this option must be set to a value other than "". -# # autoregister = "" -# -# ## Interval to resend auto-registration data to Zabbix. -# ## Only applies if autoregister feature is enabled. -# ## This value is a lower limit, the actual resend should be triggered by the next flush interval. -# # autoregister_resend_interval = "30m" -# -# ## Interval to send LLD data to Zabbix. -# ## This value is a lower limit, the actual resend should be triggered by the next flush interval. -# # lld_send_interval = "10m" -# -# ## Interval to delete stored LLD known data and start capturing it again. -# ## This value is a lower limit, the actual resend should be triggered by the next flush interval. -# # lld_clear_interval = "1h" - - -############################################################################### -# PROCESSOR PLUGINS # -############################################################################### - - -# # Attach AWS EC2 metadata to metrics -# [[processors.aws_ec2]] -# ## Instance identity document tags to attach to metrics. 
-# ## For more information see:
-# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
-# ##
-# ## Available tags:
-# ## * accountId
-# ## * architecture
-# ## * availabilityZone
-# ## * billingProducts
-# ## * imageId
-# ## * instanceId
-# ## * instanceType
-# ## * kernelId
-# ## * pendingTime
-# ## * privateIp
-# ## * ramdiskId
-# ## * region
-# ## * version
-# imds_tags = []
-#
-# ## EC2 instance tags retrieved with DescribeTags action.
-# ## In case a tag is empty upon retrieval, it's omitted when tagging metrics.
-# ## Note that in order for this to work, the role attached to the EC2 instance
-# ## or the AWS credentials available from the environment must have a policy
-# ## attached that allows ec2:DescribeTags.
-# ##
-# ## For more information see:
-# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
-# ec2_tags = []
-#
-# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
-# timeout = "10s"
-#
-# ## ordered controls whether or not the metrics need to stay in the same order
-# ## this plugin received them in. If false, this plugin will change the order
-# ## with requests hitting cached results moving through immediately and not
-# ## waiting on slower lookups. This may cause issues for you if you are
-# ## depending on the order of metrics staying the same. If so, set this to true.
-# ## Keeping the metrics ordered may be slightly slower.
-# ordered = false
-#
-# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
-# ## at the same time.
-# ## It's probably best to keep this number fairly low.
-# max_parallel_calls = 10
-#
-# ## cache_ttl determines how long each cached item will remain in the cache before
-# ## it is removed and subsequently needs to be queried for from the AWS API. By
-# ## default, no items are cached.
-# # cache_ttl = "0s"
-#
-# ## tag_cache_size determines how many of the values which are found in imds_tags
-# ## or ec2_tags will be kept in memory for faster lookup on successive processing
-# ## of metrics. You may want to adjust this if you have excessively large numbers
-# ## of tags on your EC2 instances, and you are using the ec2_tags field. This
-# ## typically does not need to be changed when using the imds_tags field.
-# # tag_cache_size = 1000
-#
-# ## log_cache_stats will emit a log line periodically to stdout with details of
-# ## cache entries, hits, misses, and evictions since the last time stats were
-# ## emitted. This can be helpful in determining whether caching is being effective
-# ## in your environment. Stats are emitted every 30 seconds. By default, this
-# ## setting is disabled.
-# # log_cache_stats = false
-
-
-# # Clone metrics and apply modifications using override semantics.
-# [[processors.clone]]
-# ## All modifications on inputs and aggregators can be overridden:
-# # name_override = "new_name"
-# # name_prefix = "new_name_prefix"
-# # name_suffix = "new_name_suffix"
-#
-# ## Tags to be added (all values must be strings)
-# # [processors.clone.tags]
-# # additional_tag = "tag_value"
-
-
-# # Convert values to another metric value type
-# [[processors.converter]]
-# ## Tags to convert
-# ##
-# ## The table key determines the target type, and the array of key-values
-# ## select the keys to convert. The array may contain globs.
-# ## <target-type> = [<tag-key>, <tag-key>, ...]
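-# ## Illustrative example: listing a tag key under the "integer" table key,
-# ## e.g. integer = ["port"], turns the tag port="8080" into an integer
-# ## field port=8080i on the metric.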
-# [processors.converter.tags]
-# measurement = []
-# string = []
-# integer = []
-# unsigned = []
-# boolean = []
-# float = []
-#
-# ## Optional tag to use as metric timestamp
-# # timestamp = []
-#
-# ## Format of the timestamp determined by the tag above. This can be any of
-# ## "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time format.
-# ## It is required when using the timestamp option.
-# # timestamp_format = ""
-#
-# ## Fields to convert
-# ##
-# ## The table key determines the target type, and the array of key-values
-# ## select the keys to convert. The array may contain globs.
-# ## <target-type> = [<field-key>, <field-key>, ...]
-# [processors.converter.fields]
-# measurement = []
-# tag = []
-# string = []
-# integer = []
-# unsigned = []
-# boolean = []
-# float = []
-#
-# ## Optional field to use as metric timestamp
-# # timestamp = []
-#
-# ## Format of the timestamp determined by the field above. This can be any
-# ## of "unix", "unix_ms", "unix_us", "unix_ns", or a valid Golang time
-# ## format. It is required when using the timestamp option.
-# # timestamp_format = ""
-
-
-# # Dates measurements, tags, and fields that pass through this filter.
-# [[processors.date]]
-# ## New tag to create
-# tag_key = "month"
-#
-# ## New field to create (cannot set both field_key and tag_key)
-# # field_key = "month"
-#
-# ## Date format string, must be a representation of the Go "reference time"
-# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
-# date_format = "Jan"
-#
-# ## If destination is a field, date format can also be one of
-# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
-# # date_format = "unix"
-#
-# ## Offset duration added to the date string when writing the new tag.
-# # date_offset = "0s"
-#
-# ## Timezone to use when creating the tag or field using a reference time
-# ## string. This can be set to one of "UTC", "Local", or to a location name
-# ## in the IANA Time Zone database.
-# ## example: timezone = "America/Los_Angeles"
-# # timezone = "UTC"
-
-
-# # Filter metrics with repeating field values
-# [[processors.dedup]]
-# ## Maximum time to suppress output
-# dedup_interval = "600s"
-
-
-# # Set default fields on your metric(s) when they are nil or empty
-# [[processors.defaults]]
-# ## Ensures a set of fields always exists on your metric(s) with their
-# ## respective default value.
-# ## For any given field pair (key = default), if it's not set, a field
-# ## is set on the metric with the specified default.
-# ##
-# ## A field is considered not set if it is nil on the incoming metric;
-# ## or it is not nil but its value is an empty string or is a string
-# ## of one or more spaces.
-# ## <field-key> = <default-value>
-# [processors.defaults.fields]
-# field_1 = "bar"
-# time_idle = 0
-# is_error = true
-
-
-# # Map enum values according to given table.
-# [[processors.enum]]
-# [[processors.enum.mapping]]
-# ## Name of the field to map. Globs accepted.
-# field = "status"
-#
-# ## Name of the tag to map. Globs accepted.
-# # tag = "status"
-#
-# ## Destination tag or field to be used for the mapped value. By default the
-# ## source tag or field is used, overwriting the original value.
-# dest = "status_code"
-#
-# ## Default value to be used for all values not contained in the mapping
-# ## table. When unset and no match is found, the original field will remain
-# ## unmodified and the destination tag or field will not be created.
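-# ## Illustrative example: with the value_mappings table below, a metric
-# ## carrying the field status="green" gains an additional field
-# ## status_code=1i; the original "status" field is left untouched.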
-# # default = 0
-#
-# ## Table of mappings
-# [processors.enum.mapping.value_mappings]
-# green = 1
-# amber = 2
-# red = 3
-
-
-# # Run executable as long-running processor plugin
-# [[processors.execd]]
-# ## One program to run as a daemon.
-# ## NOTE: process and each argument should each be their own string
-# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
-# command = ["cat"]
-#
-# ## Environment variables
-# ## Array of "key=value" pairs to pass as environment variables
-# ## e.g. "KEY=value", "USERNAME=John Doe",
-# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
-# # environment = []
-#
-# ## Delay before the process is restarted after an unexpected termination
-# # restart_delay = "10s"
-#
-# ## Serialization format for communicating with the executed program
-# ## Please note that the corresponding data-format must exist both in
-# ## parsers and serializers
-# # data_format = "influx"
-
-
-# # Performs file path manipulations on tags and fields
-# [[processors.filepath]]
-# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
-# # [[processors.filepath.basename]]
-# # tag = "path"
-# # dest = "basepath"
-#
-# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
-# # [[processors.filepath.dirname]]
-# # field = "path"
-#
-# ## Treat the tag value as a path, converting it to its last element without its suffix
-# # [[processors.filepath.stem]]
-# # tag = "path"
-#
-# ## Treat the tag value as a path, converting it to the shortest path name equivalent
-# ## to path by purely lexical processing
-# # [[processors.filepath.clean]]
-# # tag = "path"
-#
-# ## Treat the tag value as a path, converting it to a relative path that is lexically
-# ## equivalent to the source path when joined to 'base_path'
-# # [[processors.filepath.rel]]
-# # tag = "path"
-# # base_path = "/var/log"
-#
-# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Only has
-# ## an effect on Windows
-# # [[processors.filepath.toslash]]
-# # tag = "path"
-
-
-# # Filter metrics by the given criteria
-# [[processors.filter]]
-# ## Default action if no rule applies
-# # default = "pass"
-#
-# ## Rules to apply on the incoming metrics (multiple rules are possible)
-# ## The rules are evaluated in order and the first matching rule is applied.
-# ## In case no rule matches, the "default" is applied.
-# ## All filter criteria in a rule must apply for the rule to match the metric,
-# ## i.e. the criteria are combined by a logical AND. If a criterion is
-# ## omitted it is NOT applied at all and ignored.
-# [[processors.filter.rule]]
-# ## List of metric names to match including glob expressions
-# # name = []
-#
-# ## List of tag key/values pairs to match including glob expressions
-# ## ALL given tag keys must exist and at least one value must match
-# ## for the metric to match the rule.
-# # tags = {}
-#
-# ## List of field keys to match including glob expressions
-# ## At least one field must exist for the metric to match the rule.
-# # fields = []
-#
-# ## Action to apply for this rule
-# ## "pass" will keep the metric and pass it on, while "drop" will remove
-# ## the metric
-# # action = "drop"
-
-
-# # Add a tag of the network interface name looked up over SNMP by interface number
-# [[processors.ifname]]
-# ## Name of tag holding the interface number
-# # tag = "ifIndex"
-#
-# ## Name of output tag where the looked-up interface name will be added
-# # dest = "ifName"
-#
-# ## Name of tag of the SNMP agent to request the interface name from
-# ## example: agent = "source"
-# # agent = "agent"
-#
-# ## Timeout for each request.
-# # timeout = "5s"
-#
-# ## SNMP version; can be 1, 2, or 3.
-# # version = 2
-#
-# ## SNMP community string.
-# # community = "public"
-#
-# ## Number of retries to attempt.
-# # retries = 3
-#
-# ## The GETBULK max-repetitions parameter.
-# # max_repetitions = 10
-#
-# ## SNMPv3 authentication and encryption options.
-# ##
-# ## Security Name.
-# # sec_name = "myuser"
-# ## Authentication protocol; one of "MD5", "SHA", or "".
-# # auth_protocol = "MD5"
-# ## Authentication password.
-# # auth_password = "pass"
-# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
-# # sec_level = "authNoPriv"
-# ## Context Name.
-# # context_name = ""
-# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
-# # priv_protocol = ""
-# ## Privacy password used for encrypted messages.
-# # priv_password = ""
-#
-# ## max_parallel_lookups is the maximum number of SNMP requests to
-# ## make at the same time.
-# # max_parallel_lookups = 100
-#
-# ## ordered controls whether or not the metrics need to stay in the
-# ## same order this plugin received them in. If false, this plugin
-# ## may change the order when data is cached. If you need metrics to
-# ## stay in order set this to true. Keeping the metrics ordered may
-# ## be slightly slower.
-# # ordered = false
-#
-# ## cache_ttl is the amount of time interface names are cached for a
-# ## given agent. After this period elapses, if names are needed they
-# ## will be retrieved again.
-# # cache_ttl = "8h"
-
-
-# # Lookup a key derived from metrics in a static file
-# [[processors.lookup]]
-# ## List of files containing the lookup-table
-# files = ["path/to/lut.json", "path/to/another_lut.json"]
-#
-# ## Format of the lookup file(s)
-# ## Available formats are:
-# ## json -- JSON file with 'key: {tag-key: tag-value, ...}' mapping
-# ## csv_key_name_value -- CSV file with 'key,tag-key,tag-value,...,tag-key,tag-value' mapping
-# ## csv_key_values -- CSV file with a header containing tag-names and
-# ## rows with 'key,tag-value,...,tag-value' mappings
-# # format = "json"
-#
-# ## Template for generating the lookup-key from the metric.
-# ## This is a Golang template (see https://pkg.go.dev/text/template) to
-# ## access the metric name (`{{.Name}}`), a tag value (`{{.Tag "name"}}`) or
-# ## a field value (`{{.Field "name"}}`).
-# key = '{{.Tag "host"}}'
-
-
-# # Adds noise to numerical fields
-# [[processors.noise]]
-# ## Specifies the type of the random distribution.
-# ## Can be "laplacian", "gaussian" or "uniform".
-# # type = "laplacian"
-#
-# ## Center of the distribution.
-# ## Only used for Laplacian and Gaussian distributions.
-# # mu = 0.0
-#
-# ## Scale parameter for the Laplacian or Gaussian distribution
-# # scale = 1.0
-#
-# ## Upper and lower bound of the Uniform distribution
-# # min = -1.0
-# # max = 1.0
-#
-# ## Apply the noise only to numeric fields matching the filter criteria below.
-# ## Excludes takes precedence over includes. -# # include_fields = [] -# # exclude_fields = [] - - -# # Apply metric modifications using override semantics. -# [[processors.override]] -# ## All modifications on inputs and aggregators can be overridden: -# # name_override = "new_name" -# # name_prefix = "new_name_prefix" -# # name_suffix = "new_name_suffix" -# -# ## Tags to be added (all values must be strings) -# # [processors.override.tags] -# # additional_tag = "tag_value" - - -# # Parse a value in a specified field(s)/tag(s) and add the result in a new metric -# [[processors.parser]] -# ## The name of the fields whose value will be parsed. -# parse_fields = ["message"] -# -# ## The name of the tags whose value will be parsed. -# # parse_tags = [] -# -# ## If true, incoming metrics are not emitted. -# # drop_original = false -# -# ## Merge Behavior -# ## Only has effect when drop_original is set to false. Possible options -# ## include: -# ## * override: emitted metrics are merged by overriding the original metric -# ## using the newly parsed metrics, but retains the original metric -# ## timestamp. -# ## * override-with-timestamp: the same as "override", but the timestamp is -# ## set based on the new metrics if present. -# # merge = "" -# -# ## The dataformat to be read from files -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Rotate a single valued metric into a multi field metric -# [[processors.pivot]] -# ## Tag to use for naming the new field. -# tag_key = "name" -# ## Field to use as the value of the new field. -# value_key = "value" - - -# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file -# [[processors.port_name]] -# ## Name of tag holding the port number -# # tag = "port" -# ## Or name of the field holding the port number -# # field = "port" -# -# ## Name of output tag or field (depending on the source) where service name will be added -# # dest = "service" -# -# ## Default tcp or udp -# # default_protocol = "tcp" -# -# ## Tag containing the protocol (tcp or udp, case-insensitive) -# # protocol_tag = "proto" -# -# ## Field containing the protocol (tcp or udp, case-insensitive) -# # protocol_field = "proto" - - -# # Print all metrics that pass through this filter. -# [[processors.printer]] - - -# # Transforms tag and field values as well as measurement, tag and field names with regex pattern -# [[processors.regex]] -# namepass = ["nginx_requests"] -# -# ## Tag value conversion(s). Multiple instances are allowed. -# [[processors.regex.tags]] -# ## Tag(s) to process with optional glob expressions such as '*'. -# key = "resp_code" -# ## Regular expression to match the tag value. If the value doesn't -# ## match the tag is ignored. -# pattern = "^(\\d)\\d\\d$" -# ## Replacement expression defining the value of the target tag. You can -# ## use regexp groups or named groups e.g. ${1} references the first group. -# replacement = "${1}xx" -# ## Name of the target tag defaulting to 'key' if not specified. -# ## In case of wildcards being used in `key` the currently processed -# ## tag-name is used as target. -# # result_key = "method" -# ## Appends the replacement to the target tag instead of overwriting it when -# ## set to true. -# # append = false -# -# ## Field value conversion(s). Multiple instances are allowed. 
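-# ## Illustrative example: the rule below rewrites a field such as
-# ## request="/api/users/42?page=1" to request="/users/42".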
-# [[processors.regex.fields]]
-# ## Field(s) to process with optional glob expressions such as '*'.
-# key = "request"
-# ## Regular expression to match the field value. If the value doesn't
-# ## match or the field doesn't contain a string the field is ignored.
-# pattern = "^/api(?P<method>/[\\w/]+)\\S*"
-# ## Replacement expression defining the value of the target field. You can
-# ## use regexp groups or named groups e.g. ${method} references the group
-# ## named "method".
-# replacement = "${method}"
-# ## Name of the target field defaulting to 'key' if not specified.
-# ## In case of wildcards being used in `key` the currently processed
-# ## field-name is used as target.
-# # result_key = "method"
-#
-# ## Rename metric fields
-# [[processors.regex.field_rename]]
-# ## Regular expression to match on the field name
-# pattern = "^search_(\\w+)d$"
-# ## Replacement expression defining the name of the new field
-# replacement = "${1}"
-# ## If the new field name already exists, you can either "overwrite" the
-# ## existing one with the value of the renamed field OR you can "keep"
-# ## both the existing and source field.
-# # result_key = "keep"
-#
-# ## Rename metric tags
-# [[processors.regex.tag_rename]]
-# ## Regular expression to match on a tag name
-# pattern = "^search_(\\w+)d$"
-# ## Replacement expression defining the name of the new tag
-# replacement = "${1}"
-# ## If the new tag name already exists, you can either "overwrite" the
-# ## existing one with the value of the renamed tag OR you can "keep"
-# ## both the existing and source tag.
-# # result_key = "keep"
-#
-# ## Rename metrics
-# [[processors.regex.metric_rename]]
-# ## Regular expression to match on a metric name
-# pattern = "^search_(\\w+)d$"
-# ## Replacement expression defining the new name of the metric
-# replacement = "${1}"
-
-
-# # Rename measurements, tags, and fields that pass through this filter.
-# [[processors.rename]]
-# ## Specify one sub-table per rename operation.
-# [[processors.rename.replace]]
-# measurement = "network_interface_throughput"
-# dest = "throughput"
-#
-# [[processors.rename.replace]]
-# tag = "hostname"
-# dest = "host"
-#
-# [[processors.rename.replace]]
-# field = "lower"
-# dest = "min"
-#
-# [[processors.rename.replace]]
-# field = "upper"
-# dest = "max"
-
-
-# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
-# [[processors.reverse_dns]]
-# ## For optimal performance, you may want to limit which metrics are passed to this
-# ## processor. eg:
-# ## namepass = ["my_metric_*"]
-#
-# ## cache_ttl is how long the dns entries should stay cached for.
-# ## Generally longer is better, but if you expect a large number of diverse lookups
-# ## you'll want to consider memory use.
-# cache_ttl = "24h"
-#
-# ## lookup_timeout is how long you should wait for a single dns request to respond.
-# ## This is also the maximum acceptable latency for a metric travelling through
-# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
-# ## be passed on unaltered.
-# ## Multiple simultaneous resolution requests for the same IP will only make a
-# ## single rDNS request, and they will all wait for the answer for this long.
-# lookup_timeout = "3s"
-#
-# ## max_parallel_lookups is the maximum number of dns requests to be in flight
-# ## at the same time. Requests hitting cached values do not count against this
-# ## total, and neither do multiple requests for the same IP.
-# ## It's probably best to keep this number fairly low.
-# max_parallel_lookups = 10
-#
-# ## ordered controls whether or not the metrics need to stay in the same order
-# ## this plugin received them in. If false, this plugin will change the order
-# ## with requests hitting cached results moving through immediately and not
-# ## waiting on slower lookups. This may cause issues for you if you are
-# ## depending on the order of metrics staying the same. If so, set this to true.
-# ## Keeping the metrics ordered may be slightly slower.
-# ordered = false
-#
-# [[processors.reverse_dns.lookup]]
-# ## Get the IP from the field "source_ip", and put the result in the field "source_name"
-# field = "source_ip"
-# dest = "source_name"
-#
-# [[processors.reverse_dns.lookup]]
-# ## Get the IP from the tag "destination_ip", and put the result in the tag
-# ## "destination_name".
-# tag = "destination_ip"
-# dest = "destination_name"
-#
-# ## If you would prefer destination_name to be a field instead, you can use a
-# ## processors.converter after this one, specifying the order attribute.
-
-
-# # Add the S2 Cell ID as a tag based on latitude and longitude fields
-# [[processors.s2geo]]
-# ## The name of the lat and lon fields containing WGS-84 latitude and
-# ## longitude in decimal degrees.
-# # lat_field = "lat"
-# # lon_field = "lon"
-#
-# ## New tag to create
-# # tag_key = "s2_cell_id"
-#
-# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
-# # cell_level = 9
-
-
-# # Scale values with a predefined range to a different output range.
-# [[processors.scale]]
-# ## It is possible to define multiple different scalings that can be applied
-# ## to different sets of fields. Each scaling expects the following
-# ## arguments:
-# ## - input_minimum: Minimum expected input value
-# ## - input_maximum: Maximum expected input value
-# ## - output_minimum: Minimum desired output value
-# ## - output_maximum: Maximum desired output value
-# ## Alternatively you can specify a scaling with factor and offset:
-# ## - factor: factor to scale the input value with
-# ## - offset: additive offset for value after scaling
-# ## - fields: a list of field names (or filters) to apply this scaling to
-#
-# ## Example: Scaling with minimum and maximum values
-# # [processors.scale.scaling]
-# # input_minimum = 0
-# # input_maximum = 1
-# # output_minimum = 0
-# # output_maximum = 100
-# # fields = ["temperature1", "temperature2"]
-#
-# ## Example: Scaling with factor and offset
-# # [processors.scale.scaling]
-# # factor = 10.0
-# # offset = -5.0
-# # fields = ["voltage*"]
-
-
-# # Lookup extra tags via SNMP based on the table index
-# [[processors.snmp_lookup]]
-# ## Name of tag of the SNMP agent to do the lookup on
-# # agent_tag = "source"
-#
-# ## Name of tag holding the table row index
-# # index_tag = "index"
-#
-# ## Timeout for each request.
-# # timeout = "5s"
-#
-# ## SNMP version; can be 1, 2, or 3.
-# # version = 2
-#
-# ## SNMP community string.
-# # community = "public"
-#
-# ## Number of retries to attempt.
-# # retries = 3
-#
-# ## The GETBULK max-repetitions parameter.
-# # max_repetitions = 10
-#
-# ## SNMPv3 authentication and encryption options.
-# ##
-# ## Security Name.
-# # sec_name = "myuser"
-# ## Authentication protocol; one of "MD5", "SHA", or "".
-# # auth_protocol = "MD5"
-# ## Authentication password.
-# # auth_password = "pass"
-# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
-# # sec_level = "authNoPriv"
-# ## Context Name.
-# # context_name = "" -# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". -# # priv_protocol = "" -# ## Privacy password used for encrypted messages. -# # priv_password = "" -# -# ## The maximum number of SNMP requests to make at the same time. -# # max_parallel_lookups = 16 -# -# ## The amount of agents to cache entries for. If limit is reached, -# ## oldest will be removed first. 0 means no limit. -# # max_cache_entries = 100 -# -# ## Control whether the metrics need to stay in the same order this plugin -# ## received them in. If false, this plugin may change the order when data is -# ## cached. If you need metrics to stay in order set this to true. Keeping the -# ## metrics ordered may be slightly slower. -# # ordered = false -# -# ## The amount of time entries are cached for a given agent. After this period -# ## elapses if tags are needed they will be retrieved again. -# # cache_ttl = "8h" -# -# ## Minimum time between requests to an agent in case an index could not be -# ## resolved. If set to zero no request on missing indices will be triggered. -# # min_time_between_updates = "5m" -# -# ## List of tags to be looked up. -# [[processors.snmp_lookup.tag]] -# ## Object identifier of the variable as a numeric or textual OID. -# oid = "IF-MIB::ifName" -# -# ## Name of the tag to create. If not specified, it defaults to the value of 'oid'. -# ## If 'oid' is numeric, an attempt to translate the numeric OID into a textual OID -# ## will be made. -# # name = "" -# -# ## Apply one of the following conversions to the variable value: -# ## hwaddr: Convert the value to a MAC address. -# ## ipaddr: Convert the value to an IP address. -# ## enum(1): Convert the value according to its syntax in the MIB (full). -# ## enum: Convert the value according to its syntax in the MIB. -# ## -# # conversion = "" - - -# # Split a metric into one or more metrics with the specified field(s)/tag(s) -# [[processors.split]] -# ## Keeps the original metric by default -# # drop_original = false -# -# ## Template for an output metric -# ## Users can define multiple templates to split the original metric into -# ## multiple, potentially overlapping, metrics. -# [[processors.split.template]] -# ## New metric name -# name = "" -# -# ## List of tag keys for this metric template, accepts globs, e.g. "*" -# tags = [] -# -# ## List of field keys for this metric template, accepts globs, e.g. "*" -# fields = [] - - -# # Process metrics using a Starlark script -# [[processors.starlark]] -# ## The Starlark source can be set as a string in this configuration file, or -# ## by referencing a file containing the script. Only one source or script -# ## should be set at once. -# -# ## Source of the Starlark script. -# source = ''' -# def apply(metric): -# return metric -# ''' -# -# ## File containing a Starlark script. -# # script = "/usr/local/bin/myscript.star" -# -# ## The constants of the Starlark script. 
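-# ## Illustrative example: a constant declared below, e.g. threshold = 0.75,
-# ## is exposed to the script as a global variable and can be referenced
-# ## directly:
-# ## def apply(metric):
-# ##   load = metric.fields.get("load")
-# ##   if load != None and load > threshold:
-# ##     return metric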
-# # [processors.starlark.constants]
-# # max_size = 10
-# # threshold = 0.75
-# # default_name = "Julia"
-# # debug_mode = true
-
-
-# # Perform string processing on tags, fields, and measurements
-# [[processors.strings]]
-# ## Convert a field value to lowercase and store in a new field
-# # [[processors.strings.lowercase]]
-# # field = "uri_stem"
-# # dest = "uri_stem_normalised"
-#
-# ## Convert a tag value to uppercase
-# # [[processors.strings.uppercase]]
-# # tag = "method"
-#
-# ## Convert a field value to titlecase
-# # [[processors.strings.titlecase]]
-# # field = "status"
-#
-# ## Trim leading and trailing whitespace using the default cutset
-# # [[processors.strings.trim]]
-# # field = "message"
-#
-# ## Trim leading characters in cutset
-# # [[processors.strings.trim_left]]
-# # field = "message"
-# # cutset = "\t"
-#
-# ## Trim trailing characters in cutset
-# # [[processors.strings.trim_right]]
-# # field = "message"
-# # cutset = "\r\n"
-#
-# ## Trim the given prefix from the field
-# # [[processors.strings.trim_prefix]]
-# # field = "my_value"
-# # prefix = "my_"
-#
-# ## Trim the given suffix from the field
-# # [[processors.strings.trim_suffix]]
-# # field = "read_count"
-# # suffix = "_count"
-#
-# ## Replace all non-overlapping instances of old with new
-# # [[processors.strings.replace]]
-# # measurement = "*"
-# # old = ":"
-# # new = "_"
-#
-# ## Trims strings based on width
-# # [[processors.strings.left]]
-# # field = "message"
-# # width = 10
-#
-# ## Decode a base64 encoded utf-8 string
-# # [[processors.strings.base64decode]]
-# # field = "message"
-#
-# ## Sanitize a string to ensure it is a valid utf-8 string
-# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
-# # [[processors.strings.valid_utf8]]
-# # field = "message"
-# # replacement = ""
-
-
-# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
-# [[processors.tag_limit]]
-# ## Maximum number of tags to preserve
-# limit = 3
-#
-# ## List of tags to preferentially preserve
-# keep = ["environment", "region"]
-
-
-# # Uses a Go template to create a new tag
-# [[processors.template]]
-# ## Go template used to create the tag name of the output. In order to
-# ## ease TOML escaping requirements, you should use single quotes around
-# ## the template string.
-# tag = "topic"
-#
-# ## Go template used to create the tag value of the output. In order to
-# ## ease TOML escaping requirements, you should use single quotes around
-# ## the template string.
-# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
-
-
-# # Keep the metrics with the top k field values over a period of time.
-# [[processors.topk]]
-# ## How many seconds between aggregations
-# # period = 10
-#
-# ## How many top buckets to return per field
-# ## Every field specified to aggregate over will return k number of results.
-# ## For example, 1 field with k of 10 will return 10 buckets, while 2 fields
-# ## with k of 3 will return 6 buckets.
-# # k = 10
-#
-# ## Over which tags should the aggregation be done. Globs can be specified, in
-# ## which case any tag matching the glob will be aggregated over. If set to an
-# ## empty list, no aggregation over tags is done.
-# # group_by = ['*']
-#
-# ## The field(s) to aggregate
-# ## Each field defined is used to create an independent aggregation. Each
-# ## aggregation will return k buckets. If a metric does not have a defined
-# ## field, the metric will be dropped from the aggregation. Consider using
-# ## the defaults processor plugin to ensure fields are set if required.
-# # fields = ["value"]
-#
-# ## What aggregation function to use. Options: sum, mean, min, max
-# # aggregation = "mean"
-#
-# ## Instead of the top k largest metrics, return the bottom k lowest metrics
-# # bottomk = false
-#
-# ## The plugin assigns each metric a GroupBy tag generated from its name and
-# ## tags. If this setting is different from "" the plugin will add a
-# ## tag (whose name will be the value of this setting) to each metric with
-# ## the value of the calculated GroupBy tag. Useful for debugging.
-# # add_groupby_tag = ""
-#
-# ## These settings provide a way to know the position of each metric in
-# ## the top k. The 'add_rank_fields' setting allows you to specify for which
-# ## fields the position is required. If the list is non-empty, then a field
-# ## will be added to each and every metric for each string present in this
-# ## setting. This field will contain the ranking of the group that
-# ## the metric belonged to when aggregated over that field.
-# ## The name of the field will be set to the name of the aggregation field,
-# ## suffixed with the string '_topk_rank'
-# # add_rank_fields = []
-#
-# ## These settings provide a way to know what values the plugin is generating
-# ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
-# ## specify for which fields the final aggregation value is required. If the
-# ## list is non-empty, then a field will be added to each and every metric for
-# ## each field present in this setting. This field will contain
-# ## the computed aggregation for the group that the metric belonged to when
-# ## aggregated over that field.
-# ## The name of the field will be set to the name of the aggregation field,
-# ## suffixed with the string '_topk_aggregate'
-# # add_aggregate_fields = []
-
-
-# # Rotate multi field metric into several single field metrics
-# [[processors.unpivot]]
-# ## Metric mode to pivot to
-# ## Set to "tag", metrics are pivoted as a tag and the metric is kept as
-# ## the original measurement name. Tag key name is set by tag_key value.
-# ## Set to "metric", a new metric is created, named after the field. With this
-# ## option the tag_key is ignored. Be aware that this could lead to metric
-# ## name conflicts!
-# # use_fieldname_as = "tag"
-#
-# ## Tag to use for the name.
-# # tag_key = "name"
-#
-# ## Field to use for the name of the value.
-# # value_key = "value"
-
-
-###############################################################################
-#                            AGGREGATOR PLUGINS                               #
-###############################################################################
-
-
-# # Keep the aggregate basicstats of each metric passing through.
-# [[aggregators.basicstats]]
-# ## The period on which to flush & clear the aggregator.
-# period = "30s"
-#
-# ## If true, the original metric will be dropped by the
-# ## aggregator and will not get sent to the output plugins.
-# drop_original = false
-#
-# ## Configures which basic stats to push as fields
-# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","percent_change","stdev","s2","sum","interval"]
-
-
-# # Calculates a derivative for every field.
-# [[aggregators.derivative]]
-# ## The period in which to flush the aggregator.
-# period = "30s"
-# ##
-# ## Suffix to append for the resulting derivative field.
-# # suffix = "_rate"
-# ##
-# ## Field to use for the quotient when computing the derivative.
-# ## When using a field as the derivation parameter the name of that field will -# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. -# ## By default the timestamps of the metrics are used and the suffix is omitted. -# # variable = "" -# ## -# ## Maximum number of roll-overs in case only one measurement is found during a period. -# # max_roll_over = 10 - - -# # Report the final metric of a series -# [[aggregators.final]] -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# # drop_original = false -# -# ## The time that a series is not updated until considering it final. -# # series_timeout = "5m" -# -# ## Output strategy, supported values: -# ## timeout -- output a metric if no new input arrived for `series_timeout`; -# ## useful for filling gaps in input data -# ## periodic -- output the last received metric every `period`; useful to -# ## downsample the input data -# # output_strategy = "timeout" - - -# # Configuration for aggregate histogram metrics -# [[aggregators.histogram]] -# ## The period in which to flush the aggregator. -# period = "30s" -# -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# -# ## If true, the histogram will be reset on flush instead -# ## of accumulating the results. -# reset = false -# -# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. -# ## Defaults to true. -# cumulative = true -# -# ## Expiration interval for each histogram. The histogram will be expired if -# ## there are no changes in any buckets for this time interval. 0 == no expiration. -# # expiration_interval = "0m" -# -# ## If true, aggregated histogram are pushed to output only if it was updated since -# ## previous push. Defaults to false. -# # push_only_on_update = false -# -# ## Example config that aggregates all fields of the metric. -# # [[aggregators.histogram.config]] -# # ## Right borders of buckets (with +Inf implicitly added). -# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] -# # ## The name of metric. -# # measurement_name = "cpu" -# -# ## Example config that aggregates only specific fields of the metric. -# # [[aggregators.histogram.config]] -# # ## Right borders of buckets (with +Inf implicitly added). -# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] -# # ## The name of metric. -# # measurement_name = "diskio" -# # ## The concrete fields of metric -# # fields = ["io_time", "read_time", "write_time"] - - -# # Merge metrics into multifield metrics by series key -# [[aggregators.merge]] -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = true - - -# # Keep the aggregate min/max of each metric passing through. -# [[aggregators.minmax]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Keep the aggregate quantiles of each metric passing through. -# [[aggregators.quantile]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. 
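-# ## (Illustrative note, not part of the upstream sample: samples received
-# ## during 'period' are buffered, and at flush time each configured quantile
-# ## yields one output field per aggregated input field, e.g.)
-# # quantiles = [0.5, 0.9, 0.99]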
-# period = "30s" -# -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# -# ## Quantiles to output in the range [0,1] -# # quantiles = [0.25, 0.5, 0.75] -# -# ## Type of aggregation algorithm -# ## Supported are: -# ## "t-digest" -- approximation using centroids, can cope with large number of samples -# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) -# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) -# ## NOTE: Do not use "exact" algorithms with large number of samples -# ## to not impair performance or memory consumption! -# # algorithm = "t-digest" -# -# ## Compression for approximation (t-digest). The value needs to be -# ## greater or equal to 1.0. Smaller values will result in more -# ## performance but less accuracy. -# # compression = 100.0 - - -# # Aggregate metrics using a Starlark script -# [[aggregators.starlark]] -# ## The Starlark source can be set as a string in this configuration file, or -# ## by referencing a file containing the script. Only one source or script -# ## should be set at once. -# ## -# ## Source of the Starlark script. -# source = ''' -# state = {} -# -# def add(metric): -# state["last"] = metric -# -# def push(): -# return state.get("last") -# -# def reset(): -# state.clear() -# ''' -# -# ## File containing a Starlark script. -# # script = "/usr/local/bin/myscript.star" -# -# ## The constants of the Starlark script. -# # [aggregators.starlark.constants] -# # max_size = 10 -# # threshold = 0.75 -# # default_name = "Julia" -# # debug_mode = true - - -# # Count the occurrence of values in fields. -# [[aggregators.valuecounter]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# ## The fields for which the values will be counted -# fields = ["status"] - - -############################################################################### -# INPUT PLUGINS # -############################################################################### - - -# Read metrics about cpu usage -#[[inputs.cpu]] - ## Whether to report per-cpu stats or not -# percpu = true - ## Whether to report total system cpu stats or not -# totalcpu = true - ## If true, collect raw CPU time metrics -# collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - ## NOTE: The resulting 'time_active' field INCLUDES 'iowait'! -# report_active = false - ## If true and the info is available then add core_id and physical_id tags -# core_tags = false - - -# Read metrics about disk usage by mount point -#[[inputs.disk]] - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. - # mount_points = ["/"] - - ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] - - ## Ignore mount points by mount options. - ## The 'mount' command reports options of all mounts in parathesis. - ## Bind mounts can be ignored with the special 'bind' option. - # ignore_mount_opts = [] - - -# Read metrics about disk IO by device -#[[inputs.diskio]] - ## Devices to collect stats for - ## Wildcards are supported except for disk synonyms like '/dev/disk/by-id'. - ## ex. 
devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] - # devices = ["*"] - - ## Skip gathering of the disk's serial numbers. - # skip_serial_number = true - - ## Device metadata tags to add on systems supporting it (Linux only) - ## Use 'udevadm info -q property -n ' to get a list of properties. - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. The first template which does not contain any variables not - ## present for the device is used as the device name tag. - ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. - # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] - - -# Plugin to collect various Linux kernel statistics. -# This plugin ONLY supports Linux -#[[inputs.kernel]] - ## Additional gather options - ## Possible options include: - ## * ksm - kernel same-page merging - ## * psi - pressure stall information - # collect = [] - - -# Read metrics about memory usage -#[[inputs.mem]] - # no configuration - - -# Get the number of processes and group them by status -# This plugin ONLY supports non-Windows -#[[inputs.processes]] - ## Use sudo to run ps command on *BSD systems. Linux systems will read - ## /proc, so this does not apply there. - # use_sudo = false - - -# Read metrics about swap memory usage -# This plugin ONLY supports Linux -#[[inputs.swap]] - # no configuration - - -# Read metrics about system load & uptime -#[[inputs.system]] - # no configuration - - -# # Gather ActiveMQ metrics -# [[inputs.activemq]] -# ## ActiveMQ WebConsole URL -# url = "http://127.0.0.1:8161" -# -# ## Required ActiveMQ Endpoint -# ## deprecated in 1.11; use the url option -# # server = "192.168.50.10" -# # port = 8161 -# -# ## Credentials for basic HTTP authentication -# # username = "admin" -# # password = "admin" -# -# ## Required ActiveMQ webadmin root path -# # webadmin = "admin" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# ## DEPRECATED: The "aerospike" plugin is deprecated in version 1.30.0, use 'inputs.prometheus' with the Aerospike Prometheus Exporter instead. -# # Read stats from aerospike server(s) -# [[inputs.aerospike]] -# ## Aerospike servers to connect to (with port) -# ## This plugin will query all namespaces the aerospike -# ## server has configured and get stats for them. 
-# servers = ["localhost:3000"] -# -# # username = "telegraf" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # enable_tls = false -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# # tls_name = "tlsname" -# ## If false, skip chain & host verification -# # insecure_skip_verify = true -# -# # Feature Options -# # Add namespace variable to limit the namespaces executed on -# # Leave blank to do all -# # disable_query_namespaces = true # default false -# # namespaces = ["namespace1", "namespace2"] -# -# # Enable set level telemetry -# # query_sets = true # default: false -# # Add namespace set combinations to limit sets executed on -# # Leave blank to do all sets -# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] -# -# # Histograms -# # enable_ttl_histogram = true # default: false -# # enable_object_size_linear_histogram = true # default: false -# -# # by default, aerospike produces a 100 bucket histogram -# # this is not great for most graphing tools, this will allow -# # the ability to squash this to a smaller number of buckets -# # To have a balanced histogram, the number of buckets chosen -# # should divide evenly into 100. -# # num_histogram_buckets = 100 # default: 10 - - -# # Query statistics from AMD Graphics cards using rocm-smi binary -# [[inputs.amd_rocm_smi]] -# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = "/opt/rocm/bin/rocm-smi" -# -# ## Optional: specifies plugin behavior regarding missing rocm-smi binary -# ## Available choices: -# ## - error: telegraf will return an error on startup -# ## - ignore: telegraf will ignore this plugin -# # startup_error_behavior = "error" -# -# ## Optional: timeout for GPU polling -# # timeout = "5s" - - -# # Read Apache status information (mod_status) -# [[inputs.apache]] -# ## An array of URLs to gather from, must be directed at the machine -# ## readable version of the mod_status page including the auto query string. -# ## Default is "http://localhost/server-status?auto". -# urls = ["http://localhost/server-status?auto"] -# -# ## Credentials for basic HTTP authentication. -# # username = "myuser" -# # password = "mypassword" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Monitor APC UPSes connected to apcupsd -# [[inputs.apcupsd]] -# # A list of running apcupsd server to connect to. -# # If not provided will default to tcp://127.0.0.1:3551 -# servers = ["tcp://127.0.0.1:3551"] -# -# ## Timeout for dialing server. -# timeout = "5s" - - -# # Gather metrics from Apache Aurora schedulers -# [[inputs.aurora]] -# ## Schedulers are the base addresses of your Aurora Schedulers -# schedulers = ["http://127.0.0.1:8081"] -# -# ## Set of role types to collect metrics from. -# ## -# ## The scheduler roles are checked each interval by contacting the -# ## scheduler nodes; zookeeper is not contacted. -# # roles = ["leader", "follower"] -# -# ## Timeout is the max time for total network operations. -# # timeout = "5s" -# -# ## Username and password are sent using HTTP Basic Auth. 
-# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Gather Azure resources metrics from Azure Monitor API -# [[inputs.azure_monitor]] -# # can be found under Overview->Essentials in the Azure portal for your application/service -# subscription_id = "<>" -# # can be obtained by registering an application under Azure Active Directory -# client_id = "<>" -# # can be obtained by registering an application under Azure Active Directory -# client_secret = "<>" -# # can be found under Azure Active Directory->Properties -# tenant_id = "<>" -# -# # resource target #1 to collect metrics from -# [[inputs.azure_monitor.resource_target]] -# # can be found under Overview->Essentials->JSON View in the Azure portal for your application/service -# # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' -# # must be removed from the beginning of Resource ID property value) -# resource_id = "<>" -# # the metric names to collect -# # leave the array empty to use all metrics available to this resource -# metrics = [ "<>", "<>" ] -# # metrics aggregation type value to collect -# # can be 'Total', 'Count', 'Average', 'Minimum', 'Maximum' -# # leave the array empty to collect all aggregation types values for each metric -# aggregations = [ "<>", "<>" ] -# -# # resource target #2 to collect metrics from -# [[inputs.azure_monitor.resource_target]] -# resource_id = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] -# -# # resource group target #1 to collect metrics from resources under it with resource type -# [[inputs.azure_monitor.resource_group_target]] -# # the resource group name -# resource_group = "<>" -# -# # defines the resources to collect metrics from -# [[inputs.azure_monitor.resource_group_target.resource]] -# # the resource type -# resource_type = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] -# -# # defines the resources to collect metrics from -# [[inputs.azure_monitor.resource_group_target.resource]] -# resource_type = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] -# -# # resource group target #2 to collect metrics from resources under it with resource type -# [[inputs.azure_monitor.resource_group_target]] -# resource_group = "<>" -# -# [[inputs.azure_monitor.resource_group_target.resource]] -# resource_type = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] -# -# # subscription target #1 to collect metrics from resources under it with resource type -# [[inputs.azure_monitor.subscription_target]] -# resource_type = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] -# -# # subscription target #2 to collect metrics from resources under it with resource type -# [[inputs.azure_monitor.subscription_target]] -# resource_type = "<>" -# metrics = [ "<>", "<>" ] -# aggregations = [ "<>", "<>" ] - - -# # Gather Azure Storage Queue metrics -# [[inputs.azure_storage_queue]] -# ## Required Azure Storage Account name -# account_name = "mystorageaccount" -# -# ## Required Azure Storage Account access key -# account_key = "storageaccountaccesskey" -# -# ## Set to false to disable peeking age of oldest message (executes faster) -# # peek_oldest_message_age = true - - -# # Read metrics of bcache from stats_total and dirty_data -# # This plugin ONLY supports 
Linux -# [[inputs.bcache]] -# ## Bcache sets path -# ## If not specified, then default is: -# bcachePath = "/sys/fs/bcache" -# -# ## By default, Telegraf gather stats for all bcache devices -# ## Setting devices will restrict the stats to the specified -# ## bcache devices. -# bcacheDevs = ["bcache0"] - - -# # Collects Beanstalkd server and tubes stats -# [[inputs.beanstalkd]] -# ## Server to collect data from -# server = "localhost:11300" -# -# ## List of tubes to gather stats about. -# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command -# tubes = ["notifications"] - - -# # Read metrics exposed by Beat -# [[inputs.beat]] -# ## An URL from which to read Beat-formatted JSON -# ## Default is "http://127.0.0.1:5066". -# url = "http://127.0.0.1:5066" -# -# ## Enable collection of the listed stats -# ## An empty list means collect all. Available options are currently -# ## "beat", "libbeat", "system" and "filebeat". -# # include = ["beat", "libbeat", "filebeat"] -# -# ## HTTP method -# # method = "GET" -# -# ## Optional HTTP headers -# # headers = {"X-Special-Header" = "Special-Value"} -# -# ## Override HTTP "Host" header -# # host_header = "logstash.example.com" -# -# ## Timeout for HTTP requests -# # timeout = "5s" -# -# ## Optional HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read BIND nameserver XML statistics -# [[inputs.bind]] -# ## An array of BIND XML statistics URI to gather stats. -# ## Default is "http://localhost:8053/xml/v3". -# # urls = ["http://localhost:8053/xml/v3"] -# # gather_memory_contexts = false -# # gather_views = false -# -# ## Timeout for http requests made by bind nameserver -# # timeout = "4s" - - -# # Collect bond interface status, slaves statuses and failures count -# [[inputs.bond]] -# ## Sets 'proc' directory path -# ## If not specified, then default is /proc -# # host_proc = "/proc" -# -# ## Sets 'sys' directory path -# ## If not specified, then default is /sys -# # host_sys = "/sys" -# -# ## By default, telegraf gather stats for all bond interfaces -# ## Setting interfaces will restrict the stats to the specified -# ## bond interfaces. -# # bond_interfaces = ["bond0"] -# -# ## Tries to collect additional bond details from /sys/class/net/{bond} -# ## currently only useful for LACP (mode 4) bonds -# # collect_sys_details = false - - -# # Collect Kafka topics and consumers status from Burrow HTTP API. -# [[inputs.burrow]] -# ## Burrow API endpoints in format "schema://host:port". -# ## Default is "http://localhost:8000". -# servers = ["http://localhost:8000"] -# -# ## Override Burrow API prefix. -# ## Useful when Burrow is behind reverse-proxy. -# # api_prefix = "/v3/kafka" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Limit per-server concurrent connections. -# ## Useful in case of large number of topics or consumer groups. -# # concurrent_connections = 20 -# -# ## Filter clusters, default is no filtering. -# ## Values can be specified as glob patterns. -# # clusters_include = [] -# # clusters_exclude = [] -# -# ## Filter consumer groups, default is no filtering. -# ## Values can be specified as glob patterns. -# # groups_include = [] -# # groups_exclude = [] -# -# ## Filter topics, default is no filtering. 
-# ## Values can be specified as glob patterns. -# # topics_include = [] -# # topics_exclude = [] -# -# ## Credentials for basic HTTP authentication. -# # username = "" -# # password = "" -# -# ## Optional SSL config -# # ssl_ca = "/etc/telegraf/ca.pem" -# # ssl_cert = "/etc/telegraf/cert.pem" -# # ssl_key = "/etc/telegraf/key.pem" -# # insecure_skip_verify = false - - -# # Collects performance metrics from the MON, OSD, MDS and RGW nodes -# # in a Ceph storage cluster. -# [[inputs.ceph]] -# ## This is the recommended interval to poll. Too frequent and you -# ## will lose data points due to timeouts during rebalancing and recovery -# interval = '1m' -# -# ## All configuration values are optional, defaults are shown below -# -# ## location of ceph binary -# ceph_binary = "/usr/bin/ceph" -# -# ## directory in which to look for socket files -# socket_dir = "/var/run/ceph" -# -# ## prefix of MON and OSD socket files, used to determine socket type -# mon_prefix = "ceph-mon" -# osd_prefix = "ceph-osd" -# mds_prefix = "ceph-mds" -# rgw_prefix = "ceph-client" -# -# ## suffix used to identify socket files -# socket_suffix = "asok" -# -# ## Ceph user to authenticate as, ceph will search for the corresponding -# ## keyring e.g. client.admin.keyring in /etc/ceph, or the explicit path -# ## defined in the client section of ceph.conf for example: -# ## -# ## [client.telegraf] -# ## keyring = /etc/ceph/client.telegraf.keyring -# ## -# ## Consult the ceph documentation for more detail on keyring generation. -# ceph_user = "client.admin" -# -# ## Ceph configuration to use to locate the cluster -# ceph_config = "/etc/ceph/ceph.conf" -# -# ## Whether to gather statistics via the admin socket -# gather_admin_socket_stats = true -# -# ## Whether to gather statistics via ceph commands, requires ceph_user -# ## and ceph_config to be specified -# gather_cluster_stats = false - - -# # Read specific statistics per cgroup -# # This plugin ONLY supports Linux -# [[inputs.cgroup]] -# ## Directories in which to look for files, globs are supported. -# ## Consider restricting paths to the set of cgroups you really -# ## want to monitor if you have a large number of cgroups, to avoid -# ## any cardinality issues. -# # paths = [ -# # "/sys/fs/cgroup/memory", -# # "/sys/fs/cgroup/memory/child1", -# # "/sys/fs/cgroup/memory/child2/*", -# # ] -# ## cgroup stat fields, as file names, globs are supported. -# ## these file names are appended to each path from above. 
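-# ## Hypothetical combination: each matched path is joined with each file
-# ## glob, so the pair below reads memory.usage_in_bytes from the memory
-# ## cgroup and every child cgroup one level down:
-# # paths = ["/sys/fs/cgroup/memory", "/sys/fs/cgroup/memory/*"]
-# # files = ["memory.usage_in_bytes"]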
-# # files = ["memory.*usage*", "memory.limit_in_bytes"] - - -# # Pull Metric Statistics from Amazon CloudWatch -# [[inputs.cloudwatch]] -# ## Amazon Region -# region = "us-east-1" -# -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and -# ## web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# # access_key = "" -# # secret_key = "" -# # token = "" -# # role_arn = "" -# # web_identity_token_file = "" -# # role_session_name = "" -# # profile = "" -# # shared_credential_file = "" -# -# ## If you are using CloudWatch cross-account observability, you can -# ## set IncludeLinkedAccounts to true in a monitoring account -# ## and collect metrics from the linked source accounts -# # include_linked_accounts = false -# -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# -# ## Set http_proxy -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not -# ## all metrics are made available to the 1 minute period. Some are collected -# ## at 3 minute, 5 minute, or larger intervals. -# ## See https://aws.amazon.com/cloudwatch/faqs/#monitoring. -# ## Note that if a period is configured that is smaller than the minimum for a -# ## particular metric, that metric will not be returned by the Cloudwatch API -# ## and will not be collected by Telegraf. -# # -# ## Requested CloudWatch aggregation Period (required) -# ## Must be a multiple of 60s. -# period = "5m" -# -# ## Collection Delay (required) -# ## Must account for metrics availability via CloudWatch API -# delay = "5m" -# -# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid -# ## gaps or overlap in pulled data -# interval = "5m" -# -# ## Recommended if "delay" and "period" are both within 3 hours of request -# ## time. Invalid values will be ignored. Recently Active feature will only -# ## poll for CloudWatch ListMetrics values that occurred within the last 3h. -# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics -# ## API and require less memory to retain. -# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will -# ## not return data more than 3 hours old. -# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html -# #recently_active = "PT3H" -# -# ## Configure the TTL for the internal cache of metrics. -# # cache_ttl = "1h" -# -# ## Metric Statistic Namespaces (required) -# namespaces = ["AWS/ELB"] -# -# ## Maximum requests per second. Note that the global default AWS rate limit -# ## is 50 reqs/sec, so if you define multiple namespaces, these should add up -# ## to a maximum of 50. -# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html -# # ratelimit = 25 -# -# ## Timeout for http requests made by the cloudwatch client. -# # timeout = "5s" -# -# ## Batch Size -# ## The size of each batch to send requests to Cloudwatch. 500 is the -# ## suggested largest size. 
If a request gets too large (413 errors), consider
-# ## reducing this amount.
-# # batch_size = 500
-#
-# ## Namespace-wide statistic filters. These allow fewer queries to be made to
-# ## CloudWatch.
-# # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
-# # statistic_exclude = []
-#
-# ## Metrics to Pull
-# ## Defaults to all Metrics in Namespace if nothing is provided
-# ## Refreshes Namespace available metrics every 1h
-# #[[inputs.cloudwatch.metrics]]
-# # names = ["Latency", "RequestCount"]
-# #
-# # ## Statistic filters for Metric. These allow for retrieving specific
-# # ## statistics for an individual metric.
-# # # statistic_include = ["average", "sum", "minimum", "maximum", "sample_count"]
-# # # statistic_exclude = []
-# #
-# # ## Dimension filters for Metric.
-# # ## All dimensions defined for the metric names must be specified in order
-# # ## to retrieve the metric statistics.
-# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'.
-# # [[inputs.cloudwatch.metrics.dimensions]]
-# # name = "LoadBalancerName"
-# # value = "p-example"


-# # Collects conntrack stats from the configured directories and files.
-# # This plugin ONLY supports Linux
-# [[inputs.conntrack]]
-# ## The following defaults would work with multiple versions of conntrack.
-# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
-# ## kernel versions, as are the directory locations.
-#
-# ## Look through /proc/net/stat/nf_conntrack for these metrics
-# ## all - aggregated statistics
-# ## percpu - include detailed statistics with cpu tag
-# collect = ["all", "percpu"]
-#
-# ## User-specified directories and files to look through
-# ## Directories to search within for the conntrack files above.
-# ## Missing directories will be ignored.
-# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
-#
-# ## Superset of filenames to look for within the conntrack dirs.
-# ## Missing files will be ignored.
-# files = ["ip_conntrack_count","ip_conntrack_max",
-# "nf_conntrack_count","nf_conntrack_max"]


-# # Gather health check statuses from services registered in Consul
-# [[inputs.consul]]
-# ## Consul server address
-# # address = "localhost:8500"
-#
-# ## URI scheme for the Consul server, one of "http", "https"
-# # scheme = "http"
-#
-# ## Metric version controls the mapping from Consul metrics into
-# ## Telegraf metrics. Version 2 moved all fields with string values
-# ## to tags.
-# ##
-# ## example: metric_version = 1; deprecated in 1.16
-# ## metric_version = 2; recommended version
-# # metric_version = 1
-#
-# ## ACL token used in every request
-# # token = ""
-#
-# ## HTTP Basic Authentication username and password.
-# # username = ""
-# # password = ""
-#
-# ## Data center to query the health checks from
-# # datacenter = ""
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = true
-#
-# ## Consul checks' tag splitting
-# # When tags are formatted like "key:value" with ":" as a delimiter then
-# # they will be split and reported as proper key:value in Telegraf
-# # tag_delimiter = ":"


-# # Read metrics from the Consul Agent API
-# [[inputs.consul_agent]]
-# ## URL for the Consul agent
-# # url = "http://127.0.0.1:8500"
-#
-# ## Use an auth token for authorization, via either 'token' or 'token_file'.
-# ## If both are set, an error is thrown.
-# ## If both are empty, no token will be used. 
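-# ## Hypothetical example: Telegraf also substitutes environment variables
-# ## anywhere in this file, so the token can be injected without storing it
-# ## here in plain text:
-# # token = "${CONSUL_HTTP_TOKEN}"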
-# # token_file = "/path/to/auth/token" -# ## OR -# # token = "a1234567-40c7-9048-7bae-378687048181" -# -# ## Set timeout (default 5 seconds) -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile - - -# # Read per-node and per-bucket metrics from Couchbase -# [[inputs.couchbase]] -# ## specify servers via a url matching: -# ## [protocol://][:password]@address[:port] -# ## e.g. -# ## http://couchbase-0.example.com/ -# ## http://admin:secret@couchbase-0.example.com:8091/ -# ## -# ## If no servers are specified, then localhost is used as the host. -# ## If no protocol is specified, HTTP is used. -# ## If no port is specified, 8091 is used. -# servers = ["http://localhost:8091"] -# -# ## Filter bucket fields to include only here. -# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification (defaults to false) -# ## If set to false, tls_cert and tls_key are required -# # insecure_skip_verify = false -# -# ## Whether to collect cluster-wide bucket statistics -# ## It is recommended to disable this in favor of node_stats -# ## to get a better view of the cluster. -# # cluster_bucket_stats = true -# -# ## Whether to collect bucket stats for each individual node -# # node_bucket_stats = false -# -# ## List of additional stats to collect, choose from: -# ## * autofailover -# # additional_stats = [] - - -# # Read CouchDB Stats from one or more servers -# [[inputs.couchdb]] -# ## Works with CouchDB stats endpoints out of the box -# ## Multiple Hosts from which to read CouchDB stats: -# hosts = ["http://localhost:8086/_stats"] -# -# ## Use HTTP Basic Authentication. -# # basic_username = "telegraf" -# # basic_password = "p@ssw0rd" - - -# # Fetch metrics from a CSGO SRCDS -# [[inputs.csgo]] -# ## Specify servers using the following format: -# ## servers = [ -# ## ["ip1:port1", "rcon_password1"], -# ## ["ip2:port2", "rcon_password2"], -# ## ] -# # -# ## If no servers are specified, no data will be collected -# servers = [] - - -# # Input plugin for DC/OS metrics -# [[inputs.dcos]] -# ## The DC/OS cluster URL. -# cluster_url = "https://dcos-master-1" -# -# ## The ID of the service account. -# service_account_id = "telegraf" -# ## The private key file for the service account. -# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" -# -# ## Path containing login token. If set, will read on every gather. -# # token_file = "/home/dcos/.dcos/token" -# -# ## In all filter options if both include and exclude are empty all items -# ## will be collected. Arrays may contain glob patterns. -# ## -# ## Node IDs to collect metrics from. If a node is excluded, no metrics will -# ## be collected for its containers or apps. -# # node_include = [] -# # node_exclude = [] -# ## Container IDs to collect container metrics from. -# # container_include = [] -# # container_exclude = [] -# ## Container IDs to collect app metrics from. -# # app_include = [] -# # app_exclude = [] -# -# ## Maximum concurrent connections to the cluster. -# # max_connections = 10 -# ## Maximum time to receive a response from cluster. 
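-# ## Hypothetical tuning example: very large clusters can exceed the default
-# ## window below, in which case a longer timeout may be warranted:
-# # response_timeout = "60s"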
-# # response_timeout = "20s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## If false, skip chain & host verification -# # insecure_skip_verify = true -# -# ## Recommended filtering to reduce series cardinality. -# # [inputs.dcos.tagdrop] -# # path = ["/var/lib/mesos/slave/slaves/*"] - - -# # Read metrics from one or many disque servers -# [[inputs.disque]] -# ## An array of URI to gather stats about. Specify an ip or hostname -# ## with optional port and password. -# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. -# ## If no servers are specified, then localhost is used as the host. -# servers = ["localhost"] - - -# # Provide a native collection for dmsetup based statistics for dm-cache -# # This plugin ONLY supports Linux -# [[inputs.dmcache]] -# ## Whether to report per-device stats or not -# per_device = true - - -# # Query given DNS server and gives statistics -# [[inputs.dns_query]] -# ## servers to query -# servers = ["8.8.8.8"] -# -# ## Network is the network protocol name. -# # network = "udp" -# -# ## Domains or subdomains to query. -# # domains = ["."] -# -# ## Query record type. -# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. -# # record_type = "A" -# -# ## Dns server port. -# # port = 53 -# -# ## Query timeout -# # timeout = "2s" -# -# ## Include the specified additional properties in the resulting metric. -# ## The following values are supported: -# ## "first_ip" -- return IP of the first A and AAAA answer -# ## "all_ips" -- return IPs of all A and AAAA answers -# # include_fields = [] - - -# # Read metrics about docker containers -# [[inputs.docker]] -# ## Docker Endpoint -# ## To use TCP, set endpoint = "tcp://[ip]:[port]" -# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" -# endpoint = "unix:///var/run/docker.sock" -# -# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) -# ## Note: configure this in one of the manager nodes in a Swarm cluster. -# ## configuring in multiple Swarm managers results in duplication of metrics. -# gather_services = false -# -# ## Only collect metrics for these containers. Values will be appended to -# ## container_name_include. -# ## Deprecated (1.4.0), use container_name_include -# container_names = [] -# -# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars -# source_tag = false -# -# ## Containers to include and exclude. Collect all if empty. Globs accepted. -# container_name_include = [] -# container_name_exclude = [] -# -# ## Container states to include and exclude. Globs accepted. -# ## When empty only containers in the "running" state will be captured. -# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] -# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] -# # container_state_include = [] -# # container_state_exclude = [] -# -# ## Objects to include for disk usage query -# ## Allowed values are "container", "image", "volume" -# ## When empty disk usage is excluded -# storage_objects = [] -# -# ## Timeout for docker list, info, and stats commands -# timeout = "5s" -# -# ## Whether to report for each container per-device blkio (8:0, 8:1...), -# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. 
-# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. -# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting -# ## is honored. -# perdevice = true -# -# ## Specifies for which classes a per-device metric should be issued -# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) -# ## Please note that this setting has no effect if 'perdevice' is set to 'true' -# # perdevice_include = ["cpu"] -# -# ## Whether to report for each container total blkio and network stats or not. -# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. -# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting -# ## is honored. -# total = false -# -# ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. -# ## Possible values are 'cpu', 'blkio' and 'network' -# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. -# ## Please note that this setting has no effect if 'total' is set to 'false' -# # total_include = ["cpu", "blkio", "network"] -# -# ## docker labels to include and exclude as tags. Globs accepted. -# ## Note that an empty array for both will include all labels as tags -# docker_label_include = [] -# docker_label_exclude = [] -# -# ## Which environment variables should we use as a tag -# tag_env = ["JAVA_HOME", "HEAP_SIZE"] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read metrics about dovecot servers -# [[inputs.dovecot]] -# ## specify dovecot servers via an address:port list -# ## e.g. -# ## localhost:24242 -# ## or as an UDS socket -# ## e.g. -# ## /var/run/dovecot/old-stats -# ## -# ## If no servers are specified, then localhost is used as the host. -# servers = ["localhost:24242"] -# -# ## Type is one of "user", "domain", "ip", or "global" -# type = "global" -# -# ## Wildcard matches like "*.com". An empty string "" is same as "*" -# ## If type = "ip" filters should be -# filters = [""] - - -# # Read metrics about ECS containers -# [[inputs.ecs]] -# ## ECS metadata url. -# ## Metadata v2 API is used if set explicitly. Otherwise, -# ## v3 metadata endpoint API is used if available. -# # endpoint_url = "" -# -# ## Containers to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all containers -# # container_name_include = [] -# # container_name_exclude = [] -# -# ## Container states to include and exclude. Globs accepted. -# ## When empty only containers in the "RUNNING" state will be captured. -# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", -# ## "RESOURCES_PROVISIONED", "STOPPED". -# # container_status_include = [] -# # container_status_exclude = [] -# -# ## ecs labels to include and exclude as tags. Globs accepted. -# ## Note that an empty array for both will include all labels as tags -# ecs_label_include = [ "com.amazonaws.ecs.*" ] -# ecs_label_exclude = [] -# -# ## Timeout for queries. 
-# # timeout = "5s" - - -# # Read stats from one or more Elasticsearch servers or clusters -# [[inputs.elasticsearch]] -# ## specify a list of one or more Elasticsearch servers -# ## you can add username and password to your url to use basic authentication: -# ## servers = ["http://user:pass@localhost:9200"] -# servers = ["http://localhost:9200"] -# -# ## Timeout for HTTP requests to the elastic search server(s) -# ## deprecated in 1.29.0; use 'timeout' instead -# http_timeout = "5s" -# -# ## When local is true (the default), the node will read only its own stats. -# ## Set local to false when you want to read the node stats from all nodes -# ## of the cluster. -# local = true -# -# ## Set cluster_health to true when you want to obtain cluster health stats -# cluster_health = false -# -# ## Adjust cluster_health_level when you want to obtain detailed health stats -# ## The options are -# ## - indices (default) -# ## - cluster -# # cluster_health_level = "indices" -# -# ## Set cluster_stats to true when you want to obtain cluster stats. -# cluster_stats = false -# -# ## Only gather cluster_stats from the master node. -# ## To work this require local = true -# cluster_stats_only_from_master = true -# -# ## Indices to collect; can be one or more indices names or _all -# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index -# ## names that end with a changing value, like a date. -# indices_include = ["_all"] -# -# ## One of "shards", "cluster", "indices" -# ## Currently only "shards" is implemented -# indices_level = "shards" -# -# ## node_stats is a list of sub-stats that you want to have gathered. -# ## Valid options are "indices", "os", "process", "jvm", "thread_pool", -# ## "fs", "transport", "http", "breaker". Per default, all stats are gathered. -# # node_stats = ["jvm", "http"] -# -# ## HTTP Basic Authentication username and password. -# # username = "" -# # password = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as -# ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). -# ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is -# ## provided, Telegraf will use the specified URL as HTTP proxy. -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## Sets the number of most recent indices to return for indices that are -# ## configured with a date-stamped suffix. Each 'indices_include' entry -# ## ending with a wildcard (*) or glob matching pattern will group together -# ## all indices that match it, and sort them by the date or number after -# ## the wildcard. Metrics then are gathered for only the -# ## 'num_most_recent_indices' amount of most recent indices. -# # num_most_recent_indices = 0 - - -# # Derive metrics from aggregating Elasticsearch query results -# [[inputs.elasticsearch_query]] -# ## The full HTTP endpoint URL for your Elasticsearch instance -# ## Multiple urls can be specified as part of the same cluster, -# ## this means that only ONE of the urls will be written to each interval. -# urls = [ "http://node1.es.example.com:9200" ] # required. -# -# ## Elasticsearch client timeout, defaults to "5s". 
-# # timeout = "5s" -# -# ## Set to true to ask Elasticsearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option -# # enable_sniffer = false -# -# ## Set the interval to check if the Elasticsearch nodes are available -# ## This option is only used if enable_sniffer is also set (0s to disable it) -# # health_check_interval = "10s" -# -# ## HTTP basic authentication details (eg. when using x-pack) -# # username = "telegraf" -# # password = "mypassword" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as -# ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). -# ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is -# ## provided, Telegraf will use the specified URL as HTTP proxy. -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# [[inputs.elasticsearch_query.aggregation]] -# ## measurement name for the results of the aggregation query -# measurement_name = "measurement" -# -# ## Elasticsearch indexes to query (accept wildcards). -# index = "index-*" -# -# ## The date/time field in the Elasticsearch index (mandatory). -# date_field = "@timestamp" -# -# ## If the field used for the date/time field in Elasticsearch is also using -# ## a custom date/time format it may be required to provide the format to -# ## correctly parse the field. -# ## -# ## If using one of the built in elasticsearch formats this is not required. -# # date_field_custom_format = "" -# -# ## Time window to query (eg. "1m" to query documents from last minute). -# ## Normally should be set to same as collection interval -# query_period = "1m" -# -# ## Lucene query to filter results -# # filter_query = "*" -# -# ## Fields to aggregate values (must be numeric fields) -# # metric_fields = ["metric"] -# -# ## Aggregation function to use on the metric fields -# ## Must be set if 'metric_fields' is set -# ## Valid values are: avg, sum, min, max, sum -# # metric_function = "avg" -# -# ## Fields to be used as tags -# ## Must be text, non-analyzed fields. Metric aggregations are performed -# ## per tag -# # tags = ["field.keyword", "field2.keyword"] -# -# ## Set to true to not ignore documents when the tag(s) above are missing -# # include_missing_tag = false -# -# ## String value of the tag when the tag does not exist -# ## Used when include_missing_tag is true -# # missing_tag_value = "null" - - -# # Returns ethtool statistics for given interfaces -# # This plugin ONLY supports Linux -# [[inputs.ethtool]] -# ## List of interfaces to pull metrics for -# # interface_include = ["eth0"] -# -# ## List of interfaces to ignore when pulling metrics. -# # interface_exclude = ["eth1"] -# -# ## Plugin behavior for downed interfaces -# ## Available choices: -# ## - expose: collect & report metrics for down interfaces -# ## - skip: ignore interfaces that are marked down -# # down_interfaces = "expose" -# -# ## Reading statistics from interfaces in additional namespaces is also -# ## supported, so long as the namespaces are named (have a symlink in -# ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN -# ## permission. -# ## By default, only the current namespace will be used. 
For additional
-# ## namespace support, at least one of `namespace_include` and
-# ## `namespace_exclude` must be provided.
-# ## To include all namespaces, set `namespace_include` to `["*"]`.
-# ## The initial namespace (if anonymous) can be specified with the empty
-# ## string ("").
-#
-# ## List of namespaces to pull metrics for
-# # namespace_include = []
-#
-# ## List of namespaces to ignore when pulling metrics.
-# # namespace_exclude = []
-#
-# ## Some drivers declare statistics with extra whitespace, different spacing,
-# ## and mixed cases. This list, when enabled, can be used to clean the keys.
-# ## Here are the current possible normalizations:
-# ## * snakecase: converts fooBarBaz to foo_bar_baz
-# ## * trim: removes leading and trailing whitespace
-# ## * lower: changes all capitalized letters to lowercase
-# ## * underscore: replaces spaces with underscores
-# # normalize_keys = ["snakecase", "trim", "lower", "underscore"]


-# # Read metrics from one or more commands that can output to stdout
-# [[inputs.exec]]
-# ## Commands array
-# commands = [
-# "/tmp/test.sh",
-# "/usr/bin/mycollector --foo=bar",
-# "/tmp/collect_*.sh"
-# ]
-#
-# ## Environment variables
-# ## Array of "key=value" pairs to pass as environment variables
-# ## e.g. "KEY=value", "USERNAME=John Doe",
-# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
-# # environment = []
-#
-# ## Timeout for each command to complete.
-# timeout = "5s"
-#
-# ## measurement name suffix (for separating different commands)
-# name_suffix = "_mycollector"
-#
-# ## Data format to consume.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"


-# # Read metrics from fail2ban.
-# [[inputs.fail2ban]]
-# ## Use sudo to run fail2ban-client
-# # use_sudo = false
-#
-# ## Use the given socket instead of the default one
-# # socket = "/var/run/fail2ban/fail2ban.sock"


-# # Read devices value(s) from a Fibaro controller
-# [[inputs.fibaro]]
-# ## Required Fibaro controller address/hostname.
-# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
-# url = "http://<controller-ip>:80"
-#
-# ## Required credentials to access the API (http://<controller-ip>)
-# username = "<username>"
-# password = "<password>"
-#
-# ## Amount of time allowed to complete the HTTP request
-# # timeout = "5s"
-#
-# ## Fibaro Device Type
-# ## By default, this plugin will attempt to read using the HC2 API. For HC3
-# ## devices, set this to "HC3"
-# # device_type = "HC2"


-# # Parse a complete file each interval
-# [[inputs.file]]
-# ## Files to parse each interval. These accept standard unix glob matching rules,
-# ## as well as ** to match recursive files and directories.
-# files = ["/tmp/metrics.out"]
-#
-# ## Character encoding to use when interpreting the file contents. Invalid
-# ## characters are replaced using the unicode replacement character. When set
-# ## to the empty string the data is not decoded to text.
-# ## ex: character_encoding = "utf-8"
-# ## character_encoding = "utf-16le"
-# ## character_encoding = "utf-16be"
-# ## character_encoding = ""
-# # character_encoding = ""
-#
-# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
-#
-#
-# ## Name a tag containing the name of the file the data was parsed from. Leave empty
-# ## to disable. Be cautious when file name variation is high, as this can increase the
-# ## cardinality significantly. Read more about cardinality here:
-# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
-# # file_tag = ""


-# # Count files in a directory
-# [[inputs.filecount]]
-# ## Directories to gather stats about.
-# ## This accepts standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". i.e.:
-# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
-# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
-# ## /var/log -> count all files in /var/log and all of its subdirectories
-# directories = ["/var/cache/apt", "/tmp"]
-#
-# ## Only count files that match the name pattern. Defaults to "*".
-# name = "*"
-#
-# ## Count files in subdirectories. Defaults to true.
-# recursive = true
-#
-# ## Only count regular files. Defaults to true.
-# regular_only = true
-#
-# ## Follow all symlinks while walking the directory tree. Defaults to false.
-# follow_symlinks = false
-#
-# ## Only count files that are at least this size. If size is
-# ## a negative number, only count files that are smaller than the
-# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
-# ## Without quotes and units, interpreted as size in bytes.
-# size = "0B"
-#
-# ## Only count files that have not been touched for at least this
-# ## duration. If mtime is negative, only count files that have been
-# ## touched in this duration. Defaults to "0s".
-# mtime = "0s"


-# # Read stats about given file(s)
-# [[inputs.filestat]]
-# ## Files to gather stats about.
-# ## These accept standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". See https://github.com/gobwas/glob.
-# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
-#
-# ## If true, read the entire file and calculate an md5 checksum.
-# md5 = false


-# # Read real time temps from fireboard.io servers
-# [[inputs.fireboard]]
-# ## Specify auth token for your account
-# auth_token = "invalidAuthToken"
-# ## You can override the fireboard server URL if necessary
-# # url = https://fireboard.io/api/v1/devices.json
-# ## You can set a different http_timeout if you need to.
-# ## You should set a string using a number and time indicator,
-# ## for example "12s" for 12 seconds.
-# # http_timeout = "4s"


-# # Read metrics exposed by fluentd in_monitor plugin
-# [[inputs.fluentd]]
-# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
-# ##
-# ## Endpoint:
-# ## - only one URI is allowed
-# ## - https is not supported
-# endpoint = "http://localhost:24220/api/plugins.json"
-#
-# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
-# exclude = [
-# "monitor_agent",
-# "dummy",
-# ]


-# # Gather repository information from GitHub hosted repositories.
-# [[inputs.github]]
-# ## List of repositories to monitor
-# repositories = [
-# "influxdata/telegraf",
-# "influxdata/influxdb"
-# ]
-#
-# ## GitHub API access token. Unauthenticated requests are limited to 60 per hour. 
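-# ## Hypothetical example: Telegraf substitutes environment variables in the
-# ## config, so the token does not need to be stored here:
-# # access_token = "${GITHUB_TOKEN}"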
-# # access_token = ""
-#
-# ## GitHub API enterprise URL. GitHub Enterprise accounts must specify their base URL.
-# # enterprise_base_url = ""
-#
-# ## Timeout for HTTP requests.
-# # http_timeout = "5s"
-#
-# ## List of additional fields to query.
-# ## NOTE: Getting those fields might involve issuing additional API-calls, so please
-# ## make sure you do not exceed the rate-limit of GitHub.
-# ##
-# ## Available fields are:
-# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
-# # additional_fields = []


-# # Gather metrics by iterating the files located on a Cloud Storage Bucket.
-# [[inputs.google_cloud_storage]]
-# ## Required. Name of Cloud Storage bucket to ingest metrics from.
-# bucket = "my-bucket"
-#
-# ## Optional. Prefix of Cloud Storage bucket keys to list metrics from.
-# # key_prefix = "my-bucket"
-#
-# ## Key that will store the offsets in order to pick up where the ingestion was left.
-# offset_key = "offset_key"
-#
-# ## Maximum number of objects to process per iteration.
-# objects_per_iteration = 10
-#
-# ## Required. Data format to consume.
-# ## Each data format has its own unique set of configuration options.
-# ## Read more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
-#
-# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
-# ## Google Cloud Storage APIs. If not set explicitly, Telegraf will attempt to use
-# ## Application Default Credentials, which is preferred.
-# # credentials_file = "path/to/my/creds.json"


-# # Read flattened metrics from one or more GrayLog HTTP endpoints
-# [[inputs.graylog]]
-# ## API endpoint, currently supported API:
-# ##
-# ## - multiple (e.g. http://[graylog-server-ip]:9000/api/system/metrics/multiple)
-# ## - namespace (e.g. http://[graylog-server-ip]:9000/api/system/metrics/namespace/{namespace})
-# ##
-# ## For namespace endpoint, the metrics array will be ignored for that call.
-# ## Endpoint can contain namespace and multiple type calls.
-# ##
-# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list
-# ## of endpoints
-# servers = [
-# "http://[graylog-server-ip]:9000/api/system/metrics/multiple",
-# ]
-#
-# ## Set timeout (default 5 seconds)
-# # timeout = "5s"
-#
-# ## Metrics list
-# ## List of metrics can be found on Graylog webservice documentation.
-# ## Or by hitting the web service api at:
-# ## http://[graylog-host]:9000/api/system/metrics
-# metrics = [
-# "jvm.cl.loaded",
-# "jvm.memory.pools.Metaspace.committed"
-# ]
-#
-# ## Username and password
-# username = ""
-# password = ""
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false


-# # Read metrics of HAProxy, via stats socket or http endpoints
-# [[inputs.haproxy]]
-# ## List of stats endpoints. Metrics can be collected from both http and socket
-# ## endpoints. Examples of valid endpoints:
-# ## - http://myhaproxy.com:1936/haproxy?stats
-# ## - https://myhaproxy.com:8000/stats
-# ## - socket:/run/haproxy/admin.sock
-# ## - /run/haproxy/*.sock
-# ## - tcp://127.0.0.1:1936
-# ##
-# ## Server addresses not starting with 'http://', 'https://', 'tcp://' will be
-# ## treated as possible sockets. When specifying local socket, glob patterns are
-# ## supported. 
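-# ## Hypothetical mixed example, combining an HTTP stats page with a local
-# ## admin socket; every listed endpoint is collected on each interval:
-# # servers = ["http://haproxy-lb:1936/haproxy?stats", "socket:/run/haproxy/admin.sock"]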
-# servers = ["http://myhaproxy.com:1936/haproxy?stats"] -# -# ## By default, some of the fields are renamed from what haproxy calls them. -# ## Setting this option to true results in the plugin keeping the original -# ## field names. -# # keep_field_names = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Monitor disks' temperatures using hddtemp -# [[inputs.hddtemp]] -# ## By default, telegraf gathers temps data from all disks detected by the -# ## hddtemp. -# ## -# ## Only collect temps from the selected disks. -# ## -# ## A * as the device name will return the temperature values of all disks. -# ## -# # address = "127.0.0.1:7634" -# # devices = ["sda", "*"] - - -# # Read formatted metrics from one or more HTTP endpoints -# [[inputs.http]] -# ## One or more URLs from which to read formatted metrics. -# urls = [ -# "http://localhost/metrics", -# "http+unix:///run/user/420/podman/podman.sock:/d/v4.0.0/libpod/pods/json" -# ] -# -# ## HTTP method -# # method = "GET" -# -# ## Optional HTTP headers -# # headers = {"X-Special-Header" = "Special-Value"} -# -# ## HTTP entity-body to send with POST/PUT requests. -# # body = "" -# -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" -# -# ## Optional Bearer token settings to use for the API calls. -# ## Use either the token itself or the token file if you need a token. -# # token = "eyJhbGc...Qssw5c" -# # token_file = "/path/to/file" -# -# ## Optional HTTP Basic Auth Credentials -# # username = "username" -# # password = "pa$$word" -# -# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. -# # client_id = "clientid" -# # client_secret = "secret" -# # token_url = "https://indentityprovider/oauth2/v1/token" -# # scopes = ["urn:opc:idm:__myscopes__"] -# -# ## HTTP Proxy support -# # use_system_proxy = false -# # http_proxy_url = "" -# -# ## Optional TLS Config -# ## Set to true/false to enforce TLS being enabled/disabled. If not set, -# ## enable TLS only if any of the other options are specified. -# # tls_enable = -# ## Trusted root certificates for server -# # tls_ca = "/path/to/cafile" -# ## Used for TLS client certificate authentication -# # tls_cert = "/path/to/certfile" -# ## Used for TLS client certificate authentication -# # tls_key = "/path/to/keyfile" -# ## Send the specified TLS server name via SNI -# # tls_server_name = "kubernetes.example.com" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Optional Cookie authentication -# # cookie_auth_url = "https://localhost/authMe" -# # cookie_auth_method = "POST" -# # cookie_auth_username = "username" -# # cookie_auth_password = "pa$$word" -# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" } -# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' -# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie -# # cookie_auth_renewal = "5m" -# -# ## Amount of time allowed to complete the HTTP request -# # timeout = "5s" -# -# ## List of success status codes -# # success_status_codes = [200] -# -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# # data_format = "influx"
-#
-
-
-# # HTTP/HTTPS request given an address, a method and a timeout
-# [[inputs.http_response]]
-# ## List of urls to query.
-# # urls = ["http://localhost"]
-#
-# ## Set http_proxy.
-# ## Telegraf uses the system-wide proxy settings if it is not set.
-# # http_proxy = "http://localhost:8888"
-#
-# ## Set response_timeout (default 5 seconds)
-# # response_timeout = "5s"
-#
-# ## HTTP Request Method
-# # method = "GET"
-#
-# ## Whether to follow redirects from the server (defaults to false)
-# # follow_redirects = false
-#
-# ## Optional file with Bearer token
-# ## file content is added as an Authorization header
-# # bearer_token = "/path/to/file"
-#
-# ## Optional HTTP Basic Auth Credentials
-# # username = "username"
-# # password = "pa$$word"
-#
-# ## Optional HTTP Request Body
-# # body = '''
-# # {'fake':'data'}
-# # '''
-#
-# ## Optional HTTP Request Body Form
-# ## Key value pairs to encode and set at URL form. Can be used with the POST
-# ## method + application/x-www-form-urlencoded content type to replicate the
-# ## POSTFORM method.
-# # body_form = { "key": "value" }
-#
-# ## Optional name of the field that will contain the body of the response.
-# ## By default it is set to an empty String indicating that the body's
-# ## content won't be added
-# # response_body_field = ''
-#
-# ## Maximum allowed HTTP response body size in bytes.
-# ## 0 means to use the default of 32MiB.
-# ## If the response body size exceeds this limit a "body_read_error" will
-# ## be raised.
-# # response_body_max_size = "32MiB"
-#
-# ## Optional substring or regex match in body of the response (case sensitive)
-# # response_string_match = "\"service_status\": \"up\""
-# # response_string_match = "ok"
-# # response_string_match = "\".*_status\".?:.?\"up\""
-#
-# ## Expected response status code.
-# ## The status code of the response is compared to this value. If they match,
-# ## the field "response_status_code_match" will be 1, otherwise it will be 0.
-# ## If the expected status code is 0, the check is disabled and the field
-# ## won't be added.
-# # response_status_code = 0
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-# ## Use the given name as the SNI server name on each URL
-# # tls_server_name = ""
-# ## TLS renegotiation method, choose from "never", "once", "freely"
-# # tls_renegotiation_method = "never"
-#
-# ## HTTP Request Headers (all values must be strings)
-# # [inputs.http_response.headers]
-# # Host = "github.com"
-#
-# ## Optional setting to map response http headers into tags
-# ## If the http header is not present on the request, no corresponding tag will
-# ## be added. If multiple instances of the http header are present, only the
-# ## first value will be used.
-# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
-#
-# ## Interface to use when dialing an address
-# # interface = "eth0"
-
-
-# # Gathers huge pages measurements.
-# # This plugin ONLY supports Linux -# [[inputs.hugepages]] -# ## Supported huge page types: -# ## - "root" - based on root huge page control directory: -# ## /sys/kernel/mm/hugepages -# ## - "per_node" - based on per NUMA node directories: -# ## /sys/devices/system/node/node[0-9]*/hugepages -# ## - "meminfo" - based on /proc/meminfo file -# # types = ["root", "per_node"] - - -# # Gather Icinga2 status -# [[inputs.icinga2]] -# ## Required Icinga2 server address -# # server = "https://localhost:5665" -# -# ## Collected Icinga2 objects ("services", "hosts") -# ## Specify at least one object to collect from /v1/objects endpoint. -# # objects = ["services"] -# -# ## Collect metrics from /v1/status endpoint -# ## Choose from: -# ## "ApiListener", "CIB", "IdoMysqlConnection", "IdoPgsqlConnection" -# # status = [] -# -# ## Credentials for basic HTTP authentication -# # username = "admin" -# # password = "admin" -# -# ## Maximum time to receive response. -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true - - -# # Gets counters from all InfiniBand cards and ports installed -# # This plugin ONLY supports Linux -# [[inputs.infiniband]] -# # no configuration - - -# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints -# [[inputs.influxdb]] -# ## Works with InfluxDB debug endpoints out of the box, -# ## but other services can use this format too. -# ## See the influxdb plugin's README for more details. -# -# ## Multiple URLs from which to read InfluxDB-formatted JSON -# ## Default is "http://localhost:8086/debug/vars". -# urls = [ -# "http://localhost:8086/debug/vars" -# ] -# -# ## Username and password to send using HTTP Basic Authentication. -# # username = "" -# # password = "" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## http request & header timeout -# timeout = "5s" - - -# # Intel Baseband Accelerator Input Plugin collects metrics from both dedicated and integrated -# # Intel devices that provide Wireless Baseband hardware acceleration. -# # This plugin ONLY supports Linux. -# [[inputs.intel_baseband]] -# ## Path to socket exposed by pf-bb-config for CLI interaction (mandatory). -# ## In version v23.03 of pf-bb-config the path is created according to the schema: -# ## "/tmp/pf_bb_config.0000\:\:..sock" where 0000\:\:. is the PCI device ID. -# socket_path = "" -# -# ## Path to log file exposed by pf-bb-config with telemetry to read (mandatory). -# ## In version v23.03 of pf-bb-config the path is created according to the schema: -# ## "/var/log/pf_bb_cfg_0000\:\:..log" where 0000\:\:. is the PCI device ID. -# log_file_path = "" -# -# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). -# ## Available choices: -# ## - error: Telegraf will return an error on startup if socket is unreachable -# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather -# # unreachable_socket_behavior = "error" -# -# ## Duration that defines how long the connected socket client will wait for -# ## a response before terminating connection. 
-# ## Since it's local socket access to a fast packet processing application, the timeout should
-# ## be sufficient for most users.
-# ## Setting the value to 0 disables the timeout (not recommended).
-# # socket_access_timeout = "1s"
-#
-# ## Duration that defines the maximum time the plugin will wait for pf-bb-config to write telemetry to the log file.
-# ## Timeout may differ depending on the environment.
-# ## Must be equal to or larger than 50ms.
-# # wait_for_telemetry_timeout = "1s"
-
-
-# ## Reads metrics from DPDK using v2 telemetry interface.
-# ## This plugin ONLY supports Linux
-# [[inputs.intel_dlb]]
-# ## Path to DPDK telemetry socket.
-# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2"
-#
-# ## Default eventdev command list; it gathers metrics from the socket using the given commands.
-# ## Supported options:
-# ## "/eventdev/dev_xstats", "/eventdev/port_xstats",
-# ## "/eventdev/queue_xstats", "/eventdev/queue_links"
-# # eventdev_commands = ["/eventdev/dev_xstats", "/eventdev/port_xstats", "/eventdev/queue_xstats", "/eventdev/queue_links"]
-#
-# ## Detect DLB devices based on device id.
-# ## Currently, only supported and tested device id is `0x2710`.
-# ## Configuration added to support forward compatibility.
-# # dlb_device_types = ["0x2710"]
-#
-# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet).
-# ## Available choices:
-# ## - error: Telegraf will return an error on startup if socket is unreachable
-# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather
-# # unreachable_socket_behavior = "error"
-
-
-# # Intel Platform Monitoring Technology plugin exposes Intel PMT metrics available through the Intel PMT kernel space.
-# # This plugin ONLY supports Linux.
-# [[inputs.intel_pmt]]
-# ## Filepath to PMT XML within local copies of XML files from PMT repository.
-# ## The filepath should be absolute.
-# spec = "/home/telegraf/Intel-PMT/xml/pmt.xml"
-#
-# ## Enable metrics by their datatype.
-# ## See the Enabling Metrics section in README for more details.
-# ## If empty, all metrics are enabled.
-# ## When used, the alternative option samples_enabled should NOT be used.
-# # datatypes_enabled = []
-#
-# ## Enable metrics by their name.
-# ## See the Enabling Metrics section in README for more details.
-# ## If empty, all metrics are enabled.
-# ## When used, the alternative option datatypes_enabled should NOT be used.
-# # samples_enabled = []
-
-
-# # Collect statistics about itself
-# [[inputs.internal]]
-# ## If true, collect telegraf memory stats.
-# # collect_memstats = true
-#
-# ## If true, collect metrics from Go's runtime.metrics. For a full list see:
-# ## https://pkg.go.dev/runtime/metrics
-# # collect_gostats = false
-
-
-# # Monitors internet speed using speedtest.net service
-# [[inputs.internet_speed]]
-# ## This plugin downloads many MB of data each time it is run. As such,
-# ## consider setting a higher interval for this plugin to reduce the
-# ## demand on your internet connection.
-# # interval = "60m"
-#
-# ## Enable to reduce memory usage
-# # memory_saving_mode = false
-#
-# ## Caches the closest server location
-# # cache = false
-#
-# ## Number of concurrent connections
-# ## By default or set to zero, the number of CPU cores is used. Use this to
-# ## reduce the impact on system performance or to increase the connections on
-# ## faster connections to ensure the fastest speed.
-# # connections = 0
-#
-# ## Test mode
-# ## By default, a single server is used for testing.
This may work for most,
-# ## however, setting to "multi" will reach out to multiple servers in an
-# ## attempt to get closer to ideal internet speeds.
-# # test_mode = "single"
-#
-# ## Server ID exclude filter
-# ## Allows the user to exclude or include specific server IDs received by
-# ## speedtest-go. Values in the exclude option will be skipped over. Values in
-# ## the include option are the only options that will be picked from.
-# ##
-# ## See the list of servers speedtest-go will return at:
-# ## https://www.speedtest.net/api/js/servers?engine=js&limit=10
-# ##
-# # server_id_exclude = []
-# # server_id_include = []
-
-
-# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
-# [[inputs.interrupts]]
-# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
-# ## stored as a field.
-# ##
-# ## The default is false for backwards compatibility, and will be changed to
-# ## true in a future version. It is recommended to set to true on new
-# ## deployments.
-# # cpu_as_tag = false
-#
-# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
-# # [inputs.interrupts.tagdrop]
-# # irq = [ "NET_RX", "TASKLET" ]
-
-
-# # Read metrics from the bare metal servers via IPMI
-# [[inputs.ipmi_sensor]]
-# ## optionally specify the path to the ipmitool executable
-# # path = "/usr/bin/ipmitool"
-# ##
-# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
-# ## Sudo must be configured to allow the telegraf user to run ipmitool
-# ## without a password.
-# # use_sudo = false
-# ##
-# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
-# # privilege = "ADMINISTRATOR"
-# ##
-# ## optionally specify one or more servers via a url matching
-# ## [username[:password]@][protocol[(address)]]
-# ## e.g.
-# ## root:passwd@lan(127.0.0.1)
-# ##
-# ## if no servers are specified, local machine sensor stats will be queried
-# ##
-# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
-#
-# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
-# ## gaps or overlap in pulled data
-# interval = "30s"
-#
-# ## Timeout for the ipmitool command to complete. Default is 20 seconds.
-# timeout = "20s"
-#
-# ## Schema Version: (Optional, defaults to version 1)
-# metric_version = 2
-#
-# ## Optionally provide the hex key for the IPMI connection.
-# # hex_key = ""
-#
-# ## If ipmitool should use a cache
-# ## ipmitool runs about 2 to 10 times faster with the cache enabled on HP G10 servers (running Ubuntu 20.04);
-# ## the cache file may not work well for you if some sensors come up late
-# # use_cache = false
-#
-# ## Path to the ipmitools cache file (defaults to OS temp dir)
-# ## The provided path must exist and must be writable
-# # cache_path = ""
-
-
-# # Gather packets and bytes counters from Linux ipsets
-# [[inputs.ipset]]
-# ## By default, we only show sets which have already matched at least 1 packet.
-# ## Set include_unmatched_sets = true to gather them all.
-# include_unmatched_sets = false
-# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
-# ## You can avoid using sudo or root by setting appropriate privileges for
-# ## the telegraf.service systemd service.
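The ipmi_sensor stanza above recommends picking an interval that is a multiple of the timeout; a small arithmetic sketch (helper name invented) makes the relationship explicit:

# Per the recommendation above: interval should be a multiple of timeout so
# polls neither gap nor overlap. Note the sample's 30s/20s pair is not one.
def aligned(interval_s: int, timeout_s: int) -> bool:
    return timeout_s > 0 and interval_s % timeout_s == 0

print(aligned(40, 20))  # True
print(aligned(30, 20))  # False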
-# use_sudo = false
-# ## The default timeout of 1s for ipset execution can be overridden here:
-# # timeout = "1s"
-#
-
-
-# # Gather packets and bytes throughput from iptables
-# # This plugin ONLY supports Linux
-# [[inputs.iptables]]
-# ## iptables requires root access on most systems.
-# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
-# ## Users must configure sudo to allow the telegraf user to run iptables with
-# ## no password.
-# ## iptables can be restricted to only list command "iptables -nvL".
-# use_sudo = false
-# ## Setting 'use_lock' to true runs iptables with the "-w" option.
-# ## Adjust your sudo settings appropriately if using this option
-# ## ("iptables -w 5 -nvL")
-# use_lock = false
-# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
-# # binary = "ip6tables"
-# ## defines the table to monitor:
-# table = "filter"
-# ## defines the chains to monitor.
-# ## NOTE: iptables rules without a comment will not be monitored.
-# ## Read the plugin documentation for more information.
-# chains = [ "INPUT" ]
-
-
-# # Collect virtual and real server stats from Linux IPVS
-# # This plugin ONLY supports Linux
-# [[inputs.ipvs]]
-# # no configuration
-
-
-# # Read jobs and cluster metrics from Jenkins instances
-# [[inputs.jenkins]]
-# ## The Jenkins URL in the format "scheme://host:port"
-# url = "http://my-jenkins-instance:8080"
-# # username = "admin"
-# # password = "admin"
-#
-# ## Set response_timeout
-# response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use SSL but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Optional Max Job Build Age filter
-# ## Default 1 hour, ignore builds older than max_build_age
-# # max_build_age = "1h"
-#
-# ## Optional Sub Job Depth filter
-# ## Jenkins can have unlimited layers of sub jobs
-# ## This config will limit the layers of pulling, default value 0 means
-# ## unlimited pulling until no more sub jobs
-# # max_subjob_depth = 0
-#
-# ## Optional Sub Job Per Layer
-# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
-# ## This config will limit collection to only the latest branches in each layer;
-# ## if empty, the default value of 10 is used
-# # max_subjob_per_layer = 10
-#
-# ## Jobs to include or exclude from gathering
-# ## When using both lists, job_exclude has priority.
-# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
-# # job_include = [ "*" ]
-# # job_exclude = [ ]
-#
-# ## Nodes to include or exclude from gathering
-# ## When using both lists, node_exclude has priority.
-# # node_include = [ "*" ]
-# # node_exclude = [ ]
-#
-# ## Worker pool for jenkins plugin only
-# ## Leaving this field empty will use the default value of 5
-# # max_connections = 5
-#
-# ## When set to true will add node labels as a comma-separated tag. If none
-# ## are found, then a tag with the value of 'none' is used. Finally, if a
-# ## label contains a comma it is replaced with an underscore.
-# # node_labels_as_tag = false
-
-
-# # Read JMX metrics from a Jolokia REST agent endpoint
-# [[inputs.jolokia2_agent]]
-# # default_tag_prefix = ""
-# # default_field_prefix = ""
-# # default_field_separator = "."
-#
-# # Add agents URLs to query
-# urls = ["http://localhost:8080/jolokia"]
-# # username = ""
-# # password = ""
-# # response_timeout = "5s"
-#
-# ## Optional origin URL to include as a header in the request.
Some endpoints -# ## may reject an empty origin. -# # origin = "" -# -# ## Optional TLS config -# # tls_ca = "/var/private/ca.pem" -# # tls_cert = "/var/private/client.pem" -# # tls_key = "/var/private/client-key.pem" -# # insecure_skip_verify = false -# -# ## Add metrics to read -# [[inputs.jolokia2_agent.metric]] -# name = "java_runtime" -# mbean = "java.lang:type=Runtime" -# paths = ["Uptime"] - - -# # Read JMX metrics from a Jolokia REST proxy endpoint -# [[inputs.jolokia2_proxy]] -# # default_tag_prefix = "" -# # default_field_prefix = "" -# # default_field_separator = "." -# -# ## Proxy agent -# url = "http://localhost:8080/jolokia" -# # username = "" -# # password = "" -# # response_timeout = "5s" -# -# ## Optional origin URL to include as a header in the request. Some endpoints -# ## may reject an empty origin. -# # origin = "" -# -# ## Optional TLS config -# # tls_ca = "/var/private/ca.pem" -# # tls_cert = "/var/private/client.pem" -# # tls_key = "/var/private/client-key.pem" -# # insecure_skip_verify = false -# -# ## Add proxy targets to query -# # default_target_username = "" -# # default_target_password = "" -# [[inputs.jolokia2_proxy.target]] -# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" -# # username = "" -# # password = "" -# -# ## Add metrics to read -# [[inputs.jolokia2_proxy.metric]] -# name = "java_runtime" -# mbean = "java.lang:type=Runtime" -# paths = ["Uptime"] - - -# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints -# [[inputs.kapacitor]] -# ## Multiple URLs from which to read Kapacitor-formatted JSON -# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". -# urls = [ -# "http://localhost:9092/kapacitor/v1/debug/vars" -# ] -# -# ## Time limit for http requests -# timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Get kernel statistics from /proc/vmstat -# # This plugin ONLY supports Linux -# [[inputs.kernel_vmstat]] -# # no configuration - - -# # Read status information from one or more Kibana servers -# [[inputs.kibana]] -# ## Specify a list of one or more Kibana servers -# servers = ["http://localhost:5601"] -# -# ## Timeout for HTTP requests -# timeout = "5s" -# -# ## HTTP Basic Auth credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as -# ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). -# ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is -# ## provided, Telegraf will use the specified URL as HTTP proxy. -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" - - -# # Read metrics from the Kubernetes api -# [[inputs.kube_inventory]] -# ## URL for the Kubernetes API. -# ## If empty in-cluster config with POD's service account token will be used. -# # url = "" -# -# ## URL for the kubelet, if set it will be used to collect the pods resource metrics -# # url_kubelet = "http://127.0.0.1:10255" -# -# ## Namespace to use. Set to "" to use all namespaces. -# # namespace = "default" -# -# ## Node name to filter to. 
No filtering by default.
-# # node_name = ""
-#
-# ## Use bearer token for authorization. ('bearer_token' takes priority)
-# ##
-# ## Ignored if url is empty and in-cluster config is used.
-# ##
-# ## If both of these are empty, we'll use the default serviceaccount:
-# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token
-# ##
-# ## To auto-refresh the token, please use a file with the bearer_token option.
-# ## If given a string, Telegraf cannot refresh the token periodically.
-# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-# ## OR
-# ## deprecated in 1.24.0; use bearer_token with a file
-# # bearer_token_string = "abc_123"
-#
-# ## Set response_timeout (default 5 seconds)
-# # response_timeout = "5s"
-#
-# ## Optional Resources to exclude from gathering
-# ## Leave blank to gather everything available.
-# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress",
-# ## "nodes", "persistentvolumes", "persistentvolumeclaims", "pods", "services",
-# ## "statefulsets"
-# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
-#
-# ## Optional Resources to include when gathering
-# ## Overrides resource_exclude if both set.
-# # resource_include = [ "deployments", "nodes", "statefulsets" ]
-#
-# ## selectors to include and exclude as tags. Globs accepted.
-# ## Note that an empty array for both will include all selectors as tags
-# ## selector_exclude overrides selector_include if both set.
-# # selector_include = []
-# # selector_exclude = ["*"]
-#
-# ## Optional TLS Config
-# ## Trusted root certificates for server
-# # tls_ca = "/path/to/cafile"
-# ## Used for TLS client certificate authentication
-# # tls_cert = "/path/to/certfile"
-# ## Used for TLS client certificate authentication
-# # tls_key = "/path/to/keyfile"
-# ## Send the specified TLS server name via SNI
-# # tls_server_name = "kubernetes.example.com"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Uncomment to remove deprecated metrics.
-# # fieldexclude = ["terminated_reason"]
-
-
-# # Read metrics from the kubernetes kubelet api
-# [[inputs.kubernetes]]
-# ## URL for the kubelet, if empty read metrics from all nodes in the cluster
-# url = "http://127.0.0.1:10255"
-#
-# ## Use bearer token for authorization. ('bearer_token' takes priority)
-# ## If both of these are empty, we'll use the default serviceaccount:
-# ## at: /var/run/secrets/kubernetes.io/serviceaccount/token
-# ##
-# ## To re-read the token at each interval, please use a file with the
-# ## bearer_token option. If given a string, Telegraf will always use that
-# ## token.
-# # bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-# ## OR
-# # bearer_token_string = "abc_123"
-#
-# ## Pod labels to be added as tags. An empty array for both include and
-# ## exclude will include all labels.
-# # label_include = []
-# # label_exclude = ["*"]
-#
-# ## Set response_timeout (default 5 seconds)
-# # response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/path/to/cafile"
-# # tls_cert = "/path/to/certfile"
-# # tls_key = "/path/to/keyfile"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # LDAP monitoring plugin
-# [[inputs.ldap]]
-# ## Server to monitor
-# ## The scheme determines the mode to use for connection with
-# ## ldap://... -- unencrypted (non-TLS) connection
-# ## ldaps://... -- TLS connection
-# ## starttls://...
-- StartTLS connection -# ## If no port is given, the default ports, 389 for ldap and starttls and -# ## 636 for ldaps, are used. -# server = "ldap://localhost" -# -# ## Server dialect, can be "openldap" or "389ds" -# # dialect = "openldap" -# -# # DN and password to bind with -# ## If bind_dn is empty an anonymous bind is performed. -# bind_dn = "" -# bind_password = "" -# -# ## Reverse the field names constructed from the monitoring DN -# # reverse_field_names = false -# -# ## Optional TLS Config -# ## Trusted root certificates for server -# # tls_ca = "/path/to/cafile" -# ## Used for TLS client certificate authentication -# # tls_cert = "/path/to/certfile" -# ## Used for TLS client certificate authentication -# # tls_key = "/path/to/keyfile" -# ## Send the specified TLS server name via SNI -# # tls_server_name = "kubernetes.example.com" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read metrics from a LeoFS Server via SNMP -# [[inputs.leofs]] -# ## An array of URLs of the form: -# ## host [ ":" port] -# servers = ["127.0.0.1:4010"] - - -# # The libvirt plugin collects statistics from virtualized guests using virtualization libvirt API. -# [[inputs.libvirt]] -# ## Domain names from which libvirt gather statistics. -# ## By default (empty or missing array) the plugin gather statistics from each domain registered in the host system. -# # domains = [] -# -# ## Libvirt connection URI with hypervisor. -# ## The plugin supports multiple transport protocols and approaches which are configurable via the URI. -# ## The general URI form: driver[+transport]://[username@][hostname][:port]/[path][?extraparameters] -# ## Supported transport protocols: ssh, tcp, tls, unix -# ## URI examples for each type of transport protocol: -# ## 1. SSH: qemu+ssh:///system?keyfile=/&known_hosts=/ -# ## 2. TCP: qemu+tcp:///system -# ## 3. TLS: qemu+tls:///system?pkipath=/certs_dir/ -# ## 4. UNIX: qemu+unix:///system?socket=/ -# ## Default URI is qemu:///system -# # libvirt_uri = "qemu:///system" -# -# ## Statistics groups for which libvirt plugin will gather statistics. -# ## Supported statistics groups: state, cpu_total, balloon, vcpu, interface, block, perf, iothread, memory, dirtyrate -# ## Empty array means no metrics for statistics groups will be exposed by the plugin. -# ## By default the plugin will gather all available statistics. -# # statistics_groups = ["state", "cpu_total", "balloon", "vcpu", "interface", "block", "perf", "iothread", "memory", "dirtyrate"] -# -# ## A list containing additional statistics to be exposed by libvirt plugin. -# ## Supported additional statistics: vcpu_mapping -# ## By default (empty or missing array) the plugin will not collect additional statistics. -# # additional_statistics = [] -# - - -# # Provides Linux CPU metrics -# # This plugin ONLY supports Linux -# [[inputs.linux_cpu]] -# ## Path for sysfs filesystem. -# ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt -# ## Defaults: -# # host_sys = "/sys" -# -# ## CPU metrics collected by the plugin. -# ## Supported options: -# ## "cpufreq", "thermal" -# ## Defaults: -# # metrics = ["cpufreq"] - - -# # Provides Linux sysctl fs metrics -# [[inputs.linux_sysctl_fs]] -# # no configuration - - -# # Read metrics exposed by Logstash -# [[inputs.logstash]] -# ## The URL of the exposed Logstash API endpoint. -# url = "http://127.0.0.1:9600" -# -# ## Use Logstash 5 single pipeline API, set to true when monitoring -# ## Logstash 5. 
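The libvirt_uri grammar above is driver[+transport]://...; since the transport rides in the URL scheme, a plain stdlib parse recovers it. A sketch with invented sample URIs:

from urllib.parse import urlparse

# The transport (ssh, tcp, tls, unix) is the part of the scheme after "+";
# no "+" means a local connection.
for uri in ("qemu:///system", "qemu+ssh://host/system", "qemu+tls://host/system"):
    print(uri, "->", urlparse(uri).scheme.partition("+")[2] or "local")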
-# # single_pipeline = false -# -# ## Enable optional collection components. Can contain -# ## "pipelines", "process", and "jvm". -# # collect = ["pipelines", "process", "jvm"] -# -# ## Timeout for HTTP requests. -# # timeout = "5s" -# -# ## Optional HTTP Basic Auth credentials. -# # username = "username" -# # password = "pa$$word" -# -# ## Optional TLS Config. -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Use TLS but skip chain & host verification. -# # insecure_skip_verify = false -# -# ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as -# ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). -# ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is -# ## provided, Telegraf will use the specified URL as HTTP proxy. -# # use_system_proxy = false -# # http_proxy_url = "http://localhost:8888" -# -# ## Optional HTTP headers. -# # [inputs.logstash.headers] -# # "X-Special-Header" = "Special-Value" - - -# # Read metrics from local Lustre service on OST, MDS -# # This plugin ONLY supports Linux -# [[inputs.lustre2]] -# ## An array of /proc globs to search for Lustre stats -# ## If not specified, the default will work on Lustre 2.5.x -# ## -# # ost_procfiles = [ -# # "/proc/fs/lustre/obdfilter/*/stats", -# # "/proc/fs/lustre/osd-ldiskfs/*/stats", -# # "/proc/fs/lustre/obdfilter/*/job_stats", -# # "/proc/fs/lustre/obdfilter/*/exports/*/stats", -# # ] -# # mds_procfiles = [ -# # "/proc/fs/lustre/mdt/*/md_stats", -# # "/proc/fs/lustre/mdt/*/job_stats", -# # "/proc/fs/lustre/mdt/*/exports/*/stats", -# # ] - - -# # Read metrics about LVM physical volumes, volume groups, logical volumes. -# [[inputs.lvm]] -# ## Use sudo to run LVM commands -# use_sudo = false -# -# ## The default location of the pvs binary can be overridden with: -# #pvs_binary = "/usr/sbin/pvs" -# -# ## The default location of the vgs binary can be overridden with: -# #vgs_binary = "/usr/sbin/vgs" -# -# ## The default location of the lvs binary can be overridden with: -# #lvs_binary = "/usr/sbin/lvs" - - -# # Gathers metrics from the /3.0/reports MailChimp API -# [[inputs.mailchimp]] -# ## MailChimp API key -# ## get from https://admin.mailchimp.com/account/api/ -# api_key = "" # required -# -# ## Reports for campaigns sent more than days_old ago will not be collected. -# ## 0 means collect all and is the default value. -# days_old = 0 -# -# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old -# # campaign_id = "" - - -# # Retrieves information on a specific host in a MarkLogic Cluster -# [[inputs.marklogic]] -# ## Base URL of the MarkLogic HTTP Server. -# url = "http://localhost:8002" -# -# ## List of specific hostnames to retrieve information. At least (1) required. -# # hosts = ["hostname1", "hostname2"] -# -# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges -# # username = "myuser" -# # password = "mypassword" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read metrics from one or many mcrouter servers. -# [[inputs.mcrouter]] -# ## An array of address to gather stats about. Specify an ip or hostname -# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. 
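The mailchimp days_old option above is a plain age cutoff; a hedged sketch of its semantics (function name invented):

from datetime import datetime, timedelta, timezone

# days_old as documented above: 0 collects everything, otherwise campaigns
# sent more than days_old days ago are skipped. Name is illustrative.
def keep_campaign(sent_at: datetime, days_old: int) -> bool:
    if days_old == 0:
        return True
    return sent_at >= datetime.now(timezone.utc) - timedelta(days=days_old)

print(keep_campaign(datetime.now(timezone.utc) - timedelta(days=3), 7))  # True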
-# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
-#
-# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
-# # timeout = "5s"
-
-
-# # Get kernel statistics from /proc/mdstat
-# # This plugin ONLY supports Linux
-# [[inputs.mdstat]]
-# ## Sets file path
-# ## If not specified, then default is /proc/mdstat
-# # file_name = "/proc/mdstat"
-
-
-# # Read metrics from one or many memcached servers.
-# [[inputs.memcached]]
-# # An array of addresses to gather stats about. Specify an ip or hostname
-# # with optional port. ie localhost, 10.0.0.1:11211, etc.
-# servers = ["localhost:11211"]
-# # An array of unix memcached sockets to gather stats about.
-# # unix_sockets = ["/var/run/memcached.sock"]
-#
-# ## Optional TLS Config
-# # enable_tls = false
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## If false, skip chain & host verification
-# # insecure_skip_verify = true
-
-
-# # Telegraf plugin for gathering metrics from N Mesos masters
-# [[inputs.mesos]]
-# ## Timeout, in ms.
-# timeout = 100
-#
-# ## A list of Mesos masters.
-# masters = ["http://localhost:5050"]
-#
-# ## Master metrics groups to be collected, by default, all enabled.
-# master_collections = [
-# "resources",
-# "master",
-# "system",
-# "agents",
-# "frameworks",
-# "framework_offers",
-# "tasks",
-# "messages",
-# "evqueue",
-# "registrar",
-# "allocator",
-# ]
-#
-# ## A list of Mesos slaves, default is []
-# # slaves = []
-#
-# ## Slave metrics groups to be collected, by default, all enabled.
-# # slave_collections = [
-# # "resources",
-# # "agent",
-# # "system",
-# # "executors",
-# # "tasks",
-# # "messages",
-# # ]
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
-# [[inputs.minecraft]]
-# ## Address of the Minecraft server.
-# # server = "localhost"
-#
-# ## Server RCON Port.
-# # port = "25575"
-#
-# ## Server RCON Password.
-# password = ""
-#
-# ## Uncomment to remove deprecated metric components.
-# # tagdrop = ["server"]
-
-
-# # Generate metrics for test and demonstration purposes
-# [[inputs.mock]]
-# ## Set the metric name to use for reporting
-# metric_name = "mock"
-#
-# ## Optional string key-value pairs of tags to add to all metrics
-# # [inputs.mock.tags]
-# # "key" = "value"
-#
-# ## One or more mock data fields *must* be defined.
-# # [[inputs.mock.constant]] -# # name = "constant" -# # value = value_of_any_type -# # [[inputs.mock.random]] -# # name = "rand" -# # min = 1.0 -# # max = 6.0 -# # [[inputs.mock.sine_wave]] -# # name = "wave" -# # amplitude = 1.0 -# # period = 0.5 -# # [[inputs.mock.step]] -# # name = "plus_one" -# # start = 0.0 -# # step = 1.0 -# # [[inputs.mock.stock]] -# # name = "abc" -# # price = 50.00 -# # volatility = 0.2 - - -# # Retrieve data from MODBUS slave devices -# [[inputs.modbus]] -# ## Connection Configuration -# ## -# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or -# ## via serial line communication in binary (RTU) or readable (ASCII) encoding -# ## -# ## Device name -# name = "Device" -# -# ## Slave ID - addresses a MODBUS device on the bus -# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] -# slave_id = 1 -# -# ## Timeout for each request -# timeout = "1s" -# -# ## Maximum number of retries and the time to wait between retries -# ## when a slave-device is busy. -# # busy_retries = 0 -# # busy_retries_wait = "100ms" -# -# # TCP - connect via Modbus/TCP -# controller = "tcp://localhost:502" -# -# ## Serial (RS485; RS232) -# ## For RS485 specific setting check the end of the configuration. -# ## For unix-like operating systems use: -# # controller = "file:///dev/ttyUSB0" -# ## For Windows operating systems use: -# # controller = "COM1" -# # baud_rate = 9600 -# # data_bits = 8 -# # parity = "N" -# # stop_bits = 1 -# -# ## Transmission mode for Modbus packets depending on the controller type. -# ## For Modbus over TCP you can choose between "TCP" , "RTUoverTCP" and -# ## "ASCIIoverTCP". -# ## For Serial controllers you can choose between "RTU" and "ASCII". -# ## By default this is set to "auto" selecting "TCP" for ModbusTCP connections -# ## and "RTU" for serial connections. -# # transmission_mode = "auto" -# -# ## Trace the connection to the modbus device as debug messages -# ## Note: You have to enable telegraf's debug mode to see those messages! 
-# # debug_connection = false -# -# ## Define the configuration schema -# ## |---register -- define fields per register type in the original style (only supports one slave ID) -# ## |---request -- define fields on a requests base -# ## |---metric -- define fields on a metric base -# configuration_type = "register" -# ## --- "register" configuration style --- -# -# ## Measurements -# ## -# -# ## Digital Variables, Discrete Inputs and Coils -# ## measurement - the (optional) measurement name, defaults to "modbus" -# ## name - the variable name -# ## data_type - the (optional) output type, can be BOOL or UINT16 (default) -# ## address - variable address -# -# discrete_inputs = [ -# { name = "start", address = [0]}, -# { name = "stop", address = [1]}, -# { name = "reset", address = [2]}, -# { name = "emergency_stop", address = [3]}, -# ] -# coils = [ -# { name = "motor1_run", address = [0]}, -# { name = "motor1_jog", address = [1]}, -# { name = "motor1_stop", address = [2]}, -# ] -# -# ## Analog Variables, Input Registers and Holding Registers -# ## measurement - the (optional) measurement name, defaults to "modbus" -# ## name - the variable name -# ## byte_order - the ordering of bytes -# ## |---AB, ABCD - Big Endian -# ## |---BA, DCBA - Little Endian -# ## |---BADC - Mid-Big Endian -# ## |---CDAB - Mid-Little Endian -# ## data_type - INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) -# ## INT16, UINT16, INT32, UINT32, INT64, UINT64, -# ## FLOAT16-IEEE, FLOAT32-IEEE, FLOAT64-IEEE (IEEE 754 binary representation) -# ## FIXED, UFIXED (fixed-point representation on input) -# ## FLOAT32 is a deprecated alias for UFIXED for historic reasons, should be avoided -# ## STRING (byte-sequence converted to string) -# ## scale - the final numeric variable representation -# ## address - variable address -# -# holding_registers = [ -# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, -# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, -# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, -# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, -# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, -# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, -# { name = "firmware", byte_order = "AB", data_type = "STRING", address = [5, 6, 7, 8, 9, 10, 11, 12]}, -# ] -# input_registers = [ -# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, -# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, -# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, -# ] -# -# ## --- "request" configuration style --- -# -# ## Per request definition -# ## -# -# ## Define a request sent to the device -# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. -# [[inputs.modbus.request]] -# ## ID of the modbus slave device to query. -# ## If you need to query multiple slave-devices, create several "request" definitions. -# slave_id = 1 -# -# ## Byte order of the data. 
-# ## |---ABCD -- Big Endian (Motorola)
-# ## |---DCBA -- Little Endian (Intel)
-# ## |---BADC -- Big Endian with byte swap
-# ## |---CDAB -- Little Endian with byte swap
-# byte_order = "ABCD"
-#
-# ## Type of the register for the request
-# ## Can be "coil", "discrete", "holding" or "input"
-# register = "coil"
-#
-# ## Name of the measurement.
-# ## Can be overridden by the individual field definitions. Defaults to "modbus"
-# # measurement = "modbus"
-#
-# ## Request optimization algorithm.
-# ## |---none -- Do not perform any optimization and use the given layout (default)
-# ## |---shrink -- Shrink requests to actually requested fields
-# ## | by stripping leading and trailing omits
-# ## |---rearrange -- Rearrange request boundaries within consecutive address ranges
-# ## | to reduce the number of requested registers by keeping
-# ## | the number of requests.
-# ## |---max_insert -- Rearrange request keeping the number of extra fields below the value
-# ## provided in "optimization_max_register_fill". It is not necessary to define 'omitted'
-# ## fields as the optimization will add such fields only where needed.
-# # optimization = "none"
-#
-# ## Maximum number of registers the optimizer is allowed to insert between two fields to
-# ## save requests.
-# ## This option is only used for the 'max_insert' optimization strategy.
-# ## NOTE: All omitted fields are ignored, so this option denotes the effective hole
-# ## size to fill.
-# # optimization_max_register_fill = 50
-#
-# ## Field definitions
-# ## Analog Variables, Input Registers and Holding Registers
-# ## address - address of the register to query. For coil and discrete inputs this is the bit address.
-# ## name *1 - field name
-# ## type *1,2 - type of the modbus field, can be
-# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants)
-# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and
-# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation)
-# ## STRING (byte-sequence converted to string)
-# ## length *1,2 - (optional) number of registers, ONLY valid for STRING type
-# ## scale *1,2,4 - (optional) factor to scale the variable with
-# ## output *1,3,4 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64.
-# ## Defaults to FLOAT64 for numeric fields if "scale" is provided.
-# ## Otherwise the input "type" class is used (e.g. INT* -> INT64).
-# ## measurement *1 - (optional) measurement name, defaults to the setting of the request
-# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
-# ## with a single request. Defaults to "false".
-# ##
-# ## *1: These fields are ignored if field is omitted ("omit"=true)
-# ## *2: These fields are ignored for both "coil" and "discrete"-input type of registers.
-# ## *3: This field can only be "UINT16" or "BOOL" if specified for both "coil"
-# ## and "discrete"-input type of registers. By default the fields are
-# ## output as zero or one in UINT16 format unless "BOOL" is used.
-# ## *4: These fields cannot be used with "STRING"-type fields.
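A worked illustration of the scale rule in the legend above, pure arithmetic with invented register values:

# With "scale" given, numeric fields default to FLOAT64 output; the stored
# value is the decoded register value times the scale factor.
raw_voltage = 2305          # e.g. an INT16 holding-register reading
scale = 0.1                 # as in the voltage example further below
print(raw_voltage * scale)  # -> 230.5, emitted as a float field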
-#
-# ## Coil / discrete input example
-# fields = [
-# { address=0, name="motor1_run" },
-# { address=1, name="jog", measurement="motor" },
-# { address=2, name="motor1_stop", omit=true },
-# { address=3, name="motor1_overheating", output="BOOL" },
-# { address=4, name="firmware", type="STRING", length=8 },
-# ]
-#
-# [inputs.modbus.request.tags]
-# machine = "impresser"
-# location = "main building"
-#
-# [[inputs.modbus.request]]
-# ## Holding example
-# ## All of those examples will result in FLOAT64 field outputs
-# slave_id = 1
-# byte_order = "DCBA"
-# register = "holding"
-# fields = [
-# { address=0, name="voltage", type="INT16", scale=0.1 },
-# { address=1, name="current", type="INT32", scale=0.001 },
-# { address=3, name="power", type="UINT32", omit=true },
-# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
-# { address=7, name="frequency", type="UINT32", scale=0.1 },
-# { address=8, name="power_factor", type="INT64", scale=0.01 },
-# ]
-#
-# [inputs.modbus.request.tags]
-# machine = "impresser"
-# location = "main building"
-#
-# [[inputs.modbus.request]]
-# ## Input example with type conversions
-# slave_id = 1
-# byte_order = "ABCD"
-# register = "input"
-# fields = [
-# { address=0, name="rpm", type="INT16" }, # will result in INT64 field
-# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field
-# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field
-# { address=4, name="hours", type="UINT32" }, # will result in UINT64 field
-# ]
-#
-# [inputs.modbus.request.tags]
-# machine = "impresser"
-# location = "main building"
-#
-# ## --- "metric" configuration style ---
-#
-# ## Per metric definition
-# ##
-#
-# ## Request optimization algorithm across metrics
-# ## |---none -- Do not perform any optimization and just group requests
-# ## | within metrics (default)
-# ## |---max_insert -- Collate registers across all defined metrics and fill in
-# ## holes to optimize the number of requests.
-# # optimization = "none"
-#
-# ## Maximum number of registers the optimizer is allowed to insert between
-# ## non-consecutive registers to save requests.
-# ## This option is only used for the 'max_insert' optimization strategy and
-# ## effectively denotes the hole size between registers to fill.
-# # optimization_max_register_fill = 50
-#
-# ## Define a metric produced by the requests to the device
-# ## Multiple of those metrics can be defined. The referenced registers will
-# ## be collated into requests sent to the device
-# [[inputs.modbus.metric]]
-# ## ID of the modbus slave device to query
-# ## If you need to query multiple slave-devices, create several "metric" definitions.
-# slave_id = 1
-#
-# ## Byte order of the data
-# ## |---ABCD -- Big Endian (Motorola)
-# ## |---DCBA -- Little Endian (Intel)
-# ## |---BADC -- Big Endian with byte swap
-# ## |---CDAB -- Little Endian with byte swap
-# # byte_order = "ABCD"
-#
-# ## Name of the measurement
-# # measurement = "modbus"
-#
-# ## Field definitions
-# ## register - type of the modbus register, can be "coil", "discrete",
-# ## "holding" or "input". Defaults to "holding".
-# ## address - address of the register to query. For coil and discrete inputs this is the bit address.
-# ## name - field name -# ## type *1 - type of the modbus field, can be -# ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) -# ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and -# ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) -# ## STRING (byte-sequence converted to string) -# ## length *1 - (optional) number of registers, ONLY valid for STRING type -# ## scale *1,3 - (optional) factor to scale the variable with -# ## output *2,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if -# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). -# ## -# ## *1: These fields are ignored for both "coil" and "discrete"-input type of registers. -# ## *2: This field can only be "UINT16" or "BOOL" if specified for both "coil" -# ## and "discrete"-input type of registers. By default the fields are -# ## output as zero or one in UINT16 format unless "BOOL" is used. -# ## *3: These fields cannot be used with "STRING"-type fields. -# fields = [ -# { register="coil", address=0, name="door_open"}, -# { register="coil", address=1, name="status_ok"}, -# { register="holding", address=0, name="voltage", type="INT16" }, -# { address=1, name="current", type="INT32", scale=0.001 }, -# { address=5, name="energy", type="FLOAT32", scale=0.001 }, -# { address=7, name="frequency", type="UINT32", scale=0.1 }, -# { address=8, name="power_factor", type="INT64", scale=0.01 }, -# { address=9, name="firmware", type="STRING", length=8 }, -# ] -# -# ## Tags assigned to the metric -# # [inputs.modbus.metric.tags] -# # machine = "impresser" -# # location = "main building" -# -# -# ## RS485 specific settings. Only take effect for serial controllers. -# ## Note: This has to be at the end of the modbus configuration due to -# ## TOML constraints. -# # [inputs.modbus.rs485] -# ## Delay RTS prior to sending -# # delay_rts_before_send = "0ms" -# ## Delay RTS after to sending -# # delay_rts_after_send = "0ms" -# ## Pull RTS line to high during sending -# # rts_high_during_send = false -# ## Pull RTS line to high after sending -# # rts_high_after_send = false -# ## Enabling receiving (Rx) during transmission (Tx) -# # rx_during_tx = false -# -# ## Enable workarounds required by some devices to work correctly -# # [inputs.modbus.workarounds] -# ## Pause after connect delays the first request by the specified time. -# ## This might be necessary for (slow) devices. -# # pause_after_connect = "0ms" -# -# ## Pause between read requests sent to the device. -# ## This might be necessary for (slow) serial devices. -# # pause_between_requests = "0ms" -# -# ## Close the connection after every gather cycle. -# ## Usually the plugin closes the connection after a certain idle-timeout, -# ## however, if you query a device with limited simultaneous connectivity -# ## (e.g. serial devices) from multiple instances you might want to only -# ## stay connected during gather and disconnect afterwards. -# # close_connection_after_gather = false -# -# ## Force the plugin to read each field in a separate request. -# ## This might be necessary for devices not conforming to the spec, -# ## see https://github.com/influxdata/telegraf/issues/12071. -# # one_request_per_field = false -# -# ## Enforce the starting address to be zero for the first request on -# ## coil registers. 
This is necessary for some devices; see
-# ## https://github.com/influxdata/telegraf/issues/8905
-# # read_coils_starting_at_zero = false
-#
-# ## String byte-location in registers AFTER byte-order conversion
-# ## Some devices (e.g. EM340) place the string bytes in only the upper or
-# ## lower byte location of a register; see
-# ## https://github.com/influxdata/telegraf/issues/14748
-# ## Available settings:
-# ## lower -- use only lower byte of the register i.e. 00XX 00XX 00XX 00XX
-# ## upper -- use only upper byte of the register i.e. XX00 XX00 XX00 XX00
-# ## By default both bytes of the register are used i.e. XXXX XXXX.
-# # string_register_location = ""
-
-
-# # Read metrics and status information about processes managed by Monit
-# [[inputs.monit]]
-# ## Monit HTTPD address
-# address = "http://127.0.0.1:2812"
-#
-# ## Username and Password for Monit
-# # username = ""
-# # password = ""
-#
-# ## Amount of time allowed to complete the HTTP request
-# # timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Aggregates the contents of multiple files into a single point
-# [[inputs.multifile]]
-# ## Base directory where telegraf will look for files.
-# ## Omit this option to use absolute paths.
-# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
-#
-# ## If true, discard all data when a single file can't be read.
-# ## Else, Telegraf omits the field generated from this file.
-# # fail_early = true
-#
-# ## Files to parse each interval.
-# [[inputs.multifile.file]]
-# file = "in_pressure_input"
-# dest = "pressure"
-# conversion = "float"
-# [[inputs.multifile.file]]
-# file = "in_temp_input"
-# dest = "temperature"
-# conversion = "float(3)"
-# [[inputs.multifile.file]]
-# file = "in_humidityrelative_input"
-# dest = "humidityrelative"
-# conversion = "float(3)"
-
-
-# # Read metrics from one or many mysql servers
-# [[inputs.mysql]]
-# ## specify servers via a url matching:
-# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
-# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
-# ## e.g.
-# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
-# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
-# #
-# ## If no servers are specified, then localhost is used as the host.
-# servers = ["tcp(127.0.0.1:3306)/"]
-#
-# ## Selects the metric output format.
-# ##
-# ## This option exists to maintain backwards compatibility; if you have
-# ## existing metrics, do not set or change this value until you are ready to
-# ## migrate to the new format.
-# ##
-# ## If you do not have existing metrics from this plugin, set to the latest
-# ## version.
-# ##
-# ## Telegraf >=1.6: metric_version = 2
-# ## <1.6: metric_version = 1 (or unset)
-# metric_version = 2
-#
-# ## if the list is empty, then metrics are gathered from all database tables
-# # table_schema_databases = []
-#
-# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided
-# ## in the list above
-# # gather_table_schema = false
-#
-# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
-# # gather_process_list = false
-#
-# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
-# # gather_user_statistics = false
-#
-# ## gather auto_increment columns and max values from information schema
-# # gather_info_schema_auto_inc = false
-#
-# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
-# # gather_innodb_metrics = false
-#
-# ## gather metrics from all channels from SHOW SLAVE STATUS command output
-# # gather_all_slave_channels = false
-#
-# ## gather metrics from SHOW SLAVE STATUS command output
-# # gather_slave_status = false
-#
-# ## use SHOW ALL SLAVES STATUS command output for MariaDB
-# # mariadb_dialect = false
-#
-# ## gather metrics from SHOW BINARY LOGS command output
-# # gather_binary_logs = false
-#
-# ## gather metrics from SHOW GLOBAL VARIABLES command output
-# # gather_global_variables = true
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
-# # gather_table_io_waits = false
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
-# # gather_table_lock_waits = false
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
-# # gather_index_io_waits = false
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
-# # gather_event_waits = false
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
-# # gather_file_events_stats = false
-#
-# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
-# # gather_perf_events_statements = false
-# #
-# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
-# # gather_perf_sum_per_acc_per_event = false
-# #
-# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
-# ## in case of an empty list, all events will be gathered
-# # perf_summary_events = []
-#
-# ## the limits for metrics from perf_events_statements
-# # perf_events_statements_digest_text_limit = 120
-# # perf_events_statements_limit = 250
-# # perf_events_statements_time_limit = 86400
-#
-# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
-# ## example: interval_slow = "30m"
-# # interval_slow = ""
-#
-# ## Optional TLS Config (used if tls=custom parameter specified in server uri)
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Provides metrics about the state of a NATS server
-# # This plugin does NOT support FreeBSD
-# [[inputs.nats]]
-# ## The address of the monitoring endpoint of the NATS server
-# server = "http://localhost:8222"
-#
-# ## Maximum time to receive response
-# # response_timeout = "5s"
-
-
-# # Neptune Apex data collector
-# [[inputs.neptune_apex]]
-# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
-# ## Measurements will be logged under "apex".
-#
-# ## The base URL of the local Apex(es). If you specify more than one server, they will
-# ## be differentiated by the "source" tag.
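As noted above, multiple Apex servers are told apart by a "source" tag; a tiny sketch of that mapping, with the tag derivation assumed from the host part of each URL rather than taken from the plugin source:

from urllib.parse import urlparse

# Assumption for illustration only: derive the "source" tag from the host
# of each configured server URL.
servers = ["http://apex.local", "http://apex-2.local"]
for s in servers:
    print({"source": urlparse(s).hostname})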
-# servers = [
-# "http://apex.local",
-# ]
-#
-# ## The response_timeout specifies how long to wait for a reply from the Apex.
-# #response_timeout = "5s"
-#
-
-
-# # Gather metrics about network interfaces
-# [[inputs.net]]
-# ## By default, telegraf gathers stats from any up interface (excluding loopback)
-# ## Setting interfaces will tell it to gather these explicit interfaces,
-# ## regardless of status. When specifying an interface, glob-style
-# ## patterns are also supported.
-# ##
-# # interfaces = ["eth*", "enp0s[0-1]", "lo"]
-# ##
-# ## On linux systems telegraf also collects protocol stats.
-# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
-# ##
-# ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged!
-# ## Please set this to `true` and use the 'inputs.nstat'
-# ## plugin instead.
-# # ignore_protocol_stats = false
-
-
-# # Collect response time of a TCP or UDP connection
-# [[inputs.net_response]]
-# ## Protocol, must be "tcp" or "udp"
-# ## NOTE: because the "udp" protocol does not respond to requests, it requires
-# ## a send/expect string pair (see below).
-# protocol = "tcp"
-# ## Server address (default localhost)
-# address = "localhost:80"
-#
-# ## Set timeout
-# # timeout = "1s"
-#
-# ## Set read timeout (only used if expecting a response)
-# # read_timeout = "1s"
-#
-# ## The following options are required for UDP checks. For TCP, they are
-# ## optional. The plugin will send the given string to the server and then
-# ## expect to receive the given 'expect' string back.
-# ## string sent to the server
-# # send = "ssh"
-# ## expected string in answer
-# # expect = "ssh"
-#
-# ## Uncomment to remove deprecated fields; recommended for new deploys
-# # fieldexclude = ["result_type", "string_found"]
-
-
-# # Read TCP metrics such as established, time wait and sockets counts.
-# [[inputs.netstat]]
-# # no configuration
-
-
-# # Read per-mount NFS client metrics from /proc/self/mountstats
-# [[inputs.nfsclient]]
-# ## Read more low-level metrics (optional, defaults to false)
-# # fullstat = false
-#
-# ## List of mounts to explicitly include or exclude (optional)
-# ## The pattern (Go regexp) is matched against the mount point (not the
-# ## device being mounted). If include_mounts is set, all mounts are ignored
-# ## unless present in the list. If a mount is listed in both include_mounts
-# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
-# # include_mounts = []
-# # exclude_mounts = []
-#
-# ## List of operations to include or exclude from collecting. This applies
-# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
-# ## the default is to collect everything; when include_operations is set, only
-# ## those OPs are collected; when exclude_operations is set, all are collected
-# ## except those listed. If include and exclude are set, the OP is excluded.
-# ## See /proc/self/mountstats for a list of valid operations; note that
-# ## NFSv3 and NFSv4 have different lists. While it is not possible to
-# ## have different include/exclude lists for NFSv3/4, unused elements
-# ## in the list should be okay. It is possible to have different lists
-# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
-# ## with their own lists. See "include_mounts" above, and be careful of
-# ## duplicate metrics.
-# # include_operations = []
-# # exclude_operations = []
-
-
-# # Read Nginx's basic status information (ngx_http_stub_status_module)
-# [[inputs.nginx]]
-# ## An array of Nginx stub_status URI to gather stats.
-# urls = ["http://localhost/server_status"]
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-
-
-# # Read Nginx Plus' advanced status information
-# [[inputs.nginx_plus]]
-# ## An array of Nginx status URIs to gather stats.
-# urls = ["http://localhost/status"]
-#
-# # HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read Nginx Plus API advanced status information
-# [[inputs.nginx_plus_api]]
-# ## An array of Nginx API URIs to gather stats.
-# urls = ["http://localhost/api"]
-# # Nginx API version, default: 3
-# # api_version = 3
-#
-# # HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read Nginx virtual host traffic status module information (nginx-module-sts)
-# [[inputs.nginx_sts]]
-# ## An array of ngx_http_status_module or status URI to gather stats.
-# urls = ["http://localhost/status"]
-#
-# ## HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
-# [[inputs.nginx_upstream_check]]
-# ## A URL where the Nginx Upstream check module is enabled
-# ## It should be set to return a JSON formatted response
-# url = "http://127.0.0.1/status?format=json"
-#
-# ## HTTP method
-# # method = "GET"
-#
-# ## Optional HTTP headers
-# # headers = {"X-Special-Header" = "Special-Value"}
-#
-# ## Override HTTP "Host" header
-# # host_header = "check.example.com"
-#
-# ## Timeout for HTTP requests
-# timeout = "5s"
-#
-# ## Optional HTTP Basic Auth credentials
-# # username = "username"
-# # password = "pa$$word"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read Nginx virtual host traffic status module information (nginx-module-vts)
-# [[inputs.nginx_vts]]
-# ## An array of ngx_http_status_module or status URI to gather stats.
-# urls = ["http://localhost/status"]
-#
-# ## HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read metrics from the Nomad API
-# [[inputs.nomad]]
-# ## URL for the Nomad agent
-# # url = "http://127.0.0.1:4646"
-#
-# ## Set response_timeout (default 5 seconds)
-# # response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = /path/to/cafile
-# # tls_cert = /path/to/certfile
-# # tls_key = /path/to/keyfile
-
-
-# # A plugin to collect stats from NSD, an authoritative DNS name server
-# [[inputs.nsd]]
-# ## Address of server to connect to, optionally ':port'. Defaults to the
-# ## address in the nsd config file.
-# server = "127.0.0.1:8953"
-#
-# ## If running as a restricted user you can prepend sudo for additional access:
-# # use_sudo = false
-#
-# ## The default location of the nsd-control binary can be overridden with:
-# # binary = "/usr/sbin/nsd-control"
-#
-# ## The default location of the nsd config file can be overridden with:
-# # config_file = "/etc/nsd/nsd.conf"
-#
-# ## The default timeout of 1s can be overridden with:
-# # timeout = "1s"
-
-
-# # Read NSQ topic and channel statistics.
-# [[inputs.nsq]]
-# ## An array of NSQD HTTP API endpoints
-# endpoints = ["http://localhost:4151"]
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Collect kernel snmp counters and network interface statistics
-# [[inputs.nstat]]
-# ## file paths for proc files. If empty, default paths will be used:
-# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
-# ## These can also be overridden with env variables, see README.
-# proc_net_netstat = "/proc/net/netstat"
-# proc_net_snmp = "/proc/net/snmp"
-# proc_net_snmp6 = "/proc/net/snmp6"
-# ## dump metrics with 0 values too
-# dump_zeros = true
-
-
-# # Get standard NTP query metrics, requires ntpq executable.
-# [[inputs.ntpq]]
-# ## Servers to query with ntpq.
-# ## If no server is given, the local machine is queried.
-# # servers = []
-#
-# ## If false, set the -n ntpq flag. Can reduce metric gather time.
-# ## DEPRECATED since 1.24.0: add '-n' to 'options' instead to skip DNS lookup
-# # dns_lookup = true
-#
-# ## Options to pass to the ntpq command.
-# # options = "-p"
-#
-# ## Output format for the 'reach' field.
-# ## Available values are
-# ## octal -- output as is in octal representation e.g. 377 (default)
-# ## decimal -- convert value to decimal representation e.g. 371 -> 249
-# ## count -- count the number of bits in the value. This represents
-# ## the number of successful reaches, e.g. 37 -> 5
-# ## ratio -- output the ratio of successful attempts e.g. 37 -> 5/8 = 0.625
-# # reach_format = "octal"
-
-
-# # Pulls statistics from nvidia GPUs attached to the host
-# [[inputs.nvidia_smi]]
-# ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi"
-# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value);
-# ## if it is not found, we will try to locate it on PATH (exec.LookPath); if it is still not found, an error will be returned
-# # bin_path = "/usr/bin/nvidia-smi"
-#
-# ## Optional: specifies plugin behavior regarding missing nvidia-smi binary
-# ## Available choices:
-# ## - error: telegraf will return an error on startup
-# ## - ignore: telegraf will ignore this plugin
-# # startup_error_behavior = "error"
-#
-# ## Optional: timeout for GPU polling
-# # timeout = "5s"
-
-
-# # Retrieve data from OPCUA devices
-# [[inputs.opcua]]
-# ## Metric name
-# # name = "opcua"
-# #
-# ## OPC UA Endpoint URL
-# # endpoint = "opc.tcp://localhost:4840"
-# #
-# ## Maximum time allowed to establish a connection to the endpoint.
-# # connect_timeout = "10s"
-# #
-# ## Maximum time allowed for a request over the established connection.
-# # request_timeout = "5s"
-# #
-# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
-# ## "Basic256Sha256", or "auto"
-# # security_policy = "auto"
-# #
-# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
-# # security_mode = "auto"
-# #
-# ## Path to cert.pem. Required when security mode or policy isn't "None".
-# ## If cert path is not supplied, self-signed cert and key will be generated.
-# # certificate = "/etc/telegraf/cert.pem"
-# #
-# ## Path to private key.pem. Required when security mode or policy isn't "None".
-# ## If key path is not supplied, self-signed cert and key will be generated.
-# # private_key = "/etc/telegraf/key.pem"
-# #
-# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
-# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
-# # auth_method = "Anonymous"
-# #
-# ## Username. Required for auth_method = "UserName"
-# # username = ""
-# #
-# ## Password. Required for auth_method = "UserName"
-# # password = ""
-# #
-# ## Option to select the metric timestamp to use. Valid options are:
-# ## "gather" -- uses the time of receiving the data in telegraf
-# ## "server" -- uses the timestamp provided by the server
-# ## "source" -- uses the timestamp provided by the source
-# # timestamp = "gather"
-# #
-# ## Client trace messages
-# ## When set to true, and debug mode enabled in the agent settings, the OPCUA
-# ## client's messages are included in telegraf logs. These messages are very
-# ## noisy, but essential for debugging issues.
-# # client_trace = false
-# #
-# ## Include additional Fields in each metric
-# ## Available options are:
-# ## DataType -- OPC-UA Data Type (string)
-# # optional_fields = []
-# #
-# ## Node ID configuration
-# ## name - field name to use in the output
-# ## namespace - OPC UA namespace of the node (integer value 0 thru 3)
-# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
-# ## identifier - OPC UA ID (tag as shown in opcua browser)
-# ## tags - extra tags to be added to the output metric (optional); deprecated in 1.25.0; use default_tags
-# ## default_tags - extra tags to be added to the output metric (optional)
-# ##
-# ## Use either the inline notation or the bracketed notation, not both.
-# #
-# ## Inline notation (default_tags not supported yet)
-# # nodes = [
-# # {name="", namespace="", identifier_type="", identifier="", tags=[["tag1", "value1"], ["tag2", "value2"]]},
-# # {name="", namespace="", identifier_type="", identifier=""},
-# # ]
-# #
-# ## Bracketed notation
-# # [[inputs.opcua.nodes]]
-# # name = "node1"
-# # namespace = ""
-# # identifier_type = ""
-# # identifier = ""
-# # default_tags = { tag1 = "value1", tag2 = "value2" }
-# #
-# # [[inputs.opcua.nodes]]
-# # name = "node2"
-# # namespace = ""
-# # identifier_type = ""
-# # identifier = ""
-# #
-# ## Node Group
-# ## Sets defaults so they aren't required in every node.
-# ## Default values can be set for:
-# ## * Metric name
-# ## * OPC UA namespace
-# ## * Identifier
-# ## * Default tags
-# ##
-# ## Multiple node groups are allowed
-# #[[inputs.opcua.group]]
-# ## Group Metric name. Overrides the top level name. If unset, the
-# ## top level name is used.
-# # name =
-# #
-# ## Group default namespace. If a node in the group doesn't set its
-# ## namespace, this is used.
-# # namespace =
-# #
-# ## Group default identifier type. If a node in the group doesn't set its
-# ## identifier type, this is used.
-# # identifier_type =
-# #
-# ## Default tags that are applied to every node in this group. Can be
-# ## overwritten in a node by setting a different value for the tag name.
-# ## example: default_tags = { tag1 = "value1" }
-# # default_tags = {}
-# #
-# ## Node ID Configuration. Array of nodes with the same settings as above.
-# ## Use either the inline notation or the bracketed notation, not both.
-# #
-# ## Inline notation (default_tags not supported yet)
-# # nodes = [
-# # {name="node1", namespace="", identifier_type="", identifier=""},
-# # {name="node2", namespace="", identifier_type="", identifier=""},
-# #]
-# #
-# ## Bracketed notation
-# # [[inputs.opcua.group.nodes]]
-# # name = "node1"
-# # namespace = ""
-# # identifier_type = ""
-# # identifier = ""
-# # default_tags = { tag1 = "override1", tag2 = "value2" }
-# #
-# # [[inputs.opcua.group.nodes]]
-# # name = "node2"
-# # namespace = ""
-# # identifier_type = ""
-# # identifier = ""
-#
-# ## Enable workarounds required by some devices to work correctly
-# # [inputs.opcua.workarounds]
-# ## Set additional valid status codes, StatusOK (0x0) is always considered valid
-# # additional_valid_status_codes = ["0xC0"]
-#
-# # [inputs.opcua.request_workarounds]
-# ## Use unregistered reads instead of registered reads
-# # use_unregistered_reads = false
-
-
-# # OpenLDAP cn=Monitor plugin
-# [[inputs.openldap]]
-# host = "localhost"
-# port = 389
-#
-# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
-# # note that port will likely need to be changed to 636 for ldaps
-# # valid options: "" | "starttls" | "ldaps"
-# tls = ""
-#
-# # skip peer certificate verification. Default is false.
-# insecure_skip_verify = false
-#
-# # Path to PEM-encoded Root certificate to use to verify server certificate
-# tls_ca = "/etc/ssl/certs.pem"
-#
-# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
-# bind_dn = ""
-# bind_password = ""
-#
-# # reverse metric names so they sort more naturally
-# # Defaults to false if unset, but is set to true when generating a new config
-# reverse_metric_names = true
-
-
-# # Get standard NTP query metrics from OpenNTPD.
-# [[inputs.openntpd]]
-# ## Run ntpctl binary with sudo.
-# # use_sudo = false
-#
-# ## Location of the ntpctl binary.
-# # binary = "/usr/sbin/ntpctl"
-#
-# ## Maximum time the ntpctl binary is allowed to run.
-# # timeout = "5ms"
-
-
-# # Derive metrics from aggregating OpenSearch query results
-# [[inputs.opensearch_query]]
-# ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part
-# ## of the same cluster. Only one successful call will be made per interval.
-# urls = [ "https://node1.os.example.com:9200" ] # required.
-#
-# ## OpenSearch client timeout, defaults to "5s".
-# # timeout = "5s"
-#
-# ## HTTP basic authentication details
-# # username = "admin"
-# # password = "admin"
-#
-# ## Skip TLS validation. Useful for local testing and self-signed certs.
-# # insecure_skip_verify = false
-#
-# [[inputs.opensearch_query.aggregation]]
-# ## measurement name for the results of the aggregation query
-# measurement_name = "measurement"
-#
-# ## OpenSearch index or index pattern to search
-# index = "index-*"
-#
-# ## The date/time field in the OpenSearch index (mandatory).
-# date_field = "@timestamp"
-#
-# ## If the field used for the date/time field in OpenSearch is also using
-# ## a custom date/time format it may be required to provide the format to
-# ## correctly parse the field.
-# ##
-# ## If using one of the built in OpenSearch formats this is not required.
-# ## https://opensearch.org/docs/2.4/opensearch/supported-field-types/date/#built-in-formats
-# # date_field_custom_format = ""
-#
-# ## Time window to query (eg. "1m" to query documents from last minute).
-# ## Normally should be set to same as collection interval
-# query_period = "1m"
-#
-# ## Lucene query to filter results
-# # filter_query = "*"
-#
-# ## Fields to aggregate values (must be numeric fields)
-# # metric_fields = ["metric"]
-#
-# ## Aggregation function to use on the metric fields
-# ## Must be set if 'metric_fields' is set
-# ## Valid values are: avg, sum, min, max
-# # metric_function = "avg"
-#
-# ## Fields to be used as tags. Must be text, non-analyzed fields. Metric
-# ## aggregations are performed per tag
-# # tags = ["field.keyword", "field2.keyword"]
-#
-# ## Set to true to not ignore documents when the tag(s) above are missing
-# # include_missing_tag = false
-#
-# ## String value of the tag when the tag does not exist
-# ## Required when include_missing_tag is true
-# # missing_tag_value = "null"
-
-
-# # A plugin to collect stats from OpenSMTPD, a free SMTP server implementation
-# [[inputs.opensmtpd]]
-# ## If running as a restricted user you can prepend sudo for additional access:
-# #use_sudo = false
-#
-# ## The default location of the smtpctl binary can be overridden with:
-# binary = "/usr/sbin/smtpctl"
-#
-# # The default timeout of 1s can be overridden with:
-# #timeout = "1s"
-
-
-# # Read current weather and forecasts data from openweathermap.org
-# [[inputs.openweathermap]]
-# ## OpenWeatherMap API key.
-# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-#
-# ## City IDs to collect weather data from.
-# city_id = ["5391959"]
-#
-# ## Language of the description field. Can be one of "ar", "bg",
-# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
-# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
-# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
-# # lang = "en"
-#
-# ## APIs to fetch; can contain "weather" or "forecast".
-# # fetch = ["weather", "forecast"]
-#
-# ## OpenWeatherMap base URL
-# # base_url = "https://api.openweathermap.org/"
-#
-# ## Timeout for HTTP response.
-# # response_timeout = "5s"
-#
-# ## Preferred unit system for temperature and wind speed. Can be one of
-# ## "metric", "imperial", or "standard".
-# # units = "metric"
-#
-# ## Style to query the current weather; available options
-# ## batch -- query multiple cities at once using the "group" endpoint
-# ## individual -- query each city individually using the "weather" endpoint
-# ## You should use "individual" here as it is documented and provides more
-# ## frequent updates. The default is "batch" for backward compatibility.
-# # query_style = "batch"
-#
-# ## Query interval to fetch data.
-# ## By default the global 'interval' setting is used. You should override the
-# ## interval here if the global setting is shorter than 10 minutes as
-# ## OpenWeatherMap weather data is only updated every 10 minutes.
-# # interval = "10m"
-
-
-# # P4Runtime telemetry input plugin
-# [[inputs.p4runtime]]
-# ## Define the endpoint of the P4Runtime gRPC server to collect metrics.
-# # endpoint = "127.0.0.1:9559"
-# ## Set DeviceID required for Client Arbitration.
-# ## https://p4.org/p4-spec/p4runtime/main/P4Runtime-Spec.html#sec-client-arbitration-and-controller-replication
-# # device_id = 1
-# ## Filter counters by their names that should be observed.
-# ## Example: counter_names_include=["ingressCounter", "egressCounter"]
-# # counter_names_include = []
-#
-# ## Optional TLS Config.
-# ## Enable client-side TLS and define CA to authenticate the device.
-# # enable_tls = false
-# # tls_ca = "/etc/telegraf/ca.crt"
-# ## Set minimal TLS version to accept by the client.
-# # tls_min_version = "TLS12"
-# ## Use TLS but skip chain & host verification.
-# # insecure_skip_verify = true
-#
-# ## Define client-side TLS certificate & key to authenticate to the device.
-# # tls_cert = "/etc/telegraf/client.crt"
-# # tls_key = "/etc/telegraf/client.key"
-
-
-# # Read metrics of passenger using passenger-status
-# [[inputs.passenger]]
-# ## Path of passenger-status.
-# ##
-# ## The plugin gathers metrics by parsing the XML output of passenger-status.
-# ## More information about the tool:
-# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
-# ##
-# ## If no path is specified, the plugin simply executes passenger-status,
-# ## hoping it can be found in your PATH.
-# command = "passenger-status -v --show=xml"
-
-
-# # Gather counters from PF
-# [[inputs.pf]]
-# ## PF requires root access on most systems.
-# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
-# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
-# ## pfctl can be restricted to only the list command "pfctl -s info".
-# use_sudo = false
-
-
-# # Read metrics of phpfpm, via HTTP status page or socket
-# [[inputs.phpfpm]]
-# ## An array of addresses to gather stats about. Specify an ip or hostname
-# ## with optional port and path
-# ##
-# ## Plugin can be configured in three modes (either can be used):
-# ## - http: the URL must start with http:// or https://, ie:
-# ## "http://localhost/status"
-# ## "http://192.168.130.1/status?full"
-# ##
-# ## - unixsocket: path to fpm socket, ie:
-# ## "/var/run/php5-fpm.sock"
-# ## or using a custom fpm status path:
-# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
-# ## glob patterns are also supported:
-# ## "/var/run/php*.sock"
-# ##
-# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
-# ## "fcgi://10.0.0.12:9000/status"
-# ## "cgi://10.0.10.12:9001/status"
-# ##
-# ## Example of multiple gathering from local socket and remote host
-# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
-# urls = ["http://localhost/status"]
-#
-# ## Format of stats to parse, set to "status" or "json"
-# ## If the user configures the URL to return JSON (e.g.
-# ## http://localhost/status?json), set to JSON. Otherwise, will attempt to
-# ## parse line-by-line. The JSON mode will produce additional metrics.
-# # format = "status"
-#
-# ## Duration allowed to complete HTTP requests.
-# # timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Ping given url(s) and return statistics
-# [[inputs.ping]]
-# ## Hosts to send ping packets to.
-# urls = ["example.org"]
-#
-# ## Method used for sending pings, can be either "exec" or "native". When set
-# ## to "exec" the systems ping command will be executed. When set to "native"
-# ## the plugin will send pings directly.
-# ##
-# ## While the default is "exec" for backwards compatibility, new deployments
-# ## are encouraged to use the "native" method for improved compatibility and
-# ## performance.
-# # method = "exec"
-#
-# ## Number of ping packets to send per interval. Corresponds to the "-c"
-# ## option of the ping command.
-# # count = 1
-#
-# ## Time to wait between sending ping packets in seconds. Operates like the
-# ## "-i" option of the ping command.
-# # ping_interval = 1.0
-#
-# ## If set, the time to wait for a ping response in seconds. Operates like
-# ## the "-W" option of the ping command.
-# # timeout = 1.0
-#
-# ## If set, the total ping deadline, in seconds. Operates like the -w option
-# ## of the ping command.
-# # deadline = 10
-#
-# ## Interface or source address to send ping from. Operates like the -I or -S
-# ## option of the ping command.
-# # interface = ""
-#
-# ## Percentiles to calculate. This only works with the native method.
-# # percentiles = [50, 95, 99]
-#
-# ## Specify the ping executable binary.
-# # binary = "ping"
-#
-# ## Arguments for ping command. When arguments is not empty, the command from
-# ## the binary option will be used and other options (ping_interval, timeout,
-# ## etc) will be ignored.
-# # arguments = ["-c", "3"]
-#
-# ## Use only IPv4 addresses when resolving a hostname. By default, both IPv4
-# ## and IPv6 can be used.
-# # ipv4 = false
-#
-# ## Use only IPv6 addresses when resolving a hostname. By default, both IPv4
-# ## and IPv6 can be used.
-# # ipv6 = false
-#
-# ## Number of data bytes to be sent. Corresponds to the "-s"
-# ## option of the ping command. This only works with the native method.
-# # size = 56
-
-
-# # Measure postfix queue statistics
-# # This plugin ONLY supports non-Windows
-# [[inputs.postfix]]
-# ## Postfix queue directory. If not provided, telegraf will try to use
-# ## 'postconf -h queue_directory' to determine it.
-# # queue_directory = "/var/spool/postfix"
-
-
-# # Read metrics from one or many PowerDNS servers
-# [[inputs.powerdns]]
-# # An array of sockets to gather stats about.
-# # Specify a path to unix socket.
-# #
-# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path.
-# unix_sockets = ["/var/run/pdns.controlsocket"]
-
-
-# # Read metrics from one or many PowerDNS Recursor servers
-# [[inputs.powerdns_recursor]]
-# ## Path to the Recursor control socket.
-# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
-#
-# ## Directory to create receive socket. This default is likely not writable,
-# ## please reference the full plugin documentation for a recommended setup.
-# # socket_dir = "/var/run/"
-# ## Socket permissions for the receive socket.
-# # socket_mode = "0666"
-#
-# ## The version of the PowerDNS control protocol to use. You will have to
-# ## change this based on your PowerDNS Recursor version, see below:
-# ## Version 1: PowerDNS <4.5.0
-# ## Version 2: PowerDNS 4.5.0 - 4.5.11
-# ## Version 3: PowerDNS >=4.6.0
-# ## By default this is set to 1.
-# # control_protocol_version = 1
-#
-
-
-# # Monitor process cpu and memory usage
-# [[inputs.procstat]]
-# ## PID file to monitor process
-# pid_file = "/var/run/nginx.pid"
-# ## executable name (ie, pgrep <exe>)
-# # exe = "nginx"
-# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
-# # pattern = "nginx"
-# ## user as argument for pgrep (ie, pgrep -u <user>)
-# # user = "nginx"
-# ## Systemd unit name, supports globs when include_systemd_children is set to true
-# # systemd_unit = "nginx.service"
-# # include_systemd_children = false
-# ## CGroup name or path, supports globs
-# # cgroup = "systemd/system.slice/nginx.service"
-# ## Supervisor service names of supervisorctl management
-# # supervisor_units = ["webserver", "proxy"]
-#
-# ## Windows service name
-# # win_service = ""
-#
-# ## override for process_name
-# ## This is optional; default is sourced from /proc/<pid>/status
-# # process_name = "bar"
-#
-# ## Field name prefix
-# # prefix = ""
-#
-# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'.
-# # mode = "irix"
-#
-# ## Add the given information tag instead of a field
-# ## This allows creating unique metrics/series when collecting processes with
-# ## otherwise identical tags. However, please be careful as this can easily
-# ## result in a large number of series, especially with short-lived processes,
-# ## creating high cardinality at the output.
-# ## Available options are:
-# ## cmdline -- full commandline
-# ## pid -- ID of the process
-# ## ppid -- ID of the process' parent
-# ## status -- state of the process
-# ## user -- username owning the process
-# # tag_with = []
-#
-#
-# ## Method to use when finding process IDs. Can be one of 'pgrep', or
-# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
-# ## the native finder performs the search directly in a manner dependent on the
-# ## platform. Default is 'pgrep'
-# # pid_finder = "pgrep"
-
-
-# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
-# [[inputs.proxmox]]
-# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
-# base_url = "https://localhost:8006/api2/json"
-# api_token = "USER@REALM!TOKENID=UUID"
-#
-# ## Node name, defaults to OS hostname
-# ## Unless Telegraf is on the same host as Proxmox, setting this is required
-# ## for Telegraf to successfully connect to Proxmox. If not on the same host,
-# ## leaving this empty will often lead to a "search domain is not set" error.
-# # node_name = ""
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# insecure_skip_verify = false
-#
-# # HTTP response timeout (default: 5s)
-# response_timeout = "5s"
-
-
-# # Reads last_run_summary.yaml file and converts to measurements
-# [[inputs.puppetagent]]
-# ## Location of puppet last run summary file
-# location = "/var/lib/puppet/state/last_run_summary.yaml"
-
-
-# # Reads metrics from RabbitMQ servers via the Management Plugin
-# [[inputs.rabbitmq]]
-# ## Management Plugin url. (default: http://localhost:15672)
-# # url = "http://localhost:15672"
-# ## Tag added to rabbitmq_overview series; deprecated: use tags
-# # name = "rmq-server-1"
-# ## Credentials
-# # username = "guest"
-# # password = "guest"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Optional request timeouts
-# ##
-# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
-# ## for a server's response headers after fully writing the request.
-# # header_timeout = "3s"
-# ##
-# ## client_timeout specifies a time limit for requests made by this client.
-# ## Includes connection time, any redirects, and reading the response body.
-# # client_timeout = "4s"
-#
-# ## A list of nodes to gather as the rabbitmq_node measurement. If not
-# ## specified, metrics for all nodes are gathered.
-# # nodes = ["rabbit@node1", "rabbit@node2"]
-#
-# ## A list of queues to gather as the rabbitmq_queue measurement. If not
-# ## specified, metrics for all queues are gathered.
-# ## Deprecated in 1.6: Use queue_name_include instead.
-# # queues = ["telegraf"]
-#
-# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
-# ## specified, metrics for all exchanges are gathered.
-# # exchanges = ["telegraf"]
-#
-# ## Metrics to include and exclude. Globs accepted.
-# ## Note that an empty array for both will include all metrics
-# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue"
-# # metric_include = []
-# # metric_exclude = []
-#
-# ## Queues to include and exclude. Globs accepted.
-# ## Note that an empty array for both will include all queues
-# # queue_name_include = []
-# # queue_name_exclude = []
-#
-# ## Federation upstreams to include and exclude specified as an array of glob
-# ## pattern strings. Federation links can also be limited by the queue and
-# ## exchange filters.
-# # federation_upstream_include = []
-# # federation_upstream_exclude = []
-
-
-# [[inputs.radius]]
-# ## An array of Server IPs and ports to gather from. If none specified, defaults to localhost.
-# servers = ["127.0.0.1:1812","hostname.domain.com:1812"]
-#
-# ## Credentials for radius authentication.
-# username = "myuser"
-# password = "mypassword"
-# secret = "mysecret"
-#
-# ## Maximum time to receive response.
-# # response_timeout = "5s"
-
-
-# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
-# [[inputs.raindrops]]
-# ## An array of raindrops middleware URI to gather stats.
-# urls = ["http://localhost:8080/_raindrops"]
-
-
-# # Reads metrics from RavenDB servers via the Monitoring Endpoints
-# [[inputs.ravendb]]
-# ## Node URL and port that RavenDB is listening on. By default,
-# ## attempts to connect securely over HTTPS; however, if the user
-# ## is running a local insecure development cluster, users can use
-# ## HTTP via a URL like "http://localhost:8080"
-# url = "https://localhost:4433"
-#
-# ## RavenDB X509 client certificate setup
-# # tls_cert = "/etc/telegraf/raven.crt"
-# # tls_key = "/etc/telegraf/raven.key"
-#
-# ## Optional request timeout
-# ##
-# ## Timeout specifies the amount of time to wait
-# ## for a server's response headers after fully writing the request and
-# ## the time limit for requests made by this client
-# # timeout = "5s"
-#
-# ## List of statistics which are collected
-# # At least one is required
-# # Allowed values: server, databases, indexes, collections
-# #
-# # stats_include = ["server", "databases", "indexes", "collections"]
-#
-# ## List of db where database stats are collected
-# ## If empty, all db are included
-# # db_stats_dbs = []
-#
-# ## List of db where index status are collected
-# ## If empty, all indexes from all db are included
-# # index_stats_dbs = []
-#
-# ## List of db where collection status are collected
-# ## If empty, all collections from all db are included
-# # collection_stats_dbs = []
-
-
-# # Read CPU, fan, power supply and voltage metrics of hardware servers through Redfish APIs
-# [[inputs.redfish]]
-# ## Redfish API Base URL.
-# address = "https://127.0.0.1:5000"
-#
-# ## Credentials for the Redfish API. Can also use secrets.
-# username = "root"
-# password = "password123456"
-#
-# ## System Id to collect data for in Redfish APIs.
-# computer_system_id="System.Embedded.1"
-#
-# ## Metrics to collect
-# ## The metrics to gather. Choose from "power" and "thermal".
-# # include_metrics = ["power", "thermal"]
-#
-# ## Tag sets allow you to include redfish OData link parent data
-# ## For example:
-# ## Thermal data is an OData link with parent Chassis which has a link of Location.
-# ## For more info see the Redfish Resource and Schema Guide at DMTFs website.
-# ## Available sets are: "chassis.location" and "chassis"
-# # include_tag_sets = ["chassis.location"]
-#
-# ## Workarounds
-# ## Defines workarounds for certain hardware vendors. Choose from:
-# ## * ilo4-thermal - Do not pass OData-Version header to Thermal endpoint
-# # workarounds = []
-#
-# ## Amount of time allowed to complete the HTTP request
-# # timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read metrics from one or many redis-sentinel servers
-# [[inputs.redis_sentinel]]
-# ## specify servers via a url matching:
-# ## [protocol://][username:password]@address[:port]
-# ## e.g.
-# ## tcp://localhost:26379
-# ## tcp://username:password@192.168.99.100
-# ## unix:///var/run/redis-sentinel.sock
-# ##
-# ## If no servers are specified, then localhost is used as the host.
-# ## If no port is specified, 26379 is used
-# # servers = ["tcp://localhost:26379"]
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = true
-
-
-# # Read metrics from one or many RethinkDB servers
-# [[inputs.rethinkdb]]
-# ## An array of URI to gather stats about. Specify an ip or hostname
-# ## with optional port and password. ie,
-# ## rethinkdb://user:auth_key@10.10.3.30:28105,
-# ## rethinkdb://10.10.3.33:18832,
-# ## 10.0.0.1:10000, etc.
-# servers = ["127.0.0.1:28015"]
-#
-# ## If you use an actual rethinkdb of > 2.3.0 with username/password authorization,
-# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
-# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
-#
-# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
-# ## has to be named "rethinkdb".
-# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
-
-
-# # Read metrics from one or many Riak servers
-# [[inputs.riak]]
-# # Specify a list of one or more riak http servers
-# servers = ["http://localhost:8098"]
-
-
-# # Read API usage and limits for a Salesforce organisation
-# [[inputs.salesforce]]
-# ## specify your credentials
-# ##
-# username = "your_username"
-# password = "your_password"
-# ##
-# ## (optional) security token
-# # security_token = "your_security_token"
-# ##
-# ## (optional) environment type (sandbox or production)
-# ## default is: production
-# ##
-# # environment = "production"
-# ##
-# ## (optional) API version (default: "39.0")
-# ##
-# # version = "39.0"
-
-
-# # Monitor sensors, requires lm-sensors package
-# # This plugin ONLY supports Linux
-# [[inputs.sensors]]
-# ## Remove numbers from field names.
-# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
-# # remove_numbers = true
-#
-# ## Timeout is the maximum amount of time that the sensors command can run.
-# # timeout = "5s"
-
-
-# # Get slab statistics from procfs
-# # This plugin ONLY supports Linux
-# [[inputs.slab]]
-# # no configuration - please see the plugin's README for steps to configure
-# # sudo properly
-
-
-# # Read metrics from storage devices supporting S.M.A.R.T.
-# [[inputs.smart]]
-# ## Optionally specify the path to the smartctl executable
-# # path_smartctl = "/usr/bin/smartctl"
-#
-# ## Optionally specify the path to the nvme-cli executable
-# # path_nvme = "/usr/bin/nvme"
-#
-# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
-# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
-# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
-# # enable_extensions = ["auto-on"]
-#
-# ## On most platforms the cli utilities used require root access.
-# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
-# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
-# ## without a password.
-# # use_sudo = false
-#
-# ## Adds an extra tag "device_type", which can be used to differentiate
-# ## multiple disks behind the same controller (e.g., MegaRAID).
-# # tag_with_device_type = false
-#
-# ## Skip checking disks in this power mode. Defaults to
-# ## "standby" to not wake up disks that have stopped rotating.
-# ## See --nocheck in the man pages for smartctl.
-# ## smartctl version 5.41 and 5.42 have faulty detection of
-# ## power mode and might require changing this value to
-# ## "never" depending on your disks.
-# # nocheck = "standby"
-#
-# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
-# ## information from each drive into the 'smart_attribute' measurement.
-# # attributes = false
-#
-# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
-# # excludes = [ "/dev/pass6" ]
-#
-# ## Optionally specify devices and device type, if unset
-# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
-# ## and all found will be included except for the excluded in excludes.
-# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
-#
-# ## Timeout for the cli command to complete.
-# # timeout = "30s"
-#
-# ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
-# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
-# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of
-# ## SMART data - one individual array drive at a time. In such case please set this configuration option
-# ## to "sequential" to get readings for all drives.
-# ## valid options: concurrent, sequential
-# # read_method = "concurrent"
-
-
-# # Retrieves SNMP values from remote agents
-# [[inputs.snmp]]
-# ## Agent addresses to retrieve values from.
-# ## format: agents = ["<scheme://><hostname>:<port>"]
-# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6.
-# ## default is udp
-# ## port: optional
-# ## example: agents = ["udp://127.0.0.1:161"]
-# ## agents = ["tcp://127.0.0.1:161"]
-# ## agents = ["udp4://v4only-snmp-agent"]
-# agents = ["udp://127.0.0.1:161"]
-#
-# ## Timeout for each request.
-# # timeout = "5s"
-#
-# ## SNMP version; can be 1, 2, or 3.
-# # version = 2
-#
-# ## Unconnected UDP socket
-# ## When true, SNMP responses are accepted from any address not just
-# ## the requested address. This can be useful when gathering from
-# ## redundant/failover systems.
-# # unconnected_udp_socket = false
-#
-# ## Path to mib files
-# ## Used by the gosmi translator.
-# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
-# # path = ["/usr/share/snmp/mibs"]
-#
-# ## SNMP community string.
-# # community = "public"
-#
-# ## Agent host tag; should be set to "source" for consistent usage across plugins
-# ## example: agent_host_tag = "source"
-# ## The default value is inconsistent with other plugins. Users will get a
-# ## warning that can be ignored if this is not changed. However, to have a
-# ## consistent experience, set this to "source" in your config to align with
-# ## other plugins.
-# # agent_host_tag = "agent_host"
-#
-# ## Number of retries to attempt.
-# # retries = 3
-#
-# ## The GETBULK max-repetitions parameter.
-# # max_repetitions = 10
-#
-# ## SNMPv3 authentication and encryption options.
-# ##
-# ## Security Name.
-# # sec_name = "myuser"
-# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
-# # auth_protocol = "MD5"
-# ## Authentication password.
-# # auth_password = "pass"
-# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
-# # sec_level = "authNoPriv"
-# ## Context Name.
-# # context_name = ""
-# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "".
-# ### Protocols "AES192", "AES192C", "AES256", and "AES256C" require the underlying net-snmp tools
-# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html)
-# # priv_protocol = ""
-# ## Privacy password used for encrypted messages.
-# # priv_password = ""
-#
-# ## Add fields and tables defining the variables you wish to collect. This
-# ## example collects the system uptime and interface variables. Reference the
-# ## full plugin documentation for configuration details.
-# [[inputs.snmp.field]]
-# oid = "RFC1213-MIB::sysUpTime.0"
-# name = "sysUptime"
-# conversion = "float(2)"
-#
-# [[inputs.snmp.field]]
-# oid = "RFC1213-MIB::sysName.0"
-# name = "sysName"
-# is_tag = true
-#
-# [[inputs.snmp.table]]
-# oid = "IF-MIB::ifTable"
-# name = "interface"
-# inherit_tags = ["sysName"]
-#
-# [[inputs.snmp.table.field]]
-# oid = "IF-MIB::ifDescr"
-# name = "ifDescr"
-# is_tag = true
-
-
-# # Gather indicators from established connections, using iproute2's ss command.
-# # This plugin ONLY supports non-Windows
-# [[inputs.socketstat]]
-# ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets
-# ## Specify here the types you want to gather
-# protocols = [ "tcp", "udp" ]
-#
-# ## The default timeout of 1s for ss execution can be overridden here:
-# # timeout = "1s"
-
-
-# # Gather timeseries from Google Cloud Platform v3 monitoring API
-# [[inputs.stackdriver]]
-# ## GCP Project
-# project = "erudite-bloom-151019"
-#
-# ## Include timeseries that start with the given metric type.
-# metric_type_prefix_include = [
-# "compute.googleapis.com/",
-# ]
-#
-# ## Exclude timeseries that start with the given metric type.
-# # metric_type_prefix_exclude = []
-#
-# ## Most metrics are updated no more than once per minute; it is recommended
-# ## to override the agent level interval with a value of 1m or greater.
-# interval = "1m"
-#
-# ## Maximum number of API calls to make per second. The quota for accounts
-# ## varies, it can be viewed on the API dashboard:
-# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
-# # rate_limit = 14
-#
-# ## The delay and window options control the number of points selected on
-# ## each gather. When set, metrics are gathered between:
-# ## start: now() - delay - window
-# ## end: now() - delay
-# #
-# ## Collection delay; if set too low metrics may not yet be available.
-# # delay = "5m"
-# #
-# ## If unset, the window will start at 1m and be updated dynamically to span
-# ## the time between calls (approximately the length of the plugin interval).
-# # window = "1m"
-#
-# ## TTL for cached list of metric types. This is the maximum amount of time
-# ## it may take to discover new metrics.
-# # cache_ttl = "1h"
-#
-# ## If true, raw bucket counts are collected for distribution value types.
-# ## For a more lightweight collection, you may wish to disable and use
-# ## distribution_aggregation_aligners instead.
-# # gather_raw_distribution_buckets = true
-#
-# ## Aggregate functions to be used for metrics whose value type is
-# ## distribution. These aggregate values are recorded in addition to raw
-# ## bucket counts, if they are enabled.
-# ##
-# ## For a list of aligner strings see:
-# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
-# # distribution_aggregation_aligners = [
-# # "ALIGN_PERCENTILE_99",
-# # "ALIGN_PERCENTILE_95",
-# # "ALIGN_PERCENTILE_50",
-# # ]
-#
-# ## Filters can be added to reduce the number of time series matched. All
-# ## functions are supported: starts_with, ends_with, has_substring, and
-# ## one_of. Only the '=' operator is supported.
-# ##
-# ## The logical operators when combining filters are defined statically using
-# ## the following values:
-# ## filter ::= <resource_labels> {AND <metric_labels> AND <user_labels> AND <system_labels>}
-# ## resource_labels ::= <label> {OR <label>}
-# ## metric_labels ::= <label> {OR <label>}
-# ## user_labels ::= <label> {OR <label>}
-# ## system_labels ::= <label> {OR <label>}
-# ##
-# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
-# #
-# ## Resource labels refine the time series selection with the following expression:
-# ## resource.labels.<key> = <value>
-# # [[inputs.stackdriver.filter.resource_labels]]
-# # key = "instance_name"
-# # value = 'starts_with("localhost")'
-# #
-# ## Metric labels refine the time series selection with the following expression:
-# ## metric.labels.<key> = <value>
-# # [[inputs.stackdriver.filter.metric_labels]]
-# # key = "device_name"
-# # value = 'one_of("sda", "sdb")'
-# #
-# ## User labels refine the time series selection with the following expression:
-# ## metadata.user_labels."<key>" = <value>
-# # [[inputs.stackdriver.filter.user_labels]]
-# # key = "environment"
-# # value = 'one_of("prod", "staging")'
-# #
-# ## System labels refine the time series selection with the following expression:
-# ## metadata.system_labels."<key>" = <value>
-# # [[inputs.stackdriver.filter.system_labels]]
-# # key = "machine_type"
-# # value = 'starts_with("e2-")'
-
-
-# # Gathers information about processes that run under supervisor using the XML-RPC API
-# [[inputs.supervisor]]
-# ## URL of supervisor's XML-RPC endpoint. If basic auth is enabled in the supervisor http server,
-# ## then you have to add credentials to the url (ex. http://login:pass@localhost:9001/RPC2)
-# # url="http://localhost:9001/RPC2"
-# ## With the settings below you can manage gathering additional information about processes
-# ## If both of them are empty, then all additional information will be collected.
-# ## Currently supported additional metrics are: pid, rc
-# # metrics_include = []
-
-
-# # Get synproxy counter statistics from procfs
-# # This plugin ONLY supports Linux
-# [[inputs.synproxy]]
-# # no configuration
-
-
-# # Sysstat metrics collector
-# # This plugin ONLY supports Linux
-# [[inputs.sysstat]]
-# ## Path to the sadc command.
-# #
-# ## Common Defaults:
-# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
-# ## Arch: /usr/lib/sa/sadc
-# ## RHEL/CentOS: /usr/lib64/sa/sadc
-# sadc_path = "/usr/lib/sa/sadc" # required
-#
-# ## Path to the sadf command, if it is not in PATH
-# # sadf_path = "/usr/bin/sadf"
-#
-# ## Activities is a list of activities that are passed as arguments to the
-# ## sadc collector utility (e.g: DISK, SNMP etc...)
-# ## The more activities that are added, the more data is collected.
-# # activities = ["DISK"]
-#
-# ## Group metrics to measurements.
-# ##
-# ## If group is false, each metric will be prefixed with a description
-# ## and itself represents a measurement.
-# ##
-# ## If group is true, corresponding metrics are grouped to a single measurement.
-# # group = true
-#
-# ## Options for the sadf command. The values on the left represent the sadf options and
-# ## the values on the right their description (which are used for grouping and prefixing metrics).
-# ##
-# ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
-# [inputs.sysstat.options]
-# -C = "cpu"
-# -B = "paging"
-# -b = "io"
-# -d = "disk" # requires DISK activity
-# "-n ALL" = "network"
-# "-P ALL" = "per_cpu"
-# -q = "queue"
-# -R = "mem"
-# -r = "mem_util"
-# -S = "swap_util"
-# -u = "cpu_util"
-# -v = "inode"
-# -W = "swap"
-# -w = "task"
-# # -H = "hugepages" # only available for newer linux distributions
-# # "-I ALL" = "interrupts" # requires INT activity
-#
-# ## Device tags can be used to add additional tags for devices. For example the configuration below
-# ## adds a tag vg with value rootvg for all metrics with sda devices.
-# # [[inputs.sysstat.device_tags.sda]]
-# # vg = "rootvg"
-
-
-# # Tacacs plugin collects successful tacacs authentication response times.
-# [[inputs.tacacs]]
-# ## An array of Server IPs (or hostnames) and ports to gather from. If none specified, defaults to localhost.
-# # servers = ["127.0.0.1:49"]
-#
-# ## Request source server IP, normally the server running telegraf.
-# # request_ip = "127.0.0.1"
-#
-# ## Credentials for tacacs authentication.
-# username = "myuser"
-# password = "mypassword"
-# secret = "mysecret"
-#
-# ## Maximum time to receive response.
-# # response_timeout = "5s"
-
-
-# # Reads metrics from a Teamspeak 3 Server via ServerQuery
-# [[inputs.teamspeak]]
-# ## Server address for Teamspeak 3 ServerQuery
-# # server = "127.0.0.1:10011"
-# ## Username for ServerQuery
-# username = "serverqueryuser"
-# ## Password for ServerQuery
-# password = "secret"
-# ## Nickname of the ServerQuery client
-# nickname = "telegraf"
-# ## Array of virtual servers
-# # virtual_servers = [1]
-
-
-# # Read metrics about temperature
-# [[inputs.temp]]
-# ## Desired output format (Linux only)
-# ## Available values are
-# ## v1 -- use pre-v1.22.4 sensor naming, e.g. coretemp_core0_input
-# ## v2 -- use v1.22.4+ sensor naming, e.g. coretemp_core_0_input
-# # metric_format = "v2"
-#
-# ## Add device tag to distinguish devices with the same name (Linux only)
-# # add_device_tag = false
-
-
-# # Read Tengine's basic status information (ngx_http_reqstat_module)
-# [[inputs.tengine]]
-# ## An array of Tengine reqstat module URI to gather stats.
-# urls = ["http://127.0.0.1/us"]
-#
-# ## HTTP response timeout (default: 5s)
-# # response_timeout = "5s"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Gather metrics from the Tomcat server status page.
-# [[inputs.tomcat]] -# ## URL of the Tomcat server status -# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" -# -# ## HTTP Basic Auth Credentials -# # username = "tomcat" -# # password = "s3cret" -# -# ## Request timeout -# # timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Inserts sine and cosine waves for demonstration purposes -# [[inputs.trig]] -# ## Set the amplitude -# amplitude = 10.0 - - -# # Read Twemproxy stats data -# [[inputs.twemproxy]] -# ## Twemproxy stats address and port (no scheme) -# addr = "localhost:22222" -# ## Monitor pool name -# pools = ["redis_pool", "mc_pool"] - - -# # A plugin to collect stats from the Unbound DNS resolver -# [[inputs.unbound]] -# ## Address of server to connect to, read from unbound conf default, optionally ':port' -# ## Will lookup IP if given a hostname -# server = "127.0.0.1:8953" -# -# ## If running as a restricted user you can prepend sudo for additional access: -# # use_sudo = false -# -# ## The default location of the unbound-control binary can be overridden with: -# # binary = "/usr/sbin/unbound-control" -# -# ## The default location of the unbound config file can be overridden with: -# # config_file = "/etc/unbound/unbound.conf" -# -# ## The default timeout of 1s can be overridden with: -# # timeout = "1s" -# -# ## When set to true, thread metrics are tagged with the thread id. -# ## -# ## The default is false for backwards compatibility, and will be changed to -# ## true in a future version. It is recommended to set to true on new -# ## deployments. -# thread_as_tag = false - - -# # Monitor UPSes connected via Network UPS Tools -# [[inputs.upsd]] -# ## A running NUT server to connect to. -# ## IPv6 addresses must be enclosed in brackets (e.g. "[::1]") -# # server = "127.0.0.1" -# # port = 3493 -# # username = "user" -# # password = "password" -# -# ## Force parsing numbers as floats -# ## It is highly recommended to enable this setting to parse numbers -# ## consistently as floats to avoid database conflicts where some numbers are -# ## parsed as integers and others as floats. -# # force_float = false -# -# ## Collect additional fields if they are available for the UPS -# ## The fields need to be specified as NUT variable names, see -# ## https://networkupstools.org/docs/developer-guide.chunked/apas02.html -# ## Wildcards are accepted. -# # additional_fields = [] -# -# ## Dump information for debugging -# ## Allows to print the raw variables (and corresponding types) as received -# ## from the NUT server ONCE for each UPS. The output is only available when -# ## running Telegraf in debug-mode. -# ## Please attach this information when reporting issues! -# # dump_raw_variables = false - - -# # Read uWSGI metrics. -# [[inputs.uwsgi]] -# ## List with urls of uWSGI Stats servers. 
Url must match pattern: -# ## scheme://address[:port] -# ## -# ## For example: -# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] -# servers = ["tcp://127.0.0.1:1717"] -# -# ## General connection timeout -# # timeout = "5s" - - -# # A plugin to collect stats from Varnish HTTP Cache -# # This plugin ONLY supports non-Windows -# [[inputs.varnish]] -# ## If running as a restricted user you can prepend sudo for additional access: -# #use_sudo = false -# -# ## The default location of the varnishstat binary can be overridden with: -# binary = "/usr/bin/varnishstat" -# -# ## Additional custom arguments for the varnishstat command -# # binary_args = ["-f", "MAIN.*"] -# -# ## The default location of the varnishadm binary can be overridden with: -# adm_binary = "/usr/bin/varnishadm" -# -# ## Custom arguments for the varnishadm command -# # adm_binary_args = [""] -# -# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls -# ## Varnish 6.0.2 and newer is required for metric_version=2. -# metric_version = 1 -# -# ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. -# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped. -# ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. -# # regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] -# -# ## By default, telegraf gather stats for 3 metric points. -# ## Setting stats will override the defaults shown below. -# ## Glob matching can be used, ie, stats = ["MAIN.*"] -# ## stats may also be set to ["*"], which will collect all stats -# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] -# -# ## Optional name for the varnish instance (or working directory) to query -# ## Usually append after -n in varnish cli -# # instance_name = instanceName -# -# ## Timeout for varnishstat command -# # timeout = "1s" - - -# # Read metrics from the Vault API -# [[inputs.vault]] -# ## URL for the Vault agent -# # url = "http://127.0.0.1:8200" -# -# ## Use Vault token for authorization. -# ## Vault token configuration is mandatory. -# ## If both are empty or both are set, an error is thrown. -# # token_file = "/path/to/auth/token" -# ## OR -# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" -# -# ## Set response_timeout (default 5 seconds) -# # response_timeout = "5s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile - - -# # Input plugin to counterPath Performance Counters on Windows operating systems -# # This plugin ONLY supports Windows -# [[inputs.win_perf_counters]] -# ## By default this plugin returns basic CPU and Disk statistics. See the -# ## README file for more examples. Uncomment examples below or write your own -# ## as you see fit. If the system being polled for data does not have the -# ## Object at startup of the Telegraf agent, it will not be gathered. -# -# ## Print All matching performance counters -# # PrintValid = false -# -# ## Whether request a timestamp along with the PerfCounter data or use current -# ## time -# # UsePerfCounterTime = true -# -# ## If UseWildcardsExpansion params is set to true, wildcards (partial -# ## wildcards in instance names and wildcards in counters names) in configured -# ## counter paths will be expanded and in case of localized Windows, counter -# ## paths will be also localized. 
-# ## It also returns instance indexes in instance
-# ## names. If false, wildcards (not partial) in instance names will still be
-# ## expanded, but instance indexes will not be returned in instance names.
-# # UseWildcardsExpansion = false
-#
-# ## When running on a localized version of Windows and with
-# ## UseWildcardsExpansion = true, Windows will localize object and counter
-# ## names. When LocalizeWildcardsExpansion = false, use the names in
-# ## object.Counters instead of the localized names. Only Instances can have
-# ## wildcards in this case. ObjectName and Counters must not have wildcards
-# ## when this setting is false.
-# # LocalizeWildcardsExpansion = true
-#
-# ## Period after which counters will be reread from configuration and
-# ## wildcards in counter paths expanded
-# # CountersRefreshInterval="1m"
-#
-# ## Accepts a list of PDH error codes which are defined in pdh.go. If one of
-# ## these errors is encountered it will be ignored. For example, you can provide
-# ## "PDH_NO_DATA" to ignore performance counters with no instances. By default
-# ## no errors are ignored. You can find the list here:
-# ## https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
-# ## e.g. IgnoredErrors = ["PDH_NO_DATA"]
-# # IgnoredErrors = []
-#
-# ## Maximum size of the buffer for values returned by the API
-# ## Increase this value if you experience "buffer limit reached" errors.
-# # MaxBufferSize = "4MiB"
-#
-# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
-# ## plugin definition, otherwise additional config options are read as part of
-# ## the table
-#
-# # [[inputs.win_perf_counters.object]]
-# # Measurement = ""
-# # ObjectName = ""
-# # Instances = [""]
-# # Counters = []
-# ## Additional Object Settings
-# ## * IncludeTotal: set to true to include _Total instance when querying
-# ## for all metrics via '*'
-# ## * WarnOnMissing: print out when the performance counter is missing
-# ## from object, counter or instance
-# ## * UseRawValues: gather raw values instead of formatted. Raw values are
-# ## stored in the field name with the "_Raw" suffix, e.g.
-# ## "Disk_Read_Bytes_sec_Raw".
-# # IncludeTotal = false
-# # WarnOnMissing = false
-# # UseRawValues = false
-#
-# ## Processor usage, alternative to native, reports on a per-core basis.
-# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_cpu" -# # ObjectName = "Processor" -# # Instances = ["*"] -# # UseRawValues = true -# # Counters = [ -# # "% Idle Time", -# # "% Interrupt Time", -# # "% Privileged Time", -# # "% User Time", -# # "% Processor Time", -# # "% DPC Time", -# # ] -# -# ## Disk times and queues -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_disk" -# # ObjectName = "LogicalDisk" -# # Instances = ["*"] -# # Counters = [ -# # "% Idle Time", -# # "% Disk Time", -# # "% Disk Read Time", -# # "% Disk Write Time", -# # "% User Time", -# # "% Free Space", -# # "Current Disk Queue Length", -# # "Free Megabytes", -# # ] -# -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_diskio" -# # ObjectName = "PhysicalDisk" -# # Instances = ["*"] -# # Counters = [ -# # "Disk Read Bytes/sec", -# # "Disk Write Bytes/sec", -# # "Current Disk Queue Length", -# # "Disk Reads/sec", -# # "Disk Writes/sec", -# # "% Disk Time", -# # "% Disk Read Time", -# # "% Disk Write Time", -# # ] -# -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_net" -# # ObjectName = "Network Interface" -# # Instances = ["*"] -# # Counters = [ -# # "Bytes Received/sec", -# # "Bytes Sent/sec", -# # "Packets Received/sec", -# # "Packets Sent/sec", -# # "Packets Received Discarded", -# # "Packets Outbound Discarded", -# # "Packets Received Errors", -# # "Packets Outbound Errors", -# # ] -# -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_system" -# # ObjectName = "System" -# # Instances = ["------"] -# # Counters = [ -# # "Context Switches/sec", -# # "System Calls/sec", -# # "Processor Queue Length", -# # "System Up Time", -# # ] -# -# ## Example counterPath where the Instance portion must be removed to get -# ## data back, such as from the Memory object. -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_mem" -# # ObjectName = "Memory" -# ## Use 6 x - to remove the Instance bit from the counterPath. -# # Instances = ["------"] -# # Counters = [ -# # "Available Bytes", -# # "Cache Faults/sec", -# # "Demand Zero Faults/sec", -# # "Page Faults/sec", -# # "Pages/sec", -# # "Transition Faults/sec", -# # "Pool Nonpaged Bytes", -# # "Pool Paged Bytes", -# # "Standby Cache Reserve Bytes", -# # "Standby Cache Normal Priority Bytes", -# # "Standby Cache Core Bytes", -# # ] -# -# ## Example query where the Instance portion must be removed to get data back, -# ## such as from the Paging File object. -# # [[inputs.win_perf_counters.object]] -# # Measurement = "win_swap" -# # ObjectName = "Paging File" -# # Instances = ["_Total"] -# # Counters = [ -# # "% Usage", -# # ] - - -# # Input plugin to report Windows services info. -# # This plugin ONLY supports Windows -# [[inputs.win_services]] -# ## Names of the services to monitor. Leave empty to monitor all the available -# ## services on the host. Globs accepted. Case insensitive. 
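-# ## For illustration (hypothetical service names): the glob "Win*" in the
-# ## list below matches every service whose name starts with "Win", e.g.
-# ## WinRM or WinDefend, while "LanmanServer" and "TermService" are matched
-# ## literally.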
-# service_names = [ -# "LanmanServer", -# "TermService", -# "Win*", -# ] -# -# # optional, list of service names to exclude -# excluded_service_names = ['WinRM'] - - -# # Input plugin to query Windows Management Instrumentation -# # This plugin ONLY supports Windows -# [[inputs.win_wmi]] -# [[inputs.win_wmi.query]] -# # a string representing the WMI namespace to be queried -# namespace = "root\\cimv2" -# # a string representing the WMI class to be queried -# class_name = "Win32_Volume" -# # an array of strings representing the properties of the WMI class to be queried -# properties = ["Name", "Capacity", "FreeSpace"] -# # a string specifying a WHERE clause to use as a filter for the WQL -# filter = 'NOT Name LIKE "\\\\?\\%"' -# # WMI class properties which should be considered tags instead of fields -# tag_properties = ["Name"] - - -# # Collect Wireguard server interface and peer statistics -# [[inputs.wireguard]] -# ## Optional list of Wireguard device/interface names to query. -# ## If omitted, all Wireguard interfaces are queried. -# # devices = ["wg0"] - - -# # Monitor wifi signal strength and quality -# # This plugin ONLY supports Linux -# [[inputs.wireless]] -# ## Sets 'proc' directory path -# ## If not specified, then default is /proc -# # host_proc = "/proc" - - -# # Reads metrics from a SSL certificate -# [[inputs.x509_cert]] -# ## List certificate sources, support wildcard expands for files -# ## Prefix your entry with 'file://' if you intend to use relative paths -# sources = ["tcp://example.org:443", "https://influxdata.com:443", -# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", -# "/etc/ssl/certs/ssl-cert-snakeoil.pem", -# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] -# -# ## Timeout for SSL connection -# # timeout = "5s" -# -# ## Pass a different name into the TLS request (Server Name Indication). -# ## This is synonymous with tls_server_name, and only one of the two -# ## options may be specified at one time. -# ## example: server_name = "myhost.example.org" -# # server_name = "myhost.example.org" -# -# ## Only output the leaf certificates and omit the root ones. -# # exclude_root_certs = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# # tls_server_name = "myhost.example.org" -# -# ## Set the proxy URL -# # use_proxy = true -# # proxy_url = "http://localhost:8888" - - -# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API -# [[inputs.xtremio]] -# ## XtremIO User Interface Endpoint -# url = "https://xtremio.example.com/" # required -# -# ## Credentials -# username = "user1" -# password = "pass123" -# -# ## Metrics to collect from the XtremIO -# # collectors = ["bbus","clusters","ssds","volumes","xms"] -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false - - -# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets -# # This plugin ONLY supports Linux & FreeBSD -# [[inputs.zfs]] -# ## ZFS kstat path. 
-# ## Ignored on FreeBSD
-# ## If not specified, then default is:
-# # kstatPath = "/proc/spl/kstat/zfs"
-#
-# ## By default, telegraf gathers all zfs stats
-# ## Override the stats list using the kstatMetrics array:
-# ## For FreeBSD, the default is:
-# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
-# ## For Linux, the default is:
-# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
-# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
-#
-# ## By default, don't gather zpool stats
-# # poolMetrics = false
-#
-# ## By default, don't gather dataset stats
-# # datasetMetrics = false
-
-
-# # Reads 'mntr' stats from one or many zookeeper servers
-# [[inputs.zookeeper]]
-# ## An array of addresses to gather stats about. Specify an ip or hostname
-# ## with port. e.g. localhost:2181, 10.0.0.1:2181, etc.
-#
-# ## If no servers are specified, then localhost is used as the host.
-# ## If no port is specified, 2181 is used
-# servers = [":2181"]
-#
-# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
-# # timeout = "5s"
-#
-# ## Float Parsing - the initial implementation forced any value unable to be
-# ## parsed as an int to be a string. Setting this to "float" will attempt to
-# ## parse float values as floats and not strings. This would break existing
-# ## metrics and may cause issues if a value switches between a float and int.
-# # parse_floats = "string"
-#
-# ## Optional TLS Config
-# # enable_tls = false
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## If false, skip chain & host verification
-# # insecure_skip_verify = true
-
-
-###############################################################################
-#                            SERVICE INPUT PLUGINS                            #
-###############################################################################
-
-
-# # Pull Metric Statistics from Aliyun CMS
-# [[inputs.aliyuncms]]
-# ## Aliyun Credentials
-# ## Credentials are loaded in the following order
-# ## 1) Ram RoleArn credential
-# ## 2) AccessKey STS token credential
-# ## 3) AccessKey credential
-# ## 4) Ecs Ram Role credential
-# ## 5) RSA keypair credential
-# ## 6) Environment variables credential
-# ## 7) Instance metadata credential
-#
-# # access_key_id = ""
-# # access_key_secret = ""
-# # access_key_sts_token = ""
-# # role_arn = ""
-# # role_session_name = ""
-# # private_key = ""
-# # public_key_id = ""
-# # role_name = ""
-#
-# ## Specify ali cloud regions to be queried for metric and object discovery
-# ## If not set, all supported regions (see below) would be covered; this can
-# ## put a significant load on the API, so the recommendation here is to
-# ## limit the list as much as possible.
-# ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
-# ## Default supported regions are:
-# ## cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,
-# ## cn-shanghai, cn-shenzhen, cn-heyuan,cn-chengdu,cn-hongkong,
-# ## ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
-# ## ap-south-1,ap-northeast-1, us-west-1,us-east-1,eu-central-1,
-# ## eu-west-1,me-east-1
-# ##
-# ## From a discovery perspective it sets the scope for object discovery;
-# ## the discovered info can be used to enrich the metrics with object
-# ## attributes/tags. Discovery is not supported for all projects.
-# ## Currently, discovery supported for the following projects: -# ## - acs_ecs_dashboard -# ## - acs_rds_dashboard -# ## - acs_slb_dashboard -# ## - acs_vpc_eip -# regions = ["cn-hongkong"] -# -# ## Requested AliyunCMS aggregation Period (required) -# ## The period must be multiples of 60s and the minimum for AliyunCMS metrics -# ## is 1 minute (60s). However not all metrics are made available to the -# ## one minute period. Some are collected at 3 minute, 5 minute, or larger -# ## intervals. -# ## See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv -# ## Note that if a period is configured that is smaller than the minimum for -# ## a particular metric, that metric will not be returned by Aliyun's -# ## OpenAPI and will not be collected by Telegraf. -# period = "5m" -# -# ## Collection Delay (required) -# ## The delay must account for metrics availability via AliyunCMS API. -# delay = "1m" -# -# ## Recommended: use metric 'interval' that is a multiple of 'period' -# ## to avoid gaps or overlap in pulled data -# interval = "5m" -# -# ## Metric Statistic Project (required) -# project = "acs_slb_dashboard" -# -# ## Maximum requests per second, default value is 200 -# ratelimit = 200 -# -# ## How often the discovery API call executed (default 1m) -# #discovery_interval = "1m" -# -# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the -# ## plugin definition, otherwise additional config options are read as part of -# ## the table -# -# ## Metrics to Pull -# ## At least one metrics definition required -# [[inputs.aliyuncms.metrics]] -# ## Metrics names to be requested, -# ## Description can be found here (per project): -# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq -# names = ["InstanceActiveConnection", "InstanceNewConnection"] -# -# ## Dimension filters for Metric (optional) -# ## This allows to get additional metric dimension. If dimension is not -# ## specified it can be returned or the data can be aggregated - it depends -# ## on particular metric, you can find details here: -# ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq -# ## -# ## Note, that by default dimension filter includes the list of discovered -# ## objects in scope (if discovery is enabled). Values specified here would -# ## be added into the list of discovered objects. You can specify either -# ## single dimension: -# # dimensions = '{"instanceId": "p-example"}' -# -# ## Or you can specify several dimensions at once: -# # dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' -# -# ## Tag Query Path -# ## The following tags added by default: -# ## * regionId (if discovery enabled) -# ## * userId -# ## * instanceId -# ## Enrichment tags, can be added from discovery (if supported) -# ## Notation is -# ## : -# ## To figure out which fields are available, consult the -# ## Describe API per project. For example, for SLB see: -# ## https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers¶ms={}&tab=MOCK&lang=GO -# # tag_query_path = [ -# # "address:Address", -# # "name:LoadBalancerName", -# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" -# # ] -# -# ## Allow metrics without discovery data, if discovery is enabled. -# ## If set to true, then metric without discovery data would be emitted, otherwise dropped. 
-# ## This can be of help when debugging dimension filters, or partial coverage
-# ## of discovery scope vs monitoring scope
-# # allow_dps_without_discovery = false
-
-
-# # AMQP consumer plugin
-# [[inputs.amqp_consumer]]
-# ## Brokers to consume from. If multiple brokers are specified a random broker
-# ## will be selected anytime a connection is established. This can be
-# ## helpful for load balancing when not using a dedicated load balancer.
-# brokers = ["amqp://localhost:5672/influxdb"]
-#
-# ## Authentication credentials for the PLAIN auth_method.
-# # username = ""
-# # password = ""
-#
-# ## Name of the exchange to declare. If unset, no exchange will be declared.
-# exchange = "telegraf"
-#
-# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
-# # exchange_type = "topic"
-#
-# ## If true, exchange will be passively declared.
-# # exchange_passive = false
-#
-# ## Exchange durability can be either "transient" or "durable".
-# # exchange_durability = "durable"
-#
-# ## Additional exchange arguments.
-# # exchange_arguments = { }
-# # exchange_arguments = {"hash_property" = "timestamp"}
-#
-# ## AMQP queue name.
-# queue = "telegraf"
-#
-# ## AMQP queue durability can be "transient" or "durable".
-# queue_durability = "durable"
-#
-# ## If true, queue will be passively declared.
-# # queue_passive = false
-#
-# ## Additional arguments when consuming from Queue
-# # queue_consume_arguments = { }
-# # queue_consume_arguments = {"x-stream-offset" = "first"}
-#
-# ## A binding between the exchange and queue using this binding key is
-# ## created. If unset, no binding is created.
-# binding_key = "#"
-#
-# ## Maximum number of messages server should give to the worker.
-# # prefetch_count = 50
-#
-# ## Max undelivered messages
-# ## This plugin uses tracking metrics, which ensure messages are read to
-# ## outputs before acknowledging them to the original broker to ensure data
-# ## is not lost. This option sets the maximum messages to read from the
-# ## broker that have not been written by an output.
-# ##
-# ## This value needs to be picked with awareness of the agent's
-# ## metric_batch_size value as well. Setting max undelivered messages too high
-# ## can result in a constant stream of data batches to the output. While
-# ## setting it too low may never flush the broker's messages.
-# # max_undelivered_messages = 1000
-#
-# ## Auth method. PLAIN and EXTERNAL are supported
-# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
-# ## described here: https://www.rabbitmq.com/plugins.html
-# # auth_method = "PLAIN"
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Content encoding for message payloads, can be set to
-# ## "gzip", "identity" or "auto"
-# ## - Use "gzip" to decode gzip
-# ## - Use "identity" to apply no encoding
-# ## - Use "auto" to determine the encoding using the ContentEncoding header
-# # content_encoding = "identity"
-#
-# ## Maximum size of decoded message.
-# ## Acceptable units are B, KiB, KB, MiB, MB...
-# ## Without quotes and units, interpreted as size in bytes.
-# # max_decompression_size = "500MB"
-#
-# ## Data format to consume.
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Get standard chrony metrics. -# [[inputs.chrony]] -# ## Server address of chronyd with address scheme -# ## If empty or not set, the plugin will mimic the behavior of chronyc and -# ## check "unix:///run/chrony/chronyd.sock", "udp://127.0.0.1:323" -# ## and "udp://[::1]:323". -# # server = "" -# -# ## Timeout for establishing the connection -# # timeout = "5s" -# -# ## Try to resolve received addresses to host-names via DNS lookups -# ## Disabled by default to avoid DNS queries especially for slow DNS servers. -# # dns_lookup = false -# -# ## Metrics to query named according to chronyc commands -# ## Available settings are: -# ## activity -- number of peers online or offline -# ## tracking -- information about system's clock performance -# ## serverstats -- chronyd server statistics -# ## sources -- extended information about peers -# ## sourcestats -- statistics on peers -# # metrics = ["tracking"] - - -# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms -# [[inputs.cisco_telemetry_mdt]] -# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when -# ## using the grpc transport. -# transport = "grpc" -# -# ## Address and port to host telemetry listener -# service_address = ":57000" -# -# ## Grpc Maximum Message Size, default is 4MB, increase the size. This is -# ## stored as a uint32, and limited to 4294967295. -# max_msg_size = 4000000 -# -# ## Enable TLS; grpc transport only. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Enable TLS client authentication and define allowed CA certificates; grpc -# ## transport only. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags -# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] -# -# ## Include the delete field in every telemetry message. -# # include_delete_field = false -# -# ## Specify custom name for incoming MDT source field. -# # source_field_name = "mdt_source" -# -# ## Define aliases to map telemetry encoding paths to simple measurement names -# [inputs.cisco_telemetry_mdt.aliases] -# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" -# ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. -# [inputs.cisco_telemetry_mdt.dmes] -# # Global Property Xformation. -# # prop1 = "uint64 to int" -# # prop2 = "uint64 to string" -# # prop3 = "string to uint64" -# # prop4 = "string to int64" -# # prop5 = "string to float64" -# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 -# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" -# # Per Path configuration is better as it avoid property collision issue of types. 
-# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' -# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' -# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' -# -# ## Additional GRPC connection settings. -# [inputs.cisco_telemetry_mdt.grpc_enforcement_policy] -# ## GRPC permit keepalives without calls, set to true if your clients are -# ## sending pings without calls in-flight. This can sometimes happen on IOS-XE -# ## devices where the GRPC connection is left open but subscriptions have been -# ## removed, and adding subsequent subscriptions does not keep a stable session. -# # permit_keepalive_without_calls = false -# -# ## GRPC minimum timeout between successive pings, decreasing this value may -# ## help if this plugin is closing connections with ENHANCE_YOUR_CALM (too_many_pings). -# # keepalive_minimum_time = "5m" - - -# # Read metrics from one or many ClickHouse servers -# [[inputs.clickhouse]] -# ## Username for authorization on ClickHouse server -# username = "default" -# -# ## Password for authorization on ClickHouse server -# # password = "" -# -# ## HTTP(s) timeout while getting metrics values -# ## The timeout includes connection time, any redirects, and reading the -# ## response body. -# # timeout = 5s -# -# ## List of servers for metrics scraping -# ## metrics scrape via HTTP(s) clickhouse interface -# ## https://clickhouse.tech/docs/en/interfaces/http/ -# servers = ["http://127.0.0.1:8123"] -# -# ## Server Variant -# ## When set to "managed", some queries are excluded from being run. This is -# ## useful for instances hosted in ClickHouse Cloud where certain tables are -# ## not available. -# # variant = "self-hosted" -# -# ## If "auto_discovery"" is "true" plugin tries to connect to all servers -# ## available in the cluster with using same "user:password" described in -# ## "user" and "password" parameters and get this server hostname list from -# ## "system.clusters" table. 
-# ## See
-# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
-# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
-# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
-# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
-# # auto_discovery = true
-#
-# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true".
-# ## When this filter is present, a "WHERE cluster IN (...)" filter will apply.
-# ## Please use only full cluster names here; regexp and glob filters are not
-# ## allowed. For "/etc/clickhouse-server/config.d/remote.xml":
-# ## <remote_servers>
-# ##   <my-own-cluster>
-# ##     <shard>
-# ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
-# ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
-# ##     </shard>
-# ##     <shard>
-# ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
-# ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
-# ##     </shard>
-# ##   </my-own-cluster>
-# ## </remote_servers>
-# ##
-# ##
-# ## example: cluster_include = ["my-own-cluster"]
-# # cluster_include = []
-#
-# ## Filter cluster names in "system.clusters" when "auto_discovery" is
-# ## "true". When this filter is present, a "WHERE cluster NOT IN (...)"
-# ## filter will apply.
-# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
-# # cluster_exclude = []
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-
-
-# # Read metrics from Google PubSub
-# [[inputs.cloud_pubsub]]
-# ## Required. Name of Google Cloud Platform (GCP) Project that owns
-# ## the given PubSub subscription.
-# project = "my-project"
-#
-# ## Required. Name of PubSub subscription to ingest metrics from.
-# subscription = "my-subscription"
-#
-# ## Required. Data format to consume.
-# ## Each data format has its own unique set of configuration options.
-# ## Read more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
-#
-# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
-# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
-# ## Application Default Credentials, which is preferred.
-# # credentials_file = "path/to/my/creds.json"
-#
-# ## Optional. Number of seconds to wait before attempting to restart the
-# ## PubSub subscription receiver after an unexpected error.
-# ## If the streaming pull for a PubSub Subscription fails (receiver),
-# ## the agent attempts to restart receiving messages after this many seconds.
-# # retry_delay_seconds = 5
-#
-# ## Optional. Maximum byte length of a message to consume.
-# ## Larger messages are dropped with an error. If less than 0 or unspecified,
-# ## treated as no limit.
-# # max_message_len = 1000000
-#
-# ## Max undelivered messages
-# ## This plugin uses tracking metrics, which ensure messages are read to
-# ## outputs before acknowledging them to the original broker to ensure data
-# ## is not lost. This option sets the maximum messages to read from the
-# ## broker that have not been written by an output.
-# ##
-# ## This value needs to be picked with awareness of the agent's
-# ## metric_batch_size value as well. Setting max undelivered messages too high
-# ## can result in a constant stream of data batches to the output. While
-# ## setting it too low may never flush the broker's messages.
-# # max_undelivered_messages = 1000
-#
-# ## The following are optional Subscription ReceiveSettings in PubSub.
-# ## Read more about these values:
-# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
-#
-# ## Optional. Maximum number of seconds for which a PubSub subscription
-# ## should auto-extend the PubSub ACK deadline for each message. If less than
-# ## 0, auto-extension is disabled.
-# # max_extension = 0
-#
-# ## Optional. Maximum number of unprocessed messages in PubSub
-# ## (unacknowledged but not yet expired in PubSub).
-# ## A value of 0 is treated as the default PubSub value.
-# ## Negative values will be treated as unlimited.
-# # max_outstanding_messages = 0
-#
-# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
-# ## (unacknowledged but not yet expired in PubSub).
-# ## A value of 0 is treated as the default PubSub value.
-# ## Negative values will be treated as unlimited.
-# # max_outstanding_bytes = 0
-#
-# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
-# ## to pull messages from PubSub concurrently. This limit applies to each
-# ## subscription separately and is treated as the PubSub default if less than
-# ## 1. Note this setting does not limit the number of messages that can be
-# ## processed concurrently (use "max_outstanding_messages" instead).
-# # max_receiver_go_routines = 0
-#
-# ## Optional. If true, Telegraf will attempt to base64 decode the
-# ## PubSub message data before parsing. Many GCP services that
-# ## output JSON to Google PubSub base64-encode the JSON payload.
-# # base64_data = false
-#
-# ## Content encoding for message payloads, can be set to "gzip" or
-# ## "identity" to apply no encoding.
-# # content_encoding = "identity"
-#
-# ## If content encoding is not "identity", sets the maximum allowed size,
-# ## in bytes, for a message payload when it's decompressed. Can be increased
-# ## for larger payloads or reduced to protect against decompression bombs.
-# ## Acceptable units are B, KiB, KB, MiB, MB...
-# # max_decompression_size = "500MB"
-
-
-# # Google Cloud Pub/Sub Push HTTP listener
-# [[inputs.cloud_pubsub_push]]
-# ## Address and port to host HTTP listener on
-# service_address = ":8080"
-#
-# ## Application secret to verify messages originate from Cloud Pub/Sub
-# # token = ""
-#
-# ## Path to listen to.
-# # path = "/"
-#
-# ## Maximum duration before timing out read of the request
-# # read_timeout = "10s"
-# ## Maximum duration before timing out write of the response. This should be
-# ## set to a value large enough that you can send at least 'metric_batch_size'
-# ## number of messages within the duration.
-# # write_timeout = "10s"
-#
-# ## Maximum allowed http request body size in bytes.
-# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
-# # max_body_size = "500MB"
-#
-# ## Whether to add the pubsub metadata, such as message attributes and
-# ## subscription as a tag.
-# # add_meta = false
-#
-# ## Max undelivered messages
-# ## This plugin uses tracking metrics, which ensure messages are read to
-# ## outputs before acknowledging them to the original broker to ensure data
-# ## is not lost. This option sets the maximum messages to read from the
-# ## broker that have not been written by an output.
-# ##
-# ## This value needs to be picked with awareness of the agent's
-# ## metric_batch_size value as well. Setting max undelivered messages too high
-# ## can result in a constant stream of data batches to the output. While
-# ## setting it too low may never flush the broker's messages.
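-# ## As a rough worked example (assuming the agent's default
-# ## metric_batch_size = 1000): with max_undelivered_messages = 1000 at most
-# ## one full batch is in flight unacknowledged; a value of 100 would force
-# ## sub-batch flushes, while 100000 could buffer roughly 100 batches in
-# ## memory before anything is acknowledged.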
-# # max_undelivered_messages = 1000 -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # AWS Metric Streams listener -# [[inputs.cloudwatch_metric_streams]] -# ## Address and port to host HTTP listener on -# service_address = ":443" -# -# ## Paths to listen to. -# # paths = ["/telegraf"] -# -# ## maximum duration before timing out read of the request -# # read_timeout = "10s" -# -# ## maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# # max_body_size = "500MB" -# -# ## Optional access key for Firehose security. -# # access_key = "test-key" -# -# ## An optional flag to keep Metric Streams metrics compatible with -# ## CloudWatch's API naming -# # api_compatability = false -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" - - -# # A ctrlX Data Layer server sent event input plugin -# [[inputs.ctrlx_datalayer]] -# ## Hostname or IP address of the ctrlX CORE Data Layer server -# ## example: server = "localhost" # Telegraf is running directly on the device -# ## server = "192.168.1.1" # Connect to ctrlX CORE remote via IP -# ## server = "host.example.com" # Connect to ctrlX CORE remote via hostname -# ## server = "10.0.2.2:8443" # Connect to ctrlX CORE Virtual from development environment -# server = "localhost" -# -# ## Authentication credentials -# username = "boschrexroth" -# password = "boschrexroth" -# -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Timeout for HTTP requests. (default: "10s") -# # timeout = "10s" -# -# -# ## Create a ctrlX Data Layer subscription. -# ## It is possible to define multiple subscriptions per host. Each subscription can have its own -# ## sampling properties and a list of nodes to subscribe to. -# ## All subscriptions share the same credentials. -# [[inputs.ctrlx_datalayer.subscription]] -# ## The name of the measurement. (default: "ctrlx") -# measurement = "memory" -# -# ## Configure the ctrlX Data Layer nodes which should be subscribed. -# ## address - node address in ctrlX Data Layer (mandatory) -# ## name - field name to use in the output (optional, default: base name of address) -# ## tags - extra node tags to be added to the output metric (optional) -# ## Note: -# ## Use either the inline notation or the bracketed notation, not both. 
-# ## The tags property is only supported in bracketed notation due to toml parser restrictions
-# ## Examples:
-# ## Inline notation
-# nodes=[
-# {name="available", address="framework/metrics/system/memavailable-mb"},
-# {name="used", address="framework/metrics/system/memused-mb"},
-# ]
-# ## Bracketed notation
-# # [[inputs.ctrlx_datalayer.subscription.nodes]]
-# # name ="available"
-# # address="framework/metrics/system/memavailable-mb"
-# # ## Define extra tags related to node to be added to the output metric (optional)
-# # [inputs.ctrlx_datalayer.subscription.nodes.tags]
-# # node_tag1="node_tag1"
-# # node_tag2="node_tag2"
-# # [[inputs.ctrlx_datalayer.subscription.nodes]]
-# # name ="used"
-# # address="framework/metrics/system/memused-mb"
-#
-# ## The switch "output_json_string" enables output of the measurement as json.
-# ## That way it can be used in a subsequent processor plugin, e.g. the "Starlark Processor Plugin".
-# # output_json_string = false
-#
-# ## Define extra tags related to subscription to be added to the output metric (optional)
-# # [inputs.ctrlx_datalayer.subscription.tags]
-# # subscription_tag1 = "subscription_tag1"
-# # subscription_tag2 = "subscription_tag2"
-#
-# ## The interval in which messages shall be sent by the ctrlX Data Layer to this plugin. (default: 1s)
-# ## Higher values reduce load on network by queuing samples on server side and sending as a single TCP packet.
-# # publish_interval = "1s"
-#
-# ## The interval a "keepalive" message is sent if no change of data occurs. (default: 60s)
-# ## Only used internally to detect broken network connections.
-# # keep_alive_interval = "60s"
-#
-# ## The interval an "error" message is sent if an error was received from a node. (default: 10s)
-# ## Higher values reduce load on output target and network in case of errors by limiting frequency of error messages.
-# # error_interval = "10s"
-#
-# ## The interval that defines the fastest rate at which the node values should be sampled and values captured. (default: 1s)
-# ## The sampling frequency should be adjusted to the dynamics of the signal to be sampled.
-# ## Higher sampling frequency increases load on ctrlX Data Layer.
-# ## The sampling frequency can be higher than the publish interval. Captured samples are put in a queue and sent in publish interval.
-# ## Note: The minimum sampling interval can be overruled by a global setting in the ctrlX Data Layer configuration ('datalayer/subscriptions/settings').
-# # sampling_interval = "1s"
-#
-# ## The requested size of the node value queue. (default: 10)
-# ## Relevant if more values are captured than can be sent.
-# # queue_size = 10
-#
-# ## The behaviour of the queue if it is full. (default: "DiscardOldest")
-# ## Possible values:
-# ## - "DiscardOldest"
-# ## The oldest value gets deleted from the queue when it is full.
-# ## - "DiscardNewest"
-# ## The newest value gets deleted from the queue when it is full.
-# # queue_behaviour = "DiscardOldest"
-#
-# ## The filter when a new value will be sampled. (default: 0.0)
-# ## Calculation rule: If (abs(lastCapturedValue - newValue) > dead_band_value) capture(newValue).
-# # dead_band_value = 0.0
-#
-# ## The conditions on which a sample should be captured and thus will be sent as a message. (default: "StatusValue")
-# ## Possible values:
-# ## - "Status"
-# ## Capture the value only, when the state of the node changes from or to error state. Value changes are ignored.
-# ## - "StatusValue" -# ## Capture when the value changes or the node changes from or to error state. -# ## See also 'dead_band_value' for what is considered as a value change. -# ## - "StatusValueTimestamp": -# ## Capture even if the value is the same, but the timestamp of the value is newer. -# ## Note: This might lead to high load on the network because every sample will be sent as a message -# ## even if the value of the node did not change. -# # value_change = "StatusValue" -# - - -# # Ingests files in a directory and then moves them to a target directory. -# [[inputs.directory_monitor]] -# ## The directory to monitor and read files from (including sub-directories if "recursive" is true). -# directory = "" -# # -# ## The directory to move finished files to (maintaining directory hierarchy from source). -# finished_directory = "" -# # -# ## Setting recursive to true will make the plugin recursively walk the directory and process all sub-directories. -# # recursive = false -# # -# ## The directory to move files to upon file error. -# ## If not provided, erroring files will stay in the monitored directory. -# # error_directory = "" -# # -# ## The amount of time a file is allowed to sit in the directory before it is picked up. -# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, -# ## set this higher so that the plugin will wait until the file is fully copied to the directory. -# # directory_duration_threshold = "50ms" -# # -# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. -# # files_to_monitor = ["^.*\\.csv"] -# # -# ## A list of files to ignore, if necessary. Supports regex. -# # files_to_ignore = [".DS_Store"] -# # -# ## Maximum lines of the file to process that have not yet be written by the -# ## output. For best throughput set to the size of the output's metric_buffer_limit. -# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics. -# # max_buffered_metrics = 10000 -# # -# ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. -# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. -# # file_queue_size = 100000 -# # -# ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. Cautious when file name variation is high, this can increase the cardinality -# ## significantly. Read more about cardinality here: -# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality -# # file_tag = "" -# # -# ## Specify if the file can be read completely at once or if it needs to be read line by line (default). -# ## Possible values: "line-by-line", "at-once" -# # parse_method = "line-by-line" -# # -# ## The dataformat to be read from the files. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Read logging output from the Docker engine -# [[inputs.docker_log]] -# ## Docker Endpoint -# ## To use TCP, set endpoint = "tcp://[ip]:[port]" -# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" -# # endpoint = "unix:///var/run/docker.sock" -# -# ## When true, container logs are read from the beginning; otherwise reading -# ## begins at the end of the log. If state-persistence is enabled for Telegraf, -# ## the reading continues at the last previously processed timestamp. -# # from_beginning = false -# -# ## Timeout for Docker API calls. -# # timeout = "5s" -# -# ## Containers to include and exclude. Globs accepted. -# ## Note that an empty array for both will include all containers -# # container_name_include = [] -# # container_name_exclude = [] -# -# ## Container states to include and exclude. Globs accepted. -# ## When empty only containers in the "running" state will be captured. -# # container_state_include = [] -# # container_state_exclude = [] -# -# ## docker labels to include and exclude as tags. Globs accepted. -# ## Note that an empty array for both will include all labels as tags -# # docker_label_include = [] -# # docker_label_exclude = [] -# -# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars -# source_tag = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - -# # Reads metrics from DPDK applications using v2 telemetry interface. -# # This plugin ONLY supports Linux -# [[inputs.dpdk]] -# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK -# ## telemetry interface. -# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" -# -# ## Duration that defines how long the connected socket client will wait for -# ## a response before terminating connection. -# ## This includes both writing to and reading from socket. Since it's local -# ## socket access to a fast packet processing application, the timeout should -# ## be sufficient for most users. -# ## Setting the value to 0 disables the timeout (not recommended) -# # socket_access_timeout = "200ms" -# -# ## Enables telemetry data collection for selected device types. -# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status, info). -# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). -# # device_types = ["ethdev"] -# -# ## List of custom, application-specific telemetry commands to query -# ## The list of available commands depend on the application deployed. -# ## Applications can register their own commands via telemetry library API -# ## https://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands -# ## For L3 Forwarding with Power Management Sample Application this could be: -# ## additional_commands = ["/l3fwd-power/stats"] -# # additional_commands = [] -# -# ## List of plugin options. -# ## Supported options: -# ## - "in_memory" option enables reading for multiple sockets when a dpdk application is running with --in-memory option. -# ## When option is enabled plugin will try to find additional socket paths related to provided socket_path. 
-# ## Details: https://doc.dpdk.org/guides/howto/telemetry.html#connecting-to-different-dpdk-processes -# # plugin_options = ["in_memory"] -# -# ## Specifies plugin behavior regarding unreachable socket (which might not have been initialized yet). -# ## Available choices: -# ## - error: Telegraf will return an error during the startup and gather phases if socket is unreachable -# ## - ignore: Telegraf will ignore error regarding unreachable socket on both startup and gather -# # unreachable_socket_behavior = "error" -# -# ## List of metadata fields which will be added to every metric produced by the plugin. -# ## Supported options: -# ## - "pid" - exposes PID of DPDK process. Example: pid=2179660i -# ## - "version" - exposes version of DPDK. Example: version="DPDK 21.11.2" -# # metadata_fields = ["pid", "version"] -# -# ## Allows turning off collecting data for individual "ethdev" commands. -# ## Remove "/ethdev/link_status" from list to gather link status metrics. -# [inputs.dpdk.ethdev] -# exclude_commands = ["/ethdev/link_status"] -# -# ## When running multiple instances of the plugin it's recommended to add a -# ## unique tag to each instance to identify metrics exposed by an instance -# ## of DPDK application. This is useful when multiple DPDK apps run on a -# ## single host. -# ## [inputs.dpdk.tags] -# ## dpdk_instance = "my-fwd-app" - - -# # Azure Event Hubs service input plugin -# [[inputs.eventhub_consumer]] -# ## The default behavior is to create a new Event Hub client from environment variables. -# ## This requires one of the following sets of environment variables to be set: -# ## -# ## 1) Expected Environment Variables: -# ## - "EVENTHUB_CONNECTION_STRING" -# ## -# ## 2) Expected Environment Variables: -# ## - "EVENTHUB_NAMESPACE" -# ## - "EVENTHUB_NAME" -# ## - "EVENTHUB_KEY_NAME" -# ## - "EVENTHUB_KEY_VALUE" -# -# ## 3) Expected Environment Variables: -# ## - "EVENTHUB_NAMESPACE" -# ## - "EVENTHUB_NAME" -# ## - "AZURE_TENANT_ID" -# ## - "AZURE_CLIENT_ID" -# ## - "AZURE_CLIENT_SECRET" -# -# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. -# ## This can either be the associated environment variable or hard coded directly. -# ## If this option is uncommented, environment variables will be ignored. -# ## Connection string should contain EventHubName (EntityPath) -# # connection_string = "" -# -# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister -# # persistence_dir = "" -# -# ## Change the default consumer group -# # consumer_group = "" -# -# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. -# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). -# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). -# # from_timestamp = -# # latest = true -# -# ## Set a custom prefetch count for the receiver(s) -# # prefetch_count = 1000 -# -# ## Add an epoch to the receiver(s) -# # epoch = 0 -# -# ## Change to set a custom user agent, "telegraf" is used by default -# # user_agent = "telegraf" -# -# ## To consume from a specific partition, set the partition_ids option. -# ## An empty array will result in receiving from all partitions. 
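-# ## Hypothetical example: for a four-partition Event Hub read by two
-# ## Telegraf instances, one instance could set partition_ids = ["0","1"]
-# ## and the other partition_ids = ["2","3"] to split the load.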
-# # partition_ids = ["0","1"] -# -# ## Max undelivered messages -# ## This plugin uses tracking metrics, which ensure messages are read to -# ## outputs before acknowledging them to the original broker to ensure data -# ## is not lost. This option sets the maximum messages to read from the -# ## broker that have not been written by an output. -# ## -# ## This value needs to be picked with awareness of the agent's -# ## metric_batch_size value as well. Setting max undelivered messages too high -# ## can result in a constant stream of data batches to the output. While -# ## setting it too low may never flush the broker's messages. -# # max_undelivered_messages = 1000 -# -# ## Set either option below to true to use a system property as timestamp. -# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. -# ## It is recommended to use this setting when the data itself has no timestamp. -# # enqueued_time_as_ts = true -# # iot_hub_enqueued_time_as_ts = true -# -# ## Tags or fields to create from keys present in the application property bag. -# ## These could for example be set by message enrichments in Azure IoT Hub. -# # application_property_tags = [] -# # application_property_fields = [] -# -# ## Tag or field name to use for metadata -# ## By default all metadata is disabled -# # sequence_number_field = "SequenceNumber" -# # enqueued_time_field = "EnqueuedTime" -# # offset_field = "Offset" -# # partition_id_tag = "PartitionID" -# # partition_key_tag = "PartitionKey" -# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" -# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" -# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" -# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" -# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Run executable as long-running input plugin -# [[inputs.execd]] -# ## One program to run as daemon. -# ## NOTE: process and each argument should each be their own string -# command = ["telegraf-smartctl", "-d", "/dev/sda"] -# -# ## Environment variables -# ## Array of "key=value" pairs to pass as environment variables -# ## e.g. "KEY=value", "USERNAME=John Doe", -# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" -# # environment = [] -# -# ## Define how the process is signaled on each collection interval. -# ## Valid values are: -# ## "none" : Do not signal anything. (Recommended for service inputs) -# ## The process must output metrics by itself. -# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) -# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) -# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. -# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. -# signal = "none" -# -# ## Delay before the process is restarted after an unexpected termination -# restart_delay = "10s" -# -# ## Buffer size used to read from the command output stream -# ## Optional parameter. Default is 64 Kib, minimum is 16 bytes -# # buffer_size = "64Kib" -# -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # gNMI telemetry input plugin -# [[inputs.gnmi]] -# ## Address and port of the gNMI GRPC server -# addresses = ["10.49.234.114:57777"] -# -# ## define credentials -# username = "cisco" -# password = "cisco" -# -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") -# # encoding = "proto" -# -# ## redial in case of failures after -# # redial = "10s" -# -# ## gRPC Maximum Message Size -# # max_msg_size = "4MB" -# -# ## Enable to get the canonical path as field-name -# # canonical_field_names = false -# -# ## Remove leading slashes and dots in field-name -# # trim_field_names = false -# -# ## Guess the path-tag if an update does not contain a prefix-path -# ## Supported values are -# ## none -- do not add a 'path' tag -# ## common path -- use the common path elements of all fields in an update -# ## subscription -- use the subscription path -# # path_guessing_strategy = "none" -# -# ## enable client-side TLS and define CA to authenticate the device -# # enable_tls = false -# # tls_ca = "/etc/telegraf/ca.pem" -# ## Minimal TLS version to accept by the client -# # tls_min_version = "TLS12" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true -# -# ## define client-side TLS certificate & key to authenticate to the device -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## gNMI subscription prefix (optional, can usually be left empty) -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# # origin = "" -# # prefix = "" -# # target = "" -# -# ## Vendor specific options -# ## This defines what vendor specific options to load. -# ## * Juniper Header Extension (juniper_header): some sensors are directly managed by -# ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this -# ## allows the decoding of the Extension header if present. Currently this knob -# ## adds component, component_id & sub_component_id as additional tags -# # vendor_specific = [] -# -# ## Define additional aliases to map encoding paths to measurement names -# # [inputs.gnmi.aliases] -# # ifcounters = "openconfig:/interfaces/interface/state/counters" -# -# [[inputs.gnmi.subscription]] -# ## Name of the measurement that will be emitted -# name = "ifcounters" -# -# ## Origin and path of the subscription -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# ## -# ## origin usually refers to a (YANG) data model implemented by the device -# ## and path to a specific substructure inside it that should be subscribed -# ## to (similar to an XPath). YANG models can be found e.g. here: -# ## https://github.com/YangModels/yang/tree/master/vendor/cisco/xr -# origin = "openconfig-interfaces" -# path = "/interfaces/interface/state/counters" -# -# ## Subscription mode ("target_defined", "sample", "on_change") and interval -# subscription_mode = "sample" -# sample_interval = "10s" -# -# ## Suppress redundant transmissions when measured values are unchanged -# # suppress_redundant = false -# -# ## If suppression is enabled, send updates at least every X seconds anyway -# # heartbeat_interval = "60s" -# -# ## Tag subscriptions are applied as tags to other subscriptions. 
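-# ## Illustrative example: the tag subscription named "descr" sketched below
-# ## subscribes to /interfaces/interface/state and can attach values gathered
-# ## there (e.g. the interface description) as tags on the "ifcounters"
-# ## metrics defined above.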
-# # [[inputs.gnmi.tag_subscription]]
-# # ## When applying this value as a tag to other metrics, use this tag name
-# # name = "descr"
-# #
-# # ## All other subscription fields are as normal
-# # origin = "openconfig-interfaces"
-# # path = "/interfaces/interface/state"
-# # subscription_mode = "on_change"
-# #
-# # ## Match strategy to use for the tag.
-# # ## Tags are only applied for metrics of the same address. The following
-# # ## settings are valid:
-# # ## unconditional -- always match
-# # ## name -- match by the "name" key
-# # ## This resembles the previous 'tag-only' behavior.
-# # ## elements -- match by the keys in the path filtered by the path
-# # ## parts specified `elements` below
-# # ## By default, 'elements' is used if the 'elements' option is provided,
-# # ## otherwise match by 'name'.
-# # # match = ""
-# #
-# # ## For the 'elements' match strategy, at least one path-element name must
-# # ## be supplied containing at least one key to match on. Multiple path
-# # ## elements can be specified in any order. All given keys must be equal
-# # ## for a match.
-# # # elements = ["description", "interface"]
-
-
-# ## DEPRECATED: The "http_listener" plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
-# # Accept metrics over InfluxDB 1.x HTTP API
-# [[inputs.influxdb_listener]]
-# ## Address and port to host HTTP listener on
-# service_address = ":8186"
-#
-# ## maximum duration before timing out read of the request
-# read_timeout = "10s"
-# ## maximum duration before timing out write of the response
-# write_timeout = "10s"
-#
-# ## Maximum allowed HTTP request body size in bytes.
-# ## 0 means to use the default of 32MiB.
-# max_body_size = 0
-#
-# ## Maximum line size allowed to be sent in bytes.
-# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
-# # max_line_size = 0
-#
-# ## Set one or more allowed client CA certificate file names to
-# ## enable mutually authenticated TLS connections
-# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
-#
-# ## Add service certificate and key
-# tls_cert = "/etc/telegraf/cert.pem"
-# tls_key = "/etc/telegraf/key.pem"
-#
-# ## Optional tag name used to store the database name.
-# ## If the write has a database in the query string then it will be kept in this tag name.
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# ## If you have a tag that is the same as the one specified below, and supply a database,
-# ## the tag will be overwritten with the database supplied.
-# # database_tag = ""
-#
-# ## If set the retention policy specified in the write query will be added as
-# ## the value of this tag name.
-# # retention_policy_tag = ""
-#
-# ## Optional username and password to accept for HTTP basic authentication
-# ## or authentication token.
-# ## You probably want to make sure you have TLS configured above for this.
-# ## Use these options for the authentication token in the form
-# ## Authentication: Token <username>:<password>
-# # basic_username = "foobar"
-# # basic_password = "barfoo"
-#
-# ## Optional JWT token authentication for HTTP requests
-# ## Please see the documentation at
-# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens
-# ## for further details.
-# ## Please note: Token authentication and basic authentication cannot be used -# ## at the same time. -# # token_shared_secret = "" -# # token_username = "" -# -# ## Influx line protocol parser -# ## 'internal' is the default. 'upstream' is a newer parser that is faster -# ## and more memory efficient. -# # parser_type = "internal" - - -# # Generic HTTP write listener -# [[inputs.http_listener_v2]] -# ## Address and port to host HTTP listener on -# service_address = ":8080" -# -# ## Paths to listen to. -# # paths = ["/telegraf"] -# -# ## Save path as http_listener_v2_path tag if set to true -# # path_tag = false -# -# ## HTTP methods to accept. -# # methods = ["POST", "PUT"] -# -# ## Optional HTTP headers -# ## These headers are applied to the server that is listening for HTTP -# ## requests and included in responses. -# # http_headers = {"HTTP_HEADER" = "TAG_NAME"} -# -# ## maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) -# # max_body_size = "500MB" -# -# ## Part of the request to consume. Available options are "body" and -# ## "query". -# # data_source = "body" -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# -# ## Minimal TLS version accepted by the server -# # tls_min_version = "TLS12" -# -# ## Optional username and password to accept for HTTP basic authentication. -# ## You probably want to make sure you have TLS configured above for this. -# # basic_username = "foobar" -# # basic_password = "barfoo" -# -# ## Optional setting to map http headers into tags -# ## If the http header is not present on the request, no corresponding tag will be added -# ## If multiple instances of the http header are present, only the first value will be used -# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Accept metrics over InfluxDB 1.x HTTP API -# [[inputs.influxdb_listener]] -# ## Address and port to host HTTP listener on -# service_address = ":8186" -# -# ## maximum duration before timing out read of the request -# read_timeout = "10s" -# ## maximum duration before timing out write of the response -# write_timeout = "10s" -# -# ## Maximum allowed HTTP request body size in bytes. -# ## 0 means to use the default of 32MiB. -# max_body_size = 0 -# -# ## Maximum line size allowed to be sent in bytes. -# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored -# # max_line_size = 0 -# -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections -# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Add service certificate and key -# tls_cert = "/etc/telegraf/cert.pem" -# tls_key = "/etc/telegraf/key.pem" -# -# ## Optional tag name used to store the database name. -# ## If the write has a database in the query string then it will be kept in this tag name. 
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# ## If you have a tag that is the same as the one specified below, and supply a database,
-# ## the tag will be overwritten with the database supplied.
-# # database_tag = ""
-#
-# ## If set the retention policy specified in the write query will be added as
-# ## the value of this tag name.
-# # retention_policy_tag = ""
-#
-# ## Optional username and password to accept for HTTP basic authentication
-# ## or authentication token.
-# ## You probably want to make sure you have TLS configured above for this.
-# ## Use these options for the authentication token in the form
-# ##   Authentication: Token <username>:<password>
-# # basic_username = "foobar"
-# # basic_password = "barfoo"
-#
-# ## Optional JWT token authentication for HTTP requests
-# ## Please see the documentation at
-# ## https://docs.influxdata.com/influxdb/v1.8/administration/authentication_and_authorization/#authenticate-using-jwt-tokens
-# ## for further details.
-# ## Please note: Token authentication and basic authentication cannot be used
-# ## at the same time.
-# # token_shared_secret = ""
-# # token_username = ""
-#
-# ## Influx line protocol parser
-# ## 'internal' is the default. 'upstream' is a newer parser that is faster
-# ## and more memory efficient.
-# # parser_type = "internal"
-
-
-# # Accept metrics over InfluxDB 2.x HTTP API
-# [[inputs.influxdb_v2_listener]]
-# ## Address and port to host InfluxDB listener on
-# ## (Double check the port. Could be 9999 if using OSS Beta)
-# service_address = ":8086"
-#
-# ## Maximum duration before timing out read of the request
-# # read_timeout = "10s"
-# ## Maximum duration before timing out write of the response
-# # write_timeout = "10s"
-#
-# ## Maximum allowed HTTP request body size in bytes.
-# ## 0 means to use the default of 32MiB.
-# # max_body_size = "32MiB"
-#
-# ## Optional tag to determine the bucket.
-# ## If the write has a bucket in the query string then it will be kept in this tag name.
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# # bucket_tag = ""
-#
-# ## Set one or more allowed client CA certificate file names to
-# ## enable mutually authenticated TLS connections
-# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
-#
-# ## Add service certificate and key
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-#
-# ## Optional token to accept for HTTP authentication.
-# ## You probably want to make sure you have TLS configured above for this.
-# # token = "some-long-shared-secret-token"
-#
-# ## Influx line protocol parser
-# ## 'internal' is the default. 'upstream' is a newer parser that is faster
-# ## and more memory efficient.
-# # parser_type = "internal"
-
-
-# # Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem
-# # This plugin ONLY supports Linux on amd64
-# [[inputs.intel_pmu]]
-# ## List of filesystem locations of JSON files that contain PMU event definitions.
-# event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"]
-#
-# ## List of core events measurement entities. There can be more than one core_events section.
-# [[inputs.intel_pmu.core_events]]
-# ## List of events to be counted. Event names shall match names from event_definitions files.
-# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
-# ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones.
-# events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
-#
-# ## Limits the counting of events to core numbers specified.
-# ## If absent, events are counted on all cores.
-# ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element.
-# ##   example: cores = ["0,2", "4", "12-16"]
-# cores = ["0"]
-#
-# ## Indicator that plugin shall attempt to run core_events.events as a single perf group.
-# ## If absent or set to false, each event is counted individually. Defaults to false.
-# ## This limits the number of events that can be measured to a maximum of available hardware counters per core.
-# ## Could vary depending on type of event, use of fixed counters.
-# # perf_group = false
-#
-# ## Optionally set a custom tag value that will be added to every measurement within this events group.
-# ## Can be applied to any group of events, unrelated to perf_group setting.
-# # events_tag = ""
-#
-# ## List of uncore event measurement entities. There can be more than one uncore_events section.
-# [[inputs.intel_pmu.uncore_events]]
-# ## List of events to be counted. Event names shall match names from event_definitions files.
-# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
-# ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones.
-# events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"]
-#
-# ## Limits the counting of events to specified sockets.
-# ## If absent, events are counted on all sockets.
-# ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element.
-# ##   example: sockets = ["0-2"]
-# sockets = ["0"]
-#
-# ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore.
-# ## If absent or set to false, events for each unit are exposed as a separate metric. Defaults to false.
-# # aggregate_uncore_units = false
-#
-# ## Optionally set a custom tag value that will be added to every measurement within this events group.
-# # events_tag = ""
-
-
-# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP)
-# # and per-CPU metrics like temperature, power and utilization. Please see the
-# # plugin readme for details on software and hardware compatibility.
-# # This plugin ONLY supports Linux.
-# [[inputs.intel_powerstat]]
-# ## The user can choose which package metrics are monitored by the plugin with
-# ## the package_metrics setting:
-# ## - The default will collect "current_power_consumption",
-# ##   "current_dram_power_consumption" and "thermal_design_power".
-# ## - Leaving this setting empty means no package metrics will be collected.
-# ## - Finally, a user can specify individual metrics to capture from the
-# ##   supported options list.
-# ## Supported options:
-# ##   "current_power_consumption", "current_dram_power_consumption",
-# ##   "thermal_design_power", "max_turbo_frequency", "uncore_frequency",
-# ##   "cpu_base_frequency"
-# # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"]
-#
-# ## The user can choose which per-CPU metrics are monitored by the plugin in
-# ## cpu_metrics array.
-# ## Empty or missing array means no per-CPU specific metrics will be collected
-# ## by the plugin.
-# ## Supported options:
-# ##   "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency",
-# ##   "cpu_c3_state_residency", "cpu_c6_state_residency", "cpu_c7_state_residency",
-# ##   "cpu_temperature", "cpu_busy_frequency", "cpu_c0_substate_c01",
-# ##   "cpu_c0_substate_c02", "cpu_c0_substate_c0_wait"
-# # cpu_metrics = []
-#
-# ## Optionally the user can choose for which CPUs metrics configured in cpu_metrics array should be gathered.
-# ## Can't be combined with excluded_cpus.
-# ## Empty or missing array means CPU metrics are gathered for all CPUs.
-# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
-# # included_cpus = []
-#
-# ## Optionally the user can choose which CPUs should be excluded from gathering metrics configured in cpu_metrics array.
-# ## Can't be combined with included_cpus.
-# ## Empty or missing array means CPU metrics are gathered for all CPUs.
-# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
-# # excluded_cpus = []
-#
-# ## Filesystem location of JSON file that contains PMU event definitions.
-# ## Mandatory only for perf-related metrics (cpu_c0_substate_c01, cpu_c0_substate_c02, cpu_c0_substate_c0_wait).
-# # event_definitions = ""
-#
-# ## The user can set the timeout duration for MSR reading.
-# ## Enabling this timeout can be useful in situations where, on heavily loaded systems,
-# ## the code waits too long for a kernel response to MSR read requests.
-# ## 0 disables the timeout (default).
-# # msr_read_timeout = "0ms"
-
-
-# # Read Intel RDT metrics
-# # This plugin ONLY supports non-Windows
-# [[inputs.intel_rdt]]
-# ## Optionally set sampling interval to Nx100ms.
-# ## This value is propagated to pqos tool. Interval format is defined by pqos itself.
-# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s.
-# # sampling_interval = "10"
-#
-# ## Optionally specify the path to pqos executable.
-# ## If not provided, auto discovery will be performed.
-# # pqos_path = "/usr/local/bin/pqos"
-#
-# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
-# ## If not provided, default value is false.
-# # shortened_metrics = false
-#
-# ## Specify the list of groups of CPU core(s) to be provided as pqos input.
-# ## Mandatory if processes aren't set and forbidden if processes are specified.
-# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
-# # cores = ["0-3"]
-#
-# ## Specify the list of processes for which Metrics will be collected.
-# ## Mandatory if cores aren't set and forbidden if cores are specified.
-# ## e.g. ["qemu", "pmd"]
-# # processes = ["process"]
-#
-# ## Specify if the pqos process should be called with sudo.
-# ## Mandatory if the telegraf process does not run as root.
-# # use_sudo = false
-
-
-# # Subscribe and receive OpenConfig Telemetry data using JTI
-# [[inputs.jti_openconfig_telemetry]]
-# ## List of device addresses to collect telemetry from
-# servers = ["localhost:1883"]
-#
-# ## Authentication details. Username and password are required if the device
-# ## expects authentication.
-# ## Client ID must be unique when connecting from multiple instances
-# ## of telegraf to the same device
-# username = "user"
-# password = "pass"
-# client_id = "telegraf"
-#
-# ## Frequency to get data
-# sample_frequency = "1000ms"
-#
-# ## Sensors to subscribe for
-# ## An identifier for each sensor can be provided in the path by separating with a space.
-# ## Otherwise the sensor path will be used as the identifier.
-# ## When an identifier is used, we can provide a list of space separated sensors.
-# ## A single subscription will be created with all these sensors and data will
-# ## be saved to a measurement with this identifier name.
-# sensors = [
-#  "/interfaces/",
-#  "collection /components/ /lldp",
-# ]
-#
-# ## We allow specifying sensor group level reporting rate. To do this, specify the
-# ## reporting rate in Duration at the beginning of sensor paths / collection
-# ## name. For entries without reporting rate, we use the configured sample frequency.
-# sensors = [
-#  "1000ms customReporting /interfaces /lldp",
-#  "2000ms collection /components",
-#  "/interfaces",
-# ]
-#
-# ## Timestamp Source
-# ## Set to 'collection' for time of collection, and 'data' for using the time
-# ## provided by the _timestamp field.
-# # timestamp_source = "collection"
-#
-# ## Optional TLS Config
-# # enable_tls = false
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Minimal TLS version to accept by the client
-# # tls_min_version = "TLS12"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
-# ## Failed streams/calls will not be retried if 0 is provided
-# retry_delay = "1000ms"
-#
-# ## Period for sending keep-alive packets on idle connections
-# ## This is helpful to identify broken connections to the server
-# # keep_alive_period = "10s"
-#
-# ## To treat all string values as tags, set this to true
-# str_as_tags = false
-
-
-# # Read metrics from Kafka topics
-# [[inputs.kafka_consumer]]
-# ## Kafka brokers.
-# brokers = ["localhost:9092"]
-#
-# ## Set the minimal supported Kafka version. Should be a string containing
-# ## 4 digits for 0.x versions and 3 digits for versions starting from 1.0.0,
-# ## separated by dots. This setting enables the use of new Kafka features
-# ## and APIs. Must be 0.10.2.0 (used as default) or greater.
-# ## Please, check the list of supported versions at
-# ## https://pkg.go.dev/github.com/Shopify/sarama#SupportedVersions
-# ##   ex: kafka_version = "2.6.0"
-# ##   ex: kafka_version = "0.10.2.0"
-# # kafka_version = "0.10.2.0"
-#
-# ## Topics to consume.
-# topics = [
-#   "multitemp",
-#   "Feinstaub",
-#   "wechselrichter/wechselrichter",
-#   "kellerluefter",
-#   "watermeter",
-#   "temp/#",
-#   "openWB/#",
-#   "blumentopf/#",
-#   "server_monitor",
-# ]
-#
-# ## Topic regular expressions to consume. Matches will be added to topics.
-# ## Example: topic_regexps = [ "*test", "metric[0-9A-z]*" ]
-# # topic_regexps = [ ]
-#
-# ## When set this tag will be added to all metrics with the topic as the value.
-# # topic_tag = ""
-#
-# ## The list of Kafka message headers that should be passed as metric tags;
-# ## works only for Kafka version 0.11+, on lower versions the message headers
-# ## are not available
-# # msg_headers_as_tags = []
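-# ## Illustrative example (hypothetical header names, not from the original
-# ## file): with msg_headers_as_tags = ["host", "env"], a message carrying
-# ## the Kafka header host=web01 would be emitted with the tag host=web01.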
-# ## The name of the Kafka message header whose value should override the metric name.
-# ## If the same header is specified both here and in the msg_headers_as_tags
-# ## option, it will be excluded from the msg_headers_as_tags list.
-# # msg_header_as_metric_name = ""
-#
-# ## Optional Client id
-# # client_id = "Telegraf"
-#
-# ## Optional TLS Config
-# # enable_tls = false
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Period between keep alive probes.
-# ## Defaults to the OS configuration if not specified or zero.
-# # keep_alive_period = "15s"
-#
-# ## SASL authentication credentials. These settings should typically be used
-# ## with TLS encryption enabled
-# # sasl_username = "kafka"
-# # sasl_password = "secret"
-#
-# ## Optional SASL:
-# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
-# ## (defaults to PLAIN)
-# # sasl_mechanism = ""
-#
-# ## used if sasl_mechanism is GSSAPI
-# # sasl_gssapi_service_name = ""
-# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
-# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
-# # sasl_gssapi_kerberos_config_path = "/"
-# # sasl_gssapi_realm = "realm"
-# # sasl_gssapi_key_tab_path = ""
-# # sasl_gssapi_disable_pafxfast = false
-#
-# ## used if sasl_mechanism is OAUTHBEARER
-# # sasl_access_token = ""
-#
-# ## SASL protocol version. When connecting to Azure EventHub set to 0.
-# # sasl_version = 1
-#
-# # Disable Kafka metadata full fetch
-# # metadata_full = false
-#
-# ## Name of the consumer group.
-# # consumer_group = "telegraf_metrics_consumers"
-#
-# ## Compression codec represents the various compression codecs recognized by
-# ## Kafka in messages.
-# ##   0 : None
-# ##   1 : Gzip
-# ##   2 : Snappy
-# ##   3 : LZ4
-# ##   4 : ZSTD
-# # compression_codec = 0
-# ## Initial offset position; one of "oldest" or "newest".
-# # offset = "oldest"
-#
-# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
-# # balance_strategy = "range"
-#
-# ## Maximum number of retries for metadata operations including
-# ## connecting. Sets Sarama library's Metadata.Retry.Max config value. If 0 or
-# ## unset, use the Sarama default of 3.
-# # metadata_retry_max = 0
-#
-# ## Type of retry backoff. Valid options: "constant", "exponential"
-# # metadata_retry_type = "constant"
-#
-# ## Amount of time to wait before retrying. When metadata_retry_type is
-# ## "constant", each retry is delayed this amount. When "exponential", the
-# ## first retry is delayed this amount, and subsequent delays are doubled. If 0
-# ## or unset, use the Sarama default of 250 ms
-# # metadata_retry_backoff = 0
-#
-# ## Maximum amount of time to wait before retrying when metadata_retry_type is
-# ## "exponential". Ignored for other retry types. If 0, there is no backoff
-# ## limit.
-# # metadata_retry_max_duration = 0
-#
-# ## Strategy for making connection to kafka brokers. Valid options: "startup",
-# ## "defer". If set to "defer" the plugin is allowed to start before making a
-# ## connection.
This is useful if the broker may be down when telegraf is -# ## started, but if there are any typos in the broker setting, they will cause -# ## connection failures without warning at startup -# # connection_strategy = "startup" -# -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 1000000 -# -# ## Max undelivered messages -# ## This plugin uses tracking metrics, which ensure messages are read to -# ## outputs before acknowledging them to the original broker to ensure data -# ## is not lost. This option sets the maximum messages to read from the -# ## broker that have not been written by an output. -# ## -# ## This value needs to be picked with awareness of the agent's -# ## metric_batch_size value as well. Setting max undelivered messages too high -# ## can result in a constant stream of data batches to the output. While -# ## setting it too low may never flush the broker's messages. -# # max_undelivered_messages = 1000 -# -# ## Maximum amount of time the consumer should take to process messages. If -# ## the debug log prints messages from sarama about 'abandoning subscription -# ## to [topic] because consuming was taking too long', increase this value to -# ## longer than the time taken by the output plugin(s). -# ## -# ## Note that the effective timeout could be between 'max_processing_time' and -# ## '2 * max_processing_time'. -# # max_processing_time = "100ms" -# -# ## The default number of message bytes to fetch from the broker in each -# ## request (default 1MB). This should be larger than the majority of -# ## your messages, or else the consumer will spend a lot of time -# ## negotiating sizes and not actually consuming. Similar to the JVM's -# ## `fetch.message.max.bytes`. -# # consumer_fetch_default = "1MB" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Configuration for the AWS Kinesis input. -# [[inputs.kinesis_consumer]] -# ## Amazon REGION of kinesis endpoint. -# region = "ap-southeast-2" -# -# ## Amazon Credentials -# ## Credentials are loaded in the following order -# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified -# ## 2) Assumed credentials via STS if role_arn is specified -# ## 3) explicit credentials from 'access_key' and 'secret_key' -# ## 4) shared profile from 'profile' -# ## 5) environment variables -# ## 6) shared credentials file -# ## 7) EC2 Instance Profile -# # access_key = "" -# # secret_key = "" -# # token = "" -# # role_arn = "" -# # web_identity_token_file = "" -# # role_session_name = "" -# # profile = "" -# # shared_credential_file = "" -# -# ## Endpoint to make request against, the correct endpoint is automatically -# ## determined and this option should only be set if you wish to override the -# ## default. -# ## ex: endpoint_url = "http://localhost:8000" -# # endpoint_url = "" -# -# ## Kinesis StreamName must exist prior to starting telegraf. -# streamname = "StreamName" -# -# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) -# # shard_iterator_type = "TRIM_HORIZON" -# -# ## Max undelivered messages -# ## This plugin uses tracking metrics, which ensure messages are read to -# ## outputs before acknowledging them to the original broker to ensure data -# ## is not lost. 
-# ## This option sets the maximum messages to read from the
-# ## broker that have not been written by an output.
-# ##
-# ## This value needs to be picked with awareness of the agent's
-# ## metric_batch_size value as well. Setting max undelivered messages too high
-# ## can result in a constant stream of data batches to the output. While
-# ## setting it too low may never flush the broker's messages.
-# # max_undelivered_messages = 1000
-#
-# ## Data format to consume.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
-#
-# ##
-# ## The content encoding of the data from kinesis
-# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip"
-# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws
-# ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding
-# ## is done automatically by the golang sdk, as data is read from kinesis)
-# ##
-# # content_encoding = "identity"
-#
-# ## Optional
-# ## Configuration for a dynamodb checkpoint
-# [inputs.kinesis_consumer.checkpoint_dynamodb]
-# ## unique name for this consumer
-# app_name = "default"
-# table_name = "default"
-
-
-# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
-# [[inputs.knx_listener]]
-# ## Type of KNX-IP interface.
-# ## Can be either "tunnel_udp", "tunnel_tcp", "tunnel" (alias for tunnel_udp) or "router".
-# # service_type = "tunnel"
-#
-# ## Address of the KNX-IP interface.
-# service_address = "localhost:3671"
-#
-# ## Measurement definition(s)
-# # [[inputs.knx_listener.measurement]]
-# #   ## Name of the measurement
-# #   name = "temperature"
-# #   ## Datapoint-Type (DPT) of the KNX messages
-# #   dpt = "9.001"
-# #   ## List of Group-Addresses (GAs) assigned to the measurement
-# #   addresses = ["5/5/1"]
-#
-# # [[inputs.knx_listener.measurement]]
-# #   name = "illumination"
-# #   dpt = "9.004"
-# #   addresses = ["5/5/3"]
-
-
-# # Read metrics off Arista LANZ, via socket
-# [[inputs.lanz]]
-# ## URL to Arista LANZ endpoint
-# servers = [
-#  "tcp://switch1.int.example.com:50001",
-#  "tcp://switch2.int.example.com:50001",
-# ]
-
-
-# ## DEPRECATED: The "logparser" plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead.
-# # Stream and parse log file(s)
-# [[inputs.logparser]]
-# ## Log files to parse.
-# ## These accept standard unix glob matching rules, but with the addition of
-# ## ** as a "super asterisk". ie:
-# ##   /var/log/**.log     -> recursively find all .log files in /var/log
-# ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
-# ##   /var/log/apache.log -> only tail the apache log file
-# files = ["/var/log/apache/access.log"]
-#
-# ## Read files that currently exist from the beginning. Files that are created
-# ## while telegraf is running (and that match the "files" globs) will always
-# ## be read from the beginning.
-# from_beginning = false
-#
-# ## Method used to watch for file updates. Can be either "inotify" or "poll".
-# # watch_method = "inotify"
-#
-# ## Parse logstash-style "grok" patterns:
-# [inputs.logparser.grok]
-# ## This is a list of patterns to check the given log file(s) for.
-# ## Note that adding patterns here increases processing time. The most
-# ## efficient configuration is to have one pattern per logparser.
-# ## Other common built-in patterns are:
-# ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
-# ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
-# patterns = ["%{COMBINED_LOG_FORMAT}"]
-#
-# ## Name of the outputted measurement name.
-# measurement = "apache_access_log"
-#
-# ## Full path(s) to custom pattern files.
-# custom_pattern_files = []
-#
-# ## Custom patterns can also be defined here. Put one pattern per line.
-# custom_patterns = '''
-# '''
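-# ## Illustrative only (not from the original file): a custom pattern such as
-# ##   POSTFIX_QUEUEID [0-9A-F]{10,11}
-# ## placed inside custom_patterns above could then be referenced from the
-# ## 'patterns' list as "%{POSTFIX_QUEUEID:queueid}".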
-# ## Timezone allows you to provide an override for timestamps that
-# ## don't already include an offset
-# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
-# ##
-# ## Default: "" which renders UTC
-# ## Options are as follows:
-# ##   1. Local             -- interpret based on machine localtime
-# ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
-# ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
-# # timezone = "Canada/Eastern"
-#
-# ## When set to "disable", timestamp will not be incremented if there is a
-# ## duplicate.
-# # unique_timestamp = "auto"
-
-
-# # Read metrics from one or many MongoDB servers
-# [[inputs.mongodb]]
-# ## An array of URLs of the form:
-# ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
-# ## For example:
-# ##   mongodb://user:auth_key@10.10.3.30:27017,
-# ##   mongodb://10.10.3.33:18832,
-# ##
-# ## If connecting to a cluster, users must include the "?connect=direct" in
-# ## the URL to ensure that the connection goes directly to the specified node
-# ## and not have all connections passed to the master node.
-# servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
-#
-# ## When true, collect cluster status.
-# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
-# ## may have an impact on performance.
-# # gather_cluster_status = true
-#
-# ## When true, collect per database stats
-# # gather_perdb_stats = false
-#
-# ## When true, collect per collection stats
-# # gather_col_stats = false
-#
-# ## When true, collect usage statistics for each collection
-# ## (insert, update, queries, remove, getmore, commands etc...).
-# # gather_top_stat = false
-#
-# ## List of db where collections stats are collected
-# ## If empty, all db are concerned
-# # col_stats_dbs = ["local"]
-#
-# ## Optional TLS Config
-# # tls_ca = "/etc/telegraf/ca.pem"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
-# # insecure_skip_verify = false
-#
-# ## Specifies plugin behavior regarding disconnected servers
-# ## Available choices :
-# ##   - error: telegraf will return an error on startup if one the servers is unreachable
-# ##   - skip: telegraf will skip unreachable servers on both startup and gather
-# # disconnected_servers_behavior = "error"
-
-
-# # Read metrics from MQTT topic(s)
- [[inputs.mqtt_consumer]]
-# ## Broker URLs for the MQTT server or cluster. To connect to multiple
-# ## clusters or standalone servers, use a separate plugin instance.
-# ##   example: servers = ["tcp://localhost:1883"]
-# ##            servers = ["ssl://localhost:1883"]
-# ##            servers = ["ws://localhost:1883"]
- servers = ["ssl://mqtt-broker.fritz.box:8883"]
-#
-# ## Topics that will be subscribed to.
- topics = [
-  "multitemp",
-  "Feinstaub",
-  "wechselrichter/wechselrichter",
-  "kellerluefter",
-  "watermeter",
-  "temp/#",
-  "openWB/#",
-  "blumentopf/#",
-  "server_monitor",
- ]
-#
-# ## The message topic will be stored in a tag specified by this value. If set
-# ## to the empty string no topic tag will be created.
-# # topic_tag = "topic"
-#
-# ## QoS policy for messages
-# ##   0 = at most once
-# ##   1 = at least once
-# ##   2 = exactly once
-# ##
-# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
-# ## resuming unacknowledged messages.
- qos = 0
-#
-# ## Connection timeout for initial connection in seconds
- connection_timeout = "30s"
-#
-# ## Max undelivered messages
-# ## This plugin uses tracking metrics, which ensure messages are read to
-# ## outputs before acknowledging them to the original broker to ensure data
-# ## is not lost. This option sets the maximum messages to read from the
-# ## broker that have not been written by an output.
-# ##
-# ## This value needs to be picked with awareness of the agent's
-# ## metric_batch_size value as well. Setting max undelivered messages too high
-# ## can result in a constant stream of data batches to the output. While
-# ## setting it too low may never flush the broker's messages.
-# # max_undelivered_messages = 1000
-#
-# ## Persistent session disables clearing of the client session on connection.
-# ## In order for this option to work you must also set client_id to identify
-# ## the client. To receive messages that arrived while the client is offline,
-# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
-# ## publishing. Finally, using a persistent session will use the initial
-# ## connection topics and not subscribe to any new topics even after
-# ## reconnecting or restarting without a change in client ID.
-# # persistent_session = false
-#
-# ## If unset, a random client ID will be generated.
- client_id = "telegraf"
-#
-# ## Username and password to connect MQTT server.
- username = "telegraf"
- password = "tnTXzNZzCqjQgVgK"
-#
-# ## Optional TLS Config
- tls_ca = "/etc/ssl/certs/mqtt-broker.fritz.box.crt"
-# # tls_cert = "/etc/telegraf/cert.pem"
-# # tls_key = "/etc/telegraf/key.pem"
-# ## Use TLS but skip chain & host verification
- insecure_skip_verify = false
-#
-# ## Client trace messages
-# ## When set to true, and debug mode enabled in the agent settings, the MQTT
-# ## client's messages are included in telegraf logs. These messages are very
-# ## noisy, but essential for debugging issues.
-# # client_trace = false
-#
-# ## Data format to consume.
-# ## Each data format has its own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- data_format = "json"
-# json_query = "value, . *"
-# data_type = "float"
-#
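-# ## Illustrative note (not part of the original file): with
-# ## data_format = "json", a payload such as {"value": 23.5} published to
-# ## "temp/kitchen" is parsed into the field value=23.5 and, with the default
-# ## topic_tag, tagged topic=temp/kitchen.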
-
-
-# ## Enable extracting tag values from MQTT topics
-# ## _ denotes an ignored entry in the topic path
-# [[inputs.mqtt_consumer.topic_parsing]]
-#   topic = "watermeter/m3/#"
-#   measurement = "_/measurement/_"
-#   [inputs.mqtt_consumer.topic_parsing.types]
-#     test = "float"
-
-
-# # Read metrics from NATS subject(s)
-# [[inputs.nats_consumer]]
-# ## urls of NATS servers
-# servers = ["nats://localhost:4222"]
-#
-# ## subject(s) to consume
-# ## If you use jetstream you need to set the subjects
-# ## in jetstream_subjects
-# subjects = ["telegraf"]
-#
-# ## jetstream subjects
-# ## jetstream is a streaming technology inside of nats.
-# ## With jetstream the nats-server persists messages and
-# ## a consumer can consume historical messages. This is
-# ## useful so that when telegraf restarts it doesn't miss
-# ## messages. You need to configure the nats-server.
-# ## https://docs.nats.io/nats-concepts/jetstream.
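-# ## Illustrative only (hypothetical subject names, not from the original
-# ## file): NATS subjects may use wildcards, e.g.
-# ##   jetstream_subjects = ["telegraf.>", "metrics.*"]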
-# jetstream_subjects = ["js_telegraf"] -# -# ## name a queue group -# queue_group = "telegraf_consumers" -# -# ## Optional authentication with username and password credentials -# # username = "" -# # password = "" -# -# ## Optional authentication with NATS credentials file (NATS 2.0) -# # credentials = "/etc/telegraf/nats.creds" -# -# ## Optional authentication with nkey seed file (NATS 2.0) -# # nkey_seed = "/etc/telegraf/seed.txt" -# -# ## Use Transport Layer Security -# # secure = false -# -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Sets the limits for pending msgs and bytes for each subscription -# ## These shouldn't need to be adjusted except in very high throughput scenarios -# # pending_message_limit = 65536 -# # pending_bytes_limit = 67108864 -# -# ## Max undelivered messages -# ## This plugin uses tracking metrics, which ensure messages are read to -# ## outputs before acknowledging them to the original broker to ensure data -# ## is not lost. This option sets the maximum messages to read from the -# ## broker that have not been written by an output. -# ## -# ## This value needs to be picked with awareness of the agent's -# ## metric_batch_size value as well. Setting max undelivered messages too high -# ## can result in a constant stream of data batches to the output. While -# ## setting it too low may never flush the broker's messages. -# # max_undelivered_messages = 1000 -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "json" - - -# # Netflow v5, Netflow v9 and IPFIX collector -# [[inputs.netflow]] -# ## Address to listen for netflow,ipfix or sflow packets. -# ## example: service_address = "udp://:2055" -# ## service_address = "udp4://:2055" -# ## service_address = "udp6://:2055" -# service_address = "udp://:2055" -# -# ## Set the size of the operating system's receive buffer. -# ## example: read_buffer_size = "64KiB" -# ## Uses the system's default if not set. -# # read_buffer_size = "" -# -# ## Protocol version to use for decoding. -# ## Available options are -# ## "ipfix" -- IPFIX / Netflow v10 protocol (also works for Netflow v9) -# ## "netflow v5" -- Netflow v5 protocol -# ## "netflow v9" -- Netflow v9 protocol (also works for IPFIX) -# ## "sflow v5" -- sFlow v5 protocol -# # protocol = "ipfix" -# -# ## Private Enterprise Numbers (PEN) mappings for decoding -# ## This option allows to specify vendor-specific mapping files to use during -# ## decoding. -# # private_enterprise_number_files = [] -# -# ## Dump incoming packets to the log -# ## This can be helpful to debug parsing issues. Only active if -# ## Telegraf is in debug mode. -# # dump_packets = false - - -# # Read metrics from NSQD topic(s) -# [[inputs.nsq_consumer]] -# ## Server option still works but is deprecated, we just prepend it to the nsqd array. 
-# # server = "localhost:4150" -# -# ## An array representing the NSQD TCP HTTP Endpoints -# nsqd = ["localhost:4150"] -# -# ## An array representing the NSQLookupd HTTP Endpoints -# nsqlookupd = ["localhost:4161"] -# topic = "telegraf" -# channel = "consumer" -# max_in_flight = 100 -# -# ## Max undelivered messages -# ## This plugin uses tracking metrics, which ensure messages are read to -# ## outputs before acknowledging them to the original broker to ensure data -# ## is not lost. This option sets the maximum messages to read from the -# ## broker that have not been written by an output. -# ## -# ## This value needs to be picked with awareness of the agent's -# ## metric_batch_size value as well. Setting max undelivered messages too high -# ## can result in a constant stream of data batches to the output. While -# ## setting it too low may never flush the broker's messages. -# # max_undelivered_messages = 1000 -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" - - -# # Retrieve data from OPCUA devices -# [[inputs.opcua_listener]] -# ## Metric name -# # name = "opcua_listener" -# # -# ## OPC UA Endpoint URL -# # endpoint = "opc.tcp://localhost:4840" -# # -# ## Maximum time allowed to establish a connect to the endpoint. -# # connect_timeout = "10s" -# # -# ## Behavior when we fail to connect to the endpoint on initialization. Valid options are: -# ## "error": throw an error and exits Telegraf -# ## "ignore": ignore this plugin if errors are encountered -# # "retry": retry connecting at each interval -# # connect_fail_behavior = "error" -# # -# ## Maximum time allowed for a request over the established connection. -# # request_timeout = "5s" -# # -# ## The interval at which the server should at least update its monitored items -# # subscription_interval = "100ms" -# # -# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", -# ## "Basic256Sha256", or "auto" -# # security_policy = "auto" -# # -# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" -# # security_mode = "auto" -# # -# ## Path to cert.pem. Required when security mode or policy isn't "None". -# ## If cert path is not supplied, self-signed cert and key will be generated. -# # certificate = "/etc/telegraf/cert.pem" -# # -# ## Path to private key.pem. Required when security mode or policy isn't "None". -# ## If key path is not supplied, self-signed cert and key will be generated. -# # private_key = "/etc/telegraf/key.pem" -# # -# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To -# ## authenticate using a specific ID, select 'Certificate' or 'UserName' -# # auth_method = "Anonymous" -# # -# ## Username. Required for auth_method = "UserName" -# # username = "" -# # -# ## Password. Required for auth_method = "UserName" -# # password = "" -# # -# ## Option to select the metric timestamp to use. 
-# ## Valid options are:
-# ##   "gather" -- uses the time of receiving the data in telegraf
-# ##   "server" -- uses the timestamp provided by the server
-# ##   "source" -- uses the timestamp provided by the source
-# # timestamp = "gather"
-# #
-# ## The default timestamp format is RFC3339Nano.
-# # Other timestamp layouts can be configured using the Go language time
-# # layout specification from https://golang.org/pkg/time/#Time.Format
-# # e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00"
-# # timestamp_format = ""
-# #
-# #
-# ## Client trace messages
-# ## When set to true, and debug mode enabled in the agent settings, the OPCUA
-# ## client's messages are included in telegraf logs. These messages are very
-# ## noisy, but essential for debugging issues.
-# # client_trace = false
-# #
-# ## Include additional Fields in each metric
-# ## Available options are:
-# ##   DataType -- OPC-UA Data Type (string)
-# # optional_fields = []
-# #
-# ## Node ID configuration
-# ## name              - field name to use in the output
-# ## namespace         - OPC UA namespace of the node (integer value 0 thru 3)
-# ## identifier_type   - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
-# ## identifier        - OPC UA ID (tag as shown in opcua browser)
-# ## default_tags      - extra tags to be added to the output metric (optional)
-# ## monitoring_params - additional settings for the monitored node (optional)
-# ##
-# ## Monitoring parameters
-# ## sampling_interval  - interval at which the server should check for data
-# ##                      changes (default: 0s)
-# ## queue_size         - size of the notification queue (default: 10)
-# ## discard_oldest     - how notifications should be handled in case of full
-# ##                      notification queues, possible values:
-# ##                        true: oldest value added to queue gets replaced with new (default)
-# ##                        false: last value added to queue gets replaced with new
-# ## data_change_filter - defines the condition under which a notification should
-# ##                      be reported
-# ##
-# ## Data change filter
-# ## trigger        - specify the conditions under which a data change notification
-# ##                  should be reported, possible values:
-# ##                    "Status": only report notifications if the status changes
-# ##                              (default if parameter is omitted)
-# ##                    "StatusValue": report notifications if either status or value changes
-# ##                    "StatusValueTimestamp": report notifications if either status,
-# ##                                            value or timestamp changes
-# ## deadband_type  - type of the deadband filter to be applied, possible values:
-# ##                    "Absolute": absolute change in a data value to report a notification
-# ##                    "Percent": works only with nodes that have an EURange property set
-# ##                               and is defined as: send notification if
-# ##                               (last value - current value) >
-# ##                               (deadband_value/100.0) * ((high–low) of EURange)
-# ## deadband_value - value for deadband_type, must be a float value; no filter is set
-# ##                  for negative values
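-# ## Illustrative example (numbers not from the original file): with an
-# ## EURange of 0..100 and deadband_value = 5.0, a "Percent" deadband only
-# ## reports changes larger than (5.0/100.0) * (100-0) = 5 units.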
-# ##
-# ## Use either the inline notation or the bracketed notation, not both.
-# #
-# ## Inline notation (default_tags not supported yet)
-# # nodes = [
-# #  {name="node1", namespace="", identifier_type="", identifier=""},
-# #  {name="node2", namespace="", identifier_type="", identifier="", monitoring_params={sampling_interval="0s", queue_size=10, discard_oldest=true, data_change_filter={trigger="Status", deadband_type="Absolute", deadband_value=0.0}}},
-# # ]
-# #
-# ## Bracketed notation
-# # [[inputs.opcua_listener.nodes]]
-# #   name = "node1"
-# #   namespace = ""
-# #   identifier_type = ""
-# #   identifier = ""
-# #   default_tags = { tag1 = "value1", tag2 = "value2" }
-# #
-# # [[inputs.opcua_listener.nodes]]
-# #   name = "node2"
-# #   namespace = ""
-# #   identifier_type = ""
-# #   identifier = ""
-# #
-# #   [inputs.opcua_listener.nodes.monitoring_params]
-# #     sampling_interval = "0s"
-# #     queue_size = 10
-# #     discard_oldest = true
-# #
-# #     [inputs.opcua_listener.nodes.monitoring_params.data_change_filter]
-# #       trigger = "Status"
-# #       deadband_type = "Absolute"
-# #       deadband_value = 0.0
-# #
-# ## Node Group
-# ## Sets defaults so they aren't required in every node.
-# ## Default values can be set for:
-# ## * Metric name
-# ## * OPC UA namespace
-# ## * Identifier
-# ## * Default tags
-# ## * Sampling interval
-# ##
-# ## Multiple node groups are allowed
-# #[[inputs.opcua_listener.group]]
-# ## Group Metric name. Overrides the top level name. If unset, the
-# ## top level name is used.
-# # name =
-# #
-# ## Group default namespace. If a node in the group doesn't set its
-# ## namespace, this is used.
-# # namespace =
-# #
-# ## Group default identifier type. If a node in the group doesn't set its
-# ## identifier type, this is used.
-# # identifier_type =
-# #
-# ## Default tags that are applied to every node in this group. Can be
-# ## overwritten in a node by setting a different value for the tag name.
-# ##   example: default_tags = { tag1 = "value1" }
-# # default_tags = {}
-# #
-# ## Group default sampling interval. If a node in the group doesn't set its
-# ## sampling interval, this is used.
-# # sampling_interval = "0s"
-# #
-# ## Node ID Configuration. Array of nodes with the same settings as above.
-# ## Use either the inline notation or the bracketed notation, not both.
-# # -# ## Inline notation (default_tags not supported yet) -# # nodes = [ -# # {name="node1", namespace="", identifier_type="", identifier="",} -# # {name="node2", namespace="", identifier_type="", identifier="", monitoring_params={sampling_interval="0s", queue_size=10, discard_oldest=true, data_change_filter={trigger="Status", deadband_type="Absolute", deadband_value=0.0}}}, -# #] -# # -# ## Bracketed notation -# # [[inputs.opcua_listener.group.nodes]] -# # name = "node1" -# # namespace = "" -# # identifier_type = "" -# # identifier = "" -# # default_tags = { tag1 = "override1", tag2 = "value2" } -# # -# # [[inputs.opcua_listener.group.nodes]] -# # name = "node2" -# # namespace = "" -# # identifier_type = "" -# # identifier = "" -# # -# # [inputs.opcua_listener.group.nodes.monitoring_params] -# # sampling_interval = "0s" -# # queue_size = 10 -# # discard_oldest = true -# # -# # [inputs.opcua_listener.group.nodes.monitoring_params.data_change_filter] -# # trigger = "Status" -# # deadband_type = "Absolute" -# # deadband_value = 0.0 -# # -# -# ## Enable workarounds required by some devices to work correctly -# # [inputs.opcua_listener.workarounds] -# ## Set additional valid status codes, StatusOK (0x0) is always considered valid -# # additional_valid_status_codes = ["0xC0"] -# -# # [inputs.opcua_listener.request_workarounds] -# ## Use unregistered reads instead of registered reads -# # use_unregistered_reads = false - - -# # Collects performance metrics from OpenStack services -# [[inputs.openstack]] -# ## The recommended interval to poll is '30m' -# -# ## The identity endpoint to authenticate against and get the service catalog from. -# authentication_endpoint = "https://my.openstack.cloud:5000" -# -# ## The domain to authenticate against when using a V3 identity endpoint. -# # domain = "default" -# -# ## The project to authenticate as. -# # project = "admin" -# -# ## User authentication credentials. Must have admin rights. -# username = "admin" -# password = "password" -# -# ## Available services are: -# ## "agents", "aggregates", "cinder_services", "flavors", "hypervisors", "networks", -# ## "nova_services", "ports", "projects", "servers", "services", "stacks", "storage_pools", -# ## "subnets", "volumes" -# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] -# -# ## Collect Server Diagnostics -# # server_diagnotics = false -# -# ## output secrets (such as adminPass(for server) and UserID(for volume)). -# # output_secrets = false -# -# ## Amount of time allowed to complete the HTTP(s) request. -# # timeout = "5s" -# -# ## HTTP Proxy support -# # http_proxy_url = "" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Options for tags received from Openstack -# # tag_prefix = "openstack_tag_" -# # tag_value = "true" -# -# ## Timestamp format for timestamp data received from Openstack. -# ## If false format is unix nanoseconds. 
-# # human_readable_timestamps = false -# -# ## Measure Openstack call duration -# # measure_openstack_requests = false - - -# # Receive OpenTelemetry traces, metrics, and logs over gRPC -# [[inputs.opentelemetry]] -# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service -# ## address:port -# # service_address = "0.0.0.0:4317" -# -# ## Override the default (5s) new connection timeout -# # timeout = "5s" -# -# ## Override the default span attributes to be used as line protocol tags. -# ## These are always included as tags: -# ## - trace ID -# ## - span ID -# ## The default values are strongly recommended for use with Jaeger: -# ## - service.name -# ## - span.name -# ## Other common attributes can be found here: -# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv -# # span_dimensions = ["service.name", "span.name"] -# -# ## Override the default log record attributes to be used as line protocol tags. -# ## These are always included as tags, if available: -# ## - trace ID -# ## - span ID -# ## The default values: -# ## - service.name -# ## Other common attributes can be found here: -# ## - https://github.com/open-telemetry/opentelemetry-collector/tree/main/semconv -# ## When using InfluxDB for both logs and traces, be certain that log_record_dimensions -# ## matches the span_dimensions value. -# # log_record_dimensions = ["service.name"] -# -# ## Override the default (prometheus-v1) metrics schema. -# ## Supports: "prometheus-v1", "prometheus-v2" -# ## For more information about the alternatives, read the Prometheus input -# ## plugin notes. -# # metrics_schema = "prometheus-v1" -# -# ## Optional TLS Config. -# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md -# ## -# ## Set one or more allowed client CA certificate file names to -# ## enable mutually authenticated TLS connections. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# ## Add service certificate and key. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" - - -# # Read metrics from one or many pgbouncer servers -# [[inputs.pgbouncer]] -# ## specify address via a url matching: -# ## postgres://[pqgotest[:password]]@host:port[/dbname]\ -# ## ?sslmode=[disable|verify-ca|verify-full] -# ## or a simple string: -# ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production -# ## -# ## All connection parameters are optional. -# ## -# address = "host=localhost user=pgbouncer sslmode=disable" -# -# ## Specify which "show" commands to gather metrics for. -# ## Choose from: "stats", "pools", "lists", "databases" -# # show_commands = ["stats", "pools"] - - -# # Read metrics from one or many postgresql servers -# [[inputs.postgresql]] -# ## Specify address via a url matching: -# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]&statement_timeout=... -# ## or a simple string: -# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production -# ## Users can pass the path to the socket as the host value to use a socket -# ## connection (e.g. `/var/run/postgresql`). -# ## -# ## All connection parameters are optional. -# ## -# ## Without the dbname parameter, the driver will default to a database -# ## with the same name as the user. This dbname is just for instantiating a -# ## connection with the server and doesn't restrict the databases we are trying -# ## to grab metrics for. 
-# ##
-# address = "host=localhost user=postgres sslmode=disable"
-#
-# ## A custom name for the database that will be used as the "server" tag in the
-# ## measurement output. If not specified, a default one generated from
-# ## the connection address is used.
-# # outputaddress = "db01"
-#
-# ## connection configuration.
-# ## maxlifetime - specify the maximum lifetime of a connection.
-# ## default is forever (0s)
-# ##
-# ## Note that this does not interrupt queries, the lifetime will not be enforced
-# ## whilst a query is running
-# # max_lifetime = "0s"
-#
-# ## A list of databases to explicitly ignore. If not specified, metrics for all
-# ## databases are gathered. Do NOT use with the 'databases' option.
-# # ignored_databases = ["postgres", "template0", "template1"]
-#
-# ## A list of databases to pull metrics about. If not specified, metrics for all
-# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
-# # databases = ["app_production", "testing"]
-#
-# ## Whether to use prepared statements when connecting to the database.
-# ## This should be set to false when connecting through a PgBouncer instance
-# ## with pool_mode set to transaction.
-# prepared_statements = true
-
-
-# # Read metrics from one or many postgresql servers
-# [[inputs.postgresql_extensible]]
-# # specify address via a url matching:
-# #   postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...&statement_timeout=...
-# # or a simple string:
-# #   host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
-# #
-# # All connection parameters are optional.
-# # Without the dbname parameter, the driver will default to a database
-# # with the same name as the user. This dbname is just for instantiating a
-# # connection with the server and doesn't restrict the databases we are trying
-# # to grab metrics for.
-# #
-# address = "host=localhost user=postgres sslmode=disable"
-#
-# ## A list of databases to pull metrics about.
-# ## deprecated in 1.22.3; use the sqlquery option to specify database to use
-# # databases = ["app_production", "testing"]
-#
-# ## Whether to use prepared statements when connecting to the database.
-# ## This should be set to false when connecting through a PgBouncer instance
-# ## with pool_mode set to transaction.
-# prepared_statements = true
-#
-# # Define the toml config where the sql queries are stored
-# # The script option can be used to specify the .sql file path.
-# # If script and sqlquery options specified at same time, sqlquery will be used
-# #
-# # the measurement field defines measurement name for metrics produced
-# # by the query. Default is "postgresql".
-# #
-# # the tagvalue field is used to define custom tags (separated by commas).
-# # the query is expected to return columns which match the names of the
-# # defined tags. The values in these columns must be of a string-type,
-# # a number-type or a blob-type.
-# #
-# # The timestamp field is used to override the data points timestamp value. By
-# # default, all rows are inserted with the current time. By setting a timestamp column,
-# # the row will be inserted with that column's value.
-# #
-# # The min_version field specifies minimal database version this query
-# # will run on.
-# #
-# # The max_version field when set specifies maximal database version
-# # this query will NOT run on.
-# #
-# # Database version in `minversion` and `maxversion` is represented as
-# # a single integer without last component, for example:
-# # 9.6.2 -> 906
-# # 15.2 -> 1500
-# #
-# # Structure :
-# # [[inputs.postgresql_extensible.query]]
-# #   measurement string
-# #   sqlquery string
-# #   min_version int
-# #   max_version int
-# #   withdbname boolean
-# #   tagvalue string (comma separated)
-# #   timestamp string
-# [[inputs.postgresql_extensible.query]]
-#   measurement="pg_stat_database"
-#   sqlquery="SELECT * FROM pg_stat_database where datname"
-#   min_version=901
-#   tagvalue=""
-# [[inputs.postgresql_extensible.query]]
-#   script="your_sql-filepath.sql"
-#   min_version=901
-#   max_version=1300
-#   tagvalue=""
-
-
-# # Read metrics from one or many prometheus clients
-# [[inputs.prometheus]]
-# ## An array of urls to scrape metrics from.
-# urls = ["http://localhost:9100/metrics"]
-#
-# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics.
-# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
-# ## Valid options: 1, 2
-# # metric_version = 1
-#
-# ## Url tag name (tag containing scraped url. optional, default is "url")
-# # url_tag = "url"
-#
-# ## Whether the timestamp of the scraped metrics will be ignored.
-# ## If set to true, the gather time will be used.
-# # ignore_timestamp = false
-#
-# ## An array of Kubernetes services to scrape metrics from.
-# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
-#
-# ## Kubernetes config file to create client from.
-# # kube_config = "/path/to/kubernetes.config"
-#
-# ## Scrape Pods
-# ## Enable scraping of k8s pods. Further settings as to which pods to scrape
-# ## are determined by the 'method' option below. When enabled, the default is
-# ## to use annotations to determine whether to scrape or not.
-# # monitor_kubernetes_pods = false
-#
-# ## Scrape Pods Method
-# ## annotations: default, looks for specific pod annotations documented below
-# ## settings: only look for pods matching the settings provided, not
-# ##   annotations
-# ## settings+annotations: looks at pods that match annotations using the user
-# ##   defined settings
-# # monitor_kubernetes_pods_method = "annotations"
-#
-# ## Scrape Pods 'annotations' method options
-# ## If the method is set to 'annotations' or 'settings+annotations', these
-# ## annotation flags are looked for:
-# ## - prometheus.io/scrape: Required to enable scraping for this pod. Can also
-# ##   use 'prometheus.io/scrape=false' annotation to opt-out entirely.
-# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will
-# ##   need to set this to 'https' & most likely set the tls config
-# ## - prometheus.io/path: If the metrics path is not /metrics, define it with
-# ##   this annotation
-# ## - prometheus.io/port: If port is not 9102 use this annotation
-#
-# ## Scrape Pods 'settings' method options
-# ## When using 'settings' or 'settings+annotations', the default values for
-# ## annotations can be modified using the following options:
-# # monitor_kubernetes_pods_scheme = "http"
-# # monitor_kubernetes_pods_port = "9102"
-# # monitor_kubernetes_pods_path = "/metrics"
-#
-# ## Get the list of pods to scrape with either the scope of
-# ## - cluster: the kubernetes watch api (default, no need to specify)
-# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
-# # pod_scrape_scope = "cluster" -# -# ## Only for node scrape scope: node IP of the node that telegraf is running on. -# ## Either this config or the environment variable NODE_IP must be set. -# # node_ip = "10.180.1.1" -# -# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. -# ## Default is 60 seconds. -# # pod_scrape_interval = 60 -# -# ## Content length limit -# ## When set, telegraf will drop responses with length larger than the configured value. -# ## Default is "0KB" which means unlimited. -# # content_length_limit = "0KB" -# -# ## Restricts Kubernetes monitoring to a single namespace -# ## ex: monitor_kubernetes_pods_namespace = "default" -# # monitor_kubernetes_pods_namespace = "" -# ## The name of the label for the pod that is being scraped. -# ## Default is 'namespace' but this can conflict with metrics that have the label 'namespace' -# # pod_namespace_label_name = "namespace" -# # label selector to target pods which have the label -# # kubernetes_label_selector = "env=dev,app=nginx" -# # field selector to target pods -# # eg. To scrape pods on a specific node -# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" -# -# ## Filter which pod annotations and labels will be added to metric tags -# # -# # pod_annotation_include = ["annotation-key-1"] -# # pod_annotation_exclude = ["exclude-me"] -# # pod_label_include = ["label-key-1"] -# # pod_label_exclude = ["exclude-me"] -# -# # cache refresh interval to set the interval for re-sync of pods list. -# # Default is 60 minutes. -# # cache_refresh_interval = 60 -# -# ## Scrape Services available in Consul Catalog -# # [inputs.prometheus.consul] -# # enabled = true -# # agent = "http://localhost:8500" -# # query_interval = "5m" -# -# # [[inputs.prometheus.consul.query]] -# # name = "a service name" -# # tag = "a service tag" -# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' -# # [inputs.prometheus.consul.query.tags] -# # host = "{{.Node}}" -# -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# # bearer_token = "/path/to/bearer/token" -# ## OR -# # bearer_token_string = "abc_123" -# -# ## HTTP Basic Authentication username and password. ('bearer_token' and -# ## 'bearer_token_string' take priority) -# # username = "" -# # password = "" -# -# ## Optional custom HTTP headers -# # http_headers = {"X-Special-Header" = "Special-Value"} -# -# ## Specify timeout duration for slower prometheus clients (default is 5s) -# # timeout = "5s" -# -# ## deprecated in 1.26; use the timeout option -# # response_timeout = "5s" -# -# ## HTTP Proxy support -# # use_system_proxy = false -# # http_proxy_url = "" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false -# -# ## Use the given name as the SNI server name on each URL -# # tls_server_name = "myhost.example.org" -# -# ## TLS renegotiation method, choose from "never", "once", "freely" -# # tls_renegotiation_method = "never" -# -# ## Enable/disable TLS -# ## Set to true/false to enforce TLS being enabled/disabled. If not set, -# ## enable TLS only if any of the other options are specified. -# # tls_enable = true -# -# ## This option allows you to report the status of prometheus requests. 
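For a quick end-to-end test of the `[[inputs.prometheus]]` section above (which scrapes `urls = ["http://localhost:9100/metrics"]`), a minimal scrape target can be stood up with the third-party `prometheus_client` package. Illustrative only; the metric name is made up and the script is not part of this repository:

```python
import time
from prometheus_client import Counter, start_http_server

# Serve /metrics on :9100 so the urls entry above has something to scrape.
requests_total = Counter("myapp_requests_total", "Requests served by myapp")
start_http_server(9100)
while True:
    requests_total.inc()
    time.sleep(5)
```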
-# # enable_request_metrics = false -# -# ## Control pod scraping based on pod namespace annotations -# ## Pass and drop here act like tagpass and tagdrop, but instead -# ## of filtering metrics they filters pod candidates for scraping -# #[inputs.prometheus.namespace_annotation_pass] -# # annotation_key = ["value1", "value2"] -# #[inputs.prometheus.namespace_annotation_drop] -# # some_annotation_key = ["dont-scrape"] - - -# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). -# # This plugin ONLY supports Linux on 386, amd64, arm, and arm64 -# [[inputs.ras]] -# ## Optional path to RASDaemon sqlite3 database. -# ## Default: /var/lib/rasdaemon/ras-mc_event.db -# # db_path = "" - - -# # Read metrics from one or many redis servers -# [[inputs.redis]] -# ## specify servers via a url matching: -# ## [protocol://][username:password]@address[:port] -# ## e.g. -# ## tcp://localhost:6379 -# ## tcp://username:password@192.168.99.100 -# ## unix:///var/run/redis.sock -# ## -# ## If no servers are specified, then localhost is used as the host. -# ## If no port is specified, 6379 is used -# servers = ["tcp://localhost:6379"] -# -# ## Optional. Specify redis commands to retrieve values -# # [[inputs.redis.commands]] -# # # The command to run where each argument is a separate element -# # command = ["get", "sample-key"] -# # # The field to store the result in -# # field = "sample-key-value" -# # # The type of the result -# # # Can be "string", "integer", or "float" -# # type = "string" -# -# ## Specify username and password for ACL auth (Redis 6.0+). You can add this -# ## to the server URI above or specify it here. The values here take -# ## precedence. -# # username = "" -# # password = "" -# -# ## Optional TLS Config -# ## Check tls/config.go ClientConfig for more options -# # tls_enable = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = true - - -# # Riemann protobuff listener -# [[inputs.riemann_listener]] -# ## URL to listen on -# ## Default is "tcp://:5555" -# # service_address = "tcp://:8094" -# # service_address = "tcp://127.0.0.1:http" -# # service_address = "tcp4://:8094" -# # service_address = "tcp6://:8094" -# # service_address = "tcp6://[2001:db8::1]:8094" -# -# ## Maximum number of concurrent connections. -# ## 0 (default) is unlimited. -# # max_connections = 1024 -# ## Read timeout. -# ## 0 (default) is unlimited. -# # read_timeout = "30s" -# ## Optional TLS configuration. -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# ## Maximum socket buffer size (in bytes when no unit specified). -# # read_buffer_size = "64KiB" -# ## Period between keep alive probes. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# # keep_alive_period = "5m" - - -# # Plugin for retrieving data from Siemens PLCs via the S7 protocol (RFC1006) -# [[inputs.s7comm]] -# ## Parameters to contact the PLC (mandatory) -# ## The server is in the [:port] format where the port defaults to 102 -# ## if not explicitly specified. -# server = "127.0.0.1:102" -# rack = 0 -# slot = 0 -# -# ## Max count of fields to be bundled in one batch-request. 
-# ## (PDU size)
-# # pdu_size = 20
-#
-# ## Timeout for requests
-# # timeout = "10s"
-#
-# ## Log detailed connection messages for debugging
-# ## This option only has an effect when Telegraf runs in debug mode
-# # debug_connection = false
-#
-# ## Metric definition(s)
-# [[inputs.s7comm.metric]]
-# ## Name of the measurement
-# # name = "s7comm"
-#
-# ## Field definitions
-# ## name    - field name
-# ## address - indirect address "<area>.<type><address>[.extra]"
-# ##           area    - e.g. "DB1" for data-block one
-# ##           type    - supported types are (uppercase)
-# ##                     X  -- bit, requires the bit-number as 'extra'
-# ##                           parameter
-# ##                     B  -- byte (8 bit)
-# ##                     C  -- character (8 bit)
-# ##                     W  -- word (16 bit)
-# ##                     DW -- double word (32 bit)
-# ##                     I  -- integer (16 bit)
-# ##                     DI -- double integer (32 bit)
-# ##                     R  -- IEEE 754 real floating point number (32 bit)
-# ##                     DT -- date-time, always converted to unix timestamp
-# ##                           with nano-second precision
-# ##                     S  -- string, requires the maximum length of the
-# ##                           string as 'extra' parameter
-# ##           address - start address to read if not specified otherwise
-# ##                     in the type field
-# ##           extra   - extra parameter e.g. for the bit and string type
-# fields = [
-#   { name="rpm", address="DB1.R4" },
-#   { name="status_ok", address="DB1.X2.1" },
-#   { name="last_error", address="DB2.S1.32" },
-#   { name="last_error_time", address="DB2.DT2" }
-# ]
-#
-# ## Tags assigned to the metric
-# # [inputs.s7comm.metric.tags]
-# # device = "compressor"
-# # location = "main building"
-
-
-# # SFlow V5 Protocol Listener
-# [[inputs.sflow]]
-# ## Address to listen for sFlow packets.
-# ## example: service_address = "udp://:6343"
-# ##          service_address = "udp4://:6343"
-# ##          service_address = "udp6://:6343"
-# service_address = "udp://:6343"
-#
-# ## Set the size of the operating system's receive buffer.
-# ## example: read_buffer_size = "64KiB"
-# # read_buffer_size = ""
-
-
-# # Receive SNMP traps
-# [[inputs.snmp_trap]]
-# ## Transport, local address, and port to listen on. Transport must
-# ## be "udp://". Omit local address to listen on all interfaces.
-# ## example: "udp://127.0.0.1:1234"
-# ##
-# ## Special permissions may be required to listen on a port less than
-# ## 1024. See README.md for details
-# ##
-# # service_address = "udp://:162"
-# ##
-# ## Path to mib files
-# ## Used by the gosmi translator.
-# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
-# # path = ["/usr/share/snmp/mibs"]
-# ##
-# ## Deprecated in 1.20.0; no longer running snmptranslate
-# ## Timeout running snmptranslate command
-# # timeout = "5s"
-# ## Snmp version; one of "1", "2c" or "3".
-# # version = "2c"
-# ## SNMPv3 authentication and encryption options.
-# ##
-# ## Security Name.
-# # sec_name = "myuser"
-# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
-# # auth_protocol = "MD5"
-# ## Authentication password.
-# # auth_password = "pass"
-# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
-# # sec_level = "authNoPriv"
-# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
-# # priv_protocol = ""
-# ## Privacy password used for encrypted messages.
-# # priv_password = ""
-
-
-# # Generic socket listener capable of handling multiple socket types.
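The s7comm field addresses above ("DB1.R4", "DB1.X2.1", "DB2.S1.32") follow the `"<area>.<type><address>[.extra]"` scheme. A rough, hypothetical Python sketch of how such an address decomposes, for illustration only (Telegraf's own parser is written in Go):

```python
import re

# Decompose an s7comm-style address such as "DB1.X2.1" into its parts.
S7_ADDRESS = re.compile(
    r"^(?P<area>[A-Z]+\d*)\."
    r"(?P<type>DW|DI|DT|X|B|C|W|I|R|S)"
    r"(?P<address>\d+)"
    r"(?:\.(?P<extra>\d+))?$"
)

def parse_s7_address(addr: str) -> dict:
    match = S7_ADDRESS.match(addr)
    if match is None:
        raise ValueError(f"not a valid s7comm address: {addr!r}")
    return match.groupdict()

print(parse_s7_address("DB1.X2.1"))
# -> {'area': 'DB1', 'type': 'X', 'address': '2', 'extra': '1'}
```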
-# [[inputs.socket_listener]] -# ## URL to listen on -# # service_address = "tcp://:8094" -# # service_address = "tcp://127.0.0.1:http" -# # service_address = "tcp4://:8094" -# # service_address = "tcp6://:8094" -# # service_address = "tcp6://[2001:db8::1]:8094" -# # service_address = "udp://:8094" -# # service_address = "udp4://:8094" -# # service_address = "udp6://:8094" -# # service_address = "unix:///tmp/telegraf.sock" -# # service_address = "unixgram:///tmp/telegraf.sock" -# # service_address = "vsock://cid:port" -# -# ## Permission for unix sockets (only available on unix sockets) -# ## This setting may not be respected by some platforms. To safely restrict -# ## permissions it is recommended to place the socket into a previously -# ## created directory with the desired permissions. -# ## ex: socket_mode = "777" -# # socket_mode = "" -# -# ## Maximum number of concurrent connections (only available on stream sockets like TCP) -# ## Zero means unlimited. -# # max_connections = 0 -# -# ## Read timeout (only available on stream sockets like TCP) -# ## Zero means unlimited. -# # read_timeout = "0s" -# -# ## Optional TLS configuration (only available on stream sockets like TCP) -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Maximum socket buffer size (in bytes when no unit specified) -# ## For stream sockets, once the buffer fills up, the sender will start -# ## backing up. For datagram sockets, once the buffer fills up, metrics will -# ## start dropping. Defaults to the OS default. -# # read_buffer_size = "64KiB" -# -# ## Period between keep alive probes (only applies to TCP sockets) -# ## Zero disables keep alive probes. Defaults to the OS configuration. -# # keep_alive_period = "5m" -# -# ## Content encoding for message payloads -# ## Can be set to "gzip" for compressed payloads or "identity" for no encoding. -# # content_encoding = "identity" -# -# ## Maximum size of decoded packet (in bytes when no unit specified) -# # max_decompression_size = "500MB" -# -# ## Message splitting strategy and corresponding settings for stream sockets -# ## (tcp, tcp4, tcp6, unix or unixpacket). The setting is ignored for packet -# ## listeners such as udp. -# ## Available strategies are: -# ## newline -- split at newlines (default) -# ## null -- split at null bytes -# ## delimiter -- split at delimiter byte-sequence in hex-format -# ## given in `splitting_delimiter` -# ## fixed length -- split after number of bytes given in `splitting_length` -# ## variable length -- split depending on length information received in the -# ## data. The length field information is specified in -# ## `splitting_length_field`. -# # splitting_strategy = "newline" -# -# ## Delimiter used to split received data to messages consumed by the parser. -# ## The delimiter is a hex byte-sequence marking the end of a message -# ## e.g. "0x0D0A", "x0d0a" or "0d0a" marks a Windows line-break (CR LF). -# ## The value is case-insensitive and can be specified with "0x" or "x" prefix -# ## or without. -# ## Note: This setting is only used for splitting_strategy = "delimiter". -# # splitting_delimiter = "" -# -# ## Fixed length of a message in bytes. -# ## Note: This setting is only used for splitting_strategy = "fixed length". -# # splitting_length = 0 -# -# ## Specification of the length field contained in the data to split messages -# ## with variable length. 
The specification contains the following fields: -# ## offset -- start of length field in bytes from begin of data -# ## bytes -- length of length field in bytes -# ## endianness -- endianness of the value, either "be" for big endian or -# ## "le" for little endian -# ## header_length -- total length of header to be skipped when passing -# ## data on to the parser. If zero (default), the header -# ## is passed on to the parser together with the message. -# ## Note: This setting is only used for splitting_strategy = "variable length". -# # splitting_length_field = {offset = 0, bytes = 0, endianness = "be", header_length = 0} -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# # data_format = "influx" - - -# # Read stats from one or more Solr servers or cores -# [[inputs.solr]] -# ## specify a list of one or more Solr servers -# servers = ["http://localhost:8983"] -# -# ## specify a list of one or more Solr cores (default - all) -# # cores = ["*"] -# -# ## Optional HTTP Basic Auth Credentials -# # username = "username" -# # password = "pa$$word" -# -# ## Timeout for HTTP requests -# # timeout = "5s" - - -# # Read metrics from SQL queries -# [[inputs.sql]] -# ## Database Driver -# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for -# ## a list of supported drivers. -# driver = "mysql" -# -# ## Data source name for connecting -# ## The syntax and supported options depends on selected driver. -# dsn = "username:password@mysqlserver:3307/dbname?param=value" -# -# ## Timeout for any operation -# ## Note that the timeout for queries is per query not per gather. -# # timeout = "5s" -# -# ## Connection time limits -# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections -# ## will not be closed automatically. If you specify a positive time, the connections will be closed after -# ## idleing or existing for at least that amount of time, respectively. -# # connection_max_idle_time = "0s" -# # connection_max_life_time = "0s" -# -# ## Connection count limits -# ## By default the number of open connections is not limited and the number of maximum idle connections -# ## will be inferred from the number of queries specified. If you specify a positive number for any of the -# ## two options, connections will be closed when reaching the specified limit. The number of idle connections -# ## will be clipped to the maximum number of connections limit if any. -# # connection_max_open = 0 -# # connection_max_idle = auto -# -# ## Specifies plugin behavior regarding disconnected servers -# ## Available choices : -# ## - error: telegraf will return an error on startup if one the servers is unreachable -# ## - ignore: telegraf will ignore unreachable servers on both startup and gather -# # disconnected_servers_behavior = "error" -# -# [[inputs.sql.query]] -# ## Query to perform on the server -# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" -# ## Alternatively to specifying the query directly you can select a file here containing the SQL query. -# ## Only one of 'query' and 'query_script' can be specified! -# # query_script = "/path/to/sql/script.sql" -# -# ## Name of the measurement -# ## In case both measurement and 'measurement_col' are given, the latter takes precedence. 
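Looking back at the socket_listener section above, the variable-length splitting strategy is the least obvious one. A hypothetical Python sketch (assuming the length field counts the payload bytes that follow it) of what `splitting_length_field = {offset, bytes, endianness, header_length}` describes:

```python
def split_variable_length(buf, offset, nbytes, endianness, header_length):
    """Cut one message off the front of `buf`, return (message, rest).

    Mirrors the splitting_length_field options: the length value sits at
    `offset`, is `nbytes` wide, big- or little-endian, and `header_length`
    bytes of header are stripped before handing the message to the parser
    (0 keeps the header attached, as the plugin docs describe).
    """
    length = int.from_bytes(buf[offset:offset + nbytes],
                            "big" if endianness == "be" else "little")
    end = offset + nbytes + length
    return buf[header_length:end], buf[end:]

# Two-byte big-endian length prefix, header stripped:
msg, rest = split_variable_length(b"\x00\x05helloworld", 0, 2, "be", 2)
assert msg == b"hello" and rest == b"world"
```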
-# # measurement = "sql" -# -# ## Column name containing the name of the measurement -# ## If given, this will take precedence over the 'measurement' setting. In case a query result -# ## does not contain the specified column, we fall-back to the 'measurement' setting. -# # measurement_column = "" -# -# ## Column name containing the time of the measurement -# ## If omitted, the time of the query will be used. -# # time_column = "" -# -# ## Format of the time contained in 'time_col' -# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. -# ## See https://golang.org/pkg/time/#Time.Format for details. -# # time_format = "unix" -# -# ## Column names containing tags -# ## An empty include list will reject all columns and an empty exclude list will not exclude any column. -# ## I.e. by default no columns will be returned as tag and the tags are empty. -# # tag_columns_include = [] -# # tag_columns_exclude = [] -# -# ## Column names containing fields (explicit types) -# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over -# ## the automatic (driver-based) conversion below. -# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. -# # field_columns_float = [] -# # field_columns_int = [] -# # field_columns_uint = [] -# # field_columns_bool = [] -# # field_columns_string = [] -# -# ## Column names containing fields (automatic types) -# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty -# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. -# ## NOTE: We rely on the database driver to perform automatic datatype conversion. -# # field_columns_include = [] -# # field_columns_exclude = [] - - -# # Read metrics from Microsoft SQL Server -# [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/microsoft/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] -# -# ## Timeout for query execution operation -# ## Note that the timeout for queries is per query not per gather. -# ## 0 value means no timeout -# # query_timeout = "0s" -# -# ## Authentication method -# ## valid methods: "connection_string", "AAD" -# # auth_method = "connection_string" -# -# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" -# database_type = "SQLServer" -# -# ## A list of queries to include. If not specified, all the below listed queries are used. -# include_query = [] -# -# ## A list of queries to explicitly ignore. 
-# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] -# -# ## Queries enabled by default for database_type = "SQLServer" are - -# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates, -# ## SQLServerRecentBackups -# -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers -# -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -# -# ## Queries enabled by default for database_type = "AzureSQLPool" are - -# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, -# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers -# -# ## Queries enabled by default for database_type = "AzureArcSQLManagedInstance" are - -# ## AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -# -# ## Following are old config settings -# ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use -# ## the new mechanism of identifying the database_type there by use it's corresponding queries -# -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -# # query_version = 2 -# -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false -# -# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". -# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". -# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. -# ## This setting/metric is optional and is disabled by default. 
-# # health_metric = false -# -# ## Possible queries across different versions of the collectors -# ## Queries enabled by default for specific Database Type -# -# ## database_type = AzureSQLDB by default collects the following queries -# ## - AzureSQLDBWaitStats -# ## - AzureSQLDBResourceStats -# ## - AzureSQLDBResourceGovernance -# ## - AzureSQLDBDatabaseIO -# ## - AzureSQLDBServerProperties -# ## - AzureSQLDBOsWaitstats -# ## - AzureSQLDBMemoryClerks -# ## - AzureSQLDBPerformanceCounters -# ## - AzureSQLDBRequests -# ## - AzureSQLDBSchedulers -# -# ## database_type = AzureSQLManagedInstance by default collects the following queries -# ## - AzureSQLMIResourceStats -# ## - AzureSQLMIResourceGovernance -# ## - AzureSQLMIDatabaseIO -# ## - AzureSQLMIServerProperties -# ## - AzureSQLMIOsWaitstats -# ## - AzureSQLMIMemoryClerks -# ## - AzureSQLMIPerformanceCounters -# ## - AzureSQLMIRequests -# ## - AzureSQLMISchedulers -# -# ## database_type = AzureSQLPool by default collects the following queries -# ## - AzureSQLPoolResourceStats -# ## - AzureSQLPoolResourceGovernance -# ## - AzureSQLPoolDatabaseIO -# ## - AzureSQLPoolOsWaitStats, -# ## - AzureSQLPoolMemoryClerks -# ## - AzureSQLPoolPerformanceCounters -# ## - AzureSQLPoolSchedulers -# -# ## database_type = SQLServer by default collects the following queries -# ## - SQLServerPerformanceCounters -# ## - SQLServerWaitStatsCategorized -# ## - SQLServerDatabaseIO -# ## - SQLServerProperties -# ## - SQLServerMemoryClerks -# ## - SQLServerSchedulers -# ## - SQLServerRequests -# ## - SQLServerVolumeSpace -# ## - SQLServerCpu -# ## - SQLServerRecentBackups -# ## and following as optional (if mentioned in the include_query list) -# ## - SQLServerAvailabilityReplicaStates -# ## - SQLServerDatabaseReplicaStates -# -# ## Version 2 by default collects the following queries -# ## Version 2 is being deprecated, please consider using database_type. -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - ServerProperties -# ## - MemoryClerk -# ## - Schedulers -# ## - SqlRequests -# ## - VolumeSpace -# ## - Cpu -# -# ## Version 1 by default collects the following queries -# ## Version 1 is deprecated, please consider using database_type. -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - CPUHistory -# ## - DatabaseIO -# ## - DatabaseSize -# ## - DatabaseStats -# ## - DatabaseProperties -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics - - -# # Statsd Server -# [[inputs.statsd]] -# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) -# protocol = "udp" -# -# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) -# max_tcp_connections = 250 -# -# ## Enable TCP keep alive probes (default=false) -# tcp_keep_alive = false -# -# ## Specifies the keep-alive period for an active network connection. -# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. -# ## Defaults to the OS configuration. -# # tcp_keep_alive_period = "2h" -# -# ## Address and port to host UDP listener on -# service_address = ":8125" -# -# ## The following configuration options control when telegraf clears it's cache -# ## of previous values. If set to false, then telegraf will only clear it's -# ## cache when the daemon is restarted. 
-# ## Reset gauges every interval (default=true) -# delete_gauges = true -# ## Reset counters every interval (default=true) -# delete_counters = true -# ## Reset sets every interval (default=true) -# delete_sets = true -# ## Reset timings & histograms every interval (default=true) -# delete_timings = true -# -# ## Enable aggregation temporality adds temporality=delta or temporality=commulative tag, and -# ## start_time field, which adds the start time of the metric accumulation. -# ## You should use this when using OpenTelemetry output. -# # enable_aggregation_temporality = false -# -# ## Percentiles to calculate for timing & histogram stats. -# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] -# -# ## separator to use between elements of a statsd metric -# metric_separator = "_" -# -# ## Parses tags in the datadog statsd format -# ## http://docs.datadoghq.com/guides/dogstatsd/ -# ## deprecated in 1.10; use datadog_extensions option instead -# parse_data_dog_tags = false -# -# ## Parses extensions to statsd in the datadog statsd format -# ## currently supports metrics and datadog tags. -# ## http://docs.datadoghq.com/guides/dogstatsd/ -# datadog_extensions = false -# -# ## Parses distributions metric as specified in the datadog statsd format -# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition -# datadog_distributions = false -# -# ## Statsd data translation templates, more info can be read here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md -# # templates = [ -# # "cpu.* measurement*" -# # ] -# -# ## Number of UDP messages allowed to queue up, once filled, -# ## the statsd server will start dropping packets -# allowed_pending_messages = 10000 -# -# ## Number of worker threads used to parse the incoming messages. -# # number_workers_threads = 5 -# -# ## Number of timing/histogram values to track per-measurement in the -# ## calculation of percentiles. Raising this limit increases the accuracy -# ## of percentiles but also increases the memory usage and cpu time. -# percentile_limit = 1000 -# -# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics -# ## will start dropping. Defaults to the OS default. -# # read_buffer_size = 65535 -# -# ## Max duration (TTL) for each metric to stay cached/reported without being updated. -# # max_ttl = "10h" -# -# ## Sanitize name method -# ## By default, telegraf will pass names directly as they are received. -# ## However, upstream statsd now does sanitization of names which can be -# ## enabled by using the "upstream" method option. This option will a) replace -# ## white space with '_', replace '/' with '-', and remove characters not -# ## matching 'a-zA-Z_\-0-9\.;='. -# #sanitize_name_method = "" -# -# ## Replace dots (.) with underscore (_) and dashes (-) with -# ## double underscore (__) in metric names. -# # convert_names = false - - -# # Suricata stats and alerts plugin -# [[inputs.suricata]] -# ## Source -# ## Data sink for Suricata stats log. This is expected to be a filename of a -# ## unix socket to be created for listening. -# # source = "/var/run/suricata-stats.sock" -# -# ## Delimiter -# ## Used for flattening field keys, e.g. subitem "alert" of "detect" becomes -# ## "detect_alert" when delimiter is "_". -# # delimiter = "_" -# -# ## Metric version -# ## Version 1 only collects stats and optionally will look for alerts if -# ## the configuration setting alerts is set to true. 
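Stepping back to the statsd listener above: plain UDP datagrams in the statsd line format are enough to exercise it by hand. A small illustrative Python sender (the metric names are made up, and the tagged line assumes `datadog_extensions = true`):

```python
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = ("127.0.0.1", 8125)                       # service_address above
sock.sendto(b"deploys.prod:1|c", addr)           # counter
sock.sendto(b"request.time:320|ms|@0.1", addr)   # timing, 10% sample rate
sock.sendto(b"cpu.temp:42|g|#host:web01", addr)  # gauge with datadog tags
sock.close()
```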
-# ## Version 2 parses any event type message by default and produced metrics -# ## under a single metric name using a tag to differentiate between event -# ## types. The timestamp for the message is applied to the generated metric. -# ## Additional tags and fields are included as well. -# # version = "1" -# -# ## Alerts -# ## In metric version 1, only status is captured by default, alerts must be -# ## turned on with this configuration option. This option does not apply for -# ## metric version 2. -# # alerts = false - - -# [[inputs.syslog]] -# ## Protocol, address and port to host the syslog receiver. -# ## If no host is specified, then localhost is used. -# ## If no port is specified, 6514 is used (RFC5425#section-4.1). -# ## ex: server = "tcp://localhost:6514" -# ## server = "udp://:6514" -# ## server = "unix:///var/run/telegraf-syslog.sock" -# ## When using tcp, consider using 'tcp4' or 'tcp6' to force the usage of IPv4 -# ## or IPV6 respectively. There are cases, where when not specified, a system -# ## may force an IPv4 mapped IPv6 address. -# server = "tcp://127.0.0.1:6514" -# -# ## Permission for unix sockets (only available on unix sockets) -# ## This setting may not be respected by some platforms. To safely restrict -# ## permissions it is recommended to place the socket into a previously -# ## created directory with the desired permissions. -# ## ex: socket_mode = "777" -# # socket_mode = "" -# -# ## Maximum number of concurrent connections (only available on stream sockets like TCP) -# ## Zero means unlimited. -# # max_connections = 0 -# -# ## Read timeout (only available on stream sockets like TCP) -# ## Zero means unlimited. -# # read_timeout = "0s" -# -# ## Optional TLS configuration (only available on stream sockets like TCP) -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Enables client authentication if set. -# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] -# -# ## Maximum socket buffer size (in bytes when no unit specified) -# ## For stream sockets, once the buffer fills up, the sender will start -# ## backing up. For datagram sockets, once the buffer fills up, metrics will -# ## start dropping. Defaults to the OS default. -# # read_buffer_size = "64KiB" -# -# ## Period between keep alive probes (only applies to TCP sockets) -# ## Zero disables keep alive probes. Defaults to the OS configuration. -# # keep_alive_period = "5m" -# -# ## Content encoding for message payloads -# ## Can be set to "gzip" for compressed payloads or "identity" for no encoding. -# # content_encoding = "identity" -# -# ## Maximum size of decoded packet (in bytes when no unit specified) -# # max_decompression_size = "500MB" -# -# ## Framing technique used for messages transport -# ## Available settings are: -# ## octet-counting -- see RFC5425#section-4.3.1 and RFC6587#section-3.4.1 -# ## non-transparent -- see RFC6587#section-3.4.2 -# # framing = "octet-counting" -# -# ## The trailer to be expected in case of non-transparent framing (default = "LF"). -# ## Must be one of "LF", or "NUL". -# # trailer = "LF" -# -# ## Whether to parse in best effort mode or not (default = false). -# ## By default best effort parsing is off. -# # best_effort = false -# -# ## The RFC standard to use for message parsing -# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) -# ## Must be one of "RFC5424", or "RFC3164". -# # syslog_standard = "RFC5424" -# -# ## Character to prepend to SD-PARAMs (default = "_"). 
-# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. -# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] -# ## For each combination a field is created. -# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. -# # sdparam_separator = "_" - - -# # Gather information about systemd-unit states -# # This plugin ONLY supports Linux -# [[inputs.systemd_units]] -# ## Pattern of units to collect -# ## A space-separated list of unit-patterns including wildcards determining -# ## the units to collect. -# ## ex: pattern = "telegraf* influxdb* user@*" -# # pattern = "*" -# -# ## Filter for a specific unit type -# ## Available settings are: service, socket, target, device, mount, -# ## automount, swap, timer, path, slice and scope -# # unittype = "service" -# -# ## Collect detailed information for the units -# # details = false -# -# ## Timeout for state-collection -# # timeout = "5s" - - -# # Parse the new lines appended to a file -# [[inputs.tail]] -# ## File names or a pattern to tail. -# ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## "/var/log/**.log" -> recursively find all .log files in /var/log -# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log -# ## "/var/log/apache.log" -> just tail the apache log file -# ## "/var/log/log[!1-2]* -> tail files without 1-2 -# ## "/var/log/log[^1-2]* -> identical behavior as above -# ## See https://github.com/gobwas/glob for more examples -# ## -# files = ["/var/mymetrics.out"] -# -# ## Read file from beginning. -# # from_beginning = false -# -# ## Whether file is a named pipe -# # pipe = false -# -# ## Method used to watch for file updates. Can be either "inotify" or "poll". -# # watch_method = "inotify" -# -# ## Maximum lines of the file to process that have not yet be written by the -# ## output. For best throughput set based on the number of metrics on each -# ## line and the size of the output's metric_batch_size. -# # max_undelivered_lines = 1000 -# -# ## Character encoding to use when interpreting the file contents. Invalid -# ## characters are replaced using the unicode replacement character. When set -# ## to the empty string the data is not decoded to text. -# ## ex: character_encoding = "utf-8" -# ## character_encoding = "utf-16le" -# ## character_encoding = "utf-16be" -# ## character_encoding = "" -# # character_encoding = "" -# -# ## Data format to consume. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# -# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. -# # path_tag = "path" -# -# ## Filters to apply to files before generating metrics -# ## "ansi_color" removes ANSI colors -# # filters = [] -# -# ## multiline parser/codec -# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html -# #[inputs.tail.multiline] -# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data. -# #pattern = "^\s" -# -# ## The field's value must be previous or next and indicates the relation to the -# ## multi-line event. 
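The multiline behaviour being described here is easier to grasp with a toy version. A hypothetical Python sketch of what `pattern = "^\s"` does together with the `match_which_line = "previous"` setting shown just below (indented lines are joined onto the line before them):

```python
import re

pattern = re.compile(r"^\s")  # an indented line continues the previous one
events, current = [], None
for line in ["ERROR boom", "  at frame 1", "  at frame 2", "INFO ok"]:
    if current is not None and pattern.match(line):
        current += "\n" + line          # attach to the *previous* line
    else:
        if current is not None:
            events.append(current)
        current = line
events.append(current)

assert events == ["ERROR boom\n  at frame 1\n  at frame 2", "INFO ok"]
```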
-# #match_which_line = "previous" -# -# ## The invert_match can be true or false (defaults to false). -# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true) -# #invert_match = false -# -# ## The handling method for quoted text (defaults to 'ignore'). -# ## The following methods are available: -# ## ignore -- do not consider quotation (default) -# ## single-quotes -- consider text quoted by single quotes (') -# ## double-quotes -- consider text quoted by double quotes (") -# ## backticks -- consider text quoted by backticks (`) -# ## When handling quotes, escaped quotes (e.g. \") are handled correctly. -# #quotation = "ignore" -# -# ## The preserve_newline option can be true or false (defaults to false). -# ## If true, the newline character is preserved for multiline elements, -# ## this is useful to preserve message-structure e.g. for logging outputs. -# #preserve_newline = false -# -# #After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s. -# #timeout = 5s - - -# # Read metrics from one or many vCenters -# [[inputs.vsphere]] -# ## List of vCenter URLs to be monitored. These three lines must be uncommented -# ## and edited for the plugin to work. -# vcenters = [ "https://vcenter.local/sdk" ] -# username = "user@corp.local" -# password = "secret" -# -# ## VMs -# ## Typical VM metrics (if omitted or empty, all metrics are collected) -# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) -# # vm_exclude = [] # Inventory paths to exclude -# vm_metric_include = [ -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.run.summation", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.wait.summation", -# "mem.active.average", -# "mem.granted.average", -# "mem.latency.average", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.usage.average", -# "power.power.average", -# "virtualDisk.numberReadAveraged.average", -# "virtualDisk.numberWriteAveraged.average", -# "virtualDisk.read.average", -# "virtualDisk.readOIO.latest", -# "virtualDisk.throughput.usage.average", -# "virtualDisk.totalReadLatency.average", -# "virtualDisk.totalWriteLatency.average", -# "virtualDisk.write.average", -# "virtualDisk.writeOIO.latest", -# "sys.uptime.latest", -# ] -# # vm_metric_exclude = [] ## Nothing is excluded by default -# # vm_instances = true ## true by default -# -# ## Hosts -# ## Typical host metrics (if omitted or empty, all metrics are collected) -# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) -# # host_exclude [] # Inventory paths to exclude -# host_metric_include = [ -# "cpu.coreUtilization.average", -# "cpu.costop.summation", -# "cpu.demand.average", -# "cpu.idle.summation", -# "cpu.latency.average", -# "cpu.readiness.average", -# "cpu.ready.summation", -# "cpu.swapwait.summation", -# "cpu.usage.average", -# "cpu.usagemhz.average", -# "cpu.used.summation", -# "cpu.utilization.average", -# "cpu.wait.summation", -# "disk.deviceReadLatency.average", -# "disk.deviceWriteLatency.average", -# 
"disk.kernelReadLatency.average", -# "disk.kernelWriteLatency.average", -# "disk.numberReadAveraged.average", -# "disk.numberWriteAveraged.average", -# "disk.read.average", -# "disk.totalReadLatency.average", -# "disk.totalWriteLatency.average", -# "disk.write.average", -# "mem.active.average", -# "mem.latency.average", -# "mem.state.latest", -# "mem.swapin.average", -# "mem.swapinRate.average", -# "mem.swapout.average", -# "mem.swapoutRate.average", -# "mem.totalCapacity.average", -# "mem.usage.average", -# "mem.vmmemctl.average", -# "net.bytesRx.average", -# "net.bytesTx.average", -# "net.droppedRx.summation", -# "net.droppedTx.summation", -# "net.errorsRx.summation", -# "net.errorsTx.summation", -# "net.usage.average", -# "power.power.average", -# "storageAdapter.numberReadAveraged.average", -# "storageAdapter.numberWriteAveraged.average", -# "storageAdapter.read.average", -# "storageAdapter.write.average", -# "sys.uptime.latest", -# ] -# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" -# # ip_addresses = ["ipv6", "ipv4" ] -# -# # host_metric_exclude = [] ## Nothing excluded by default -# # host_instances = true ## true by default -# -# -# ## Clusters -# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # cluster_exclude = [] # Inventory paths to exclude -# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected -# # cluster_metric_exclude = [] ## Nothing excluded by default -# # cluster_instances = false ## false by default -# -# ## Resource Pools -# # resource_pool_include = [ "/*/host/**"] # Inventory path to resource pools to collect (by default all are collected) -# # resource_pool_exclude = [] # Inventory paths to exclude -# # resource_pool_metric_include = [] ## if omitted or empty, all metrics are collected -# # resource_pool_metric_exclude = [] ## Nothing excluded by default -# # resource_pool_instances = false ## false by default -# -# ## Datastores -# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) -# # datastore_exclude = [] # Inventory paths to exclude -# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected -# # datastore_metric_exclude = [] ## Nothing excluded by default -# # datastore_instances = false ## false by default -# -# ## Datacenters -# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) -# # datacenter_exclude = [] # Inventory paths to exclude -# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected -# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. -# # datacenter_instances = false ## false by default -# -# ## VSAN -# # vsan_metric_include = [] ## if omitted or empty, all metrics are collected -# # vsan_metric_exclude = [ "*" ] ## vSAN are not collected by default. -# ## Whether to skip verifying vSAN metrics against the ones from GetSupportedEntityTypes API. -# # vsan_metric_skip_verify = false ## false by default. -# -# ## Interval for sampling vSAN performance metrics, can be reduced down to -# ## 30 seconds for vSAN 8 U1. 
-# # vsan_interval = "5m" -# -# ## Plugin Settings -# ## separator character to use for measurement and field names (default: "_") -# # separator = "_" -# -# ## number of objects to retrieve per query for realtime resources (vms and hosts) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_objects = 256 -# -# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) -# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) -# # max_query_metrics = 256 -# -# ## number of go routines to use for collection and discovery of objects and metrics -# # collect_concurrency = 1 -# # discover_concurrency = 1 -# -# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) -# # object_discovery_interval = "300s" -# -# ## timeout applies to any of the api request made to vcenter -# # timeout = "60s" -# -# ## When set to true, all samples are sent as integers. This makes the output -# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all -# ## samples from vCenter, with the exception of percentages, are integer -# ## values, but under some conditions, some averaging takes place internally in -# ## the plugin. Setting this flag to "false" will send values as floats to -# ## preserve the full precision when averaging takes place. -# # use_int_samples = true -# -# ## Custom attributes from vCenter can be very useful for queries in order to slice the -# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled -# ## by default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. -# ## By default, since they can add a considerable amount of tags to the resulting metrics. To -# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include -# ## to select the attributes you want to include. -# # custom_attribute_include = [] -# # custom_attribute_exclude = ["*"] -# -# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In -# ## some versions (6.7, 7.0 and possible more), certain metrics, such as cluster metrics, may be reported -# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing -# ## it too much may cause performance issues. 
-# # metric_lookback = 3 -# -# ## Optional SSL Config -# # ssl_ca = "/path/to/cafile" -# # ssl_cert = "/path/to/certfile" -# # ssl_key = "/path/to/keyfile" -# ## Use SSL but skip chain & host verification -# # insecure_skip_verify = false -# -# ## The Historical Interval value must match EXACTLY the interval in the daily -# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals -# # historical_interval = "5m" -# -# ## Specifies plugin behavior regarding disconnected servers -# ## Available choices : -# ## - error: telegraf will return an error on startup if one the servers is unreachable -# ## - ignore: telegraf will ignore unreachable servers on both startup and gather -# # disconnected_servers_behavior = "error" -# -# ## HTTP Proxy support -# # use_system_proxy = true -# # http_proxy_url = "" - - -# # A Webhooks Event collector -# [[inputs.webhooks]] -# ## Address and port to host Webhook listener on -# service_address = ":1619" -# -# ## Maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## Maximum duration before timing out write of the response -# # write_timeout = "10s" -# -# [inputs.webhooks.filestack] -# path = "/filestack" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.github] -# path = "/github" -# # secret = "" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.mandrill] -# path = "/mandrill" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.rollbar] -# path = "/rollbar" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.papertrail] -# path = "/papertrail" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.particle] -# path = "/particle" -# -# ## HTTP basic auth -# #username = "" -# #password = "" -# -# [inputs.webhooks.artifactory] -# path = "/artifactory" - - -# # Input plugin to collect Windows Event Log messages -# # This plugin ONLY supports Windows -# [[inputs.win_eventlog]] -# ## Telegraf should have Administrator permissions to subscribe for some -# ## Windows Events channels (e.g. System log) -# -# ## LCID (Locale ID) for event rendering -# ## 1033 to force English language -# ## 0 to use default Windows locale -# # locale = 0 -# -# ## Name of eventlog, used only if xpath_query is empty -# ## Example: "Application" -# # eventlog_name = "" -# -# ## xpath_query can be in defined short form like "Event/System[EventID=999]" -# ## or you can form a XML Query. Refer to the Consuming Events article: -# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events -# ## XML query is the recommended form, because it is most flexible -# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer -# ## and then copying resulting XML here -# xpath_query = ''' -# -# -# -# *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# ''' -# -# ## When true, event logs are read from the beginning; otherwise only future -# ## events will be logged. -# # from_beginning = false -# -# # Process UserData XML to fields, if this node exists in Event XML -# # process_userdata = true -# -# # Process EventData XML to fields, if this node exists in Event XML -# # process_eventdata = true -# -# ## Separator character to use for unrolled XML Data field names -# # separator = "_" -# -# ## Get only first line of Message field. 
For most events first line is -# ## usually more than enough -# # only_first_line_of_message = true -# -# ## Parse timestamp from TimeCreated.SystemTime event field. -# ## Will default to current time of telegraf processing on parsing error or if -# ## set to false -# # timestamp_from_event = true -# -# ## System field names: -# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", -# ## "TimeCreated", "EventRecordID", "ActivityID", "RelatedActivityID", -# ## "ProcessID", "ThreadID", "ProcessName", "Channel", "Computer", "UserID", -# ## "UserName", "Message", "LevelText", "TaskText", "OpcodeText" -# ## -# ## In addition to System, Data fields can be unrolled from additional XML -# ## nodes in event. Human-readable representation of those nodes is formatted -# ## into event Message field, but XML is more machine-parsable -# -# ## Event fields to include as tags -# ## The values below are included by default. -# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") -# # event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] -# -# ## Event fields to include -# ## All fields are sent by default. -# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") -# # event_fields = ["*"] -# -# ## Event fields to exclude -# ## Note that if you exclude all fields then no metrics are produced. A valid -# ## metric includes at least one field. -# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") -# # exclude_fields = [] -# -# ## Event fields to exclude if their value is empty or equals to zero -# ## The values below are included by default. -# ## Globbing supported (e.g. "Level*" matches both "Level" and "LevelText") -# # exclude_empty = ["Task", "Opcode", "*ActivityID", "UserID"] - - -# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. -# [[inputs.zipkin]] -# ## URL path for span data -# # path = "/api/v1/spans" -# -# ## Port on which Telegraf listens -# # port = 9411 -# -# ## Maximum duration before timing out read of the request -# # read_timeout = "10s" -# ## Maximum duration before timing out write of the response -# # write_timeout = "10s" +interval = "1s" +round_interval = true +metric_batch_size = 1000 +metric_buffer_limit = 10000 +collection_jitter = "0s" +flush_interval = "1s" +flush_jitter = "0s" +precision = "0s" +hostname = "" +omit_hostname = false + +[outputs] +[[outputs.influxdb_v2]] +urls = [ "https://127.0.0.1",] +token = "e5gn-7I9LLZ6ZZxcytI_P2FCTStePJ5dYeG9YutRdgsgkezsaZBfyMIMB_FkoLOxzl_s9puPiapPQRzRj9BCfg==" +organization = "Zuhause" +bucket = "Woche" +insecure_skip_verify = true + +[inputs] +[[inputs.mqtt_consumer]] +servers = [ "ssl://mqtt-broker.fritz.box:8883",] +topics = [ "multitemp", "Feinstaub", "wechselrichter/wechselrichter", "kellerluefter", "watermeter", "temp/#", "openWB/#", "blumentopf/#", "server_monitor",] +qos = 0 +connection_timeout = "30s" +client_id = "telegraf" +username = "telegraf" +password = "tnTXzNZzCqjQgVgK" +tls_ca = "/etc/ssl/certs/mqtt-broker.fritz.box.crt" +insecure_skip_verify = false +data_format = "json" diff --git a/templates/index.html b/templates/index.html index 49dbe65..821b4e4 100644 --- a/templates/index.html +++ b/templates/index.html @@ -11,19 +11,19 @@

MQTT Topics

- +
- +
- +
\ No newline at end of file
diff --git a/templates/login.html b/templates/login.html
index 8f0570e..e14c5e1 100644
--- a/templates/login.html
+++ b/templates/login.html
@@ -25,7 +25,7 @@

Login

- +
\ No newline at end of file
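Finally, the new telegraf.conf added in this diff can be sanity-checked with Python's third-party `toml` package. A minimal sketch, assuming the file is readable at `./telegraf.conf` (note that `[[inputs.mqtt_consumer]]` is a TOML array of tables, so it parses as a list):

```python
import toml

cfg = toml.load("telegraf.conf")
mqtt = cfg["inputs"]["mqtt_consumer"][0]  # [[...]] sections parse as lists
assert mqtt["qos"] == 0
assert "multitemp" in mqtt["topics"]
print(mqtt["servers"])  # ['ssl://mqtt-broker.fritz.box:8883']
```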