feat: added mysqld exporter on matomo

confs/matomo/default/prometheus-mysqld-exporter

# By default the connection string will be read from
# $HOME/.my.cnf or from the file specified with the --config.my-cnf parameter.
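#
# For reference, such a file uses the standard MySQL option-file syntax with
# a [client] section; the values below are illustrative, not taken from this
# host:
#
# [client]
# user=prometheus
# socket=/run/mysqld/mysqld.sock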

# To set a connection string from the environment instead, uncomment one of the
# following lines.

# Using a TCP connection and password authentication:
# DATA_SOURCE_NAME="login:password@(hostname:port)/dbname"

# Using UNIX domain sockets and authentication:
DATA_SOURCE_NAME="prometheus:nopassword@unix(/run/mysqld/mysqld.sock)/"

# Note that the user must be granted sufficient privileges for the exporter
# to run. For example, to create a user that connects via the UNIX socket:
#
# CREATE USER IF NOT EXISTS 'prometheus'@'localhost' IDENTIFIED VIA unix_socket;
# GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'prometheus'@'localhost';
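#
# The resulting privileges can then be checked with standard SQL:
#
# SHOW GRANTS FOR 'prometheus'@'localhost';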

# Set the command-line arguments to pass to the exporter.
# ARGS='--config.my-cnf /etc/mysql/debian.cnf'
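# Several of the flags documented below can be combined in ARGS; the values
# here are illustrative only:
# ARGS='--web.listen-address=":9104" --collect.info_schema.processlist'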

# Usage of prometheus-mysqld-exporter:
# --exporter.lock_wait_timeout=2
# Set a lock_wait_timeout on the connection to avoid long metadata
# locking.
# --exporter.log_slow_filter
# Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not
# supported by Oracle MySQL.
# --collect.heartbeat.database="heartbeat"
# Database from where to collect heartbeat data
# --collect.heartbeat.table="heartbeat"
# Table from where to collect heartbeat data
# --collect.info_schema.processlist.min_time=0
# Minimum time a thread must be in each state to be counted
# --collect.info_schema.tables.databases="*"
# The list of databases to collect table stats for, or '*' for all
# --collect.perf_schema.eventsstatements.limit=250
# Limit the number of events statements digests by response time
# --collect.perf_schema.eventsstatements.timelimit=86400
# Limit how old the 'last_seen' events statements can be, in seconds
# --collect.perf_schema.eventsstatements.digest_text_limit=120
# Maximum length of the normalized statement text
# --collect.perf_schema.file_instances.filter=".*"
# RegEx file_name filter for performance_schema.file_summary_by_instance
# --collect.perf_schema.file_instances.remove_prefix="/var/lib/mysql/"
# Remove path prefix in performance_schema.file_summary_by_instance
# --web.listen-address=":9104"
# Address to listen on for web interface and telemetry.
# --web.telemetry-path="/metrics"
# Path under which to expose metrics.
# --config.my-cnf="$HOME/.my.cnf"
# Path to .my.cnf file to read MySQL credentials from.
# --collect.global_variables
# Collect from SHOW GLOBAL VARIABLES
# --collect.slave_status
# Collect from SHOW SLAVE STATUS
# --collect.info_schema.processlist
# Collect current thread state counts from the
# information_schema.processlist
# --collect.info_schema.tables
# Collect metrics from information_schema.tables
# --collect.info_schema.innodb_tablespaces
# Collect metrics from information_schema.innodb_sys_tablespaces
# --collect.info_schema.innodb_metrics
# Collect metrics from information_schema.innodb_metrics
# --collect.auto_increment.columns
# Collect auto_increment columns and max values from information_schema
# --collect.global_status
# Collect from SHOW GLOBAL STATUS
# --collect.perf_schema.tableiowaits
# Collect metrics from performance_schema.table_io_waits_summary_by_table
# --collect.perf_schema.indexiowaits
# Collect metrics from
# performance_schema.table_io_waits_summary_by_index_usage
# --collect.perf_schema.tablelocks
# Collect metrics from
# performance_schema.table_lock_waits_summary_by_table
# --collect.perf_schema.eventsstatements
# Collect metrics from
# performance_schema.events_statements_summary_by_digest
# --collect.perf_schema.eventswaits
# Collect metrics from
# performance_schema.events_waits_summary_global_by_event_name
# --collect.perf_schema.file_events
# Collect metrics from performance_schema.file_summary_by_event_name
# --collect.perf_schema.file_instances
# Collect metrics from performance_schema.file_summary_by_instance
# --collect.binlog_size
# Collect the current size of all registered binlog files
# --collect.info_schema.userstats
# If running with userstat=1, set to true to collect user statistics
# --collect.info_schema.clientstats
# If running with userstat=1, set to true to collect client statistics
# --collect.info_schema.tablestats
# If running with userstat=1, set to true to collect table statistics
# --collect.info_schema.innodb_cmp
# Collect metrics from information_schema.innodb_cmp
# --collect.info_schema.innodb_cmpmem
# Collect metrics from information_schema.innodb_cmpmem
# --collect.info_schema.query_response_time
# Collect query response time distribution if query_response_time_stats
# is ON.
# --collect.engine_tokudb_status
# Collect from SHOW ENGINE TOKUDB STATUS
# --collect.perf_schema.replication_group_member_stats
# Collect metrics from performance_schema.replication_group_member_stats
# --collect.heartbeat
# Collect from heartbeat
# --collect.slave_hosts
# Scrape information from 'SHOW SLAVE HOSTS'
# --collect.engine_innodb_status
# Collect from SHOW ENGINE INNODB STATUS
# --log.level="info"
# Only log messages with the given severity or above. Valid levels:
# [debug, info, warn, error, fatal]
# --log.format="logger:stderr"
# Set the log target and format. Example:
# "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"
