Prometheus
There are various customizations you can do to tailor the deployment of OpenStack Prometheus. You can find those below.
General Parameters
conf.httpd
Type: string
Description:
ServerRoot \"/usr/local/apache2\" Listen 80 LoadModule mpm_event_module modules/mod_mpm_event.so LoadModule authn_file_module modules/mod_authn_file.so LoadModule authn_core_module modules/mod_authn_core.so LoadModule authz_host_module modules/mod_authz_host.so LoadModule authz_groupfile_module modules/mod_authz_groupfile.so LoadModule authz_user_module modules/mod_authz_user.so LoadModule authz_core_module modules/mod_authz_core.so LoadModule access_compat_module modules/mod_access_compat.so LoadModule auth_basic_module modules/mod_auth_basic.so LoadModule ldap_module modules/mod_ldap.so LoadModule authnz_ldap_module modules/mod_authnz_ldap.so LoadModule reqtimeout_module modules/mod_reqtimeout.so LoadModule filter_module modules/mod_filter.so LoadModule proxy_html_module modules/mod_proxy_html.so LoadModule log_config_module modules/mod_log_config.so LoadModule env_module modules/mod_env.so LoadModule headers_module modules/mod_headers.so LoadModule setenvif_module modules/mod_setenvif.so LoadModule version_module modules/mod_version.so LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_connect_module modules/mod_proxy_connect.so LoadModule proxy_http_module modules/mod_proxy_http.so LoadModule proxy_balancer_module modules/mod_proxy_balancer.so LoadModule slotmem_shm_module modules/mod_slotmem_shm.so LoadModule slotmem_plain_module modules/mod_slotmem_plain.so LoadModule unixd_module modules/mod_unixd.so LoadModule status_module modules/mod_status.so LoadModule autoindex_module modules/mod_autoindex.so <IfModule unixd_module> User daemon Group daemon </IfModule> <Directory /> AllowOverride none Require all denied </Directory> <Files \".ht*\"> Require all denied </Files> ErrorLog /dev/stderr LogLevel warn <IfModule log_config_module> LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" combined LogFormat \"%{X-Forwarded-For}i %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\" proxy LogFormat \"%h 
%l %u %t \\\"%r\\\" %>s %b\" common <IfModule logio_module> LogFormat \"%a %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\" %I %O\" combinedio </IfModule> SetEnvIf X-Forwarded-For \"^.*\\..*\\..*\\..*\" forwarded CustomLog /dev/stdout common CustomLog /dev/stdout combined CustomLog /dev/stdout proxy env=forwarded </IfModule> <Directory \"/usr/local/apache2/cgi-bin\"> AllowOverride None Options None Require all granted </Directory> <IfModule headers_module> RequestHeader unset Proxy early </IfModule> <IfModule proxy_html_module> Include conf/extra/proxy-html.conf </IfModule> <VirtualHost *:80> # Expose metrics to all users, as this is not sensitive information and # circumvents the inability of Prometheus to interpolate environment vars # in its configuration file <Location /metrics> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics Satisfy Any Allow from all </Location> # Expose the /federate endpoint to all users, as this is also not # sensitive information and circumvents the inability of Prometheus to # interpolate environment vars in its configuration file <Location /federate> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/metrics Satisfy Any Allow from all </Location> # Restrict general user (LDAP) access to the /graph endpoint, as general trusted # users should only be able to query Prometheus for metrics and not have access # to information like targets, configuration, flags or build info for Prometheus <Location /> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/ ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/ AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file ldap AuthUserFile /usr/local/apache2/conf/.htpasswd AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }} AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }} AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }} Require valid-user </Location> <Location /graph> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/graph AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file ldap AuthUserFile /usr/local/apache2/conf/.htpasswd AuthLDAPBindDN {{ .Values.endpoints.ldap.auth.admin.bind }} AuthLDAPBindPassword {{ .Values.endpoints.ldap.auth.admin.password }} AuthLDAPURL {{ tuple \"ldap\" \"default\" \"ldap\" . | include \"helm-toolkit.endpoints.keystone_endpoint_uri_lookup\" | quote }} Require valid-user </Location> # Restrict access to the /config (dashboard) and /api/v1/status/config (http) endpoints # to the admin user <Location /config> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/config AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> <Location /api/v1/status/config> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/config AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> # Restrict access to the /flags (dashboard) and /api/v1/status/flags (http) endpoints # to the admin user <Location /flags> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/flags AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> <Location /api/v1/status/flags> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/status/flags AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> # Restrict access to the /status (dashboard) endpoint to the admin user <Location /status> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/status AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> # Restrict access to the /rules (dashboard) endpoint to the admin user <Location /rules> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/rules AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> # Restrict access to the /targets (dashboard) and /api/v1/targets (http) endpoints # to the admin user <Location /targets> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/targets AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> <Location /api/v1/targets> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . 
| include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/targets AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> # Restrict access to the /api/v1/admin/tsdb/ endpoints (http) to the admin user. # These endpoints are disabled by default, but are included here to ensure only # an admin user has access to these endpoints when enabled <Location /api/v1/admin/tsdb/> ProxyPass http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/ ProxyPassReverse http://localhost:{{ tuple \"monitoring\" \"internal\" \"api\" . | include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}/api/v1/admin/tsdb/ AuthName \"Prometheus\" AuthType Basic AuthBasicProvider file AuthUserFile /usr/local/apache2/conf/.htpasswd Require valid-user </Location> </VirtualHost>
conf.prometheus.command_line_flags."log.level"
Type: string
Description:
“info”
conf.prometheus.command_line_flags."query.max_concurrency"
Type: int
Description:
20
conf.prometheus.command_line_flags."query.timeout"
Type: string
Description:
“2m”
conf.prometheus.command_line_flags."storage.tsdb.path"
Type: string
Description:
“/var/lib/prometheus/data”
conf.prometheus.command_line_flags."storage.tsdb.retention.time"
Type: string
Description:
“7d”
conf.prometheus.command_line_flags."web.enable_admin_api"
Type: bool
Description:
false
conf.prometheus.command_line_flags."web.enable_lifecycle"
Type: bool
Description:
false
conf.prometheus.rules
Type: list
Description:
[]
conf.prometheus.scrape_configs.template
Type: string
Description:
{{- $promHost := tuple \"monitoring\" \"public\" . | include \"helm-toolkit.endpoints.hostname_fqdn_endpoint_lookup\" }} {{- if not (empty .Values.conf.prometheus.rules)}} rule_files: {{- $rulesKeys := keys .Values.conf.prometheus.rules -}} {{- range $rule := $rulesKeys }} {{ printf \"- /etc/config/rules/%s.rules\" $rule }} {{- end }} {{- end }} global: scrape_interval: 60s evaluation_interval: 60s external_labels: prometheus_host: {{$promHost}} scrape_configs: - job_name: kubelet scheme: https # This TLS & bearer token file config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside # the cluster. Otherwise, more config options have to be provided within the # <kubernetes_sd_config>. tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node scrape_interval: 45s relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: - __meta_kubernetes_node_name regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics - source_labels: - __meta_kubernetes_node_name action: replace target_label: kubernetes_io_hostname # Scrape config for Kubelet cAdvisor. # # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics # (those whose names begin with 'container_') have been removed from the # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to # retrieve those metrics. 
# # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor # HTTP endpoint; use \"replacement: /api/v1/nodes/${1}:4194/proxy/metrics\" # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with # the --cadvisor-port=0 Kubelet flag). # # This job is not necessary and should be removed in Kubernetes 1.6 and # earlier versions, or it will cause the metrics to be scraped twice. - job_name: 'kubernetes-cadvisor' # Default to scraping over https. If required, just disable this or change to # `http`. scheme: https # This TLS & bearer token file config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside # the cluster. Otherwise, more config options have to be provided within the # <kubernetes_sd_config>. tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token kubernetes_sd_configs: - role: node relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) - target_label: __address__ replacement: kubernetes.default.svc:443 - source_labels: - __meta_kubernetes_node_name regex: (.+) target_label: __metrics_path__ replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor metric_relabel_configs: - source_labels: - __name__ regex: 'container_network_tcp_usage_total' action: drop - source_labels: - __name__ regex: 'container_tasks_state' action: drop - source_labels: - __name__ regex: 'container_network_udp_usage_total' action: drop - source_labels: - __name__ regex: 'container_memory_failures_total' action: drop - source_labels: - __name__ regex: 'container_cpu_load_average_10s' action: drop - source_labels: - __name__ regex: 'container_cpu_system_seconds_total' action: drop - source_labels: - __name__ regex: 
'container_cpu_user_seconds_total' action: drop - source_labels: - __name__ regex: 'container_fs_inodes_free' action: drop - source_labels: - __name__ regex: 'container_fs_inodes_total' action: drop - source_labels: - __name__ regex: 'container_fs_io_current' action: drop - source_labels: - __name__ regex: 'container_fs_io_time_seconds_total' action: drop - source_labels: - __name__ regex: 'container_fs_io_time_weighted_seconds_total' action: drop - source_labels: - __name__ regex: 'container_fs_read_seconds_total' action: drop - source_labels: - __name__ regex: 'container_fs_reads_merged_total' action: drop - source_labels: - __name__ regex: 'container_fs_reads_merged_total' action: drop - source_labels: - __name__ regex: 'container_fs_reads_total' action: drop - source_labels: - __name__ regex: 'container_fs_sector_reads_total' action: drop - source_labels: - __name__ regex: 'container_fs_sector_writes_total' action: drop - source_labels: - __name__ regex: 'container_fs_write_seconds_total' action: drop - source_labels: - __name__ regex: 'container_fs_writes_bytes_total' action: drop - source_labels: - __name__ regex: 'container_fs_writes_merged_total' action: drop - source_labels: - __name__ regex: 'container_fs_writes_total' action: drop - source_labels: - __name__ regex: 'container_last_seen' action: drop - source_labels: - __name__ regex: 'container_memory_cache' action: drop - source_labels: - __name__ regex: 'container_memory_failcnt' action: drop - source_labels: - __name__ regex: 'container_memory_max_usage_bytes' action: drop - source_labels: - __name__ regex: 'container_memory_rss' action: drop - source_labels: - __name__ regex: 'container_memory_swap' action: drop - source_labels: - __name__ regex: 'container_memory_usage_bytes' action: drop - source_labels: - __name__ regex: 'container_network_receive_errors_total' action: drop - source_labels: - __name__ regex: 'container_network_receive_packets_dropped_total' action: drop - source_labels: - __name__ 
regex: 'container_network_receive_packets_total' action: drop - source_labels: - __name__ regex: 'container_network_transmit_errors_total' action: drop - source_labels: - __name__ regex: 'container_network_transmit_packets_dropped_total' action: drop - source_labels: - __name__ regex: 'container_network_transmit_packets_total' action: drop - source_labels: - __name__ regex: 'container_spec_cpu_period' action: drop - source_labels: - __name__ regex: 'container_spec_cpu_shares' action: drop - source_labels: - __name__ regex: 'container_spec_memory_limit_bytes' action: drop - source_labels: - __name__ regex: 'container_spec_memory_reservation_limit_bytes' action: drop - source_labels: - __name__ regex: 'container_spec_memory_swap_limit_bytes' action: drop - source_labels: - __name__ regex: 'container_start_time_seconds' action: drop # Scrape config for API servers. # # Kubernetes exposes API servers as endpoints to the default/kubernetes # service so this uses `endpoints` role and uses relabelling to only keep # the endpoints associated with the default/kubernetes service using the # default named port `https`. This works for single API server deployments as # well as HA API server deployments. - job_name: 'apiserver' kubernetes_sd_configs: - role: endpoints scrape_interval: 45s # Default to scraping over https. If required, just disable this or change to # `http`. scheme: https # This TLS & bearer token file config is used to connect to the actual scrape # endpoints for cluster components. This is separate to discovery auth # configuration because discovery & scraping are two separate concerns in # Prometheus. The discovery auth config is automatic if Prometheus runs inside # the cluster. Otherwise, more config options have to be provided within the # <kubernetes_sd_config>. 
tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt # If your node certificates are self-signed or use a different CA to the # master CA, then disable certificate verification below. Note that # certificate verification is an integral part of a secure infrastructure # so this should only be disabled in a controlled environment. You can # disable certificate verification by uncommenting the line below. # # insecure_skip_verify: true bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # Keep only the default/kubernetes service endpoints for the https port. This # will add targets for each API server which Kubernetes adds an endpoint to # the default/kubernetes service. relabel_configs: - source_labels: - __meta_kubernetes_namespace - __meta_kubernetes_service_name - __meta_kubernetes_endpoint_port_name action: keep regex: default;kubernetes;https metric_relabel_configs: - source_labels: - __name__ regex: 'apiserver_admission_controller_admission_latencies_seconds_bucket' action: drop - source_labels: - __name__ regex: 'rest_client_request_latency_seconds_bucket' action: drop - source_labels: - __name__ regex: 'apiserver_response_sizes_bucket' action: drop - source_labels: - __name__ regex: 'apiserver_admission_step_admission_latencies_seconds_bucket' action: drop - source_labels: - __name__ regex: 'apiserver_admission_controller_admission_latencies_seconds_count' action: drop - source_labels: - __name__ regex: 'apiserver_admission_controller_admission_latencies_seconds_sum' action: drop - source_labels: - __name__ regex: 'apiserver_request_latencies_summary' action: drop # Scrape config for service endpoints. 
# # The relabeling allows the actual service scrape endpoint to be configured # via the following annotations: # # * `prometheus.io/scrape`: Only scrape services that have a value of `true` # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need # to set this to `https` & most likely set the `tls_config` of the scrape config. # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. # * `prometheus.io/port`: If the metrics are exposed on a different port to the # service then set this appropriately. - job_name: 'openstack-exporter' kubernetes_sd_configs: - role: endpoints scrape_interval: 60s relabel_configs: - source_labels: - __meta_kubernetes_service_name action: keep regex: \"openstack-metrics\" - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_scrape action: keep regex: true - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_scheme action: replace target_label: __scheme__ regex: (https?) - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_path action: replace target_label: __metrics_path__ regex: (.+) - source_labels: - __address__ - __meta_kubernetes_service_annotation_prometheus_io_port action: replace target_label: __address__ regex: ([^:]+)(?::\\d+)?;(\\d+) replacement: $1:$2 - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: - __meta_kubernetes_namespace action: replace target_label: kubernetes_namespace - source_labels: - __meta_kubernetes_service_name action: replace target_label: instance - source_labels: - __meta_kubernetes_service_name action: replace target_label: kubernetes_name - source_labels: - __meta_kubernetes_service_name target_label: job replacement: ${1} - job_name: 'node-exporter' kubernetes_sd_configs: - role: endpoints scrape_interval: 60s relabel_configs: - source_labels: - __meta_kubernetes_service_name action: keep regex: 'node-exporter' - source_labels: - __meta_kubernetes_pod_node_name action: replace 
target_label: hostname - job_name: 'kubernetes-service-endpoints' kubernetes_sd_configs: - role: endpoints scrape_interval: 60s relabel_configs: - source_labels: - __meta_kubernetes_service_name action: drop regex: '(openstack-metrics|prom-metrics|ceph-mgr|node-exporter)' - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_scrape action: keep regex: true - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_scheme action: replace target_label: __scheme__ regex: (https?) - source_labels: - __meta_kubernetes_service_annotation_prometheus_io_path action: replace target_label: __metrics_path__ regex: (.+) - source_labels: - __address__ - __meta_kubernetes_service_annotation_prometheus_io_port action: replace target_label: __address__ regex: ([^:]+)(?::\\d+)?;(\\d+) replacement: $1:$2 - action: labelmap regex: __meta_kubernetes_service_label_(.+) - source_labels: - __meta_kubernetes_namespace action: replace target_label: kubernetes_namespace - source_labels: - __meta_kubernetes_service_name action: replace target_label: kubernetes_name - source_labels: - __meta_kubernetes_service_name target_label: job replacement: ${1} # Example scrape config for pods # # The relabeling allows the actual pod scrape endpoint to be configured via the # following annotations: # # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the # pod's declared ports (default is a port-free target if none are declared). 
- job_name: 'kubernetes-pods' kubernetes_sd_configs: - role: pod relabel_configs: - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] action: keep regex: true - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] action: replace target_label: __metrics_path__ regex: (.+) - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] action: replace regex: ([^:]+)(?::\\d+)?;(\\d+) replacement: $1:$2 target_label: __address__ - action: labelmap regex: __meta_kubernetes_pod_label_(.+) - source_labels: [__meta_kubernetes_namespace] action: replace target_label: kubernetes_namespace - source_labels: [__meta_kubernetes_pod_name] action: replace target_label: kubernetes_pod_name - job_name: calico-etcd kubernetes_sd_configs: - role: service scrape_interval: 20s relabel_configs: - action: labelmap regex: __meta_kubernetes_service_label_(.+) - action: keep source_labels: - __meta_kubernetes_service_name regex: \"calico-etcd\" - action: keep source_labels: - __meta_kubernetes_namespace regex: kube-system target_label: namespace - source_labels: - __meta_kubernetes_pod_name target_label: pod - source_labels: - __meta_kubernetes_service_name target_label: service - source_labels: - __meta_kubernetes_service_name target_label: job replacement: ${1} - source_labels: - __meta_kubernetes_service_label target_label: job regex: calico-etcd replacement: ${1} - target_label: endpoint replacement: \"calico-etcd\" - job_name: ceph-mgr kubernetes_sd_configs: - role: service scrape_interval: 20s relabel_configs: - action: labelmap regex: __meta_kubernetes_service_label_(.+) - action: keep source_labels: - __meta_kubernetes_service_name regex: \"ceph-mgr\" - source_labels: - __meta_kubernetes_service_port_name action: drop regex: 'ceph-mgr' - action: keep source_labels: - __meta_kubernetes_namespace regex: ceph target_label: namespace - source_labels: - __meta_kubernetes_pod_name target_label: pod - source_labels: - 
__meta_kubernetes_service_name target_label: service - source_labels: - __meta_kubernetes_service_name target_label: job replacement: ${1} - source_labels: - __meta_kubernetes_service_label target_label: job regex: ceph-mgr replacement: ${1} - target_label: endpoint replacement: \"ceph-mgr\" alerting: alertmanagers: - kubernetes_sd_configs: - role: pod tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token relabel_configs: - source_labels: [__meta_kubernetes_pod_label_application] regex: prometheus-alertmanager action: keep - source_labels: [__meta_kubernetes_pod_container_port_name] regex: alerts-api action: keep - source_labels: [__meta_kubernetes_pod_container_port_name] regex: peer-mesh action: drop
dependencies.dynamic.common.local_image_registry.jobs[0]
Type: string
Description:
“prometheus-image-repo-sync”
dependencies.dynamic.common.local_image_registry.services[0].endpoint
Type: string
Description:
“node”
dependencies.dynamic.common.local_image_registry.services[0].service
Type: string
Description:
“local_image_registry”
dependencies.static.image_repo_sync.services[0].endpoint
Type: string
Description:
“internal”
dependencies.static.image_repo_sync.services[0].service
Type: string
Description:
“local_image_registry”
dependencies.static.prometheus.services
Type: string
Description:
nil
dependencies.static.tests.services[0].endpoint
Type: string
Description:
“internal”
dependencies.static.tests.services[0].service
Type: string
Description:
“monitoring”
endpoints.alertmanager.host_fqdn_override.default
Type: string
Description:
nil
endpoints.alertmanager.hosts.default
Type: string
Description:
“alerts-engine”
endpoints.alertmanager.hosts.discovery
Type: string
Description:
“prometheus-alertmanager-discovery”
endpoints.alertmanager.hosts.public
Type: string
Description:
“prometheus-alertmanager”
endpoints.alertmanager.name
Type: string
Description:
“prometheus-alertmanager”
endpoints.alertmanager.namespace
Type: string
Description:
nil
endpoints.alertmanager.path.default
Type: string
Description:
nil
endpoints.alertmanager.port.api.default
Type: int
Description:
9093
endpoints.alertmanager.port.api.public
Type: int
Description:
80
endpoints.alertmanager.port.mesh.default
Type: int
Description:
9094
endpoints.alertmanager.scheme.default
Type: string
Description:
“http”
endpoints.cluster_domain_suffix
Type: string
Description:
“cluster.local”
endpoints.ldap.auth.admin.bind
Type: string
Description:
“cn=admin,dc=cluster,dc=local”
endpoints.ldap.auth.admin.password
Type: string
Description:
“password”
endpoints.ldap.host_fqdn_override.default
Type: string
Description:
nil
endpoints.ldap.hosts.default
Type: string
Description:
“ldap”
endpoints.ldap.path.default
Type: string
Description:
“/ou=People,dc=cluster,dc=local”
endpoints.ldap.port.ldap.default
Type: int
Description:
389
endpoints.ldap.scheme.default
Type: string
Description:
“ldap”
endpoints.local_image_registry.host_fqdn_override.default
Type: string
Description:
nil
endpoints.local_image_registry.hosts.default
Type: string
Description:
“localhost”
endpoints.local_image_registry.hosts.internal
Type: string
Description:
“docker-registry”
endpoints.local_image_registry.hosts.node
Type: string
Description:
“localhost”
endpoints.local_image_registry.name
Type: string
Description:
“docker-registry”
endpoints.local_image_registry.namespace
Type: string
Description:
“docker-registry”
endpoints.local_image_registry.port.registry.node
Type: int
Description:
5000
endpoints.monitoring.auth.admin.password
Type: string
Description:
“changeme”
endpoints.monitoring.auth.admin.username
Type: string
Description:
“admin”
endpoints.monitoring.auth.federate.password
Type: string
Description:
“changeme”
endpoints.monitoring.auth.federate.username
Type: string
Description:
“federate”
endpoints.monitoring.host_fqdn_override.default
Type: string
Description:
nil
endpoints.monitoring.hosts.default
Type: string
Description:
“prom-metrics”
endpoints.monitoring.hosts.public
Type: string
Description:
“prometheus”
endpoints.monitoring.name
Type: string
Description:
“prometheus”
endpoints.monitoring.namespace
Type: string
Description:
nil
endpoints.monitoring.path.default
Type: string
Description:
nil
endpoints.monitoring.port.api.default
Type: int
Description:
9090
endpoints.monitoring.port.http.default
Type: int
Description:
80
endpoints.monitoring.scheme.default
Type: string
Description:
“http”
endpoints.oci_image_registry.auth.enabled
Type: bool
Description:
false
endpoints.oci_image_registry.auth.prometheus.password
Type: string
Description:
“password”
endpoints.oci_image_registry.auth.prometheus.username
Type: string
Description:
“prometheus”
endpoints.oci_image_registry.host_fqdn_override.default
Type: string
Description:
nil
endpoints.oci_image_registry.hosts.default
Type: string
Description:
“localhost”
endpoints.oci_image_registry.name
Type: string
Description:
“oci-image-registry”
endpoints.oci_image_registry.namespace
Type: string
Description:
“oci-image-registry”
endpoints.oci_image_registry.port.registry.default
Type: string
Description:
nil
images.local_registry.active
Type: bool
Description:
false
images.local_registry.exclude[0]
Type: string
Description:
“dep_check”
images.local_registry.exclude[1]
Type: string
Description:
“image_repo_sync”
images.pull_policy
Type: string
Description:
“IfNotPresent”
images.tags.apache_proxy
Type: string
Description:
“docker.io/library/httpd:2.4”
images.tags.dep_check
Type: string
Description:
“quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal”
images.tags.helm_tests
Type: string
Description:
“docker.io/openstackhelm/heat:wallaby-ubuntu_focal”
images.tags.image_repo_sync
Type: string
Description:
“docker.io/library/docker:17.07.0”
images.tags.prometheus
Type: string
Description:
“docker.io/prom/prometheus:v2.25.0”
labels.job.node_selector_key
Type: string
Description:
“openstack-control-plane”
labels.job.node_selector_value
Type: string
Description:
“enabled”
labels.prometheus.node_selector_key
Type: string
Description:
“openstack-control-plane”
labels.prometheus.node_selector_value
Type: string
Description:
“enabled”
labels.test.node_selector_key
Type: string
Description:
“openstack-control-plane”
labels.test.node_selector_value
Type: string
Description:
“enabled”
manifests.certificates
Type: bool
Description:
false
manifests.configmap_bin
Type: bool
Description:
true
manifests.configmap_etc
Type: bool
Description:
true
manifests.helm_tests
Type: bool
Description:
true
manifests.ingress
Type: bool
Description:
true
manifests.job_image_repo_sync
Type: bool
Description:
true
manifests.network_policy
Type: bool
Description:
true
manifests.secret_ingress_tls
Type: bool
Description:
true
manifests.secret_prometheus
Type: bool
Description:
true
manifests.secret_registry
Type: bool
Description:
true
manifests.service
Type: bool
Description:
true
manifests.service_ingress
Type: bool
Description:
true
manifests.statefulset_prometheus
Type: bool
Description:
true
monitoring.prometheus.enabled
Type: bool
Description:
true
monitoring.prometheus.prometheus.scrape
Type: bool
Description:
true
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/affinity"
Type: string
Description:
“cookie”
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target"
Type: string
Description:
“/”
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/session-cookie-expires"
Type: string
Description:
“600”
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/session-cookie-hash"
Type: string
Description:
“sha1”
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/session-cookie-max-age"
Type: string
Description:
“600”
network.prometheus.ingress.annotations."nginx.ingress.kubernetes.io/session-cookie-name"
Type: string
Description:
“kube-ingress-session-prometheus”
network.prometheus.ingress.classes.cluster
Type: string
Description:
“nginx-cluster”
network.prometheus.ingress.classes.namespace
Type: string
Description:
“nginx”
network.prometheus.ingress.public
Type: bool
Description:
true
network.prometheus.node_port.enabled
Type: bool
Description:
false
network.prometheus.node_port.port
Type: int
Description:
30900
network_policy.prometheus.egress[0]
Type: object
Description:
{}
network_policy.prometheus.ingress[0]
Type: object
Description:
{}
pod.affinity.anti.topologyKey.default
Type: string
Description:
“kubernetes.io/hostname”
pod.affinity.anti.type.default
Type: string
Description:
“preferredDuringSchedulingIgnoredDuringExecution”
pod.affinity.anti.weight.default
Type: int
Description:
10
pod.env.prometheus
Type: string
Description:
nil
pod.lifecycle.termination_grace_period.prometheus.timeout
Type: int
Description:
30
pod.lifecycle.upgrades.statefulsets.pod_replacement_strategy
Type: string
Description:
“RollingUpdate”
pod.mounts.prometheus.init_container
Type: string
Description:
nil
pod.mounts.prometheus.prometheus
Type: string
Description:
nil
pod.probes.prometheus.prometheus.liveness.enabled
Type: bool
Description:
false
pod.probes.prometheus.prometheus.liveness.params.initialDelaySeconds
Type: int
Description:
120
pod.probes.prometheus.prometheus.liveness.params.timeoutSeconds
Type: int
Description:
30
pod.probes.prometheus.prometheus.readiness.enabled
Type: bool
Description:
true
pod.probes.prometheus.prometheus.readiness.params.initialDelaySeconds
Type: int
Description:
30
pod.probes.prometheus.prometheus.readiness.params.timeoutSeconds
Type: int
Description:
30
pod.replicas.prometheus
Type: int
Description:
1
pod.resources.enabled
Type: bool
Description:
false
pod.resources.jobs.image_repo_sync.limits.cpu
Type: string
Description:
“2000m”
pod.resources.jobs.image_repo_sync.limits.memory
Type: string
Description:
“1024Mi”
pod.resources.jobs.image_repo_sync.requests.cpu
Type: string
Description:
“100m”
pod.resources.jobs.image_repo_sync.requests.memory
Type: string
Description:
“128Mi”
pod.resources.jobs.tests.limits.cpu
Type: string
Description:
“2000m”
pod.resources.jobs.tests.limits.memory
Type: string
Description:
“1024Mi”
pod.resources.jobs.tests.requests.cpu
Type: string
Description:
“100m”
pod.resources.jobs.tests.requests.memory
Type: string
Description:
“128Mi”
pod.resources.prometheus.limits.cpu
Type: string
Description:
“2000m”
pod.resources.prometheus.limits.memory
Type: string
Description:
“1024Mi”
pod.resources.prometheus.requests.cpu
Type: string
Description:
“500m”
pod.resources.prometheus.requests.memory
Type: string
Description:
“128Mi”
pod.security_context.api.container.apache_proxy.readOnlyRootFilesystem
Type: bool
Description:
false
pod.security_context.api.container.apache_proxy.runAsUser
Type: int
Description:
0
pod.security_context.api.container.prometheus.allowPrivilegeEscalation
Type: bool
Description:
false
pod.security_context.api.container.prometheus.readOnlyRootFilesystem
Type: bool
Description:
true
pod.security_context.api.container.prometheus_perms.readOnlyRootFilesystem
Type: bool
Description:
false
pod.security_context.api.container.prometheus_perms.runAsUser
Type: int
Description:
0
pod.security_context.api.pod.runAsUser
Type: int
Description:
65534
pod.security_context.test.container.prometheus_helm_tests.allowPrivilegeEscalation
Type: bool
Description:
false
pod.security_context.test.container.prometheus_helm_tests.readOnlyRootFilesystem
Type: bool
Description:
true
pod.security_context.test.pod.runAsUser
Type: int
Description:
65534
proc_launch.prometheus.custom_launch
Type: string
Description:
while true
do
  echo "If 'proc_launch.prometheus.default: false'."
  echo "Your custom shell script code you can put here."
  sleep 10
done
proc_launch.prometheus.default
Type: bool
Description:
true
secrets.oci_image_registry.prometheus
Type: string
Description:
“prometheus-oci-image-registry-key”
secrets.tls.monitoring.prometheus.internal
Type: string
Description:
“prometheus-tls-api”
secrets.tls.monitoring.prometheus.public
Type: string
Description:
“prometheus-tls-public”
storage.enabled
Type: bool
Description:
true
storage.pvc.access_mode[0]
Type: string
Description:
“ReadWriteOnce”
storage.pvc.name
Type: string
Description:
“prometheus-pvc”
storage.requests.storage
Type: string
Description:
“5Gi”
storage.storage_class
Type: string
Description:
“general”
tls_configs
Type: string
Description:
nil