X-Git-Url: https://iankelling.org/git/?a=blobdiff_plain;f=filesystem%2Fetc%2Fprometheus%2Frules%2Fiank.yml;h=75b5cbcfc760925b79d51fd6c3262de7a6186a97;hb=802e885e3e7fa3857f8bc4f54c261d5ca76f2454;hp=4439c41d41cb61c6a59a4bbb5b967fbf0aeebbb3;hpb=d7551546ac323c5d4b49370c885646bcf96e959f;p=distro-setup

diff --git a/filesystem/etc/prometheus/rules/iank.yml b/filesystem/etc/prometheus/rules/iank.yml
index 4439c41..75b5cbc 100644
--- a/filesystem/etc/prometheus/rules/iank.yml
+++ b/filesystem/etc/prometheus/rules/iank.yml
@@ -8,39 +8,169 @@ groups:
 - name: standard
   rules:
-  - alert: mailtest-check
+
+## uncomment for testing an alert firing
+# - alert: test-alert4
+#   expr: vector(1)
+#   # expr: nonexistent_metric
+#   for: 0m
+#   labels:
+#     severity: day
+#   annotations:
+#     description: "always-firing alert VALUE = {{ $value }}"
+
+
+###### BEGIN MISC NOTES ######
+
+#
+# other interesting exporters
+# https://github.com/prometheus-community/node-exporter-textfile-collector-scripts
+#
+
+# interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/
+
+# interesting promql query that could be useful later.
+# changes(ALERTS_FOR_STATE[24h])
+#
+#
+#
+# alert flap strategy.
+# https://roidelapluie.be/blog/2019/02/21/prometheus-last/
+#
+# Another idea, more generally: make an alert that fires for 24 hours and
+# inhibits another alert for the same thing, for cases where we want at
+# most 1 alert per 24 hours.
+
+###### END MISC NOTES ######
+
+
+
+
+# alerting on missing metrics:
+# https://www.robustperception.io/absent-alerting-for-scraped-metrics
+# that doesnt work if we want to alert across multiple hosts, eg
+# up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
+# however, google led me to a solution here
+# https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
+# there is also the absent() function, but i didnt see a way to make that work
+  - alert: mysers_units_missing
+    expr: |-
+      count(up{job="node"} == 1) by (instance) * 3 unless
+      count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: epanicclean_not_active
+    expr: |-
+      node_systemd_unit_state{name="epanicclean.service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: epanicclean_missing
+    expr: |-
+      count(up{job=~"node|tlsnode"} == 1) by (instance) unless
+      count(node_systemd_unit_state{job=~"node|tlsnode",name="epanicclean.service",state="active"}) by (instance)
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: mysers_not_active
+    expr: |-
+      node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: sysd_result_fail
+    expr: |-
+      rate(node_systemd_unit_result_fail_count[30m]) > 0
+    labels:
+      severity: day
+
+  - alert: mailtest_check_vps
     expr: |-
-      time() - mailtest_check_last_usec > 60 * 12
+      time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 12
     labels:
       severity: day
     annotations:
-      description: '{{ $labels.instance }} mailtest-check'
-      summary: '{{ $labels.instance }} mailtest-check'
+      summary: '12 minutes down'
 
   # 42 mins: enough for a 30 min queue run plus 12
-  - alert: mailtest-check
+  - alert: mailtest_check_vps
     expr: |-
-      time() - mailtest_check_last_usec > 60 * 42
+      time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 42
     labels:
       severity: prod
     annotations:
-      description: '{{ $labels.instance }} mailtest-check'
-      summary: '{{ $labels.instance }} mailtest-check'
+      summary: '42 minutes down'
+
+  - alert: mailtest_check_mailhost
+    expr: |-
+      time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 12
+    labels:
+      severity: day
+    annotations:
+      summary: '12 minutes down'
+
+  # 42 mins: enough for a 30 min queue run plus 12
+  - alert: mailtest_check_mailhost
+    expr: |-
+      time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 42
+    labels:
+      severity: prod
+    annotations:
+      summary: '42 minutes down'
+
   - alert: 1pmtest
-    expr: hour() == 18 and minute() < 5
+    expr: hour() == 17 and minute() < 5
     for: 0m
     labels:
       severity: daytest
     annotations:
-      summary: Prometheus daily test alert (instance {{ $labels.instance }})
-      description: "Prometheus daily test alert if no other alerts. It
-        is an end to end test.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+      summary: Prometheus daily test alert
+
+
+#### Inhibit notes ####
+## Example of expressions to detect if the target_down alert
+# fired in the last 24 hours. Initially, I thought this could
+# be an alert which inhibits up_resets, but eventually I figured
+# that doesn't make much sense: an alert that is not an indication
+# of something wrong, and exists only to inhibit another alert, is
+# better integrated directly into the alert it would inhibit, which
+# may mean a recording rule. That avoids an alert we have to ignore
+# or filter out.
+#
+# An alternate expression to calculate whether the alert would have fired is:
+#  min_over_time(sum_over_time(up[30m])[1d:]) == 0
+#  where 30m matches the for: time in target_down
+#
+# Note: for graphing, surround the expression in sum_over_time()
+# ALERTS{alertname="target_down",alertstate="firing"}[1d]
+#### end Inhibit notes ####
 
-# https://awesome-prometheus-alerts.grep.to/rules
+# For targets where we alert only on long downtimes, we still want to
+# know if a target is going down many times for short periods over a
+# long span of time. But ignore reboots.
+#
+## Another way would be to detect an overall downtime:
+# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
+  - alert: up_resets
+    expr: |-
+      resets(up[2d]) - changes(node_boot_time_seconds[2d]) > 12
+    labels:
+      severity: warn
+    annotations:
+      summary: "Target has gone down {{ $value }} times in 2 days, > 12"
+
+# https://awesome-prometheus-alerts.grep.to/rules
+
 # todo, we should probably group the prometheus alerts that indicate a
 # host-local problem.
 # eg, set a label alert-group: local-prom, then make a receiver that
@@ -56,27 +186,30 @@ groups:
       severity: day
     annotations:
       summary: Prometheus job missing (instance {{ $labels.instance }})
-      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
 
-  - alert: PrometheusTargetMissing
+# TODO: for some hosts, notably li and MAIL_HOST, we want to alert sooner than 30m,
+# and with severity day. The mail host is tricky since it roams, but I think the
+# right way to do it is to check for absence of this metric:
+# mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
+  - alert: target_down
     expr: up == 0
     for: 30m
     labels:
      severity: warn
    annotations:
-      summary: Prometheus target missing (instance {{ $labels.instance }})
-      description: "A Prometheus target has disappeared.
An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + summary: Target down for 30m - # todo: this should supress the above alert - # - alert: PrometheusAllTargetsMissing - # expr: count by (job) (up) == 0 - # for: 30m - # labels: - # severity: day - # alert-group: local-prom - # annotations: - # summary: Prometheus all targets missing (instance {{ $labels.instance }}) - # description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + + # todo: this should group with the above alert + - alert: PrometheusAllTargetsMissing + expr: count by (job) (up) == 0 + for: 10m + labels: + severity: day +# alert-group: local-prom + annotations: + description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}" - alert: PrometheusConfigurationReloadFailure expr: prometheus_config_last_reload_successful != 1 @@ -84,20 +217,15 @@ groups: labels: severity: day annotations: - summary: Prometheus configuration reload failure (instance {{ $labels.instance }}) - description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - - # I have an out of band alert to make sure prometheus is up. this - # looks like it would generate false positives. todo: think - # through what a valid crash loop detection would look like. - # - alert: PrometheusTooManyRestarts - # expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10 - # for: 0m - # labels: - # severity: warning - # annotations: - # summary: Prometheus too many restarts (instance {{ $labels.instance }}) - # description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus configuration reload error\n VALUE = {{ $value }}" + + - alert: PrometheusTooManyRestarts + expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10 + for: 0m + labels: + severity: warning + annotations: + description: "Prometheus has restarted more than ten times in the last 30 minutes. 
It might be crashlooping.\n VALUE = {{ $value }}" - alert: PrometheusAlertmanagerJobMissing expr: absent(up{job="alertmanager"}) @@ -105,8 +233,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus AlertManager job missing (instance {{ $labels.instance }}) - description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}" - alert: PrometheusAlertmanagerConfigurationReloadFailure expr: alertmanager_config_last_reload_successful != 1 @@ -114,8 +241,7 @@ groups: labels: severity: day annotations: - summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }}) - description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "AlertManager configuration reload error\n VALUE = {{ $value }}" - alert: PrometheusNotConnectedToAlertmanager expr: prometheus_notifications_alertmanagers_discovered < 1 @@ -123,8 +249,7 @@ groups: labels: severity: day annotations: - summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }}) - description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}" - alert: PrometheusRuleEvaluationFailures expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0 @@ -132,8 +257,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus rule evaluation failures (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}" - alert: PrometheusTemplateTextExpansionFailures expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0 @@ -141,8 +265,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus template text expansion failures (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}" - alert: PrometheusRuleEvaluationSlow expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds @@ -150,8 +273,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus rule evaluation slow (instance {{ $labels.instance }}) - description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus rule evaluation took more time than the scheduled interval. 
It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}" - alert: PrometheusNotificationsBacklog expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0 @@ -159,8 +281,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus notifications backlog (instance {{ $labels.instance }}) - description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}" - alert: PrometheusAlertmanagerNotificationFailing expr: rate(alertmanager_notifications_failed_total[1m]) > 0 @@ -168,8 +289,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }}) - description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}" # file_sd doesnt count as service discovery, so 0 is expected. # - alert: PrometheusTargetEmpty @@ -178,8 +298,7 @@ groups: # labels: # severity: day # annotations: - # summary: Prometheus target empty (instance {{ $labels.instance }}) - # description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + # description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}" - alert: PrometheusTargetScrapingSlow expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90 @@ -187,8 +306,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus target scraping slow (instance {{ $labels.instance }}) - description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}" - alert: PrometheusLargeScrape expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10 @@ -196,8 +314,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus large scrape (instance {{ $labels.instance }}) - description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}" - alert: PrometheusTargetScrapeDuplicate expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0 @@ -205,8 +322,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus target scrape duplicate (instance {{ $labels.instance }}) - description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}" - alert: PrometheusTsdbCheckpointCreationFailures expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0 @@ -214,8 +330,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}" - alert: PrometheusTsdbCheckpointDeletionFailures expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) 
> 0 @@ -223,8 +338,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}" - alert: PrometheusTsdbCompactionsFailed expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0 @@ -232,8 +346,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}" - alert: PrometheusTsdbHeadTruncationsFailed expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0 @@ -241,8 +354,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}" - alert: PrometheusTsdbReloadFailures expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0 @@ -250,8 +362,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB reload failures (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}" - alert: PrometheusTsdbWalCorruptions expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0 @@ -259,8 +370,7 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}" - alert: PrometheusTsdbWalTruncationsFailed expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0 @@ -268,5 +378,4 @@ groups: labels: severity: warn annotations: - summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }}) - description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"
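
## Sketch, untested and not part of the commit above: the Inhibit notes
# suggest integrating "target_down fired in the last 24 hours" into
# up_resets via a recording rule rather than an inhibiting alert. Reusing
# the expression from those notes (the rule name here is hypothetical):
#
#  - record: instance:target_down_recently:bool
#    # 1 when some 30m window in the past day had the target fully down
#    # (30m matches the for: of target_down), 0 otherwise
#    expr: min_over_time(sum_over_time(up[30m])[1d:]) == bool 0
#
# up_resets could then skip targets that already fired target_down, eg by
# appending to its expr:
#  unless instance:target_down_recently:bool == 1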