X-Git-Url: https://iankelling.org/git/?a=blobdiff_plain;f=filesystem%2Fetc%2Fprometheus%2Frules%2Fiank.yml;h=651eb00de164a8a41b84fe53d6429a5821f8f3d9;hb=95eb9558206f8287febab80dd3f51d168a3ca831;hp=4439c41d41cb61c6a59a4bbb5b967fbf0aeebbb3;hpb=d7551546ac323c5d4b49370c885646bcf96e959f;p=distro-setup

diff --git a/filesystem/etc/prometheus/rules/iank.yml b/filesystem/etc/prometheus/rules/iank.yml
index 4439c41..651eb00 100644
--- a/filesystem/etc/prometheus/rules/iank.yml
+++ b/filesystem/etc/prometheus/rules/iank.yml
@@ -8,39 +8,251 @@ groups:
 - name: standard
   rules:
-  - alert: mailtest-check
+
+# ## uncomment for testing an alert firing
+#  - alert: test-alert4
+#    expr: vector(1)
+#    for: 0m
+#    labels:
+#      severity: day
+#    annotations:
+#      description: "always-firing alert VALUE = {{ $value }}"
+
+
+
+###### BEGIN MISC NOTES ######
+
+#
+# other interesting exporters
+# https://github.com/prometheus-community/node-exporter-textfile-collector-scripts
+#
+
+# interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/
+
+# interesting promql query that could be useful later.
+# changes(ALERTS_FOR_STATE[24h])
+#
+#
+#
+# alert flap strategy.
+# https://roidelapluie.be/blog/2019/02/21/prometheus-last/
+#
+# Another general idea is to make an alert that fires for 24 hours and
+# inhibits another alert for the same thing, for cases where we want at
+# most 1 alert per 24 hours.
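+#
+# A rough, untested sketch of that idea (the alert name "myalert" here
+# is made up, not something defined in this file):
+#
+#  - alert: myalert_snooze
+#    # starts firing a few minutes after myalert starts firing, and
+#    # keeps firing until roughly 24 hours after myalert last fired
+#    expr: count_over_time(ALERTS{alertname="myalert",alertstate="firing"}[24h] offset 5m) > 0
+#
+# An alertmanager inhibit_rules entry with myalert_snooze as the source
+# and myalert as the target would then suppress repeat notifications
+# within that 24 hour window.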
+
+###### END MISC NOTES ######
+
+
+# various queries only look at increases, so invert the up metric so we
+# can better query on down.
+  - record: down
+    expr: up == bool 0
+
+  # convenience metric to use in multiple alert expressions
+  - record: mailtest_lag_inhibit
+    expr: present_over_time(ALERTS{alertname=~"kd_eth0_down|target_down|cmc_wan_down"}[17m]) or on() count_over_time(up{job="prometheus"}[19m]) <= 18
+
+
+  # the node_network_info here goes away when it is down,
+  # https://www.robustperception.io/absent-alerting-for-scraped-metrics
+  #
+  # What this says is: return the up == 1 metric if there isn't also
+  # the right hand metric (with the same instance+job).
+  #
+  # aka:
+  # ! exists(operstate=up) && up
+  - alert: cmc_wan_down
+    expr: |-
+      up{instance="10.2.0.1:9100"} == 1 unless on(instance,job) node_network_info{instance="10.2.0.1:9100",device="wan",operstate="up"}
+    labels:
+      severity: day
+
+  - alert: kd_eth0_down
     expr: |-
-      time() - mailtest_check_last_usec > 60 * 12
+      node_network_up{instance="kdwg:9101",device="eth0"} != 1
+    labels:
+      severity: day
+
+
+# alerting on missing metrics:
+# https://www.robustperception.io/absent-alerting-for-scraped-metrics
+# that doesn't work if we want to alert across multiple hosts, eg
+# up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
+# however, Google led me to a solution here
+# https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
+# there is also the absent() function, but I didn't see a way to make that work
+  - alert: mysers_units_missing
+    expr: |-
+      count(up{job="node"} == 1) by (instance) * 3 unless
+      count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: epanicclean_not_active
+    expr: |-
+      node_systemd_unit_state{name="epanicclean.service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: epanicclean_missing
+    expr: |-
+      count(up{job=~"node|tlsnode"} == 1) by (instance) unless
+      count(node_systemd_unit_state{job=~"node|tlsnode",name="epanicclean.service",state="active"}) by (instance)
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: mysers_not_active
+    expr: |-
+      node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  # todo: at some point, look into making mailtest-check resilient to
+  # the internet going down, or else inhibit or group this alert with
+  # the one for the internet going down.
+  - alert: sysd_result_fail
+    # not sure 30m is really needed; it prevents the alert from
+    # flapping, I guess.
+    expr: |-
+      rate(node_systemd_unit_result_fail_count[30m]) > 0
+    labels:
+      severity: day
+
+  - alert: exim_paniclog
+    expr: |-
+      exim_paniclog > 0
+    labels:
+      severity: day
+
+  - alert: check_crypttab
+    expr: |-
+      check_crypttab > 0
+    labels:
+      severity: prod
+
+# 17 minutes: We try to send every 5 minutes. If a reboot causes 1
+# send to fail, that's 10 minutes between 2 sends. We test this every 5
+# minutes, so that's 15 minutes of time we can expect for 1 failed
+# email, and 1 failed email is expected due to reboots or other tiny
+# issues we don't care about.
+#
+# cmc_wan_down etc. inhibit other alerts, but mailtest_check needs
+# additional time to recover after an outage. We can only inhibit while
+# an alert is actually firing; it doesn't affect the "for:"
+# condition. So, the alerts that need to be delayed are instead
+# conditioned on a query for that alert having not been firing in the
+# last X minutes. However, there is a special case when prometheus
+# itself was down, and so there was no alert. So, I also test for
+# missing samples of the metric that prometheus generates about itself.
+# If for some reason that has a problem, I could make it more
+# conservative by checking that we booted recently instead, eg:
+# time() - node_boot_time_seconds{instance="kdwg:9101"} <= 60 * 17
+  - alert: mailtest_check_vps
+    expr: |-
+      time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17 unless on() mailtest_lag_inhibit
+    labels:
+      severity: day
+    annotations:
+      summary: '17 minutes down'
+
+  - alert: mailtest_check_mailhost
+    expr: |-
+      time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17 unless on() mailtest_lag_inhibit
     labels:
       severity: day
     annotations:
-      description: '{{ $labels.instance }} mailtest-check'
-      summary: '{{ $labels.instance }} mailtest-check'
+      summary: '17 minutes down'
 
-  # 42 mins: enough for a 30 min queue run plus 12
-  - alert: mailtest-check
+  # 20 minutes: just allow more time since this one is a prod alert.
+  - alert: mailtest_check_gnu_mailhost
     expr: |-
-      time() - mailtest_check_last_usec > 60 * 42
+      time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20 unless on() mailtest_lag_inhibit
     labels:
       severity: prod
     annotations:
-      description: '{{ $labels.instance }} mailtest-check'
-      summary: '{{ $labels.instance }} mailtest-check'
+      summary: '20 minutes down'
+
+  - alert: mailtest_check_unexpected_spamd_vps
+    expr: |-
+      mailtest_check_unexpected_spamd_results >= 1
+    labels:
+      severity: day
+    annotations:
+      summary: 'jr -u mailtest-check -e'
+
+  - alert: mailtest_check_missing_dnswl
+    expr: |-
+      mailtest_check_missing_dnswl >= 1
+    for: 30m
+    labels:
+      severity: day
+    annotations:
+      summary: 'jr -u mailtest-check -e'
+
+  # We expect to be getting metrics. If we come up and notice some are
+  # missing in the past, and it wasn't from a reboot, and we haven't
+  # fired any other alerts, make an alert. In testing, the count is 19
+  # for 19 minutes, but I make it 18 just to give a bit of slack.
+  - alert: historical_missing_metric
+    expr: |-
+      count_over_time(up{job="prometheus"}[19m]) <= 18 unless on() present_over_time(ALERTS[19m]) unless on() time() - node_boot_time_seconds{instance="kdwg:9101"} <= 60 * 17
+    labels:
+      severity: warn
 
   - alert: 1pmtest
-    expr: hour() == 18 and minute() < 5
+    expr: hour() == 17 and minute() < 5
     for: 0m
     labels:
       severity: daytest
     annotations:
-      summary: Prometheus daily test alert (instance {{ $labels.instance }})
-      description: "Prometheus daily test alert if no other alerts. It
-      is an end to end test.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      summary: Prometheus daily test alert
+
+
+#### Inhibit notes ####
+## Example of expressions to detect if the target_down alert
+# fired in the last 24 hours. Initially, I thought this could
+# be an alert which inhibits up_resets, but eventually I figured
+# that doesn't make much sense: an alert that is not an indication of
+# something wrong, and exists only to inhibit another alert, works
+# better integrated directly into the alert it would inhibit, which
+# may mean a recording rule. That avoids an alert we have to ignore
+# or filter out.
+#
+# Alternate expression, to calculate if the alert would have fired is:
+#   min_over_time(sum_over_time(up[30m])[1d:]) == 0
+#   where 30m matches the for: time in target_down
+#
+# Note: for graphing, surround the expression in sum_over_time()
+# ALERTS{alertname="target_down",alertstate="firing"}[1d]
+#### end Inhibit notes ####
 
-# https://awesome-prometheus-alerts.grep.to/rules
+# For targets where we alert only on long downtimes, we still want to
+# know if a target is going down many times for short periods over a
+# long span of time. But ignore reboots.
+#
+## Another way would be to detect an overall downtime:
+# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
+
+# However, this seems to just find too many false positives for now, so
+# it is commented out.
+
+  # - alert: up_resets
+  #   expr: |-
+  #     resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
+  #   labels:
+  #     severity: warn
+  #   annotations:
+  #     summary: "Target has gone down {{ $value }} times in 1 day, > 12"
+
+# https://awesome-prometheus-alerts.grep.to/rules
+
 # todo, we should probably group the prometheus alerts that indicate a
 # host-local problem.
 # eg, set a label alert-group: local-prom, then make a receiver that
@@ -56,27 +268,35 @@ groups:
       severity: day
     annotations:
       summary: Prometheus job missing (instance {{ $labels.instance }})
-      description: "A Prometheus job has disappeared\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "A Prometheus job has disappeared\n  VALUE = {{ $value }}"
 
-  - alert: PrometheusTargetMissing
-    expr: up == 0
+  - alert: lowpri_target_down
+    expr: up{instance!~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100"} == 0
     for: 30m
     labels:
       severity: warn
     annotations:
-      summary: Prometheus target missing (instance {{ $labels.instance }})
-      description: "A Prometheus target has disappeared. An exporter might be crashed.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      summary: Target down for 30m
 
-  # todo: this should supress the above alert
-  # - alert: PrometheusAllTargetsMissing
-  #   expr: count by (job) (up) == 0
-  #   for: 30m
-  #   labels:
-  #     severity: day
-  #     alert-group: local-prom
-  #   annotations:
-  #     summary: Prometheus all targets missing (instance {{ $labels.instance }})
-  #     description: "A Prometheus job does not have living target anymore.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+  - alert: target_down
+    expr: up{instance=~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100"} == 0
+    for: 5m
+    labels:
+      severity: day
+    annotations:
+      summary: High priority target down for 5m
+
+  - alert: target_down
+    expr: absent(present_over_time(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}[5m]))
+    for: 5m
+    labels:
+      severity: day
+    annotations:
+      summary: MAIL_HOST likely down for 5m
+
+
+# note, the next upstream alert rule is intentionally omitted:
+# https://github.com/samber/awesome-prometheus-alerts/issues/283
 
   - alert: PrometheusConfigurationReloadFailure
     expr: prometheus_config_last_reload_successful != 1
@@ -84,20 +304,15 @@
     labels:
       severity: day
     annotations:
-      summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
-      description: "Prometheus configuration reload error\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
-
-  # I have an out of band alert to make sure prometheus is up. this
-  # looks like it would generate false positives. todo: think
-  # through what a valid crash loop detection would look like.
-  # - alert: PrometheusTooManyRestarts
-  #   expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10
-  #   for: 0m
-  #   labels:
-  #     severity: warning
-  #   annotations:
-  #     summary: Prometheus too many restarts (instance {{ $labels.instance }})
-  #     description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus configuration reload error\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTooManyRestarts
+    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n  VALUE = {{ $value }}"
 
   - alert: PrometheusAlertmanagerJobMissing
     expr: absent(up{job="alertmanager"})
@@ -105,8 +320,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
-      description: "A Prometheus AlertManager job has disappeared\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "A Prometheus AlertManager job has disappeared\n  VALUE = {{ $value }}"
 
   - alert: PrometheusAlertmanagerConfigurationReloadFailure
     expr: alertmanager_config_last_reload_successful != 1
@@ -114,8 +328,7 @@
     labels:
      severity: day
     annotations:
-      summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
-      description: "AlertManager configuration reload error\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "AlertManager configuration reload error\n  VALUE = {{ $value }}"
 
   - alert: PrometheusNotConnectedToAlertmanager
     expr: prometheus_notifications_alertmanagers_discovered < 1
@@ -123,8 +336,7 @@
     labels:
       severity: day
     annotations:
-      summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
-      description: "Prometheus cannot connect the alertmanager\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus cannot connect the alertmanager\n  VALUE = {{ $value }}"
 
   - alert: PrometheusRuleEvaluationFailures
     expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
@@ -132,8 +344,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTemplateTextExpansionFailures
     expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
@@ -141,8 +352,7 @@
     labels:
      severity: warn
     annotations:
-      summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} template text expansion failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} template text expansion failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusRuleEvaluationSlow
     expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
@@ -150,8 +360,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
-      description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n  VALUE = {{ $value }}"
 
   - alert: PrometheusNotificationsBacklog
     expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
@@ -159,8 +368,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus notifications backlog (instance {{ $labels.instance }})
-      description: "The Prometheus notification queue has not been empty for 10 minutes\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "The Prometheus notification queue has not been empty for 10 minutes\n  VALUE = {{ $value }}"
 
   - alert: PrometheusAlertmanagerNotificationFailing
     expr: rate(alertmanager_notifications_failed_total[1m]) > 0
@@ -168,8 +376,7 @@
     labels:
      severity: warn
    annotations:
-      summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
-      description: "Alertmanager is failing sending notifications\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Alertmanager is failing sending notifications\n  VALUE = {{ $value }}"
 
   # file_sd doesnt count as service discovery, so 0 is expected.
   # - alert: PrometheusTargetEmpty
   #   expr: prometheus_sd_discovered_targets == 0
   #   for: 30m
   #   labels:
   #     severity: day
   #   annotations:
-  #     summary: Prometheus target empty (instance {{ $labels.instance }})
-  #     description: "Prometheus has no target in service discovery\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+  #     description: "Prometheus has no target in service discovery\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTargetScrapingSlow
     expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
@@ -187,8 +393,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus target scraping slow (instance {{ $labels.instance }})
-      description: "Prometheus is scraping exporters slowly\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus is scraping exporters slowly\n  VALUE = {{ $value }}"
 
   - alert: PrometheusLargeScrape
     expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
@@ -196,8 +401,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus large scrape (instance {{ $labels.instance }})
-      description: "Prometheus has many scrapes that exceed the sample limit\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus has many scrapes that exceed the sample limit\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTargetScrapeDuplicate
     expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
@@ -205,8 +409,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
-      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbCheckpointCreationFailures
     expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
@@ -214,8 +417,7 @@
     labels:
      severity: warn
    annotations:
-      summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbCheckpointDeletionFailures
     expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
@@ -223,8 +425,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbCompactionsFailed
     expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
@@ -232,8 +433,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbHeadTruncationsFailed
     expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
@@ -241,8 +441,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbReloadFailures
     expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
@@ -250,8 +449,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} TSDB reload failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} TSDB reload failures\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbWalCorruptions
     expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
@@ -259,8 +457,7 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n  VALUE = {{ $value }}"
 
   - alert: PrometheusTsdbWalTruncationsFailed
     expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
@@ -268,5 +465,4 @@
     labels:
       severity: warn
     annotations:
-      summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
-      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n  VALUE = {{ $value }}"
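
A note on the inhibition mentioned in the comments above: the alertmanager
configuration that actually does the inhibiting is not part of this file or
this diff. Purely as an illustration (the matchers and the choice of which
alerts inhibit which are assumptions, not taken from this repo), an
inhibit_rules entry pairing a network-down alert with the mail checks could
look something like:

    inhibit_rules:
      # while the cable modem WAN link is known to be down, skip
      # notifications for the mail checks that depend on it
      - source_matchers:
          - alertname = cmc_wan_down
        target_matchers:
          - alertname =~ "mailtest_check_.*"

This assumes a reasonably recent alertmanager (the source_matchers /
target_matchers syntax); older releases use the source_match / target_match
map form instead.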