###### END MISC NOTES ######
+
# various queries only look at increases, so invert the up metric so we
# can better query on down.
- record: down
expr: up == bool 0
+ # convenience metric to use in multiple alert expressions
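+ # It returns a sample when one of the down alerts below has been active
+ # in the last 17 minutes, or when prometheus itself was missing scrapes
+ # in the last 19 minutes; the comment above mailtest_check_vps explains
+ # why.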
+ - record: mailtest_lag_inhibit
+ expr: present_over_time(ALERTS{alertname=~"kd_eth0_down|target_down|cmc_wan_down"}[17m]) or on() count_over_time(up{job="prometheus"}[19m]) <= 18
+
+
+ # the node_network_info metric for the wan interface goes away when the
+ # interface is down,
+ #
+ # What this says is: return up == 1 for the instance unless the right
+ # hand metric (with the same instance+job) also exists.
+ #
+ # aka:
+ # ! exists(operstate=up) && up
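+ #
+ # A rough alternative sketch would be
+ # absent(node_network_info{instance="10.2.0.1:9100",device="wan",operstate="up"}),
+ # but that would also fire whenever the exporter itself is down; gating
+ # on up == 1 keeps this alert specific to the wan interface.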
+ - alert: cmc_wan_down
+ expr: |-
+ up{instance="10.2.0.1:9100"} == 1 unless on(instance,job) node_network_info{instance="10.2.0.1:9100",device="wan",operstate="up"}
+ labels:
+ severity: day
+
+ - alert: kd_eth0_down
+ expr: |-
+ node_network_up{instance="kdwg:9101",device="eth0"} != 1
+ labels:
+ severity: day
+
# alerting on missing metrics:
# https://www.robustperception.io/absent-alerting-for-scraped-metrics
labels:
severity: warn
+ # todo: at some point, look into making mailtest-check resilient to the
+ # internet going down, or else inhibit or group this alert with the
+ # internet-down alerts.
- alert: sysd_result_fail
# not sure 30m is really needed, it prevents the alert from flapping
# i guess.
labels:
severity: prod
-# 17 minutes: if we reboot causing 1 send to fail, thats 10 minutes. we
-# test this every 5 minutes, so thats 15 minutes at most.
+# 17 minutes: We try to send every 5 minutes. If a reboot causes 1 send
+# to fail, that's 10 minutes between 2 successful sends. We test this
+# every 5 minutes, so that's at most 15 minutes of lag for 1 failed
+# email, and 1 failed email is expected due to reboots or other tiny
+# issues we don't care about.
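+# (10 + 5 = 15; alerting at 17 leaves 2 minutes of slack.)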
+#
+# cmc_wan_down etc. inhibit other alerts, but mailtest_check needs
+# additional time to recover after an outage. We can only inhibit while
+# an alert is actually firing; inhibition doesn't affect the "for:"
+# condition. So, the alerts that need to be delayed are instead
+# conditioned on a query for those alerts not having fired in the last
+# X minutes. However, there is a special case: if prometheus itself was
+# down, there was no alert. So, I also test for missing samples of the
+# metric prometheus generates about itself. If for some reason that has
+# a problem, I could make it more conservative by checking that we
+# booted recently instead, eg:
+# time() - node_boot_time_seconds{instance="kdwg:9101"} <= 60 * 17
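+#
+# Note that "unless on()" with an empty label list drops every left hand
+# sample whenever the right hand side returns any sample at all, so a
+# single mailtest_lag_inhibit sample suppresses these alert expressions
+# entirely.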
- alert: mailtest_check_vps
expr: |-
- time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17
+ time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17 unless on() mailtest_lag_inhibit
labels:
severity: day
annotations:
summary: '17 minutes down'
- - alert: mailtest_check_unexpected_spamd_vps
- expr: |-
- mailtest_check_unexpected_spamd_results >= 1
- labels:
- severity: day
- annotations:
- summary: 'jr -u mailtest-check -e'
-
- alert: mailtest_check_mailhost
expr: |-
- time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17
+ time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17 unless on() mailtest_lag_inhibit
labels:
severity: day
annotations:
# 20 minutes: allow a bit more time than the others since this is a prod alert.
- alert: mailtest_check_gnu_mailhost
expr: |-
- time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20
+ time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20 unless on() mailtest_lag_inhibit
labels:
severity: prod
annotations:
summary: '20 minutes down'
+ - alert: mailtest_check_unexpected_spamd_vps
+ expr: |-
+ mailtest_check_unexpected_spamd_results >= 1
+ labels:
+ severity: day
+ annotations:
+ summary: 'jr -u mailtest-check -e -n 10000'
- - alert: 1pmtest
- expr: hour() == 17 and minute() < 5
+ - alert: mailtest_check_missing_dnswl
+ expr: |-
+ mailtest_check_missing_dnswl >= 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ summary: 'jr -u mailtest-check -e -n 10000'
+
+ # We expect to be getting metrics; if we come up and notice some were
+ # missing in the recent past, and it wasn't from a reboot, and we
+ # haven't fired any other alerts, fire an alert. In testing, the count
+ # is 19 for 19 minutes, but I make it 18 just to give a bit of slack.
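+ # The first "unless" drops this when any alert was already present in
+ # the last 19 minutes; the second drops it when kd booted within the
+ # last 17 minutes.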
+ - alert: historical_missing_metric
+ expr: |-
+ count_over_time(up{job="prometheus"}[19m]) <= 18 unless on() present_over_time(ALERTS[19m]) unless on() time() - node_boot_time_seconds{instance="kdwg:9101"} <= 60 * 17
+ labels:
+ severity: warn
+
+ # 10 am Friday, but start 1 minute early so the alert is closer to
+ # actually firing at 10 am.
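+ # (hour(), minute(), and day_of_week() use UTC, hence the 13/14 below;
+ # day_of_week() == 5 is Friday.)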
+ - alert: dead_man_test
+ expr: |-
+ ( hour() == 13 and minute() >= 59 or hour() == 14 and minute() < 3 ) and day_of_week() == 5
for: 0m
labels:
severity: daytest
annotations:
- summary: Prometheus daily test alert
+ summary: Prometheus weekly test alert
#### Inhibit notes ####
#
## Another way would be to detect an overall downtime:
# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
- - alert: up_resets
- expr: |-
- resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
- labels:
- severity: warn
- annotations:
- summary: "Target has gone down {{ $value }} times in 1 day, > 12"
+
+# However, this seems to produce too many false positives for now, so it
+# is commented out.
+
+ # - alert: up_resets
+ # expr: |-
+ # resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
+ # labels:
+ # severity: warn
+ # annotations:
+ # summary: "Target has gone down {{ $value }} times in 1 day, > 12"
summary: Prometheus job missing (instance {{ $labels.instance }})
description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
-# TODO: some hosts, notably li and MAIL_HOST, we want to alert sooner than 30m,
-# and severity to day. mail host is tricky since it roams, but I think the
-# right way to do it is to check for absence of this metric:
-# mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
- - alert: target_down
- expr: up == 0
+ - alert: lowpri_target_down
+ expr: up{instance!~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100|kwwg:9101"} == 0
for: 30m
labels:
severity: warn
annotations:
summary: Target down for 30m
+ # note: PrometheusAllTargetsMissing is intentionally omitted because it
+ # is redundant with the above alerts.
- # todo: this should group with the above alert
- - alert: PrometheusAllTargetsMissing
- expr: count by (job) (up) == 0
- for: 10m
+ - alert: target_down
+ expr: up{instance=~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100"} == 0
+ for: 5m
+ labels:
+ severity: day
+ annotations:
+ summary: High priority target down for 5m
+
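+ # MAIL_HOST roams between machines, so rather than naming an instance,
+ # alert on absence of the mailtest metric it reports.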
+ - alert: target_down
+ expr: absent(present_over_time(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}[5m]))
+ for: 5m
labels:
severity: day
-# alert-group: local-prom
annotations:
- description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}"
+ summary: MAIL_HOST likely down for 5m
- alert: PrometheusConfigurationReloadFailure
expr: prometheus_config_last_reload_successful != 1