- name: standard
rules:
-## uncomment for testing an alert firing
+# ## uncomment for testing an alert firing
# - alert: test-alert4
# expr: vector(1)
-# # expr: nonexistent_metric
# for: 0m
# labels:
# severity: day
###### END MISC NOTES ######
-
+# various queries only look at increases, so invert the up metric so we
+# can better query on down.
+ - record: down
+ expr: up == bool 0
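+# example ad-hoc queries this enables (illustrative, not rules in this
+# file): sum_over_time(down[1d]) > 0 shows targets that were down at
+# some point in the last day; changes(down[1d]) counts flaps.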
# alerting on missing metrics:
# there is also the absent() function, but I didn't see a way to make that work
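+# for reference, a sketch of the absent() form:
+#   absent(node_systemd_unit_state{name="systemstatus.service",state="active"})
+# it only fires when no matching series exist at all, so it can't flag a
+# single instance that lost the unit; the count() ... unless count()
+# form below alerts per-instance.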
- alert: mysers_units_missing
expr: |-
- count(up{job="node"}) by (instance) * 3 unless count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+ count(up{job="node"} == 1) by (instance) * 3 unless
+ count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+ for: 20m
+ labels:
+ severity: warn
+
+ - alert: epanicclean_not_active
+ expr: |-
+ node_systemd_unit_state{name="epanicclean.service",state="active"} != 1
+ for: 20m
+ labels:
+ severity: warn
+
+ - alert: epanicclean_missing
+ expr: |-
+ count(up{job=~"node|tlsnode"} == 1) by (instance) unless
+ count(node_systemd_unit_state{job=~"node|tlsnode",name="epanicclean.service",state="active"}) by (instance)
for: 20m
labels:
severity: warn
- alert: sysd_result_fail
+    # the 30m window may be longer than strictly needed, but it keeps the
+    # alert from flapping.
expr: |-
rate(node_systemd_unit_result_fail_count[30m]) > 0
labels:
severity: day
+ - alert: exim_paniclog
+ expr: |-
+ exim_paniclog > 0
+ labels:
+ severity: warn
- - alert: mailtest_check
+ - alert: check_crypttab
expr: |-
- time() - mailtest_check_last_usec > 60 * 12
+ check_crypttab > 0
+ labels:
+ severity: prod
+
+# 17 minutes: if a reboot causes 1 send to fail, that's 10 minutes. we
+# test this every 5 minutes, so that's 15 minutes at most; 17 adds margin.
+ - alert: mailtest_check_vps
+ expr: |-
+ time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17
labels:
severity: day
annotations:
- summary: '12 minutes down'
+ summary: '17 minutes down'
- # 42 mins: enough for a 30 min queue run plus 12
- - alert: mailtest_check
+ - alert: mailtest_check_unexpected_spamd_vps
expr: |-
- time() - mailtest_check_last_usec > 60 * 42
+ mailtest_check_unexpected_spamd_results >= 1
+ labels:
+ severity: day
+ annotations:
+ summary: 'jr -u mailtest-check -e'
+
+ - alert: mailtest_check_mailhost
+ expr: |-
+ time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17
+ labels:
+ severity: day
+ annotations:
+ summary: '17 minutes down'
+
+  # 20 minutes: allow extra time since this alert pages at prod severity.
+ - alert: mailtest_check_gnu_mailhost
+ expr: |-
+ time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20
labels:
severity: prod
annotations:
- summary: '43 minutes down'
+ summary: '20 minutes down'
+
- alert: 1pmtest
expr: hour() == 17 and minute() < 5
annotations:
  summary: Prometheus daily test alert
-
-# alternate expression, to calculate if the alert would have fired is:
+#### Inhibit notes ####
+## Example expressions to detect whether the target_down alert
+# fired in the last 24 hours. Initially I thought this could
+# be an alert which inhibits up_resets, but an alert that isn't
+# itself an indication of something wrong, and exists only to
+# inhibit another alert, doesn't make much sense. It works better
+# to integrate the condition directly into the alert it would
+# inhibit, perhaps via a recording rule. That avoids an alert we
+# have to ignore or filter out.
+#
+# An alternate expression, to calculate whether the alert would have fired:
# min_over_time(sum_over_time(up[30m])[1d:]) == 0
# where 30m matches the for: time in target_down
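+# (sum_over_time(up[30m]) is 0 only if the target was down for the whole
+# 30m window; min_over_time over the [1d:] subquery then checks whether
+# any such window occurred in the past day.)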
#
-# sum_over_time is not needed, just convenience for graphing
- - alert: target_down_inhibitor
- expr: |-
- sum_over_time(ALERTS{alertname="target_down"}[1d])
- labels:
- severity: ignore
- annotations:
- summary: alert that indicates target_down alert fired in the last day
- description: "VALUE = {{ $value }}"
+# Note: for graphing, surround the expression in sum_over_time():
+# ALERTS{alertname="target_down",alertstate="firing"}[1d]
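+# e.g. sum_over_time(ALERTS{alertname="target_down",alertstate="firing"}[1d])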
+#### end Inhibit notes ####
+
-# For targets where we alert except for longer downtimes, we
+# For targets where we alert only on long downtimes, we
# still want to know if it is going down many times for short times over
# a long period of time. But ignore reboots.
#
# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
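+# note: resets() is meant for counters, but each 1 -> 0 drop of the up
+# gauge counts as a reset, approximating "times the target went down";
+# subtracting changes(node_boot_time_seconds) discounts reboots.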
- alert: up_resets
expr: |-
- resets(up[3d]) - changes(node_boot_time_seconds[3d]) > 15
+ resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
labels:
severity: warn
annotations:
- summary: "Target has gone down {{ $value }} times in 3 days, > 15"
-
+ summary: "Target has gone down {{ $value }} times in 1 day, > 12"
# https://awesome-prometheus-alerts.grep.to/rules
-
# TODO: we should probably group the prometheus alerts that indicate a
# host-local problem.
# e.g., set a label alert-group: local-prom, then make a receiver that