fixes and qd for better source subvol error repo
[distro-setup] /filesystem/etc/prometheus/rules/iank.yml
# other rules to consider:
# filesystem, network, ntp rules:
# https://github.com/cloudalchemy/ansible-prometheus defaults/main.yml
# on my system, the interpolated values are in /a/opt/ansible-prometheus/rules.yml
#


groups:
- name: standard
  rules:

  # ## uncomment for testing an alert firing
  # - alert: test-alert4
  #   expr: vector(1)
  #   for: 0m
  #   labels:
  #     severity: day
  #   annotations:
  #     description: "always-firing alert VALUE = {{ $value }}"


  ###### BEGIN MISC NOTES ######

  #
  # other interesting exporters
  # https://github.com/prometheus-community/node-exporter-textfile-collector-scripts
  #

  # interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/

  # interesting promql query that could be useful later.
  # changes(ALERTS_FOR_STATE[24h])
  #
  #
  #
  # alert flap strategy.
  # https://roidelapluie.be/blog/2019/02/21/prometheus-last/
  #
  # Another idea generally is to make an alert that fires for 24 hours and
  # inhibits another alert for the same thing, for which we want at most
  # 1 alert per 24 hours.
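  # A rough sketch of that idea (hypothetical, not enabled; "foo" stands in
  # for a real alert name). A helper alert that stays active for 24h after
  # foo fires:
  # - alert: foo_fired_recently
  #   expr: max_over_time(ALERTS{alertname="foo",alertstate="firing"}[24h]) == 1
  # Then an inhibit_rules entry in alertmanager.yml, with foo_fired_recently
  # as the source matcher and foo as the target matcher (equal on instance),
  # would keep notifications to roughly 1 per 24 hours.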

  ###### END MISC NOTES ######


  # various queries only look at increases, so invert the up metric so we
  # can better query on down.
  - record: down
    expr: up == bool 0
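  # hypothetical example query using it (not a rule here):
  # sum_over_time(down{job="node"}[1d]) > 0
  # i.e. how many scrapes in the last day found the target down.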

  # convenience metric to use in multiple alert expressions
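  # In plain terms: this is nonempty (and so, via "unless on()", holds back
  # the mailtest_check_* alerts below) when any of the listed connectivity
  # alerts was pending or firing in the last 17m, or when prometheus itself
  # is missing scrapes in the last 19m.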
  - record: mailtest_lag_inhibit
    expr: present_over_time(ALERTS{alertname=~"kd_eth0_down|target_down|cmc_wan_down"}[17m]) or on() count_over_time(up{job="prometheus"}[19m]) <= 18


  # the node_network_info metric here goes away when the interface is down:
  # https://www.robustperception.io/absent-alerting-for-scraped-metrics
  #
  # What this says is: return the up == 1 metric unless there is also
  # the right-hand metric (with the same instance+job).
  #
  # aka:
  # ! exists(operstate=up) && up
  - alert: cmc_wan_down
    expr: |-
      up{instance="10.2.0.1:9100"} == 1 unless on(instance,job) node_network_info{instance="10.2.0.1:9100",device="wan",operstate="up"}
    labels:
      severity: day

  - alert: kd_eth0_down
    expr: |-
      node_network_up{instance="kdwg:9101",device="eth0"} != 1
    labels:
      severity: day


  # alerting on missing metrics:
  # https://www.robustperception.io/absent-alerting-for-scraped-metrics
  # that doesn't work if we want to alert across multiple hosts, eg
  # up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
  # however, google led me to a solution here:
  # https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
  # there is also the absent() function, but i didn't see a way to make that work
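  # (absent() only says whether a series exists at all; it returns a single
  #  element without per-host labels unless the selector pins an instance,
  #  so it can't point at which host lost the metric.)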
  - alert: mysers_units_missing
    expr: |-
      count(up{job="node"} == 1) by (instance) * 3 unless
      count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
    for: 20m
    labels:
      severity: warn

  - alert: epanicclean_not_active
    expr: |-
      node_systemd_unit_state{name="epanicclean.service",state="active"} != 1
    for: 20m
    labels:
      severity: warn

  - alert: epanicclean_missing
    expr: |-
      count(up{job=~"node|tlsnode"} == 1) by (instance) unless
      count(node_systemd_unit_state{job=~"node|tlsnode",name="epanicclean.service",state="active"}) by (instance)
    for: 20m
    labels:
      severity: warn

  - alert: mysers_not_active
    expr: |-
      node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
    for: 20m
    labels:
      severity: warn

  # todo: at some point, look into making mailtest-check either be resilient
  # to the internet going down, or else inhibit or group this alert with an
  # internet-down alert.
  - alert: sysd_result_fail
    # not sure 30m is really needed; I guess it prevents the alert from
    # flapping.
    expr: |-
      rate(node_systemd_unit_result_fail_count[30m]) > 0
    labels:
      severity: day

  - alert: exim_paniclog
    expr: |-
      exim_paniclog > 0
    labels:
      severity: day

  - alert: check_crypttab
    expr: |-
      check_crypttab > 0
    labels:
      severity: prod

  # 17 minutes: we try to send every 5 minutes. If a reboot causes 1
  # send to fail, that's 10 minutes between 2 sends. We test this every 5
  # minutes, so that's 15 minutes of time we can expect for 1 failed email,
  # and 1 failed email is expected due to reboots or other tiny issues we
  # don't care about.
  #
  # cmc_wan_down etc. inhibit other alerts, but mailtest_check needs
  # additional time to recover after an outage. We can only inhibit while
  # an alert is actually firing; it doesn't affect the "for:"
  # condition. So, we make the alerts that need to be delayed
  # conditional on a query for that alert not having fired in the
  # last X minutes. However, there is a special case when prometheus
  # itself was down, and so there was no alert. So, I test for missing
  # samples of the metric that prometheus generates about itself. If for some
  # reason that has a problem, I could make it more conservative by
  # checking that we booted recently instead, eg:
  # time() - node_boot_time_seconds{instance="kdwg:9101"} <= 60 * 17
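  # (That query is the mailtest_lag_inhibit recording rule above, applied
  # below via "unless on() mailtest_lag_inhibit".)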
  - alert: mailtest_check_vps
    expr: |-
      time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17 unless on() mailtest_lag_inhibit
    labels:
      severity: day
    annotations:
      summary: '17 minutes down'

  - alert: mailtest_check_mailhost
    expr: |-
      time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17 unless on() mailtest_lag_inhibit
    labels:
      severity: day
    annotations:
      summary: '17 minutes down'

  # 20 minutes. just allow for more due to prod alert.
  - alert: mailtest_check_gnu_mailhost
    expr: |-
      time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20 unless on() mailtest_lag_inhibit
    labels:
      severity: prod
    annotations:
      summary: '20 minutes down'

  - alert: mailtest_check_unexpected_spamd_vps
    expr: |-
      mailtest_check_unexpected_spamd_results >= 1
    labels:
      severity: day
    annotations:
      summary: 'jr -u mailtest-check -e -n 10000'

  - alert: mailtest_check_missing_dnswl
    expr: |-
      mailtest_check_missing_dnswl >= 1
    for: 30m
    labels:
      severity: day
    annotations:
      summary: 'jr -u mailtest-check -e -n 10000'

  # We expect to be getting metrics; if we come up and notice some are
  # missing in the past, and it wasn't from a reboot, and we haven't
  # fired any other alerts, make an alert. In testing, the count is
  # 19 for 19 minutes, but I make it 18 just to give a bit of slack.
  - alert: historical_missing_metric
    expr: |-
      count_over_time(up{job="prometheus"}[19m]) <= 18 unless on() present_over_time(ALERTS[19m]) unless on() time() - node_boot_time_seconds{instance="kd"} <= 60 * 17
    labels:
      severity: warn

  # 10 am friday. but do it 1 minute early so it is closer to actually
  # firing at 10 am.
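  # (hour() and day_of_week() use UTC; 13:59-14:02 UTC on day_of_week 5,
  # Friday, is 9:59-10:02 am US Eastern during DST, assuming that's the
  # intended local time.)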
  - alert: dead_man_test
    expr: |-
      ( hour() == 13 and minute() >= 59 or hour() == 14 and minute() < 3 ) and day_of_week() == 5
    for: 0m
    labels:
      severity: daytest
    annotations:
      summary: Prometheus weekly test alert


  #### Inhibit notes ####
  ## Example of expressions to detect if the target_down alert
  # fired in the last 24 hours. Initially, I thought this could
  # be an alert which inhibits up_resets, but eventually I figured
  # that doesn't make much sense; an alert that is not an indication
  # of something wrong and only exists to inhibit another alert works
  # better integrated directly into the alert it would inhibit, which
  # may mean a recording rule. That avoids an alert we have to ignore
  # or filter out.
  #
  # An alternate expression, to calculate whether the alert would have fired, is:
  # min_over_time(sum_over_time(up[30m])[1d:]) == 0
  # where 30m matches the for: time in target_down
  #
  # Note: for graphing, surround the expression below in sum_over_time():
  # ALERTS{alertname="target_down",alertstate="firing"}[1d]
  #### end Inhibit notes ####


  # For targets where we alert only on long downtimes, we still want to
  # know if they are going down many times for short periods over a long
  # span of time. But ignore reboots.
  #
  ## Another way would be to detect an overall downtime:
  # avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95

  # However, this seems to just find too many false positives for now, so
  # it is commented out.

  # - alert: up_resets
  #   expr: |-
  #     resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
  #   labels:
  #     severity: warn
  #   annotations:
  #     summary: "Target has gone down {{ $value }} times in 1 day, > 12"



  # https://awesome-prometheus-alerts.grep.to/rules

  # todo, we should probably group the prometheus alerts that indicate a
  # host-local problem.
  # eg, set a label alert-group: local-prom, then make a receiver that
  # groups by it when the alert-group is local-prom.
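  # A rough sketch of what that could look like in alertmanager.yml
  # (hypothetical, untested; note label names can't contain "-", so it
  # would need to be something like alert_group, and a local-prom
  # receiver would have to be defined):
  #   route:
  #     routes:
  #       - matchers: [ 'alert_group = "local-prom"' ]
  #         group_by: [alert_group]
  #         receiver: local-prom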

- name: awesome prometheus alerts
  rules:

  - alert: PrometheusJobMissing
    expr: absent(up{job="prometheus"})
    for: 30m
    labels:
      severity: day
    annotations:
      summary: Prometheus job missing (instance {{ $labels.instance }})
      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"

  - alert: lowpri_target_down
    expr: up{instance!~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100|kwwg:9101"} == 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Target down for 30m

  # note: PrometheusAllTargetsMissing is intentionally omitted because it
  # is redundant with the above.

  - alert: target_down
    expr: up{instance=~"kdwg:9101|bkex.b8.nz:9101|liex.b8.nz:9101|10.2.0.1:9100"} == 0
    for: 5m
    labels:
      severity: day
    annotations:
      summary: High priority target down for 5m

  - alert: target_down
    expr: absent(present_over_time(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}[5m]))
    for: 5m
    labels:
      severity: day
    annotations:
      summary: MAIL_HOST likely down for 5m

  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 30m
    labels:
      severity: day
    annotations:
      description: "Prometheus configuration reload error\n VALUE = {{ $value }}"

  - alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
    for: 0m
    labels:
      severity: warn
    annotations:
      description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n VALUE = {{ $value }}"

  - alert: PrometheusAlertmanagerJobMissing
    expr: absent(up{job="alertmanager"})
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"

  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 30m
    labels:
      severity: day
    annotations:
      description: "AlertManager configuration reload error\n VALUE = {{ $value }}"

  - alert: PrometheusNotConnectedToAlertmanager
    expr: prometheus_notifications_alertmanagers_discovered < 1
    for: 30m
    labels:
      severity: day
    annotations:
      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}"

  - alert: PrometheusRuleEvaluationFailures
    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"

  - alert: PrometheusTemplateTextExpansionFailures
    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"

  - alert: PrometheusRuleEvaluationSlow
    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
    for: 5m
    labels:
      severity: warn
    annotations:
      description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slower storage backend access or a query that is too complex.\n VALUE = {{ $value }}"

  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
    for: 0m
    labels:
      severity: warn
    annotations:
      description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}"

  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}"

  # file_sd doesn't count as service discovery, so 0 is expected.
  # - alert: PrometheusTargetEmpty
  #   expr: prometheus_sd_discovered_targets == 0
  #   for: 30m
  #   labels:
  #     severity: day
  #   annotations:
  #     description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}"

  - alert: PrometheusTargetScrapingSlow
    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}"

  - alert: PrometheusLargeScrape
    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}"

  - alert: PrometheusTargetScrapeDuplicate
    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbWalCorruptions
    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbWalTruncationsFailed
    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"