# other rules to consider:
# filesystem, network, ntp rules:
# https://github.com/cloudalchemy/ansible-prometheus defaults/main.yml
# on my system, the interpolated values are in /a/opt/ansible-prometheus/rules.yml
## uncomment for testing an alert firing
# - alert: test-alert4
# # expr: nonexistent_metric
#   description: "always-firing alert VALUE = {{ $value }}"
###### BEGIN MISC NOTES ######

# other interesting exporters
# https://github.com/prometheus-community/node-exporter-textfile-collector-scripts

# interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/
# interesting promql query that could be useful later:
# changes(ALERTS_FOR_STATE[24h])
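# for example (hypothetical threshold), to spot an alert that re-entered
# pending repeatedly, i.e. flapped, over the last day:
# changes(ALERTS_FOR_STATE{alertname="target_down"}[24h]) > 2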
# alert flap strategy:
# https://roidelapluie.be/blog/2019/02/21/prometheus-last/

# Another idea, generally: make an alert that fires for 24 hours and
# inhibits another alert for the same thing, for cases where we want at
# most 1 alert per 24 hours.
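# a minimal sketch of that inhibition (matcher names assumed; this stanza
# belongs in alertmanager.yml, not in this rules file), using the
# target_down / target_down_inhibitor pair defined below:
# inhibit_rules:
#   - source_matchers:
#       - alertname="target_down_inhibitor"
#     target_matchers:
#       - alertname="target_down"
#     equal: [instance]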
###### END MISC NOTES ######
# alerting on missing metrics:
# https://www.robustperception.io/absent-alerting-for-scraped-metrics
# that doesn't work if we want to alert across multiple hosts, e.g.
# up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
# however, google led me to a solution here:
# https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
# there is also the absent() function, but i didn't see a way to make that work
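# for example, absent() returns a single series carrying only the labels
# written in the selector, with no instance label, so it can't say which of
# several hosts is missing the unit:
# absent(node_systemd_unit_state{name="systemstatus.service",state="active"})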
- alert: mysers_units_missing
  # 3 = the number of units matched by the regex below
  expr: >-
    count(up{job="node"}) by (instance) * 3 unless
    count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
- alert: mysers_not_active
  expr: >-
    node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
- alert: sysd_result_fail
  expr: >-
    rate(node_systemd_unit_result_fail_count[30m]) > 0
- alert: mailtest_check
  # mailtest_check_last_usec is (per its name) microseconds since epoch,
  # so scale it to seconds before comparing with time()
  expr: >-
    time() - mailtest_check_last_usec / 1000000 > 60 * 12
    summary: '12 minutes down'
# 42 mins: enough for a 30 min queue run plus 12
- alert: mailtest_check
  expr: >-
    time() - mailtest_check_last_usec / 1000000 > 60 * 42
    summary: '42 minutes down'
  # note: PromQL hour() and minute() are UTC
  expr: hour() == 17 and minute() < 5
    summary: Prometheus daily test alert
# an alternate expression, to calculate whether the alert would have fired, is:
# min_over_time(sum_over_time(up[30m])[1d:]) == 0
# where 30m matches the for: time in target_down.
# sum_over_time is not needed, it is just a convenience for graphing
- alert: target_down_inhibitor
  expr: >-
    sum_over_time(ALERTS{alertname="target_down"}[1d])
    summary: indicates the target_down alert fired within the last day
    description: "VALUE = {{ $value }}"
# For targets where we only alert on longer downtimes, we still want to know
# if they go down many times for short periods over a long window. But ignore
# reboots.

## Another way would be to detect an overall downtime:
# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
    # resets() counts drops in value; for a 0/1 series like up, that is
    # 1 -> 0 transitions. Subtract reboots, counted as changes of boot time.
    resets(up[3d]) - changes(node_boot_time_seconds[3d]) > 15
    summary: "Target has gone down {{ $value }} times in 3 days, > 15"
# https://awesome-prometheus-alerts.grep.to/rules
# todo: we should probably group the prometheus alerts that indicate a
# host-local problem.
# e.g., set a label alert_group: local-prom (label names can't contain "-"),
# then make a receiver that groups by it when the alert_group is local-prom.
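# a minimal sketch of that routing (receiver name is an assumption; this goes
# in alertmanager.yml):
# route:
#   routes:
#     - matchers:
#         - alert_group="local-prom"
#       group_by: [alert_group, alertname]
#       receiver: local-prom-email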
- name: awesome prometheus alerts
  - alert: PrometheusJobMissing
    expr: absent(up{job="prometheus"})
      summary: Prometheus job missing (instance {{ $labels.instance }})
      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
  # TODO: for some hosts, notably li and MAIL_HOST, we want to alert sooner
  # than 30m, and set severity to day. The mail host is tricky since it roams,
  # but I think the right way to do it is to check for absence of this metric:
  # mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
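  # a hypothetical sketch of that absence check (alert name and for: duration
  # are assumptions, untested):
  # - alert: mailtest_check_missing
  #   expr: absent(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"})
  #   for: 5m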
      summary: Target down for 30m
  # todo: this should group with the above alert
  - alert: PrometheusAllTargetsMissing
    expr: count by (job) (up) == 0
      # alert_group: local-prom
      description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}"
  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
      description: "Prometheus configuration reload error\n VALUE = {{ $value }}"
  - alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
      description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n VALUE = {{ $value }}"
  - alert: PrometheusAlertmanagerJobMissing
    expr: absent(up{job="alertmanager"})
      description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"
  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
      description: "AlertManager configuration reload error\n VALUE = {{ $value }}"
  - alert: PrometheusNotConnectedToAlertmanager
    expr: prometheus_notifications_alertmanagers_discovered < 1
      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}"
  - alert: PrometheusRuleEvaluationFailures
    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"
  - alert: PrometheusTemplateTextExpansionFailures
    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
      description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"
  - alert: PrometheusRuleEvaluationSlow
    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
      description: "Prometheus rule evaluation took longer than the scheduled interval. This indicates slower storage backend access or too complex a query.\n VALUE = {{ $value }}"
  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
      description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}"
  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
      description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}"
  # file_sd doesn't count as service discovery, so 0 is expected.
  # - alert: PrometheusTargetEmpty
  #   expr: prometheus_sd_discovered_targets == 0
  #     description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}"
  - alert: PrometheusTargetScrapingSlow
    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
      description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}"

  - alert: PrometheusLargeScrape
    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}"

  - alert: PrometheusTargetScrapeDuplicate
    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}"
  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}"
  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}"
  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbWalCorruptions
    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}"

  - alert: PrometheusTsdbWalTruncationsFailed
    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"