# other rules to consider:
# filesystem, network, ntp rules:
# https://github.com/cloudalchemy/ansible-prometheus defaults/main.yml
# on my system, the interpolated values are in /a/opt/ansible-prometheus/rules.yml

- alert: mailtest-check
  expr: time() - mailtest_check_last_usec > 60 * 12
  description: '{{ $labels.instance }} mailtest-check'
  summary: '{{ $labels.instance }} mailtest-check'

# 42 mins: enough for a 30 min queue run plus 12
- alert: mailtest-check
  expr: time() - mailtest_check_last_usec > 60 * 42
  description: '{{ $labels.instance }} mailtest-check'
  summary: '{{ $labels.instance }} mailtest-check'

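# fires for the first 5 minutes after 18:00; note that promql's hour() and
# minute() work in UTC.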
  expr: hour() == 18 and minute() < 5
  summary: Prometheus daily test alert (instance {{ $labels.instance }})
  description: "Prometheus daily test alert if no other alerts. It is an end-to-end test.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

# https://awesome-prometheus-alerts.grep.to/rules

# todo: we should probably group the prometheus alerts that indicate a
# local prometheus problem. eg, set a label alert-group: local-prom, then
# make a receiver that groups by it when the alert-group is local-prom.
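
# a rough sketch of that idea (not wired up here): prometheus label names
# can't contain '-', so the label would be something like alert_group.
# in each such rule:
#
#   labels:
#     alert_group: local-prom
#
# and in alertmanager.yml, a route that groups those alerts into a single
# notification (the receiver name is just a placeholder):
#
#   route:
#     routes:
#       - match:
#           alert_group: local-prom
#         group_by: ['alert_group']
#         receiver: local-prom-mail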

- name: awesome prometheus alerts

- alert: PrometheusJobMissing
  expr: absent(up{job="prometheus"})
  summary: Prometheus job missing (instance {{ $labels.instance }})
  description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTargetMissing
  summary: Prometheus target missing (instance {{ $labels.instance }})
  description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

# todo: this should suppress the above alert
# - alert: PrometheusAllTargetsMissing
#   expr: count by (job) (up) == 0
#     alert-group: local-prom
#   summary: Prometheus all targets missing (instance {{ $labels.instance }})
#   description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusConfigurationReloadFailure
  expr: prometheus_config_last_reload_successful != 1
  summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
  description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

# I have an out-of-band alert to make sure prometheus is up. this
# looks like it would generate false positives. todo: think
# through what valid crash loop detection would look like.
# - alert: PrometheusTooManyRestarts
#   expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10
#   summary: Prometheus too many restarts (instance {{ $labels.instance }})
#   description: "Prometheus has restarted more than 10 times in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusAlertmanagerJobMissing
  expr: absent(up{job="alertmanager"})
  summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
  description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusAlertmanagerConfigurationReloadFailure
  expr: alertmanager_config_last_reload_successful != 1
  summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
  description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusNotConnectedToAlertmanager
  expr: prometheus_notifications_alertmanagers_discovered < 1
  summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
  description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusRuleEvaluationFailures
  expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
  summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTemplateTextExpansionFailures
  expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
  summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusRuleEvaluationSlow
  expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
  summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
  description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusNotificationsBacklog
  expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
  summary: Prometheus notifications backlog (instance {{ $labels.instance }})
  description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusAlertmanagerNotificationFailing
  expr: rate(alertmanager_notifications_failed_total[1m]) > 0
  summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
  description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

# file_sd doesn't count as service discovery, so 0 is expected; see the
# file_sd example sketched after the disabled rule below.
# - alert: PrometheusTargetEmpty
#   expr: prometheus_sd_discovered_targets == 0
#   summary: Prometheus target empty (instance {{ $labels.instance }})
#   description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

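# for reference, file-based discovery is configured in prometheus.yml
# roughly like this (the job name and path here are only examples):
#
#   scrape_configs:
#     - job_name: node
#       file_sd_configs:
#         - files:
#             - /etc/prometheus/file_sd/node.yml
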
- alert: PrometheusTargetScrapingSlow
  expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
  summary: Prometheus target scraping slow (instance {{ $labels.instance }})
  description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusLargeScrape
  expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
  summary: Prometheus large scrape (instance {{ $labels.instance }})
  description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTargetScrapeDuplicate
  expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
  summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
  description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbCheckpointCreationFailures
  expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
  summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbCheckpointDeletionFailures
  expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
  summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbCompactionsFailed
  expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
  summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbHeadTruncationsFailed
  expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
  summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbReloadFailures
  expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
  summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbWalCorruptions
  expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
  summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

- alert: PrometheusTsdbWalTruncationsFailed
  expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
  summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
  description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"