various fixes
distro-setup: filesystem/etc/prometheus/rules/iank.yml
# other rules to consider:
# filesystem, network, ntp rules:
# https://github.com/cloudalchemy/ansible-prometheus defaults/main.yml
# on my system, the interpolated values are in /a/opt/ansible-prometheus/rules.yml
#


groups:
- name: standard
  rules:
  - alert: mailtest-check
    expr: |-
      time() - mailtest_check_last_usec > 60 * 12
    labels:
      severity: day
    annotations:
      description: '{{ $labels.instance }} mailtest-check'
      summary: '{{ $labels.instance }} mailtest-check'

  # 42 min: enough for a 30 min queue run plus the 12 min threshold above
  - alert: mailtest-check
    expr: |-
      time() - mailtest_check_last_usec > 60 * 42
    labels:
      severity: prod
    annotations:
      description: '{{ $labels.instance }} mailtest-check'
      summary: '{{ $labels.instance }} mailtest-check'

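  # note on units: despite the _usec suffix, the two expressions above
  # compare mailtest_check_last_usec directly against time(), which is in
  # seconds, so the metric only works here as a unix timestamp in seconds.
  # a handy expression-browser query to eyeball the current lag (just an
  # illustration, not a rule):
  #   time() - mailtest_check_last_usec
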
  - alert: 1pmtest
    expr: hour() == 18 and minute() < 5
    for: 0m
    labels:
      severity: daytest
    annotations:
      summary: Prometheus daily test alert (instance {{ $labels.instance }})
      description: "Prometheus daily test alert for when there are no other alerts. It is an end-to-end test.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

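  # note: hour() and minute() are UTC, so 18:00 UTC is presumably the 1pm
  # in the name (1pm US Eastern standard time; during daylight saving it
  # fires at 2pm local). with for: 0m it fires immediately and stays
  # active for the first 5 minutes of that hour.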

# https://awesome-prometheus-alerts.grep.to/rules


# todo: we should probably group the prometheus alerts that indicate a
# host-local problem. e.g., set a label alert-group: local-prom, then make
# a receiver that groups by it when the alert-group is local-prom. a rough
# sketch of that alertmanager config is below.

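# sketch of the alertmanager side of that todo (commented; the receiver
# name is made up, not one that exists in my alertmanager config):
#
#   route:
#     routes:
#       - match:
#           alert-group: local-prom
#         group_by: [alert-group]
#         receiver: local-prom
#
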
- name: awesome prometheus alerts
  rules:

  - alert: PrometheusJobMissing
    expr: absent(up{job="prometheus"})
    for: 30m
    labels:
      severity: day
    annotations:
      summary: Prometheus job missing (instance {{ $labels.instance }})
      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTargetMissing
    expr: up == 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus target missing (instance {{ $labels.instance }})
      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # todo: this should suppress the above alert; see the inhibit rule
  # sketch after this commented rule.
  # - alert: PrometheusAllTargetsMissing
  #   expr: count by (job) (up) == 0
  #   for: 30m
  #   labels:
  #     severity: day
  #     alert-group: local-prom
  #   annotations:
  #     summary: Prometheus all targets missing (instance {{ $labels.instance }})
  #     description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

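  # the suppression above could be an alertmanager inhibit rule roughly
  # like this (a sketch only; matcher syntax assumes alertmanager >= 0.22,
  # and the labels would need checking against what these alerts carry):
  #
  #   inhibit_rules:
  #     - source_matchers:
  #         - alertname = PrometheusAllTargetsMissing
  #       target_matchers:
  #         - alertname = PrometheusTargetMissing
  #       equal: [job]
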
  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 30m
    labels:
      severity: day
    annotations:
      summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
      description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # I have an out of band alert to make sure prometheus is up. this looks
  # like it would generate false positives. todo: think through what valid
  # crash loop detection would look like; one rough idea is sketched below
  # the commented rule.
  # - alert: PrometheusTooManyRestarts
  #   expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10
  #   for: 0m
  #   labels:
  #     severity: warning
  #   annotations:
  #     summary: Prometheus too many restarts (instance {{ $labels.instance }})
  #     description: "Prometheus has restarted more than ten times in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

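  # one possible shape for that (purely a sketch, thresholds not thought
  # through): require sustained restarting rather than a short burst, e.g.
  #   expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[1h]) > 4
  #   for: 30m
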
  - alert: PrometheusAlertmanagerJobMissing
    expr: absent(up{job="alertmanager"})
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
      description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 30m
    labels:
      severity: day
    annotations:
      summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
      description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusNotConnectedToAlertmanager
    expr: prometheus_notifications_alertmanagers_discovered < 1
    for: 30m
    labels:
      severity: day
    annotations:
      summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusRuleEvaluationFailures
    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTemplateTextExpansionFailures
    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusRuleEvaluationSlow
    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
    for: 5m
    labels:
      severity: warn
    annotations:
      summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
      description: "Prometheus rule evaluation took longer than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
    for: 0m
    labels:
      severity: warn
    annotations:
      summary: Prometheus notifications backlog (instance {{ $labels.instance }})
      description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
      description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  # file_sd doesn't count as service discovery, so 0 is expected.
  # - alert: PrometheusTargetEmpty
  #   expr: prometheus_sd_discovered_targets == 0
  #   for: 30m
  #   labels:
  #     severity: day
  #   annotations:
  #     summary: Prometheus target empty (instance {{ $labels.instance }})
  #     description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTargetScrapingSlow
    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus target scraping slow (instance {{ $labels.instance }})
      description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusLargeScrape
    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus large scrape (instance {{ $labels.instance }})
      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTargetScrapeDuplicate
    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbWalCorruptions
    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

  - alert: PrometheusTsdbWalTruncationsFailed
    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
    for: 30m
    labels:
      severity: warn
    annotations:
      summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"