groups:
- name: standard
rules:
- - alert: mailtest-check
+
+## uncomment for testing an alert firing
+# - alert: test-alert4
+# expr: vector(1)
+# # expr: nonexistent_metric
+# for: 0m
+# labels:
+# severity: day
+# annotations:
+# description: "always-firing alert VALUE = {{ $value }}"
+
+
+
+###### BEGIN MISC NOTES ######
+
+#
+# other interesting exporters
+# https://github.com/prometheus-community/node-exporter-textfile-collector-scripts
+#
+
+# interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/
+
+# interesting promql query that could be useful later.
+# changes(ALERTS_FOR_STATE[24h])
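+# (ALERTS_FOR_STATE's value is the timestamp an alert became active, so
+# changes() over 24h is roughly how many separate times each alert
+# started pending/firing in the last day)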
+#
+#
+#
+# alert flap strategy.
+# https://roidelapluie.be/blog/2019/02/21/prometheus-last/
+#
+# Another general idea is to make an alert that fires for 24 hours and
+# inhibits another alert for the same thing, for cases where we want at
+# most 1 alert per 24 hours.
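+#
+# A rough, untested sketch of the alertmanager side of that idea (it would
+# go in the alertmanager config, not in this file). It assumes an alert
+# pair like target_down / target_down_inhibitor defined further below,
+# with both alerts carrying an instance label:
+#
+# inhibit_rules:
+#   - source_match:
+#       alertname: target_down_inhibitor
+#     target_match:
+#       alertname: target_down
+#     equal: ['instance']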
+
+###### END MISC NOTES ######
+
+
+
+
+# alerting on missing metrics:
+# https://www.robustperception.io/absent-alerting-for-scraped-metrics
+# that doesn't work if we want to alert across multiple hosts, e.g.
+# up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
+# however, google led me to a solution here:
+# https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
+# there is also the absent() function, but I didn't see a way to make that work
+ - alert: mysers_units_missing
+ expr: |-
+ count(up{job="node"}) by (instance) * 3 unless count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+ for: 20m
+ labels:
+ severity: warn
+
+ - alert: mysers_not_active
+ expr: |-
+ node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
+ for: 20m
+ labels:
+ severity: warn
+
+ - alert: sysd_result_fail
+ expr: |-
+ rate(node_systemd_unit_result_fail_count[30m]) > 0
+ labels:
+ severity: day
+
+
+ - alert: mailtest_check
expr: |-
time() - mailtest_check_last_usec > 60 * 12
labels:
severity: day
annotations:
- description: '{{ $labels.instance }} mailtest-check'
- summary: '{{ $labels.instance }} mailtest-check'
+ summary: '12 minutes down'
# 42 mins: enough for a 30 min queue run plus 12
- - alert: mailtest-check
+ - alert: mailtest_check
expr: |-
time() - mailtest_check_last_usec > 60 * 42
labels:
severity: prod
annotations:
- description: '{{ $labels.instance }} mailtest-check'
- summary: '{{ $labels.instance }} mailtest-check'
+          summary: '42 minutes down'
- alert: 1pmtest
- expr: hour() == 18 and minute() < 5
+ expr: hour() == 17 and minute() < 5
for: 0m
labels:
severity: daytest
annotations:
- summary: Prometheus daily test alert (instance {{ $labels.instance }})
- description: "Prometheus daily test alert if no other alerts. It
- is an end to end test.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ summary: Prometheus daily test alert
+
+
+
+# an alternate expression to calculate whether the alert would have fired:
+# min_over_time(sum_over_time(up[30m])[1d:]) == 0
+# where 30m matches the for: time in target_down
+#
+# sum_over_time is not needed; it's just a convenience for graphing
+ - alert: target_down_inhibitor
+ expr: |-
+ sum_over_time(ALERTS{alertname="target_down"}[1d])
+ labels:
+ severity: ignore
+ annotations:
+ summary: alert that indicates target_down alert fired in the last day
+ description: "VALUE = {{ $value }}"
+
+# For targets where we only alert on longer downtimes, we still want to
+# know if the target is going down many times for short periods over a
+# long period of time. But ignore reboots.
+#
+## Another way would be to detect an overall downtime:
+# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
+ - alert: up_resets
+ expr: |-
+ resets(up[3d]) - changes(node_boot_time_seconds[3d]) > 15
+ labels:
+ severity: warn
+ annotations:
+ summary: "Target has gone down {{ $value }} times in 3 days, > 15"
+
+
# https://awesome-prometheus-alerts.grep.to/rules
severity: day
annotations:
summary: Prometheus job missing (instance {{ $labels.instance }})
- description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
- - alert: PrometheusTargetMissing
+# TODO: for some hosts, notably li and MAIL_HOST, we want to alert sooner
+# than 30m and with severity day. MAIL_HOST is tricky since it roams, but I
+# think the right way to do it is to check for absence of this metric:
+# mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
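+## a rough, untested sketch of that idea; the for: time and severity are guesses
+# - alert: mailtest_check_missing
+#     expr: |-
+#       absent(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"})
+#     for: 5m
+#     labels:
+#       severity: day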
+ - alert: target_down
expr: up == 0
for: 30m
labels:
severity: warn
annotations:
- summary: Prometheus target missing (instance {{ $labels.instance }})
- description: "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ summary: Target down for 30m
- # todo: this should supress the above alert
- # - alert: PrometheusAllTargetsMissing
- # expr: count by (job) (up) == 0
- # for: 30m
- # labels:
- # severity: day
- # alert-group: local-prom
- # annotations:
- # summary: Prometheus all targets missing (instance {{ $labels.instance }})
- # description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ # todo: this should group with the above alert
+ - alert: PrometheusAllTargetsMissing
+ expr: count by (job) (up) == 0
+ for: 10m
+ labels:
+ severity: day
+# alert-group: local-prom
+ annotations:
+        description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}"
- alert: PrometheusConfigurationReloadFailure
expr: prometheus_config_last_reload_successful != 1
labels:
severity: day
annotations:
- summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
- description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
-
- # I have an out of band alert to make sure prometheus is up. this
- # looks like it would generate false positives. todo: think
- # through what a valid crash loop detection would look like.
- # - alert: PrometheusTooManyRestarts
- # expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10
- # for: 0m
- # labels:
- # severity: warning
- # annotations:
- # summary: Prometheus too many restarts (instance {{ $labels.instance }})
- # description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus configuration reload error\n VALUE = {{ $value }}"
+
+ - alert: PrometheusTooManyRestarts
+ expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
+ for: 0m
+ labels:
+        severity: warn
+ annotations:
+ description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerJobMissing
expr: absent(up{job="alertmanager"})
labels:
severity: warn
annotations:
- summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
- description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerConfigurationReloadFailure
expr: alertmanager_config_last_reload_successful != 1
labels:
severity: day
annotations:
- summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
- description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "AlertManager configuration reload error\n VALUE = {{ $value }}"
- alert: PrometheusNotConnectedToAlertmanager
expr: prometheus_notifications_alertmanagers_discovered < 1
labels:
severity: day
annotations:
- summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
- description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}"
- alert: PrometheusRuleEvaluationFailures
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"
- alert: PrometheusTemplateTextExpansionFailures
expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"
- alert: PrometheusRuleEvaluationSlow
expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
labels:
severity: warn
annotations:
- summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
- description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slow storage backend access or an overly complex query.\n VALUE = {{ $value }}"
- alert: PrometheusNotificationsBacklog
expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus notifications backlog (instance {{ $labels.instance }})
- description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerNotificationFailing
expr: rate(alertmanager_notifications_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
- description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}"
# file_sd doesnt count as service discovery, so 0 is expected.
# - alert: PrometheusTargetEmpty
# labels:
# severity: day
# annotations:
- # summary: Prometheus target empty (instance {{ $labels.instance }})
- # description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ # description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}"
- alert: PrometheusTargetScrapingSlow
expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
labels:
severity: warn
annotations:
- summary: Prometheus target scraping slow (instance {{ $labels.instance }})
- description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus is scraping exporters slowly\n VALUE = {{ $value }}"
- alert: PrometheusLargeScrape
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
labels:
severity: warn
annotations:
- summary: Prometheus large scrape (instance {{ $labels.instance }})
- description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}"
- alert: PrometheusTargetScrapeDuplicate
expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
- description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCheckpointCreationFailures
expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCheckpointDeletionFailures
expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCompactionsFailed
expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+        description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbHeadTruncationsFailed
expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbReloadFailures
expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbWalCorruptions
expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}"
- alert: PrometheusTsdbWalTruncationsFailed
expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
labels:
severity: warn
annotations:
- summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
- description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+ description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"