+      # I have an out-of-band alert to make sure Prometheus is up. This
+      # looks like it would generate false positives. TODO: think
+      # through what valid crash-loop detection would look like.
+ # - alert: PrometheusTooManyRestarts
+ # expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 10
+ # for: 0m
+ # labels:
+ # severity: warning
+ # annotations:
+ # summary: Prometheus too many restarts (instance {{ $labels.instance }})
+      #     description: "Prometheus has restarted more than 10 times in the last 15 minutes. It might be crash looping.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
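+      # A possible hedged sketch (untested; the rule name below is hypothetical): instead of
+      # counting a burst of restarts, treat it as a crash loop only when the process has
+      # restarted in every trailing 5m window for a sustained period, so one-off restarts
+      # (e.g. a deliberate redeploy) clear on their own and don't fire:
+      # - alert: PrometheusCrashLooping
+      #   expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[5m]) > 0
+      #   for: 15m
+      #   labels:
+      #     severity: warn
+      #   annotations:
+      #     summary: Prometheus component crash looping (instance {{ $labels.instance }})
+      #     description: "Process has restarted at least once in every 5m window for 15 minutes\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"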
+
+ - alert: PrometheusAlertmanagerJobMissing
+ expr: absent(up{job="alertmanager"})
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
+ description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusAlertmanagerConfigurationReloadFailure
+ expr: alertmanager_config_last_reload_successful != 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
+ description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusNotConnectedToAlertmanager
+ expr: prometheus_notifications_alertmanagers_discovered < 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
+          description: "Prometheus cannot connect to the Alertmanager\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+ - alert: PrometheusRuleEvaluationFailures
+ expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTemplateTextExpansionFailures
+ expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusRuleEvaluationSlow
+ expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+ for: 5m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
+          description: "Prometheus rule evaluation took longer than the scheduled interval. This indicates slow storage backend access or an overly complex query.\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+ - alert: PrometheusNotificationsBacklog
+ expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
+ for: 0m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus notifications backlog (instance {{ $labels.instance }})
+          description: "The Prometheus notification queue has not been empty for 30 minutes\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+ - alert: PrometheusAlertmanagerNotificationFailing
+ expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
+          description: "Alertmanager is failing to send notifications\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+      # file_sd doesn't count as service discovery, so 0 is expected.
+ # - alert: PrometheusTargetEmpty
+ # expr: prometheus_sd_discovered_targets == 0
+ # for: 30m
+ # labels:
+ # severity: day
+ # annotations:
+ # summary: Prometheus target empty (instance {{ $labels.instance }})
+ # description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTargetScrapingSlow
+ expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus target scraping slow (instance {{ $labels.instance }})
+          description: "Prometheus is scraping exporters slowly: the 90th percentile scrape interval exceeds 90 seconds\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+ - alert: PrometheusLargeScrape
+ expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus large scrape (instance {{ $labels.instance }})
+ description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTargetScrapeDuplicate
+ expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
+ description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbCheckpointCreationFailures
+ expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbCheckpointDeletionFailures
+ expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbCompactionsFailed
+ expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
+          description: "Prometheus encountered {{ $value }} TSDB compaction failures\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbHeadTruncationsFailed
+ expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbReloadFailures
+ expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbWalCorruptions
+ expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
+ description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+ - alert: PrometheusTsdbWalTruncationsFailed
+ expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+ for: 30m