- # - alert: NodeFilesystemAlmostOutOfSpace
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available space left.
- # summary: Filesystem has less than 5% space left.
- # expr: |-
- # (
- # node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 5
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: warning
- # - alert: NodeFilesystemAlmostOutOfSpace
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available space left.
- # summary: Filesystem has less than 3% space left.
- # expr: |-
- # (
- # node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 3
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: critical
- # - alert: NodeFilesystemFilesFillingUp
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available inodes left and is filling up.
- # summary: Filesystem is predicted to run out of inodes within the next 24 hours.
- # expr: |-
- # (
- # node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 40
- # and
- # predict_linear(node_filesystem_files_free{job="node",fstype!=""}[6h], 24*60*60) < 0
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: warning
- # - alert: NodeFilesystemFilesFillingUp
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.
- # summary: Filesystem is predicted to run out of inodes within the next 4 hours.
- # expr: |-
- # (
- # node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 20
- # and
- # predict_linear(node_filesystem_files_free{job="node",fstype!=""}[6h], 4*60*60) < 0
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: critical
- # - alert: NodeFilesystemAlmostOutOfFiles
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available inodes left.
- # summary: Filesystem has less than 5% inodes left.
- # expr: |-
- # (
- # node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 5
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: warning
- # - alert: NodeFilesystemAlmostOutOfFiles
- # annotations:
- # description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
- # only {{ printf "%.2f" $value }}% available inodes left.
- # summary: Filesystem has less than 3% inodes left.
- # expr: |-
- # (
- # node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 3
- # and
- # node_filesystem_readonly{job="node",fstype!=""} == 0
- # )
- # for: 1h
- # labels:
- # severity: critical
- # - alert: NodeNetworkReceiveErrs
- # annotations:
- # description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
- # {{ printf "%.0f" $value }} receive errors in the last two minutes.'
- # summary: Network interface is reporting many receive errors.
- # expr: |-
- # increase(node_network_receive_errs_total[2m]) > 10
- # for: 1h
- # labels:
- # severity: warning
- # - alert: NodeNetworkTransmitErrs
- # annotations:
- # description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
- # {{ printf "%.0f" $value }} transmit errors in the last two minutes.'
- # summary: Network interface is reporting many transmit errors.
- # expr: |-
- # increase(node_network_transmit_errs_total[2m]) > 10
- # for: 1h
- # labels:
- # severity: warning
- # - alert: NodeHighNumberConntrackEntriesUsed
- # annotations:
- # description: '{{ $value | humanizePercentage }} of conntrack entries are used'
- # summary: Number of conntrack are getting close to the limit
- # expr: |-
- # (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75
- # labels:
- # severity: warning
- # - alert: NodeClockSkewDetected
- # annotations:
- # message: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure
- # NTP is configured correctly on this host.
- # summary: Clock skew detected.
- # expr: |-
- # (
- # node_timex_offset_seconds > 0.05
- # and
- # deriv(node_timex_offset_seconds[5m]) >= 0
- # )
- # or
- # (
- # node_timex_offset_seconds < -0.05
- # and
- # deriv(node_timex_offset_seconds[5m]) <= 0
- # )
- # for: 10m
- # labels:
- # severity: warning
- # - alert: NodeClockNotSynchronising
- # annotations:
- # message: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured
- # on this host.
- # summary: Clock not synchronising.
- # expr: |-
- # min_over_time(node_timex_sync_status[5m]) == 0
- # for: 10m
- # labels:
- # severity: warning
- # - alert: ianktest
- # expr: node_systemd_version >= 300
+
+ - alert: 1pmtest
+ expr: hour() == 17 and minute() < 5
+ for: 0m
+ labels:
+ severity: daytest
+ annotations:
+ summary: Prometheus daily test alert
+
+
+#### Inhibit notes ####
+## Example of expressions to detect if the target_down alert
+# fired in the last 24 hours. Initially, I thought this could
+# be an alert which inhibits up_resets, but eventually I decided
+# that doesn't make much sense: an alert that isn't itself an
+# indication of something wrong, and exists only to inhibit another
+# alert, works better integrated directly into the alert it would
+# inhibit, which may mean a recording rule. That avoids an alert we
+# have to ignore or filter out.
+#
+# An alternate expression, to calculate whether the alert would have fired, is:
+# min_over_time(sum_over_time(up[30m])[1d:]) == 0
+# where 30m matches the for: time in target_down
+#
+# Note: for graphing, wrap this expression in sum_over_time():
+# ALERTS{alertname="target_down",alertstate="firing"}[1d]
+#### end Inhibit notes ####
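+#
+# A rough sketch of that recording-rule idea (hypothetical, not in use;
+# the rule name is made up). It records 1 per target when up summed to 0
+# over some full 30m window in the last day, i.e. when target_down would
+# have fired:
+#   - record: instance:target_down_last_1d
+#     expr: min_over_time(sum_over_time(up[30m])[1d:]) == bool 0
+# Alert expressions that should be inhibited could then reference that
+# series instead of repeating the subquery.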
+
+
+# For targets where we alert only on long downtimes, we still want to
+# know if a target goes down many times for short periods over a long
+# stretch of time, while ignoring reboots.
+#
+## Another way would be to detect an overall downtime:
+# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
+ - alert: up_resets
+ expr: |-
+ resets(up[2d]) - changes(node_boot_time_seconds[2d]) > 12
+ labels:
+ severity: warn
+ annotations:
+ summary: "Target has gone down {{ $value }} times in 2 days, > 12"
+
+
+
+# https://awesome-prometheus-alerts.grep.to/rules
+
+# todo, we should probably group the prometheus alerts that indicate a
+# host-local problem.
+# eg, set a label like alert-group: local-prom, then add a route that
+# groups by it and uses its own receiver when alert-group is local-prom;
+# a rough sketch follows.
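+#
+# A minimal sketch of that routing in alertmanager.yml (hypothetical; the
+# receiver name is made up). Note prometheus label names can't contain "-",
+# so the label would have to be spelled alert_group:
+#   route:
+#     routes:
+#       - match:
+#           alert_group: local-prom
+#         group_by: [alert_group]
+#         receiver: local-prom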
+
+- name: awesome prometheus alerts
+ rules:
+
+ - alert: PrometheusJobMissing
+ expr: absent(up{job="prometheus"})
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ summary: Prometheus job missing (instance {{ $labels.instance }})
+ description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
+
+# TODO: for some hosts, notably li and MAIL_HOST, we want to alert sooner
+# than 30m and at severity day. The mail host is tricky since it roams, but
+# I think the right way to do it is to check for the absence of this metric:
+# mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
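+# A sketch of that check (untested):
+#   absent(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"})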
+ - alert: target_down
+ expr: up == 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ summary: Target down for 30m
+
+
+ # todo: this should group with the above alert
+ - alert: PrometheusAllTargetsMissing
+ expr: count by (job) (up) == 0
+ for: 10m
+ labels:
+ severity: day
+# alert-group: local-prom
+ annotations:
+ description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}"
+
+ - alert: PrometheusConfigurationReloadFailure
+ expr: prometheus_config_last_reload_successful != 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ description: "Prometheus configuration reload error\n VALUE = {{ $value }}"
+
+ - alert: PrometheusTooManyRestarts
+ expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
+ for: 0m
+ labels:
+ severity: warning
+ annotations:
+ description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n VALUE = {{ $value }}"
+
+ - alert: PrometheusAlertmanagerJobMissing
+ expr: absent(up{job="alertmanager"})
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"
+
+ - alert: PrometheusAlertmanagerConfigurationReloadFailure
+ expr: alertmanager_config_last_reload_successful != 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ description: "AlertManager configuration reload error\n VALUE = {{ $value }}"
+
+ - alert: PrometheusNotConnectedToAlertmanager
+ expr: prometheus_notifications_alertmanagers_discovered < 1
+ for: 30m
+ labels:
+ severity: day
+ annotations:
+ description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}"
+
+ - alert: PrometheusRuleEvaluationFailures
+ expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"
+
+ - alert: PrometheusTemplateTextExpansionFailures
+ expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"
+
+ - alert: PrometheusRuleEvaluationSlow
+ expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+ for: 5m
+ labels:
+ severity: warn
+ annotations:
+ description: "Prometheus rule evaluation took longer than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ $value }}"
+
+ - alert: PrometheusNotificationsBacklog
+ expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
+ for: 0m
+ labels:
+ severity: warn
+ annotations:
+ description: "The Prometheus notification queue has not been empty for 30 minutes\n VALUE = {{ $value }}"
+
+ - alert: PrometheusAlertmanagerNotificationFailing
+ expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+ for: 30m
+ labels:
+ severity: warn
+ annotations:
+ description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}"
+
+ # file_sd doesn't count as service discovery, so 0 is expected.
+ # - alert: PrometheusTargetEmpty
+ # expr: prometheus_sd_discovered_targets == 0
+ # for: 30m