mostly fixes
diff --git a/filesystem/etc/prometheus/rules/iank.yml b/filesystem/etc/prometheus/rules/iank.yml
index b78368fef48df7af5332d0633fc5e8fa3fafeb7d..9838cac3ca219cbbf748da0ea366843056dbeb53 100644
+# other rules to consider:
+# filesystem, network, ntp rules:
+# https://github.com/cloudalchemy/ansible-prometheus defaults/main.yml
+# on my system, the interpolated values are in /a/opt/ansible-prometheus/rules.yml
+#
+
 
 groups:
-- name: ansible managed alert rules
+- name: standard
   rules:
-  - alert: NodeFilesystemAlmostOutOfSpace
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available space left.
-      summary: Filesystem has less than 5% space left.
+
+# ## uncomment for testing an alert firing
+#   - alert: test-alert4
+#     expr: vector(1)
+#     for: 0m
+#     labels:
+#       severity: day
+#     annotations:
+#       description: "always-firing alert VALUE = {{ $value }}"
+
+
+
+###### BEGIN MISC NOTES ######
+
+#
+# other interesting exporters
+# https://github.com/prometheus-community/node-exporter-textfile-collector-scripts
+#
+
+# interesting post: https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/
+
+# interesting promql query that could be useful later.
+# changes(ALERTS_FOR_STATE[24h])
+#
+#
+#
+# alert flap strategy.
+# https://roidelapluie.be/blog/2019/02/21/prometheus-last/
+#
+# Another general idea is to make an alert that fires for 24 hours and
+# inhibits another alert for the same thing, for cases where we want at
+# most 1 alert per 24 hours.
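+#
+# A minimal sketch of that inhibition in alertmanager.yml, assuming a
+# recent alertmanager and a hypothetical 24h alert named target_down_24h
+# (nothing in this file defines target_down_24h):
+#
+#  inhibit_rules:
+#    - source_matchers:
+#        - alertname = target_down_24h
+#      target_matchers:
+#        - alertname = up_resets
+#      equal: [instance]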
+
+###### END MISC NOTES ######
+
+# various queries only look at increases, so invert the up metric so we
+# can better query on down.
+  - record: down
+    expr: up == bool 0
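+# example queries using it (sketches only; no rule below depends on them):
+#   max_over_time(down[1h]) == 1   the target was down at some point in the last hour
+#   changes(down[1d])              how often a target flipped between up and down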
+
+
+# alerting on missing metrics:
+# https://www.robustperception.io/absent-alerting-for-scraped-metrics
+# that doesn't work if we want to alert across multiple hosts, eg
+# up{job="node"} == 1 unless node_systemd_unit_state{name="systemstatus.service",state="active",job="node"}
+# however, google led me to a solution here
+# https://www.linkedin.com/pulse/prometheus-alert-missing-metrics-labels-nirav-shah
+# there is also the absent() function, but I didn't see a way to make that work
+  - alert: mysers_units_missing
     expr: |-
-      (
-        node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 5
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
+      count(up{job="node"} == 1) by (instance) * 3 unless
+      count(node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"}) by (instance)
+    for: 20m
     labels:
-      severity: warning
-  - alert: NodeFilesystemAlmostOutOfSpace
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available space left.
-      summary: Filesystem has less than 3% space left.
+      severity: warn
+
+  - alert: epanicclean_not_active
     expr: |-
-      (
-        node_filesystem_avail_bytes{job="node",fstype!=""} / node_filesystem_size_bytes{job="node",fstype!=""} * 100 < 3
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
-    labels:
-      severity: critical
-  - alert: NodeFilesystemFilesFillingUp
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available inodes left and is filling up.
-      summary: Filesystem is predicted to run out of inodes within the next 24 hours.
+      node_systemd_unit_state{name="epanicclean.service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: epanicclean_missing
     expr: |-
-      (
-        node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 40
-      and
-        predict_linear(node_filesystem_files_free{job="node",fstype!=""}[6h], 24*60*60) < 0
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
+      count(up{job=~"node|tlsnode"} == 1) by (instance) unless
+      count(node_systemd_unit_state{job=~"node|tlsnode",name="epanicclean.service",state="active"}) by (instance)
+    for: 20m
     labels:
-      severity: warning
-  - alert: NodeFilesystemFilesFillingUp
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.
-      summary: Filesystem is predicted to run out of inodes within the next 4 hours.
+      severity: warn
+
+  - alert: mysers_not_active
     expr: |-
-      (
-        node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 20
-      and
-        predict_linear(node_filesystem_files_free{job="node",fstype!=""}[6h], 4*60*60) < 0
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
-    labels:
-      severity: critical
-  - alert: NodeFilesystemAlmostOutOfFiles
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available inodes left.
-      summary: Filesystem has less than 5% inodes left.
+      node_systemd_unit_state{name=~"(systemstatus|btrfsmaintstop|dynamicipupdate).service",state="active"} != 1
+    for: 20m
+    labels:
+      severity: warn
+
+  - alert: sysd_result_fail
+    # Not sure the 30m window is really needed; I guess it prevents the
+    # alert from flapping.
     expr: |-
-      (
-        node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 5
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
+      rate(node_systemd_unit_result_fail_count[30m]) > 0
     labels:
-      severity: warning
-  - alert: NodeFilesystemAlmostOutOfFiles
-    annotations:
-      description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has
-        only {{ printf "%.2f" $value }}% available inodes left.
-      summary: Filesystem has less than 3% inodes left.
+      severity: day
+
+  - alert: exim_paniclog
     expr: |-
-      (
-        node_filesystem_files_free{job="node",fstype!=""} / node_filesystem_files{job="node",fstype!=""} * 100 < 3
-      and
-        node_filesystem_readonly{job="node",fstype!=""} == 0
-      )
-    for: 1h
-    labels:
-      severity: critical
-  - alert: NodeNetworkReceiveErrs
-    annotations:
-      description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
-        {{ printf "%.0f" $value }} receive errors in the last two minutes.'
-      summary: Network interface is reporting many receive errors.
+      exim_paniclog > 0
+    labels:
+      severity: warn
+
+  - alert: check_crypttab
     expr: |-
-      increase(node_network_receive_errs_total[2m]) > 10
-    for: 1h
+      check_crypttab > 0
     labels:
-      severity: warning
-  - alert: NodeNetworkTransmitErrs
+      severity: prod
+
+# 17 minutes: if we reboot and that causes 1 send to fail, that's 10 minutes. we
+# test this every 5 minutes, so that's 15 minutes at most.
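+# (worst case: 10 minutes since the last good send + up to 5 minutes until
+# the next check = 15 minutes; 17 leaves ~2 minutes of slack.)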
+  - alert: mailtest_check_vps
+    expr: |-
+      time() - mailtest_check_last_usec{job="tlsnode"} >= 60 * 17
+    labels:
+      severity: day
     annotations:
-      description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered
-        {{ printf "%.0f" $value }} transmit errors in the last two minutes.'
-      summary: Network interface is reporting many transmit errors.
+      summary: '17 minutes down'
+
+  - alert: mailtest_check_unexpected_spamd_vps
     expr: |-
-      increase(node_network_transmit_errs_total[2m]) > 10
-    for: 1h
+      mailtest_check_unexpected_spamd_results >= 1
     labels:
-      severity: warning
-  - alert: NodeHighNumberConntrackEntriesUsed
+      severity: day
     annotations:
-      description: '{{ $value | humanizePercentage }} of conntrack entries are used'
-      summary: Number of conntrack are getting close to the limit
+      summary: 'jr -u mailtest-check -e'
+
+  - alert: mailtest_check_mailhost
     expr: |-
-      (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75
+      time() - max by (folder,from) (mailtest_check_last_usec{job="node"}) >= 60 * 17
     labels:
-      severity: warning
-  - alert: NodeClockSkewDetected
+      severity: day
     annotations:
-      message: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure
-        NTP is configured correctly on this host.
-      summary: Clock skew detected.
+      summary: '17 minutes down'
+
+  # 20 minutes: just allow a bit more time since this is a prod alert.
+  - alert: mailtest_check_gnu_mailhost
     expr: |-
-      (
-        node_timex_offset_seconds > 0.05
-      and
-        deriv(node_timex_offset_seconds[5m]) >= 0
-      )
-      or
-      (
-        node_timex_offset_seconds < -0.05
-      and
-        deriv(node_timex_offset_seconds[5m]) <= 0
-      )
-    for: 10m
+      time() - max by (folder,from) (mailtest_check_last_usec{folder="/m/md/l/testignore", from="iank@gnu.org"}) >= 60 * 20
     labels:
-      severity: warning
-  - alert: NodeClockNotSynchronising
+      severity: prod
     annotations:
-      message: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured
-        on this host.
-      summary: Clock not synchronising.
+      summary: '20 minutes down'
+
+
+  - alert: 1pmtest
+    expr: hour() == 17 and minute() < 5
+    for: 0m
+    labels:
+      severity: daytest
+    annotations:
+      summary: Prometheus daily test alert
+
+
+#### Inhibit notes ####
+## Example of expressions to detect whether the target_down alert
+# fired in the last 24 hours. Initially, I thought this could
+# be an alert which inhibits up_resets, but eventually I figured
+# that doesn't make much sense: an alert that is not an indication
+# of something wrong, and exists only to inhibit another alert, is
+# better integrated directly into the alert it would inhibit,
+# possibly as a recording rule. That avoids an alert we have to
+# ignore or filter out.
+#
+# An alternate expression, to calculate whether the alert would have fired, is:
+#  min_over_time(sum_over_time(up[30m])[1d:]) == 0
+#  where 30m matches the for: time in target_down
+#
+# Note: for graphing, surround the following expression in sum_over_time():
+# ALERTS{alertname="target_down",alertstate="firing"}[1d]
+#### end Inhibit notes ####
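+
+# A sketch of the recording-rule idea mentioned above (the name is made up
+# and nothing below uses it), based on the alternate expression:
+#  - record: target_down_1d
+#    expr: min_over_time(sum_over_time(up[30m])[1d:]) == bool 0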
+
+
+# For targets where we alert only on long downtimes, we still want to
+# know if a target is going down many times for short periods over a
+# long stretch of time, while ignoring reboots.
+#
+## Another way would be to detect an overall downtime:
+# avg_over_time(node_systemd_unit_state{name="dynamicipupdate.service",state="active"}[1d]) < .95
+  - alert: up_resets
     expr: |-
-      min_over_time(node_timex_sync_status[5m]) == 0
+      resets(up[1d]) - changes(node_boot_time_seconds[1d]) > 12
+    labels:
+      severity: warn
+    annotations:
+      summary: "Target has gone down {{ $value }} times in 1 day, > 12"
+
+
+
+# https://awesome-prometheus-alerts.grep.to/rules
+
+# todo: we should probably group the prometheus alerts that indicate a
+# host-local problem.
+# eg, set a label alert-group: local-prom, then make a receiver that
+# groups by it when alert-group is local-prom.
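+#
+# a rough sketch of that child route in alertmanager.yml (receiver name is
+# hypothetical; note that prometheus label names can't contain "-", so the
+# label would have to be something like alert_group):
+#
+#  route:
+#    routes:
+#      - matchers:
+#          - alert_group = local-prom
+#        group_by: [alert_group, alertname]
+#        receiver: local-prom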
+
+- name: awesome prometheus alerts
+  rules:
+
+  - alert: PrometheusJobMissing
+    expr: absent(up{job="prometheus"})
+    for: 30m
+    labels:
+      severity: day
+    annotations:
+      summary: Prometheus job missing (instance {{ $labels.instance }})
+      description: "A Prometheus job has disappeared\n  VALUE = {{ $value }}"
+
+# TODO: for some hosts, notably li and MAIL_HOST, we want to alert sooner
+# than 30m and with severity day. The mail host is tricky since it roams,
+# but I think the right way to do it is to check for absence of this metric:
+# mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"}
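+#
+# a sketch of that absence check (alert name and timings are made up, not
+# configured anywhere else):
+#  - alert: mailhost_metric_missing
+#    expr: |-
+#      absent(mailtest_check_last_usec{folder="/m/md/l/testignore",from="ian@iankelling.org"})
+#    for: 10m
+#    labels:
+#      severity: day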
+  - alert: target_down
+    expr: up == 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      summary: Target down for 30m
+
+
+    # todo: this should group with the above alert
+  - alert: PrometheusAllTargetsMissing
+    expr: count by (job) (up) == 0
     for: 10m
+    labels:
+      severity: day
+#      alert-group: local-prom
+    annotations:
+      description: "A Prometheus job does not have living target anymore.\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusConfigurationReloadFailure
+    expr: prometheus_config_last_reload_successful != 1
+    for: 30m
+    labels:
+      severity: day
+    annotations:
+      description: "Prometheus configuration reload error\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTooManyRestarts
+    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[30m]) > 10
+    for: 0m
     labels:
       severity: warning
-  - alert: ianktest
-    expr: node_systemd_version >= 300
+    annotations:
+      description: "Prometheus has restarted more than ten times in the last 30 minutes. It might be crashlooping.\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusAlertmanagerJobMissing
+    expr: absent(up{job="alertmanager"})
+    for: 30m
     labels:
-      severity: critical
+      severity: warn
     annotations:
-      description: '{{ $labels.instance }} ianktest.'
-      summary: Instance {{ $labels.instance }} - ianktest
-  - alert: ianktest
-    expr: node_systemd_version >= 300
+      description: "A Prometheus AlertManager job has disappeared\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusAlertmanagerConfigurationReloadFailure
+    expr: alertmanager_config_last_reload_successful != 1
+    for: 30m
     labels:
-      severity: critical
+      severity: day
     annotations:
-      description: '{{ $labels.instance }} ianktest.'
-      summary: Instance {{ $labels.instance }} - ianktest
+      description: "AlertManager configuration reload error\n  VALUE = {{ $value }}"
 
-  - alert: ianktest
-    expr: |-
-      time() - mailtest_check_last_usec > 60 * 8
+  - alert: PrometheusNotConnectedToAlertmanager
+    expr: prometheus_notifications_alertmanagers_discovered < 1
+    for: 30m
+    labels:
+      severity: day
+    annotations:
+      description: "Prometheus cannot connect the alertmanager\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusRuleEvaluationFailures
+    expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTemplateTextExpansionFailures
+    expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} template text expansion failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusRuleEvaluationSlow
+    expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
+    for: 5m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusNotificationsBacklog
+    expr: min_over_time(prometheus_notifications_queue_length[30m]) > 0
+    for: 0m
+    labels:
+      severity: warn
+    annotations:
+      description: "The Prometheus notification queue has not been empty for 10 minutes\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusAlertmanagerNotificationFailing
+    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Alertmanager is failing sending notifications\n  VALUE = {{ $value }}"
+
+  # file_sd doesn't count as service discovery, so 0 is expected.
+  # - alert: PrometheusTargetEmpty
+  #   expr: prometheus_sd_discovered_targets == 0
+  #   for: 30m
+  #   labels:
+  #     severity: day
+  #   annotations:
+  #     description: "Prometheus has no target in service discovery\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTargetScrapingSlow
+    expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 90
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus is scraping exporters slowly\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusLargeScrape
+    expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus has many scrapes that exceed the sample limit\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTargetScrapeDuplicate
+    expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbCheckpointCreationFailures
+    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbCheckpointDeletionFailures
+    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbCompactionsFailed
+    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbHeadTruncationsFailed
+    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbReloadFailures
+    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} TSDB reload failures\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbWalCorruptions
+    expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
+    for: 30m
+    labels:
+      severity: warn
+    annotations:
+      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n  VALUE = {{ $value }}"
+
+  - alert: PrometheusTsdbWalTruncationsFailed
+    expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
+    for: 30m
     labels:
-      severity: critical
+      severity: warn
     annotations:
-      description: '{{ $labels.instance }} mailtest'
-      summary: Instance {{ $labels.instance }} - ianktest
+      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n  VALUE = {{ $value }}"