{{ define "wechat.default.message" }}
{{ range $i, $alert :=.Alerts }}
========監控報警==========
告警狀態:{{ .Status }}
告警級別:{{ $alert.Labels.severity }}
告警類型:{{ $alert.Labels.alertname }}
告警應用:{{ $alert.Annotations.summary }}
告警主機:{{ $alert.Labels.instance }}
告警詳情:{{ $alert.Annotations.description }}
觸發閥值:{{ $alert.Annotations.value }}
告警時間:{{ $alert.StartsAt.Format "2006-01-02 15:04:05" }}
========end=============
{{ end }}
{{ end }}
# Prometheus alerting rules.
# NOTE(review): the original file had all indentation stripped (blog paste) and
# used alert names containing spaces, which Prometheus rejects — alert names
# must match [a-zA-Z_:][a-zA-Z0-9_:]*. Names were rewritten accordingly; this
# changes the `alertname` label, so update any Alertmanager routes that match
# on the old names. Severity values ("error"/"Critical") are left as-is since
# routing may depend on them — consider normalizing to lowercase separately.
groups:
  - name: ServiceStatus
    rules:
      # prometheus
      - alert: PrometheusDown
        expr: prometheus_config_last_reload_successful != 1
        for: 1m
        labels:
          name: prometheus
          severity: error
        annotations:
          summary: "prometheus down (instance {{ $labels.instance }})"
          description: "prometheus instance is down"
          value: "{{ $value }}"
      # alertmanager
      - alert: AlertmanagerDown
        expr: alertmanager_config_last_reload_successful != 1
        for: 1m
        labels:
          name: alertmanager
          severity: error
        annotations:
          summary: "alertmanager down (instance {{ $labels.instance }})"
          description: "alertmanager instance is down"
          value: "{{ $value }}"
      # node_exporter
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          name: instance
          severity: Critical
        annotations:
          # NOTE(review): app_name must come from target relabeling — confirm
          # it exists on the `up` series, otherwise this renders empty.
          summary: "{{ $labels.app_name }} down"
          description: "服務停止運行"
          value: "{{ $value }}"
      - alert: HostLoadHigh
        expr: node_load5 > 4
        for: 1m
        labels:
          severity: Critical
        annotations:
          # NOTE(review): appname must come from target relabeling — verify.
          summary: "{{ $labels.appname }} "
          description: "主機5分鐘負載超過4"
          value: "{{ $value }}"
      - alert: CpuUsageHigh
        expr: 100 - (avg by(instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 1m
        labels:
          name: cpu
          severity: Critical
        annotations:
          # was $labels.mountpoint — that label does not exist after
          # `avg by(instance)` and rendered empty; `instance` is the only
          # surviving label.
          summary: "{{ $labels.instance }} CPU使用率過高"
          description: "{{ $labels.instance }} CPU使用大於80%"
          value: "{{ $value }}%"
      - alert: MemUsageHigh
        expr: (node_memory_MemTotal_bytes - (node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes)) / node_memory_MemTotal_bytes * 100 > 85
        for: 1m
        labels:
          name: memory
          severity: Critical
        annotations:
          # was $labels.mountpoint — node_memory_* metrics have no mountpoint.
          summary: "{{ $labels.instance }} 內存使用率過高!"
          description: "{{ $labels.instance }} 內存使用大於85%"
          value: "{{ $value }}%"
      - alert: DiskUsageHigh
        expr: 100-(node_filesystem_free_bytes{fstype=~"ext4|xfs"}/node_filesystem_size_bytes{fstype=~"ext4|xfs"}*100) > 80
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          # mountpoint is valid here: node_filesystem_* metrics carry it.
          summary: "{{$labels.mountpoint}} 磁盤空間使用率過高!"
          description: "{{$labels.mountpoint }} 磁盤空間使用大於80%"
          value: "{{ $value }}%"
      - alert: DiskFullIn4Days
        expr: predict_linear(node_filesystem_free_bytes{job="node_exporter"}[2h], 4 * 24 * 3600) < 0
        for: 5m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "{{$labels.mountpoint}} 預計主機可用磁盤空間4天后將寫滿"
          description: "{{$labels.mountpoint }}"
          value: "{{ $value }}%"
      - alert: DiskWriteRateHigh
        expr: sum by (instance) (irate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "disk write rate (instance {{ $labels.instance }})"
          description: "磁盤寫入速率大於50MB/s"
          # NOTE(review): value is MB/s here, the "%" suffix is misleading.
          value: "{{ $value }}%"
      - alert: DiskReadLatencyHigh
        # time_seconds / reads = average seconds per read; the description says
        # 100 ms, so the threshold is 0.1 s (the original compared against 100,
        # i.e. 100 *seconds*, which could never realistically fire).
        expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "unusual disk read latency (instance {{ $labels.instance }})"
          description: "磁盤讀取延遲大於100毫秒"
          value: "{{ $value }}%"
      - alert: DiskWriteLatencyHigh
        # Same seconds-vs-milliseconds fix as DiskReadLatencyHigh.
        expr: rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1
        for: 1m
        labels:
          name: disk
          severity: Critical
        annotations:
          summary: "unusual disk write latency (instance {{ $labels.instance }})"
          description: "磁盤寫入延遲大於100毫秒"
          value: "{{ $value }}%"
      - alert: NetworkInHigh
        expr: sum by (instance) (irate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
        for: 1m
        labels:
          name: network
          severity: Critical
        annotations:
          # was $labels.mountpoint — only `instance` survives `sum by (instance)`.
          summary: "{{ $labels.instance }} 流入網絡帶寬過高"
          description: "{{ $labels.instance }}流入網絡異常,高於100M"
          value: "{{ $value }}"
      - alert: NetworkOutHigh
        expr: sum by (instance) (irate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
        for: 1m
        labels:
          name: network
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} 發送網絡帶寬過高"
          description: "{{ $labels.instance }}發送網絡異常,高於100M"
          value: "{{ $value }}"
      - alert: TcpConnectionHigh
        expr: node_netstat_Tcp_CurrEstab > 5000
        for: 1m
        labels:
          name: tcp
          severity: Critical
        annotations:
          # was $labels.mountpoint — netstat metrics have no mountpoint label.
          summary: "{{ $labels.instance }} TCP_ESTABLISHED過高!"
          description: "{{ $labels.instance }} TCP_ESTABLISHED大於5000"
          value: "{{ $value }}"
      # redis_exporter
      - alert: RedisDown
        expr: redis_up == 0
        for: 1m
        labels:
          name: redis
          severity: error
        annotations:
          summary: "redis down (instance {{ $labels.instance }})"
          description: "redis instance is down"
          value: "{{ $value }}"
      # The two connection alerts below shared one (misspelled) name in the
      # original; they are now named distinctly for the low/high thresholds.
      - alert: RedisClientConnectionsLow
        expr: redis_connected_clients < 5
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis client connection Too few connections"
          description: "{{ $labels.instance }} redis client connectionToo few connections 少於5個"
          value: "{{ $value }}"
      - alert: RedisClientConnectionsHigh
        expr: redis_connected_clients > 5000
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis client TooManyConnections"
          description: "{{ $labels.instance }} redis client connection超過5000"
          value: "{{ $value }}"
      - alert: RedisMemUsageHigh
        expr: redis_memory_used_bytes/ (1024 * 1024 ) > 4096
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis memory usage 超過4096MB!"
          description: "{{ $labels.instance }} redis memory usage 超過4096MB"
          value: "{{ $value }}"
      - alert: RedisOutOfMemory
        expr: redis_memory_used_bytes / redis_memory_max_bytes * 100 > 80
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "{{ $labels.instance }} redis memory usage 使用率過高"
          description: "{{ $labels.instance }} redis memory usage 使用率超過80%"
          value: "{{ $value }}"
      - alert: RedisRejectedConnections
        expr: increase(redis_rejected_connections_total[1m]) > 0
        for: 1m
        labels:
          name: redis
          severity: Critical
        annotations:
          summary: "rejected connections (instance {{ $labels.instance }})"
          description: "{{ $labels.instance }} redis has been rejected"
          value: "{{ $value }}"
      # zookeeper_exporter
      - alert: ZookeeperDown
        expr: zk_up == 0
        for: 1m
        labels:
          name: zoookeeper
          severity: error
        annotations:
          summary: "zookeeper down (instance {{ $labels.instance }})"
          description: "zoookeeper instance is down"
          value: "{{ $value }}"
      - alert: ZookeeperLeaderMissing
        expr: zk_server_state{state="leader"} != 1
        for: 1m
        labels:
          name: zoookeeper
          severity: Critical
        annotations:
          summary: "{{$labels.instance }} zoookeeper leader is die."
          description: "{{$labels.instance }} zoookeeper leader is die,please check it quickly."
          value: "{{ $value }}"
      - alert: ZookeeperNodesLow
        expr: sum(zk_server_state) < 3
        for: 1m
        labels:
          name: zoookeeper
          severity: Critical
        annotations:
          # NOTE(review): sum() drops all labels, so $labels.instance renders
          # empty here — confirm and consider `count(zk_up == 1) < 3` instead.
          summary: "{{$labels.instance }} zoookeeper node number 小於 3臺"
          description: "{{$labels.instance }} zoookeeper集羣正常節點小於3臺"
          value: "{{ $value }}"
      - alert: ZookeeperSnapshotTooBig
        expr: zk_approximate_data_size / 1024 > 1024
        for: 1m
        labels:
          name: zoookeeper
          severity: Critical
        annotations:
          summary: "{{$labels.instance }} zoookeeper snapshot size It's too big"
          description: "{{$labels.instance }} zoookeeper snapshot size 大於 1GB"
          value: "{{ $value }}"
      - alert: ZookeeperFileDescriptorsHigh
        expr: zk_open_file_descriptor_count > zk_max_file_descriptor_count * 0.85
        for: 1m
        labels:
          name: zoookeeper
          severity: Critical
        annotations:
          summary: "{{$labels.instance }} zoookeeper 文件句柄數過高"
          description: "{{$labels.instance }} zoookeeper 打開文件句柄數大於系統配置參數"
          value: "{{ $value }}"
      # jmx_exporter
      - alert: JvmHeapUsageHigh
        expr: jvm_memory_bytes_used{job="upp", area="heap"} / jvm_memory_bytes_max * 100 >80
        for: 3m
        labels:
          name: jmx
          severity: Critical
        annotations:
          summary: "JVM Instance {{ $labels.instance }} memory usage > 80%"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been in status [heap usage > 80%] for more than 1 minutes. current usage ({{ $value }}%)"
          value: "{{ $value }}"
      - alert: JvmOldGcTimeHigh
        expr: increase(jvm_gc_collection_seconds_sum{gc="PS MarkSweep"}[5m]) > 5 * 60 * 0.8
        for: 3m
        labels:
          name: jmx
          severity: Critical
        annotations:
          summary: "JVM Instance {{ $labels.instance }} Old GC time > 80% running time"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been in status [Old GC time > 80% running time] for more than 5 minutes. current seconds ({{ $value }}%)"
          value: "{{ $value }}"
      # consul_exporter
      - alert: ConsulServiceHealthcheckFailed
        expr: consul_catalog_service_node_healthy == 0
        for: 3m
        labels:
          name: consul
          severity: Critical
        annotations:
          summary: "Service healthcheck failed (instance {{ $labels.instance }})"
          # closing backtick after service_id was missing in the original
          description: "Service: `{{ $labels.service_name }}` Healthcheck: `{{ $labels.service_id }}`"
          value: "{{ $value }}"
      - alert: ConsulServerNodesLow
        expr: sum(consul_catalog_service_node_healthy{service_name="consul"}) < 3
        for: 3m
        labels:
          name: consul
          severity: Critical
        annotations:
          summary: "consul server node 小於 3臺 (instance {{ $labels.instance }})"
          description: "consul server node 小於 3臺 (instance {{ $labels.instance }})"
          value: "{{ $value }}"
      # blackbox_exporter
      - alert: BlackboxProbeFailed
        expr: probe_success == 0
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          # opening quote was missing in the original (YAML syntax error)
          summary: "Probe failed (instance {{ $labels.instance }})"
          description: "Probe failed LABELS: {{ $labels }}"
          value: "{{ $value }}"
      - alert: BlackboxHttpStatusCode
        # PromQL set operators are lowercase; uppercase OR is a parse error.
        expr: probe_http_status_code <= 199 or probe_http_status_code >= 300
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          summary: "Status Code (instance {{ $labels.instance }})"
          description: "HTTP status code is not 200-299 LABELS: {{ $labels }}"
          value: "{{ $value }}"
      - alert: BlackboxPingDelay
        expr: probe_icmp_duration_seconds > 2
        for: 3m
        labels:
          name: blackbox
          severity: Critical
        annotations:
          summary: "Blackbox slow ping (instance {{ $labels.instance }})"
          description: "Blackbox ping took more than 2s LABELS: {{ $labels }}"
          value: "{{ $value }}"
# Source: 【轉】Prometheus 告警規則模板 (reposted blog article; the scraped
# comment-form boilerplate that followed the title has been removed — it was
# not part of the configuration and broke YAML parsing).