Installing and deploying Alertmanager in k8s

Prepare the image:

[root@hdss7-200 ~]# docker pull docker.io/prom/alertmanager:v0.19.0
v0.19.0: Pulling from prom/alertmanager
8e674ad76dce: Already exists
e77d2419d1c2: Already exists
fc0b06cce5a2: Pull complete
1cc6eb76696f: Pull complete
c4b97307695d: Pull complete
d49e70084386: Pull complete
Digest: sha256:7dbf4949a317a056d11ed8f379826b04d0665fad5b9334e1d69b23e946056cd3
Status: Downloaded newer image for prom/alertmanager:v0.19.0
docker.io/prom/alertmanager:v0.19.0
[root@hdss7-200 ~]# docker images|grep alert
prom/alertmanager                          v0.19.0                    30594e96cbe8        7 months ago        53.2MB
[root@hdss7-200 ~]# docker tag 30594e96cbe8 harbor.od.com/infra/alertmanager:v0.19.0
[root@hdss7-200 ~]# docker push harbor.od.com/infra/alertmanager:v0.19.0
The push refers to repository [harbor.od.com/infra/alertmanager]
bb7386721ef9: Pushed
13b4609b0c95: Pushed
ba550e698377: Pushed
fa5b6d2332d5: Pushed
3163e6173fcc: Mounted from infra/prometheus
6194458b07fc: Mounted from infra/prometheus
v0.19.0: digest: sha256:8088fac0a74480912fbb76088247d0c4e934f1dd2bd199b52c40c1e9dba69917 size: 1575

Prepare the resource manifests:

[root@hdss7-200 alertmanager]# cat cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: alertmanager-config
  namespace: infra
data:
  config.yml: |-
    global:
      # how long to wait before declaring an alert resolved once it stops firing
      resolve_timeout: 5m
      # SMTP settings for email notifications
      smtp_smarthost: 'smtp.163.com:25'
      smtp_from: '[email protected]'
      smtp_auth_username: '[email protected]'
      smtp_auth_password: 'xxxxxx'
      smtp_require_tls: false
    # root route that every incoming alert enters; it defines the dispatch policy
    route:
      # labels used to regroup incoming alerts; e.g. alerts carrying cluster=A and alertname=LatencyHigh are aggregated into a single group
      group_by: ['alertname', 'cluster']
      # after a new alert group is created, wait at least group_wait before sending the first notification, so that several alerts for the same group can be collected and fired together
      group_wait: 30s
      # after the first notification has been sent, wait group_interval before notifying about new alerts added to the group
      group_interval: 5m
      # if a notification has already been sent successfully, wait repeat_interval before resending it
      repeat_interval: 5m
      # default receiver: alerts not matched by any sub-route are sent here
      receiver: default
    receivers:
    - name: 'default'
      email_configs:
      - to: '[email protected]'
        send_resolved: true
[root@hdss7-200 alertmanager]# cat dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: alertmanager
  namespace: infra
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alertmanager
  template:
    metadata:
      labels:
        app: alertmanager
    spec:
      containers:
      - name: alertmanager
        image: harbor.od.com/infra/alertmanager:v0.19.0
        args:
          - "--config.file=/etc/alertmanager/config.yml"
          - "--storage.path=/alertmanager"
        ports:
        - name: alertmanager
          containerPort: 9093
        volumeMounts:
        - name: alertmanager-cm
          mountPath: /etc/alertmanager
      volumes:
      - name: alertmanager-cm
        configMap:
          name: alertmanager-config
      imagePullSecrets:
      - name: harbor
[root@hdss7-200 alertmanager]# cat svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: alertmanager
  namespace: infra
spec:
  selector:
    app: alertmanager
  ports:
    - port: 80
      targetPort: 9093
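
The ConfigMap's config.yml can be validated before it is loaded into the cluster. A minimal sketch, assuming the amtool binary from the Alertmanager release is available on the admin host and the data block above has been saved as a standalone file (the path below is hypothetical):

# syntax-check the Alertmanager configuration; amtool reports success or the parse error
# (hypothetical path; point it at wherever config.yml was saved)
amtool check-config /data/k8s-yaml/alertmanager/config.yml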

Configure alerting rules:

[root@hdss7-200 alertmanager]# cat /data/nfs-volume/prometheus/etc/rules.yml
groups:
- name: hostStatsAlert
  rules:
  - alert: hostCpuUsageAlert
    expr: sum(avg without (cpu)(irate(node_cpu{mode!='idle'}[5m]))) by (instance) > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} CPU usage above 85% (current value: {{ $value }}%)"
  - alert: hostMemUsageAlert
    expr: (node_memory_MemTotal - node_memory_MemAvailable)/node_memory_MemTotal > 0.85
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.instance }} MEM usage above 85% (current value: {{ $value }}%)"
  - alert: OutOfInodes
    expr: node_filesystem_free{fstype="overlay",mountpoint ="/"} / node_filesystem_size{fstype="overlay",mountpoint ="/"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of inodes (instance {{ $labels.instance }})"
      description: "Disk is almost running out of available inodes (< 10% left) (current value: {{ $value }})"
  - alert: OutOfDiskSpace
    expr: node_filesystem_free{fstype="overlay",mountpoint ="/rootfs"} / node_filesystem_size{fstype="overlay",mountpoint ="/rootfs"} * 100 < 10
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Out of disk space (instance {{ $labels.instance }})"
      description: "Disk is almost full (< 10% left) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputIn
    expr: sum by (instance) (irate(node_network_receive_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput in (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably receiving too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualNetworkThroughputOut
    expr: sum by (instance) (irate(node_network_transmit_bytes[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual network throughput out (instance {{ $labels.instance }})"
      description: "Host network interfaces are probably sending too much data (> 100 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadRate
    expr: sum by (instance) (irate(node_disk_bytes_read[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read rate (instance {{ $labels.instance }})"
      description: "Disk is probably reading too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskWriteRate
    expr: sum by (instance) (irate(node_disk_bytes_written[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write rate (instance {{ $labels.instance }})"
      description: "Disk is probably writing too much data (> 50 MB/s) (current value: {{ $value }})"
  - alert: UnusualDiskReadLatency
    expr: rate(node_disk_read_time_ms[1m]) / rate(node_disk_reads_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk read latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (read operations > 100ms) (current value: {{ $value }})"
  - alert: UnusualDiskWriteLatency
    expr: rate(node_disk_write_time_ms[1m]) / rate(node_disk_writes_completed[1m]) > 100
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Unusual disk write latency (instance {{ $labels.instance }})"
      description: "Disk latency is growing (write operations > 100ms) (current value: {{ $value }})"
- name: http_status
  rules:
  - alert: ProbeFailed
    expr: probe_success == 0
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Probe failed (instance {{ $labels.instance }})"
      description: "Probe failed (current value: {{ $value }})"
  - alert: StatusCode
    expr: probe_http_status_code <= 199 or probe_http_status_code >= 400
    for: 1m
    labels:
      severity: error
    annotations:
      summary: "Status Code (instance {{ $labels.instance }})"
      description: "HTTP status code is not 200-399 (current value: {{ $value }})"
  - alert: SslCertificateWillExpireSoon
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "SSL certificate will expire soon (instance {{ $labels.instance }})"
      description: "SSL certificate expires in 30 days (current value: {{ $value }})"
  - alert: SslCertificateHasExpired
    expr: probe_ssl_earliest_cert_expiry - time()  <= 0
    for: 5m
    labels:
      severity: error
    annotations:
      summary: "SSL certificate has expired (instance {{ $labels.instance }})"
      description: "SSL certificate has expired already (current value: {{ $value }})"
  - alert: BlackboxSlowPing
    expr: probe_icmp_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow ping (instance {{ $labels.instance }})"
      description: "Blackbox ping took more than 2s (current value: {{ $value }})"
  - alert: BlackboxSlowRequests
    expr: probe_http_duration_seconds > 2
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Blackbox slow requests (instance {{ $labels.instance }})"
      description: "Blackbox request took more than 2s (current value: {{ $value }})"
  - alert: PodCpuUsagePercent
    expr: sum(sum(label_replace(irate(container_cpu_usage_seconds_total[1m]),"pod","$1","container_label_io_kubernetes_pod_name", "(.*)"))by(pod) / on(pod) group_right kube_pod_container_resource_limits_cpu_cores *100 )by(container,namespace,node,pod,severity) > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      summary: "Pod cpu usage percent has exceeded 80% (current value: {{ $value }}%)"

Apply the resource manifests:

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/alertmanager/cm.yaml
configmap/alertmanager-config created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/alertmanager/dp.yaml
deployment.extensions/alertmanager created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/alertmanager/svc.yaml
service/alertmanager created

The container reports an error on startup:

Container logs:

[root@hdss7-21 ~]# kubectl logs -f  alertmanager-6754975dbf-mjgb8 -n infra
level=info ts=2020-04-06T08:13:43.280Z caller=main.go:217 msg="Starting Alertmanager" version="(version=0.19.0, branch=HEAD, revision=7aa5d19fea3f58e3d27dbdeb0f2883037168914a)"
level=info ts=2020-04-06T08:13:43.281Z caller=main.go:218 build_context="(go=go1.12.8, user=root@587d0268f963, date=20190903-15:01:40)"
level=warn ts=2020-04-06T08:13:43.282Z caller=cluster.go:154 component=cluster err="couldn't deduce an advertise address: no private IP found, explicit advertise addr not provided"
level=error ts=2020-04-06T08:13:43.284Z caller=main.go:242 msg="unable to initialize gossip mesh" err="create memberlist: Failed to get final advertise address: No private IP address found, and explicit IP not provided"
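
The gossip error comes from Alertmanager's HA clustering: since v0.15 it tries to pick a memberlist advertise address automatically, and the Pod IPs in this cluster (172.7.x.x) fall outside the RFC 1918 private ranges, so no "private IP" can be deduced. A possible alternative to rolling back, not used here, is to disable clustering for this single-replica setup by passing an empty --cluster.listen-address in the container args of dp.yaml:

        args:
          - "--config.file=/etc/alertmanager/config.yml"
          - "--storage.path=/alertmanager"
          # an empty value disables the HA gossip cluster, so no advertise address is needed
          - "--cluster.listen-address="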

Version 0.19.0 appears to have problems in this environment, so roll the image back to 0.14.0, after which it starts normally:

[root@hdss7-200 alertmanager]# docker pull docker.io/prom/alertmanager:v0.14.0
v0.14.0: Pulling from prom/alertmanager
Image docker.io/prom/alertmanager:v0.14.0 uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/
65fc92611f38: Pull complete
439b527af350: Pull complete
a3ed95caeb02: Pull complete
f65042d2fee2: Pull complete
282a28c3341d: Pull complete
f36e0769f073: Pull complete
Digest: sha256:2ff45fb2704a387347aa34f154f450d4ad86a8f47bcf72437761267ebdf45efb
Status: Downloaded newer image for prom/alertmanager:v0.14.0
docker.io/prom/alertmanager:v0.14.0
[root@hdss7-200 alertmanager]#
[root@hdss7-200 alertmanager]#
[root@hdss7-200 alertmanager]# docker images|grep alert
prom/alertmanager                          v0.19.0                    30594e96cbe8        7 months ago        53.2MB
harbor.od.com/infra/alertmanager           v0.19.0                    30594e96cbe8        7 months ago        53.2MB
prom/alertmanager                          v0.14.0                    23744b2d645c        2 years ago         31.9MB
[root@hdss7-200 alertmanager]# docker tag 23744b2d645c harbor.od.com/infra/alertmanager:v0.14.0
[root@hdss7-200 alertmanager]# docker push harbor.od.com/infra/alertmanager:v0.14.0
The push refers to repository [harbor.od.com/infra/alertmanager]
5f70bf18a086: Mounted from infra/dubbo-monitor
b5abc4736d3f: Pushed
6b961451fcb0: Pushed
30d4e7b232e4: Pushed
68d1a8b41cc0: Pushed
4febd3792a1f: Pushed
v0.14.0: digest: sha256:77a5439a03d76ba275b9a6e004113252ec4ce3336cf850a274a637090858a5ed size: 2603
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/alertmanager/dp.yaml
deployment.extensions/alertmanager configured
[root@hdss7-21 ~]# kubectl -n infra get pod  -o wide
NAME                             READY   STATUS    RESTARTS   AGE    IP            NODE                NOMINATED NODE   READINESS GATES
alertmanager-5d46bdc7b4-mpwd9    1/1     Running   0          16s    172.7.21.7    hdss7-21.host.com   <none>           <none>
dubbo-monitor-6676dd74cc-fccl4   1/1     Running   0          4h6m   172.7.21.14   hdss7-21.host.com   <none>           <none>
grafana-d6588db94-t4cvc          1/1     Running   0          93m    172.7.22.7    hdss7-22.host.com   <none>           <none>
prometheus-6767456ffb-5fzfb      1/1     Running   0          136m   172.7.21.3    hdss7-21.host.com   <none>           <none>
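
With the Pod running, the Alertmanager API can be spot-checked from an operations node. A minimal sketch using kubectl port-forward (namespace and Deployment name as above; /api/v1/status is Alertmanager's v1 status endpoint):

# forward local port 9093 to the alertmanager Deployment in the infra namespace
kubectl -n infra port-forward deployment/alertmanager 9093:9093 &
# a JSON reply with "status":"success" means the service is answering
curl -s http://127.0.0.1:9093/api/v1/status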

Append the following to the Prometheus configuration file:

alerting:
  alertmanagers:
    - static_configs:
        - targets: ["alertmanager"]
rule_files:
 - "/data/etc/rules.yml"

Note that in production we avoid restarting the Prometheus Pod whenever possible: it uses a lot of resources, and a restart can easily drag down the cluster. Instead, the configuration can be reloaded gracefully like this:

[root@hdss7-21 ~]# kubectl -n infra get pod  -o wide
NAME                             READY   STATUS    RESTARTS   AGE    IP            NODE                NOMINATED NODE   READINESS GATES
alertmanager-5d46bdc7b4-mpwd9    1/1     Running   0          16s    172.7.21.7    hdss7-21.host.com   <none>           <none>
dubbo-monitor-6676dd74cc-fccl4   1/1     Running   0          4h6m   172.7.21.14   hdss7-21.host.com   <none>           <none>
grafana-d6588db94-t4cvc          1/1     Running   0          93m    172.7.22.7    hdss7-22.host.com   <none>           <none>
prometheus-6767456ffb-5fzfb      1/1     Running   0          136m   172.7.21.3    hdss7-21.host.com   <none>           <none>
[root@hdss7-21 ~]# ps -ef |grep prometheus
root      7292 22343  0 16:30 pts/0    00:00:00 grep --color=auto prometheus
root     12367 12349  6 14:09 ?        00:09:26 /bin/prometheus --config.file=/data/etc/prometheus.yml --storage.tsdb.path=/data/prom-db --storage.tsdb.min-block-duration=10m --storage.tsdb.retention=72h
root     24205 24186  1 15:35 ?        00:00:47 traefik traefik --api --kubernetes --logLevel=INFO --insecureskipverify=true --kubernetes.endpoint=https://10.4.7.11:7443 --accesslog --accesslog.filepath=/var/log/traefik_access.log --traefiklog --traefiklog.filepath=/var/log/traefik.log --metrics.prometheus
[root@hdss7-21 ~]# kill -SIGHUP 12367
[root@hdss7-21 ~]# ps -ef |grep prometheus
root      7855 22343  0 16:30 pts/0    00:00:00 grep --color=auto prometheus
root     12367 12349  6 14:09 ?        00:09:29 /bin/prometheus --config.file=/data/etc/prometheus.yml --storage.tsdb.path=/data/prom-db --storage.tsdb.min-block-duration=10m --storage.tsdb.retention=72h
root     24205 24186  1 15:35 ?        00:00:47 traefik traefik --api --kubernetes --logLevel=INFO --insecureskipverify=true --kubernetes.endpoint=https://10.4.7.11:7443 --accesslog --accesslog.filepath=/var/log/traefik_access.log --traefiklog --traefiklog.filepath=/var/log/traefik.log --metrics.prometheus
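
An alternative to SIGHUP is Prometheus's HTTP reload endpoint. It only works when the process is started with --web.enable-lifecycle, which is not among the flags shown above, so that flag would have to be added to the Deployment args first; a sketch under that assumption, using the Pod IP from the listing:

# POST /-/reload triggers the same graceful configuration reload as SIGHUP
# (requires --web.enable-lifecycle on the Prometheus command line)
curl -X POST http://172.7.21.3:9090/-/reload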

Check the alerts:

(screenshot of the Prometheus Alerts page omitted)


