Getting Started with ELK: A Simple Log Collection Setup

Author's blog: http://xsboke.blog.51cto.com

                    ------- Thank you for reading; questions and feedback are welcome.

Table of contents:

  • Component overview and roles
  • Environment
  • Web server configuration
  • Elasticsearch configuration
  • Accessing Elasticsearch/Kibana through Nginx
  • Extension: filebeat input configuration
  • Troubleshooting

Component overview and roles

filebeat collects logs   ->  logstash filters/formats  -> elasticsearch stores  ->  kibana visualizes

# Personal understanding
Both logstash and filebeat can collect logs and ship them directly to Elasticsearch.
Logstash simply offers more functionality than filebeat, such as filtering and formatting,
while filebeat is lighter than logstash, so it collects logs with less overhead.
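
Once the stack described below is running, the whole chain can be smoke-tested from the web server listed in the Environment section with two commands. This is only a sketch: the log path, the nginx-access-* index pattern, and the Elasticsearch address 172.16.100.252:9200 are the values used later in this article.

```shell
# Append a recognizable test line to the log that filebeat watches
echo "pipeline-smoke-test $(date +%s)" >> /var/log/nginx/access.log

# A few seconds later, ask Elasticsearch whether today's nginx-access index received it
curl -s "http://172.16.100.252:9200/nginx-access-$(date +%Y.%m.%d)/_search?q=pipeline-smoke-test&pretty"
```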

Environment

centos7.2-web               172.16.100.251      nginx/filebeat/logstash
centos7.2-elasticsearch     172.16.100.252      elasticsearch/kibana

Web server configuration

1. Install Nginx
   yum -y install yum-utils
   vim /etc/yum.repos.d/nginx.repo
    [nginx-stable]
    name=nginx stable repo
    baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
    gpgcheck=1
    enabled=1
    gpgkey=https://nginx.org/keys/nginx_signing.key
    module_hotfixes=true

    [nginx-mainline]
    name=nginx mainline repo
    baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
    gpgcheck=1
    enabled=1
    gpgkey=https://nginx.org/keys/nginx_signing.key
    module_hotfixes=true

   yum-config-manager --enable nginx-mainline
   yum -y install nginx
   nginx
2. Set up the JDK
   tar zxf jdk-8u202-linux-x64.tar.gz
   mv jdk1.8.0_202 /usr/local/jdk1.8
   vim /etc/profile
    export JAVA_HOME=/usr/local/jdk1.8
    export JRE_HOME=/usr/local/jdk1.8/jre
    export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
    export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH

   source /etc/profile
   # Without this symlink, logstash will still complain that it cannot find the JDK (OpenJDK)
   ln -s /usr/local/jdk1.8/bin/java /usr/bin/java
3. Install and configure filebeat
   curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.4.0-x86_64.rpm
   rpm -vi filebeat-7.4.0-x86_64.rpm    
   vim /etc/filebeat/filebeat.yml
    filebeat.inputs:
     - type: log
       enabled: true
       paths:
         - /var/log/nginx/access.log  # the log file to monitor
       tags: ["access"]               # tags let the different logs be told apart downstream

     - type: log
       enabled: true
       paths:
         - /var/log/nginx/error.log
       tags: ["error"]

    output.logstash:
      hosts: ["localhost:5044"] # the logstash pipeline configured below listens on this port

   # Comment out "output.elasticsearch"; otherwise enabling the logstash module fails with: Error initializing beat: error unpacking config data: more than one namespace configured accessing 'output' (source:'/etc/filebeat/filebeat.yml')

   # Enable the logstash module; this actually edits "/etc/filebeat/modules.d/logstash.yml"
   filebeat modules enable logstash
4. Install logstash
   rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
   vim /etc/yum.repos.d/logstash.repo
    [logstash-7.x]
    name=Elastic repository for 7.x packages
    baseurl=https://artifacts.elastic.co/packages/7.x/yum
    gpgcheck=1
    gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
    enabled=1
    autorefresh=1
    type=rpm-md

   yum -y install logstash
   ln -s /usr/share/logstash/bin/logstash /usr/local/bin/

   # A brief overview of some logstash.yml settings
   path.data: data directory
   config.reload.automatic: whether to reload configuration files automatically
   config.reload.interval: how often to check for configuration changes
   http.host: host the HTTP API binds to
   http.port: HTTP API port

   # Write your pipeline configuration under /etc/logstash/conf.d/
   vim /etc/logstash/conf.d/nginx.conf
    input {
           beats {
               port => 5044
           }
    }
    output {  
           if "access" in [tags] {    # 通過判斷標籤名,爲不同的日誌配置不同的index
               elasticsearch {
                   hosts => ["172.16.100.252:9200"]
                   index => "nginx-access-%{+YYYY.MM.dd}" # 索引名不能大寫
                   sniffing => true
                   template_overwrite => true
               }
           }

           if "error" in [tags] {
               elasticsearch {
                   hosts => ["172.16.100.252:9200"]
                   index => "nginx-error-%{+YYYY.MM.dd}"
                   sniffing => true
                   template_overwrite => true
               }
           }
    }

   systemctl daemon-reload
   systemctl enable logstash
   systemctl start logstash
5. Firewall configuration
   firewall-cmd --permanent --add-port=80/tcp
   firewall-cmd --reload
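
Before moving on to the Elasticsearch host, it is worth sanity-checking the pieces installed above. A minimal verification sketch, using the paths and the 5044 beats port from this article (filebeat test output expects logstash to already be running):

```shell
# JDK is on the PATH
java -version

# filebeat: check filebeat.yml syntax and test the connection to output.logstash
filebeat test config
filebeat test output

# logstash: dry-run the pipeline file written above, then confirm port 5044 is listening
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/nginx.conf --config.test_and_exit
ss -lntp | grep 5044
```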

Elasticsearch configuration

1. Set up the JDK
   tar zxf jdk-8u202-linux-x64.tar.gz
   mv jdk1.8.0_202 /usr/local/jdk1.8
   vim /etc/profile
    export JAVA_HOME=/usr/local/jdk1.8
    export JRE_HOME=/usr/local/jdk1.8/jre
    export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
    export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH

   source /etc/profile
2. Install Elasticsearch
```shell
vim /etc/yum.repos.d/elasticsearch.repo
    [elasticsearch-7.x]
    name=Elasticsearch repository for 7.x packages
    baseurl=https://artifacts.elastic.co/packages/7.x/yum
    gpgcheck=1
    gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
    enabled=1
    autorefresh=1
    type=rpm-md

yum -y install elasticsearch
# Edit /etc/elasticsearch/elasticsearch.yml. Key settings:
#     cluster.name:                       cluster name
#     node.name:                          node name
#     path.data:                          data directory
#     path.logs:                          log directory
#     bootstrap.memory_lock:              whether to lock memory at startup
#     network.host:                       address the service binds to; 0.0.0.0 means all addresses
#     http.port:                          listening port
#     discovery.seed_hosts:               cluster hosts
#     cluster.initial_master_nodes:       the initial master-eligible node(s)

sed -i "/#cluster.name: my-application/a\cluster.name: my-elk-cluster" /etc/elasticsearch/elasticsearch.yml
sed -i "/#node.name: node-1/a\node.name: node-1" /etc/elasticsearch/elasticsearch.yml
sed -i "s/path.data: \/var\/lib\/elasticsearch/path.data: \/data\/elasticsearch/g" /etc/elasticsearch/elasticsearch.yml
sed -i "/#bootstrap.memory_lock: true/a\bootstrap.memory_lock: false" /etc/elasticsearch/elasticsearch.yml
sed -i "/#network.host: 192.168.0.1/a\network.host: 0.0.0.0" /etc/elasticsearch/elasticsearch.yml
sed -i "/#http.port: 9200/a\http.port: 9200" /etc/elasticsearch/elasticsearch.yml
sed -i '/#discovery.seed_hosts: \["host1", "host2"\]/a\discovery.seed_hosts: \["172.16.100.252"\]' /etc/elasticsearch/elasticsearch.yml
sed -i '/#cluster.initial_master_nodes: \["node-1", "node-2"\]/a\cluster.initial_master_nodes: \["node-1"\]' /etc/elasticsearch/elasticsearch.yml

mkdir -p /data/elasticsearch
chown  elasticsearch:elasticsearch /data/elasticsearch
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch
```
3. Install and configure Kibana
   rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
   vim /etc/yum.repos.d/kibana.repo
    [kibana-7.x]
    name=Kibana repository for 7.x packages
    baseurl=https://artifacts.elastic.co/packages/7.x/yum
    gpgcheck=1
    gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
    enabled=1
    autorefresh=1
    type=rpm-md

   yum -y install kibana

   sed -i "/#server.port: 5601/a\server.port: 5601" /etc/kibana/kibana.yml
   sed -i '/#server.host: "localhost"/a\server.host: "0.0.0.0"' /etc/kibana/kibana.yml
   sed -i '/#elasticsearch.hosts: \["http:\/\/localhost:9200"\]/a\elasticsearch.hosts: \["http:\/\/localhost:9200"\]' /etc/kibana/kibana.yml
   sed -i '/#kibana.index: ".kibana"/a\kibana.index: ".kibana"' /etc/kibana/kibana.yml

   systemctl daemon-reload
   systemctl enable kibana
   systemctl start kibana
4. Firewall configuration
   firewall-cmd --permanent --add-port=9200/tcp
   # firewall-cmd --permanent --add-port=9300/tcp # cluster transport port
   firewall-cmd --permanent --add-port=5601/tcp
   firewall-cmd --reload
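
A quick check that both services came up, run on 172.16.100.252 (Kibana can take a minute to respond after systemctl start kibana):

```shell
# Elasticsearch should answer with its banner and report the cluster health
curl -s "http://127.0.0.1:9200"
curl -s "http://127.0.0.1:9200/_cluster/health?pretty"

# Kibana should answer on 5601
curl -I "http://127.0.0.1:5601"
```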

Accessing Elasticsearch/Kibana through Nginx (using Nginx to restrict access to Elasticsearch/Kibana)

1. 172.16.100.252
   # Edit hosts
   vim /etc/hosts
   172.16.100.252   elk.elasticsearch

   # Install nginx and configure it with the following server blocks
   server {
       listen       80;
       server_name  elk.elasticsearch;

       location / {
           allow 127.0.0.1/32;
           allow 172.16.100.251/32;
           deny all;
           proxy_pass http://127.0.0.1:9200;
       }
   }

   server {
       listen       80;
       server_name  elk.kibana;

       location / {
           allow "可以訪問kibana的IP";
           deny all;
           proxy_pass http://127.0.0.1:5601;
       }
   }

   # Update the Elasticsearch configuration
   network.host: 127.0.0.1
   discovery.seed_hosts: ["elk.elasticsearch"]

   # Update the Kibana configuration
   server.host: "127.0.0.1"

   systemctl restart elasticsearch
   systemctl restart kibana
2. 172.16.100.251
   # Edit hosts
   vim /etc/hosts
   172.16.100.252   elk.elasticsearch

   # logstash input/output configuration
   vim /etc/logstash/conf.d/nginx.conf
    input {
           beats {
               port => 5044
           }
    }
    output {  
           if "access" in [tags] {    # 通過判斷標籤名,爲不同的日誌配置不同的index
               elasticsearch {
                   hosts => ["elk.elasticsearch:80"]    # 必須指定端口,否則默認訪問9200
                   index => "nginx-access-%{+YYYY.MM.dd}" # 索引名不能大寫
                   sniffing => false
                   template_overwrite => true
               }
           }

           if "error" in [tags] {
               elasticsearch {
                   hosts => ["elk.elasticsearch:80"]
                   index => "nginx-error-%{+YYYY.MM.dd}"
                   sniffing => false
                   template_overwrite => true
               }
           }
    }

   systemctl restart logstash
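
With the hosts entry and the proxy in place, the restriction can be checked from the web server (172.16.100.251); this sketch assumes the nginx server blocks above are active on 172.16.100.252:

```shell
# From 172.16.100.251 (in the allow list): should return the Elasticsearch banner JSON via nginx on port 80
curl -s "http://elk.elasticsearch/"

# From a host that is NOT in the allow list, the same request should be rejected with 403
curl -sI "http://elk.elasticsearch/" | head -n 1
```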

Extension: filebeat input configuration

# Collect multi-line log entries as a single event
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  tags: ["access"]
  multiline.pattern: '^\[[0-9]{4}'   # regex to match; here, lines that start with [YYYY
  multiline.negate: true             # lines that do NOT match the pattern are treated as continuations
  multiline.match: after             # and are appended to the preceding matching line
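
A rough way to see the multiline settings in action, assuming the input above is active (stop the filebeat service first so the foreground run can acquire the data directory lock):

```shell
systemctl stop filebeat

# Append a fake two-line event: the second line does not start with "[YYYY",
# so it should be folded into the first line when the event is published
echo '[2019-10-21 12:00:00] first line of an event' >> /var/log/nginx/access.log
echo '    continuation line without a timestamp'    >> /var/log/nginx/access.log

# Run filebeat in the foreground and watch the published event in its output
filebeat -e
```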

# Collect logs from a given directory, including its subdirectories
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - "/var/log/**"
  recursive_glob.enabled: true  # enable recursive globbing so ** matches subdirectories
  tags: ["LogAll"]

Troubleshooting

# Run filebeat in the foreground and print its log to the terminal
filebeat -e

# Run logstash in the foreground with the pipeline file and print its log to the terminal
logstash -f /etc/logstash/conf.d/nginx.conf

# Write an arbitrary line to one of the monitored logs
echo "1" >> /var/log/nginx/access.log

# Then inspect the filebeat and logstash output to locate the error
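
If the foreground output is not conclusive, checking the far end and the service logs usually narrows the problem down; the address below is the Elasticsearch host used in this article:

```shell
# List the indices Elasticsearch has actually created; if nginx-access-*/nginx-error-*
# are missing, the break is upstream in filebeat or logstash
curl -s "http://172.16.100.252:9200/_cat/indices?v"

# Follow the service logs on the web server
journalctl -u filebeat -f
journalctl -u logstash -f
```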