ELK是一整套解決方案
日誌分析平臺,是三個軟件產品的首字母縮寫,ELK分別代表:
- Elasticsearch:負責日誌檢索和儲存
- Logstash:負責日誌的收集和分析、處理
- Kibana:負責日誌的可視化,提供圖形化界面,實時總結流量和數據的圖表。
ELk的架構:
![在這裏插入圖片描述]()
準備環境:
![在這裏插入圖片描述]()
一: ES集羣安裝
安裝一臺ES服務器(其他4臺數據庫服務器操作一致):
- 需要自定義yum源 普通光盤鏡像裏沒有elk的軟件包(利用ftp服務共享軟件包)
- 因爲elasticsearch是java開發的,所以需要安裝java-1.8.0-openjdk
- 修改主機名,設置主機名映射
- 修改配置文件
- 啓動服務
- 檢查服務
[root@es1 ~]# cat /etc/yum.repos.d/local.repo
[http] #centos7.5自帶的軟件包
name=http
baseurl = http://192.168.1.51 #將光盤掛載到/var/www/html
enabled=1
gpgcheck=0
[ftp] #自定義的軟件包
name = ftp
baseurl = ftp://192.168.1.51/ansible #將自定義yum源拷貝到/var/ftp/
enabled=1
gpgcheck=0
[root@es1 ~]# vim /etc/hosts #配置主機映射
192.168.1.51 es1
192.168.1.52 es2
192.168.1.53 es3
192.168.1.54 es4
192.168.1.55 es5
[root@es1 ~]# yum -y install java-1.8.0-openjdk elasticsearch
[root@es1 ~]# vim /etc/elasticsearch/elasticsearch.yml #修改主配置文件
cluster.name: myelk 將17行配置集羣名字
node.name: es1 將23行當前主機名稱
network.host: 0.0.0.0 將54行 0.0.0.0(監聽所有地址)
discovery.zen.ping.unicast.hosts: ["es1", "es2", "es3"] 68行聲明集羣裏的主機成員有誰,不需要全部寫進去
:wq
[root@es1 ~]# systemctl restart elasticsearch.service #重啓服務
[root@es1 ~]# curl http://127.0.0.1:9200 #訪問驗證
{
"name" : "es1", #節點名稱
"cluster_name" : "myelk", #集羣名稱
"version" : {
"number" : "2.3.4",
"build_hash" : "e455fd0c13dceca8dbbdbb1665d068ae55dabe3f",
"build_timestamp" : "2016-06-30T11:24:31Z",
"build_snapshot" : false,
"lucene_version" : "5.5.0"
},
"tagline" : "You Know, for Search"
}
返回一組json數據表示這臺elasticsearch數據庫安裝成功
部署其他四臺:
- 用ansible批量部署(ansible搭建環境及使用方法見上一章博客)
- 修改第一臺 es的配置文件將第23 行改爲 node.name: {{ ansible_hostname }}
- 編寫playbook實現批量部署
- 驗證
[root@es1 ~]# vim es.yml
---
- name: 部署ES集羣
hosts: es
tasks:
- name: 拷貝主機名映射文件
copy:
src: /etc/hosts
dest: /etc/hosts
owner: root
group: root
mode: 0644
- name: 拷貝YUM源
copy:
src: /etc/yum.repos.d/local.repo
dest: /etc/yum.repos.d/local.repo
owner: root
group: root
mode: 0644
- name: 安裝elasticsearch軟件和java-1.8.0
yum:
name: java-1.8.0-openjdk,elasticsearch
state: installed
update_cache: yes
- name: 拷貝集羣主配置文件
template: #使用template模塊可以解析配置文件中的變量
src: /etc/elasticsearch/elasticsearch.yml
dest: /etc/elasticsearch/elasticsearch.yml
owner: root
group: root
mode: 0644
- name: 重啓elasticsearch服務
service:
name: elasticsearch.service
state: restarted
[root@es1 ~]# ansible-playbook es.yml #執行playbook
[root@es1 ~]# curl http://192.168.1.51:9200/_cluster/health?pretty #查看集羣狀態
{
"cluster_name" : "myelk",
"status" : "green", #green表示正常
"timed_out" : false,
"number_of_nodes" : 5, #機器節點有5臺
"number_of_data_nodes" : 5,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
部署ES插件
- 插件安裝後存放在/usr/share/elasticsearch/plugins下
- 安裝插件使用的可執行文件plugin位於/usr/share/elasticsearch/bin下
- 在哪臺機器上安裝的只能在哪臺機器上訪問(這裏安裝在es5上)
- 插件的三種使用方法
- ./plugin install 下載插件
- ./plugin remove 移除插件
- ./plugin list 列出插件
[root@es5 ~]# ftp 192.168.1.51
ftp> ls
drwxr-xr-x 3 0 0 4096 Jun 30 03:10 ansible
drwxr-xr-x 2 0 0 6 Jun 23 05:05 centos-1804
drwxr-xr-x 2 0 0 106 Jun 30 03:10 elk
226 Directory send OK.
ftp> cd elk
250 Directory successfully changed.
ftp> ls
227 Entering Passive Mode (192,168,1,51,149,149).
150 Here comes the directory listing.
-rw-r--r-- 1 0 0 274341 Jun 29 14:49 bigdesk-master.zip
-rw-r--r-- 1 0 0 899857 Jun 29 14:49 elasticsearch-head-master.zip
-rw-r--r-- 1 0 0 2228148 Jun 29 14:49 elasticsearch-kopf-master.zip
[root@es5 ~]# cd /usr/share/elasticsearch/bin/ #必須在此目錄下載
[root@es5 bin]# ./plugin install ftp://192.168.1.51/elk/elasticsearch-head-master.zip
[root@es5 bin]# ./plugin install ftp://192.168.1.51/elk/elasticsearch-kopf-master.zip
[root@es5 bin]#./plugin install ftp://192.168.1.51/elk/bigdesk-master.zip
[root@es5 bin]# ./plugin list #列出插件
Installed plugins in /usr/share/elasticsearch/plugins:
- head
- kopf
- bigdesk
訪問插件測試(這裏拿head插件舉例):
- firefox 192.168.1.55:9200/_plugin/head(也可以直接用真機瀏覽器訪問,前提爲真機可以ping通該主機)
- firefox 192.168.1.55:9200/_plugin/bigdesk
- firefox 192.168.1.55:9200/_plugin/kopf
ES數據庫的增刪改查 - curl 常用參數介紹
- -A 修改請求agent
- -X 設置請求方法
- -I 顯示返回頭信息
_cat提供一組查詢集羣的信息
[root@ecs-proxy ~]# curl 192.168.1.51:9200/_cat #列出API的支持
=^.^=
/_cat/allocation
/_cat/shards
/_cat/shards/{index}
/_cat/master #集羣的主節點是哪個
/_cat/nodes #集羣的節點信息
/_cat/indices
/_cat/indices/{index}
/_cat/segments
/_cat/segments/{index}
/_cat/count
/_cat/count/{index}
/_cat/recovery
/_cat/recovery/{index}
/_cat/health #集羣健康狀態
/_cat/pending_tasks
/_cat/aliases
/_cat/aliases/{alias}
/_cat/thread_pool
/_cat/plugins #集羣的插件
/_cat/fielddata
/_cat/fielddata/{fields}
/_cat/nodeattrs
/_cat/repositories
/_cat/snapshots/{repository}
[root@ecs-proxy ~]# curl -XGET 192.168.1.51:9200/_cat/master?v #查看集羣主節點 ?v表示顯示詳細信息
id host ip node
Gn2JfHDUQXywHOsfCjx2YQ 192.168.1.53 192.168.1.53 ecs-0003
[root@ecs-proxy ~]# curl -XGET 192.168.1.51:9200/_cat/nodes?v #查看集羣節點信息
host ip heap.percent ram.percent load node.role master name
192.168.1.53 192.168.1.53 3 60 0.00 d * ecs-0003
192.168.1.55 192.168.1.55 8 60 0.00 d m ecs-0005
192.168.1.54 192.168.1.54 2 60 0.00 d m ecs-0004
192.168.1.52 192.168.1.52 2 60 0.00 d m ecs-0002
192.168.1.51 192.168.1.51 3 61 0.00 d m ecs-0001
數據導入
- 必須使用POST方式
- 數據格式必須是json
- 編碼格式data-binary
- 使用關鍵字_bulk導入數據
導入提前準備的json格式的日誌
[root@ecs-0001 ~]# gzip -d logs.jsonl.gz #解壓縮
[root@ecs-0001 ~]# ls
logs.jsonl
[root@ecs-0001 ~]#curl -XPOST http://192.168.1.52:9200/_bulk --data-binary @logs.jsonl #在任意節點將數據寫入都可以
數據導入後的樣子:
![在這裏插入圖片描述]()
二:安裝kibana
[root@kibana ~]# yum -y install kibana #yum源配置文件同上
[root@kibana ~]# vim /etc/hosts #修改主機名映射文件
192.168.1.51 ecs-0001
192.168.1.52 ecs-0002
192.168.1.53 ecs-0003
192.168.1.54 ecs-0004
192.168.1.55 ecs-0005
192.168.1.56 kibana
[root@kibana ~]# vim /opt/kibana/config/kibana.yml #修改kibana的配置文件
server.port: 5601 //服務監聽的端口號
server.host: "0.0.0.0" //監聽的地址
elasticsearch.url: http://192.168.1.51:9200 //後臺集羣地址,其餘配置去除註釋即可
kibana.index: ".kibana" //自動在數據庫生成.kibana索引來存數據
kibana.defaultAppId: "discover" //服務的默認首頁
elasticsearch.pingTimeout: 1500
elasticsearch.requestTimeout: 30000
elasticsearch.startupTimeout: 5000
[root@kibana ~]# systemctl start kibana
[root@kibana ~]# systemctl enable kibana
[root@kibana ~]# ss -ntulp | grep 5601
訪問驗證
真機瀏覽器訪問kibana主機ip:5601
匹配數據
修改時間(kibana會根據導入到es數據庫的日誌進行繪圖並呈現出來)
kibana自動繪製出圖表
除了柱狀圖,Kibana還支持很多種展示方式
做一個餅圖,選擇Pie chart
選擇from a new search
選擇Split Slices
顯示哪個國家訪問網站次數最多(前五名)
繪製二級餅圖(在剛纔圖的基礎上)
查看使用的哪種操作系統多
保存(這裏又繪製了一張圖表,保存方法都一樣)
一次查看多張表
三. 部署logstash
logstash是什麼
- 是一個數據採集,加工處理以及傳輸的工具
- 所有類型的數據集中處理
- 不同模式和格式數據的正常化
- 自定義日誌格式的迅速擴展
- 爲自定義數據源輕鬆添加插件
logstash 裏的三個模塊
- input模塊 :作用是負責日誌收集
- filter模塊:負責對日誌的加工處理將其轉爲json格式
- output模塊: 負責將日誌寫出到es集羣中
修改主機名映射文件告知logstash集羣的成員
[root@logstash ~]# vim /etc/hosts
192.168.1.51 ecs1
192.168.1.52 ecs2
192.168.1.53 ecs3
192.168.1.54 ecs4
192.168.1.55 ecs5
192.168.1.56 kibana
192.168.1.57 logstash
安裝
[root@logstash ~]# yum -y install java-1.8.0-openjdk
[root@logstash ~]# yum -y install logstash
修改配置文件測試運行
[root@ecs-logstash ~]# cd /etc/logstash/
[root@ecs-logstash logstash]# touch logstash.cnf #創建logstash配置文件
[root@ecs-logstash logstash]# vim logstash.cnf
input {
stdin {} #標準輸入
} #定義區域
filter {}
output{
stdout {} #標準輸出
}
[root@ecs-logstash bin]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf #-f 表示加載配置文件
Settings: Default pipeline workers: 2
Pipeline main started #出現這個表示運行成功,並等待用戶輸入
test logstash is health # 用戶輸入的字符串
2020-07-04T07:03:48.463Z ecs-logstash test logstash is health #logstash輸出的
logstash插件的使用:
[root@ecs-logstash bin]# /opt/logstash/bin/logstash-plugin list #列出logstash的插件
[root@ecs-logstash bin]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"} // codec定義將輸入的格式爲"json"
}
filter {}
output{
stdout { codec => "rubydebug"} // codec定義將輸出的格式爲rubydebug(易讀)
}
[root@ecs-logstash bin]# ./logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
{"aa":1,"bb":2,"cc":3} //輸入一組json格式的數據
{
"aa" => 1,
"bb" => 2,
"cc" => 3,
"@version" => "1",
"@timestamp" => "2020-07-04T07:29:08.391Z",
"host" => "ecs-logstash"
}
官方文檔(查看各區域中插件的使用):
https://www.elastic.co/guide/en/logstash/current/index.html
input區域的使用:
- input區域的作用是讀取日誌文件(收集素材)
[root@ecs-logstash ~]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"} # codec表示輸入的格式爲"json"
file {
path => [ "/tmp/apache.log", "/tmp/db.log" ]
}
}
filter {}
output{
stdout { codec => "rubydebug"} #codec表示將輸出的格式定義爲rubydebug模式(易讀)
}
[root@ecs-logstash tmp]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf #運行加載logstash文件
Settings: Default pipeline workers: 2
Pipeline main started
另外開一個終端連接到logstash主機上
[root@ecs-logstash ~]# cd /tmp
[root@ecs-logstash tmp]# touch apache.log db.log
[root@ecs-logstash tmp]# echo A_${RANDOM} > apache.log
[root@ecs-logstash tmp]# echo B_${RANDOM} > db.log #隨機往日誌裏寫東西
此時查看第一個終端
{
"message" => "A_31017",
"@version" => "1",
"@timestamp" => "2020-07-04T08:28:42.986Z",
"path" => "/tmp/apache.log",
"host" => "ecs-logstash"
}
{
"message" => "B_11512",
"@version" => "1",
"@timestamp" => "2020-07-04T08:30:19.040Z",
"path" => "/tmp/db.log",
"host" => "ecs-logstash"
}
就會發現logstash將日誌的內容給顯示出在屏幕上了
那當日志文件裏已經有日誌記錄了該怎麼辦呢?
使用file插件裏的sincedb_path 和 start_position 參數
[root@ecs-logstash ~]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"} # codec表示輸入的格式爲"json"
file {
path => [ "/tmp/apache.log" ]
sincedb_path => "/root/.sincedb" #指定指針記錄文件的存放路徑,不指定是默認存放在當前用戶的家目錄下的(隱藏文件)
start_position => "beginning" #當沒有指針記錄文件時從第一條日誌開始讀取
type => "httplog" #打個標籤方便標識
}
file {
path => [ "/tmp/db.log" ,"xx.log"]
sincedb_path => "/root/.sincedb"
start_position => "beginning"
type => "mysqllog"
}
}
filter {
}
output{
stdout { codec => "rubydebug"} #codec表示將輸出的格式定義爲rubydebug模式(易讀)
}
打開第二個終端將寫兩條日誌測試
[root@ecs-logstash tmp]# echo X_${RANDOM} >> xx.log
[root@ecs-logstash tmp]# echo X_${RANDOM} >> xx.log
返回第一個終端運行加載配置文件
[root@ecs-logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
{
"message" => "X_28128",
"@version" => "1",
"@timestamp" => "2020-07-04T09:03:31.028Z",
"path" => "/tmp/xx.log",
"host" => "ecs-logstash",
"type" => "mysqllog"
}
{
"message" => "X_6186",
"@version" => "1",
"@timestamp" => "2020-07-04T09:03:31.029Z",
"path" => "/tmp/xx.log",
"host" => "ecs-logstash",
"type" => "mysqllog"
}
從這裏就能看出可以將新的日誌文件從頭讀取
當logstash服務停止的時候,因爲有指針記錄文件所以也是可以從斷點繼續讀取日誌文件
先將服務停止,向/tmp/xx.log寫入日誌記錄
[root@ecs-logstash tmp]# echo "停止服務再寫日誌也是可以讀出來的 " >> xx.log
再次啓動服務加載配置文件
[root@ecs-logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
{
"message" => "停止服務再寫日誌也是可以讀出來的 ",
"@version" => "1",
"@timestamp" => "2020-07-04T09:08:27.291Z",
"path" => "/tmp/xx.log",
"host" => "ecs-logstash",
"type" => "mysqllog"
}
filter區域的使用:
- filter的作用是將日誌記錄轉化爲json格式
- 這裏使用 grok 模塊
首先搭建web服務器,並真機訪問測試
[root@ecs-web ~]# yum -y install httpd
[root@ecs-web ~]# systemctl start httpd
[root@ecs-web ~]# cat /var/log/httpd/access_log
100.125.27.29 - - [04/Jul/2020:17:23:13 +0800] "GET / HTTP/1.1" 200 12 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0"
httpd日誌格式解析:
100.125.27.29 :表示來訪者IP地址
"- -" :表示遠程日誌和遠程用戶(開啓認證的web網站纔有參數) 這裏爲空
[04/Jul/2020:17:23:13 +0800] :表示時間戳
GET :表示請求方法
/ : 表示訪問地址的URL
HTTP/1.1 :表示請求的協議和版本
200 :表示返回值
12 :表示頁面的大小
將web主機上的訪問日誌拷貝到logstash主機上測試filter模塊的使用
[root@ecs-logstash ]# vim /tmp/apache.log
100.125.27.29 - - [04/Jul/2020:17:23:13 +0800] "GET / HTTP/1.1" 200 12 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0"
將真實的日誌記錄寫進去
修改配置文件
[root@ecs-logstash ]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"}
file {
path => [ "/tmp/apache.log" ]
sincedb_path => "/dev/null" #多次測試可以將指針記錄文件指向/dev/null,這樣每次都會從頭讀取
start_position => "beginning"
type => "httplog"
}
}
filter {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" } #利用grok模塊匹配日誌文件的各項含義
#調用logstash作者已經寫好的正則表達式,在這裏稱作爲宏COMBINEDAPACHELOG
}
}
output{
stdout { codec => "rubydebug"}
}
[root@ecs-logstash ~]# cd /opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-patterns-core-*/patterns
#作者寫的宏(正則表達式)的存放路徑(版本號以實際安裝爲準)
[root@ecs-logstash patterns]# cat grok-patterns
測試運行
[root@ecs-logstash bin]# ./logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
{
"message" => "100.125.27.29 - - [04/Jul/2020:17:23:13 +0800] \"GET / HTTP/1.1\" 200 12 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\"",
"@version" => "1",
"@timestamp" => "2020-07-04T10:36:14.970Z",
"path" => "/tmp/apache.log",
"host" => "ecs-logstash",
"type" => "httplog",
"clientip" => "100.125.27.29",
"ident" => "-",
"auth" => "-",
"timestamp" => "04/Jul/2020:17:23:13 +0800",
"verb" => "GET",
"request" => "/",
"httpversion" => "1.1",
"response" => "200",
"bytes" => "12",
"referrer" => "\"-\"",
"agent" => "\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\""
}
output區域的使用:
- output的作用是將json格式的日誌寫到ES分佈式集羣中
- 這裏使用 elasticsearch 模塊
[root@ecs-logstash bin]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"}
file {
path => [ "/tmp/apache.log" ]
sincedb_path => "/dev/null"
start_position => "beginning"
type => "httplog"
}
}
filter {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" }
}
}
output{
stdout { codec => "rubydebug"}
elasticsearch {
hosts => ["ecs-0001:9200","ecs-0002:9200","ecs-0003:9200"] #告知logstash,ES集羣節點的地址,
#ecs-0001/2/3必須可以ping通,不需要寫全部節點
index => "web-%{+YYYY.MM.dd}" #在es集羣中創建索引(庫名)的名稱
flush_size => 2000 #先將數據緩存到本地當達到2000字節時寫到ES數據庫中
idle_flush_time => 10 #當10秒沒有數據寫進來時,將本地數據寫入ES分佈式集羣中
}
}
測試運行
[root@ecs-logstash bin]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
{
"message" => "100.125.27.29 - - [04/Jul/2020:17:23:13 +0800] \"GET / HTTP/1.1\" 200 12 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\"",
"@version" => "1",
"@timestamp" => "2020-07-04T10:58:51.005Z",
"path" => "/tmp/apache.log",
"host" => "ecs-logstash",
"type" => "httplog",
"clientip" => "100.125.27.29",
"ident" => "-",
"auth" => "-",
"timestamp" => "04/Jul/2020:17:23:13 +0800",
"verb" => "GET",
"request" => "/",
"httpversion" => "1.1",
"response" => "200",
"bytes" => "12",
"referrer" => "\"-\"",
"agent" => "\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\""
}
用瀏覽器訪問head插件(我把head插件安裝在了192.168.1.55上)
http://192.168.1.55:9200/_plugin/head/
此時我們發現在head插件上有個名爲web的索引說明數據已經成功寫入ES分佈式集羣中
filebeats
首先要在web主機上安裝filebeats
- 作用是在本地讀取web日誌,通過本地網絡最終將日誌發送給logstash
修改logstash配置文件添加beats模塊
[root@ecs-logstash bin]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"}
file {
path => [ "/tmp/apache.log" ]
sincedb_path => "/root/.sincedb"
start_position => "beginning"
type => "httplog"
}
beats {
port => 5044 #logstash將監聽5044這個端口,等待web主機發送web日誌
}
}
.....
運行logstash
[root@ecs-logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
Settings: Default pipeline workers: 2
Pipeline main started
另開終端查看是否監聽5044端口
[root@ecs-logstash ~]# ss -ntulp | grep 5044
tcp LISTEN 0 50 [::]:5044 [::]:* users:(("java",pid=14514,fd=14))
登錄web主機安裝filebeat
[root@ecs-web ~]# yum -y install filebeat
[root@ecs-web ~]# systemctl start filebeat
[root@ecs-web ~]# systemctl enable filebeat
[root@ecs-web ~]# vim /etc/filebeat/filebeat.yml
修改15行 - /var/log/httpd/access_log #定義傳輸日誌文件的路徑
修改72行 document_type: apache.log #打標籤定義是什麼日誌
修改183行添加註釋 #elasticsearch: #默認web主機是將日誌輸出給es集羣
修改278行去除註釋 logstash: #定義web主機將日誌輸出給logstash
修改280行 hosts: ["192.168.1.57:5044"] #告知web機器logstash地址
[root@ecs-web ~]# grep -Pv "^\s*(#|$)" /etc/filebeat/filebeat.yml #查看有效配置
filebeat:
prospectors:
-
paths:
- /var/log/httpd/access_log
input_type: log
document_type: apache.log
registry_file: /var/lib/filebeat/registry
output:
logstash:
hosts: ["192.168.1.57:5044"]
shipper:
logging:
files:
rotateeverybytes: 10485760 # = 10MB
修改logstash配置文件匹配相應標籤的日誌
[root@ecs-logstash ~]# vim /etc/logstash/logstash.cnf
input {
stdin { codec => "json"}
file {
path => [ "/tmp/apache.log" ]
sincedb_path => "/root/.sincedb"
start_position => "beginning"
type => "httplog"
}
beats {
port => 5044
}
}
filter {
if [type] == "apache.log" { #匹配日誌標籤是apache.log的,才加載grok模塊
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" }
#if [type] == "nginx.log" { #可以匹配多個標籤
....
}}
}
output{
stdout { codec => "rubydebug"}
if [type] == "apache.log" { #匹配日誌標籤是apache.log的
elasticsearch {
hosts => ["ecs-0001:9200","ecs-0002:9200","ecs-0003:9200"]
index => "web-%{+YYYY.MM.dd}"
flush_size => 2000
idle_flush_time => 10
}}
}
運行logstash
[root@ecs-logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
真機瀏覽器訪問web網站
http://192.168.1.58
再次查看logstash主機的顯示(等待十幾秒)
"message" => "100.125.27.13 - - [04/Jul/2020:20:12:10 +0800] \"GET / HTTP/1.1\" 304 - \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\"",
"@version" => "1",
"@timestamp" => "2020-07-04T12:12:16.770Z",
"offset" => 6200,
"input_type" => "log",
"beat" => {
"hostname" => "ecs-web",
"name" => "ecs-web"
},
"source" => "/var/log/httpd/access_log",
"type" => "apache.log",
"count" => 1,
"fields" => nil,
"host" => "ecs-web",
"tags" => [
[0] "beats_input_codec_plain_applied"
],
"clientip" => "100.125.27.13",
"ident" => "-",
"auth" => "-",
"timestamp" => "04/Jul/2020:20:12:10 +0800",
"verb" => "GET",
"request" => "/",
"httpversion" => "1.1",
"response" => "304",
"referrer" => "\"-\"",
"agent" => "\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0\""
}
此時說明web主機成功將本地的http日誌文件通過網絡傳送給logstash主機了
將所有組件串聯
[root@kibana ~]# systemctl stop kibana
[root@kibana ~]# curl -XDELETE 'http://ecs-0002:9200/*' #刪除所有測試索引(引號防止shell解析*)
[root@kibana ~]# systemctl start kibana
運行logstash
[root@ecs-logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.cnf
真機瀏覽器訪問web網站
http://192.168.1.58
真機查看55主機上的head插件,查看是否有web開頭的索引
http://192.168.1.55:9200/_plugin/head
使用kibana繪製圖表
再次訪問web網站的頁面
通過logstash查看測試訪問的時間
最後觀察kibana(如果訪問多我們還可以繪製餅圖)
繪製餅圖方法見上方kibana介紹