Deploying and Running ELK with Docker

mkdir -p /home/docker/elasticsearch/data
mkdir -p /home/docker/elasticsearch/logs
mkdir -p /home/docker/logstash
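The Elasticsearch container runs as the non-root elasticsearch user (uid 1000), so it must be able to write to the bind-mounted data and logs directories. If startup fails with an AccessDeniedException, loosening the permissions on the host is a common fix (a sketch; adjust ownership to your own security policy):

chown -R 1000:1000 /home/docker/elasticsearch
# or, less strictly:
# chmod -R 777 /home/docker/elasticsearch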

In the /home/docker/logstash directory, run vi logstash.conf:



input {
    kafka {
        bootstrap_servers => ["192.168.1.80:9092"] # note: this is the Kafka broker address, not the ZooKeeper address
        group_id => "applogs-msg" # custom consumer group id
        topics => ["applogs"]  # Kafka topic name
        consumer_threads => 5
        decorate_events => true
        codec => "json"
        tags => ["applogs1"]
    }
    # Multiple topics and tags can be configured this way; drop the extra block if you don't need it
    kafka {
        bootstrap_servers => ["192.168.1.80:9092"] # note: this is the Kafka broker address, not the ZooKeeper address
        group_id => "applogs-msg" # custom consumer group id
        topics => ["applogs1"]  # Kafka topic name
        consumer_threads => 5
        decorate_events => true
        codec => "json"
        tags => ["applogs2"]
    }
}

output {
    elasticsearch {
        hosts => ["192.168.1.80:9200"]
        index => "%{tags}-%{+YYYY.MM.dd}"
    }
    stdout { codec => rubydebug }
}
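To generate a test event once everything is running, you can push a JSON line onto the applogs topic with the console producer that ships with Kafka (a sketch; the Kafka installation path and the sample message are assumptions):

# run from the Kafka installation directory
bin/kafka-console-producer.sh --broker-list 192.168.1.80:9092 --topic applogs
# then type a JSON line, e.g.:
# {"level":"INFO","message":"hello from kafka"}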

In the /home/docker/logstash directory, run vi logstash.yml:

path.config: /etc/logstash/conf.d
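Before starting the stack, you can optionally syntax-check the pipeline file with Logstash's --config.test_and_exit flag. A sketch using a throwaway container, assuming the image's entrypoint forwards flag arguments to the logstash command (as the 7.x images do); the /tmp mount path is arbitrary:

docker run --rm \
  -v /home/docker/logstash/logstash.conf:/tmp/logstash.conf \
  logstash:7.4.2 \
  -f /tmp/logstash.conf --config.test_and_exit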

In the /home/docker directory, run vi docker-elk.yml:

version: '3.4'
services:
 elasticsearch:
  image: elasticsearch:7.4.2
  container_name: elasticsearch-single
  restart: always
  ulimits:
    nproc: 65535
    nofile:
      soft: 40000
      hard: 65535 
    memlock:
      soft: -1
      hard: -1
  ports:
    - "9200:9200"
    - "9300:9300"
  environment:
    - TZ=Asia/Shanghai
    - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
    - bootstrap.memory_lock=true
    - http.host=0.0.0.0
    #- cluster.initial_master_nodes=["node-1"]
    #- bootstrap.memory_lock=false
    - bootstrap.system_call_filter=false
    - discovery.type=single-node # single-node mode
  volumes:
    #- /home/docker/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml 
    - /home/docker/elasticsearch/data:/usr/share/elasticsearch/data
    - /home/docker/elasticsearch/logs:/usr/share/elasticsearch/logs 
    #- /home/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins 

 logstash:
  image: logstash:7.4.2
  container_name: logstash-single 
  restart: always
  #network_mode: "bridge"
  #command: logstash -f /config-dir
  ports:
    - "5044:5044"
  volumes:
    - /home/docker/logstash/logstash.conf:/opt/logstash/config/logstash.conf    
    - /home/docker/logstash/logstash.yml:/opt/logstash/config/logstash.yml   
    - /home/docker/logstash/logstash.conf:/etc/logstash/conf.d/logstash.conf
    - /home/docker/logstash/logstash.yml:/etc/logstash/logstash.yml
  environment:
      # set the time zone
    TZ: Asia/Shanghai
  external_links:
    - elasticsearch:elasticsearch-single

 kibana:
  image: kibana:7.4.2
  container_name: kibana-single 
  restart: always
  #network_mode: "bridge"
  ports:
    - "5601:5601"
  external_links:
    - elasticsearch:elasticsearch-single

Finally, run docker-compose -f docker-elk.yml up -d.
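Once the containers are up, a quick sanity check (using the same host IP as in the examples above) is to list the containers, hit the Elasticsearch HTTP endpoint, and tail the Logstash logs; Kibana is served on port 5601:

docker ps                          # elasticsearch-single, logstash-single and kibana-single should be Up
curl http://192.168.1.80:9200      # returns Elasticsearch version/cluster info
docker logs -f logstash-single     # watch the pipeline start and consume from Kafka
# Kibana UI: http://192.168.1.80:5601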

Problems encountered during deployment


Error: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
For this error, if Elasticsearch is deployed directly on Linux:
vi /etc/sysctl.conf
and add
vm.max_map_count=655360

Error: max file descriptors [65535] for elasticsearch process is too low, increase to at least [65536]
For this error, if Elasticsearch is deployed directly on Linux:
vi /etc/security/limits.conf
* soft nproc 65536
* hard nproc 65536
* soft nofile 65536
* hard nofile 65536
* soft memlock unlimited
* hard memlock unlimited

Run sysctl -p to apply the sysctl.conf change immediately; the limits.conf change takes effect on the next login session.
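To confirm the new limits are in effect (after logging in again for the limits.conf change), check the kernel parameter and the shell limits:

sysctl vm.max_map_count   # should print 655360
ulimit -n                 # open-files limit, should be 65536
ulimit -u                 # max user processes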

For a Docker deployment, the two errors above require a configuration change instead:
 ulimits:
    nproc: 65535
    nofile:
      soft: 40000
      hard: 65535 
    memlock:
      soft: -1
      hard: -1
Add the ulimits section above under the elasticsearch service in docker-compose.yml.
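A quick way to verify the limits were applied inside the running container (assuming bash is available in the image, as it is in elasticsearch:7.4.2):

docker exec elasticsearch-single bash -c 'ulimit -n; ulimit -u; ulimit -l'
# expected: 40000 (open files, soft limit), 65535 (processes), unlimited (locked memory)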

Error: the default discovery settings are unsuitable for production use; at least one of [discovery.seed_hosts, discovery.seed_providers, cluster.initial_master_nodes] must be configured
This error is an Elasticsearch configuration problem.
Add this to docker-compose.yml, or set the equivalent keys in elasticsearch.yml (for a single-node setup, discovery.type=single-node, as used in the compose file above, also resolves it):
environment:
  - bootstrap.system_call_filter=false
  - cluster.initial_master_nodes=["node-1"]
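After restarting with the corrected settings, the cluster state can be checked over HTTP (same host IP as in the examples above):

curl http://192.168.1.80:9200/_cluster/health?pretty   # status should be green or yellow for a single node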


