Setting up a ZooKeeper + Kafka cluster with Docker (Mac)

Pulling the images

docker pull wurstmeister/zookeeper
docker pull wurstmeister/kafka

ZooKeeper pseudo-cluster installation

  • This is for demonstration only, so just a single node is deployed. For high availability, deploy multiple ZooKeeper nodes.
  • Inside the container, the configuration file defaults to /conf/zoo.cfg, and the data and log directories default to /data and /datalog; if needed, map these directories to reliable directories on the host (a volume-mount sketch follows the command below).
docker run -d --name zookeeper -p 2181:2181 -t wurstmeister/zookeeper
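If you want the ZooKeeper state to survive container recreation, a minimal sketch with host volume mounts (the host paths under ~/docker/zookeeper are placeholders; adjust them to your environment):

# hypothetical host paths; map the data and transaction-log directories out of the container
docker run -d --name zookeeper -p 2181:2181 \
  -v ~/docker/zookeeper/data:/data \
  -v ~/docker/zookeeper/datalog:/datalog \
  -t wurstmeister/zookeeper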

Kafka cluster installation

  • With Docker you can quickly run several Kafka brokers on the same machine; only the ports need to change.
  • Node 1
# The ZK address here must not be 127.0.0.1 or localhost; if the host IP changes, the containers have to be recreated
# Port 2181 is the ZooKeeper address
docker run  -d --name kafka1 -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=172.31.15.175:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.31.15.175:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 -t wurstmeister/kafka
  • Node 2
docker run  -d --name kafka2 -p 9093:9093 -e KAFKA_BROKER_ID=1 -e KAFKA_ZOOKEEPER_CONNECT=172.31.15.175:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.31.15.175:9093 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9093 -t wurstmeister/kafka
  • Node 3
docker run  -d --name kafka3 -p 9094:9094 -e KAFKA_BROKER_ID=2 -e KAFKA_ZOOKEEPER_CONNECT=172.31.15.175:2181 -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://172.31.15.175:9094 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9094 -t wurstmeister/kafka
  • The Kafka cluster is now complete, with brokers on ports 9092, 9093, and 9094; verify with docker ps (screenshot: Kafka cluster nodes), or run the quick check sketched below.
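A quick sanity check, as a sketch; the zookeeper-shell.sh script ships with the Kafka distribution inside the wurstmeister/kafka image:

# all three containers should show as Up
docker ps
# list the broker IDs registered in ZooKeeper; expect [0, 1, 2] if all brokers came up
docker exec -it kafka1 zookeeper-shell.sh 172.31.15.175:2181 ls /brokers/ids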

kafka-manager installation

# Decide for yourself whether to add a restart policy; my office and home IPs differ, so I left it out
# --restart=always always restarts the container when it exits
docker run -itd --name=kafka-manager -p 9000:9000 -e ZK_HOSTS="172.31.15.175:2181" sheepkiller/kafka-manager
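To confirm the container came up before opening the UI, a small sketch:

# tail the startup log; kafka-manager listens on port 9000 once it has started
docker logs -f kafka-manager
# then open http://localhost:9000 in a browser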

Topic creation

  • Open localhost:9000
  • Cluster creation screen (screenshot)
    Fill it in as shown; the other fields can keep their default values (screenshot)
  • Topic creation screen (screenshot)
    Only the topic name needs to be filled in (screenshot)
    Topic list (screenshot)
    A command-line alternative is sketched after this list.
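If you would rather skip the UI, topics can also be created from the command line; a sketch using the kafka-topics.sh script bundled in the wurstmeister/kafka image (Kafka 2.x flags):

# create the demo topic with 3 partitions, replicated across all 3 brokers
docker exec -it kafka1 kafka-topics.sh --create \
  --zookeeper 172.31.15.175:2181 \
  --replication-factor 3 --partitions 3 \
  --topic kafka-1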

Testing the topic from an application

  • Dependencies (pom.xml)
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.zbj.kafka</groupId>
    <artifactId>kafka-demo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.12</artifactId>
            <version>2.3.0</version>
        </dependency>

        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>21.0</version>
        </dependency>
    </dependencies>
</project>
  • Kafka producer
package com.zbj.kafka;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

/**
 * KafkaProducer
 *
 * @author weigang
 * @create 2019-09-16
 **/
public class KafkaProducer {

    private final Producer<String, String> producer;

    public final static String TOPIC = "kafka-1";

    private KafkaProducer() {
        Properties properties = new Properties();

        // Kafka broker addresses
        properties.put("bootstrap.servers", "172.31.15.175:9092,172.31.15.175:9093,172.31.15.175:9094");

        // value serializer class
        properties.put("value.serializer", StringSerializer.class.getName());

        // key serializer class
        properties.put("key.serializer", StringSerializer.class.getName());

        // acks=0: never wait for a broker acknowledgement (lowest latency, weakest durability)
        // acks=1: the leader acknowledges once it has written the data (messages written to a failed, not-yet-replicated leader are lost)
        // acks=all (same as -1): all in-sync replicas must acknowledge (best durability)
        // Note: the Java producer uses "acks"; the legacy "request.required.acks" key is ignored by it
        properties.put("acks", "all");

        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(properties);
    }

    void produce() {
        int messageNo = 1000;
        final int COUNT = 100000;
        while (messageNo < COUNT) {
            String key = String.valueOf(messageNo);
            String data = "hello kafka message " + key;
            producer.send(new ProducerRecord<>(TOPIC, key, data));
            System.out.println(data);
            messageNo++;
        }
        // flush any buffered records and release resources before the JVM exits
        producer.close();
    }

    public static void main(String[] args) {
        new KafkaProducer().produce();
    }
}
  • Kafka consumer
package com.zbj.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 * KafkaConsumer
 *
 * @author weigang
 * @create 2019-09-16
 **/
public class KafkaConsumer {

    private final org.apache.kafka.clients.consumer.KafkaConsumer<String, String> consumer;

    private KafkaConsumer() {

        Properties props = new Properties();
        props.put("bootstrap.servers", "172.31.15.175:9092");
        props.put("group.id", "zbj-group");
        props.put("max.poll.records", 100);
        props.put("enable.auto.commit", "false");
        // only takes effect when enable.auto.commit=true; kept here for reference
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        consumer = new org.apache.kafka.clients.consumer.KafkaConsumer<>(props);
    }

    void consume() {

        consumer.subscribe(Arrays.asList(KafkaProducer.TOPIC));
        final int minBatchSize = 200;
        List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                buffer.add(record);
            }
            if (buffer.size() >= minBatchSize) {
                // persist the batch to the database, e.g.:
                //insertIntoDb(buffer);
                for (ConsumerRecord<String, String> record : buffer) {
                    System.out.println(record.key() + "-> " + record.value());
                }

                consumer.commitSync();
                buffer.clear();
            }
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer().consume();
    }
}
  • Start the consumer first, then start the producer (run commands are sketched below)
  • If the consumer prints key/value pairs as in the screenshot, the setup works (screenshot)
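To run both ends from the command line, a sketch using the exec-maven-plugin (invoked here without extra pom configuration; adjust if your setup differs):

# terminal 1: start the consumer first
mvn compile exec:java -Dexec.mainClass=com.zbj.kafka.KafkaConsumer
# terminal 2: then start the producer
mvn compile exec:java -Dexec.mainClass=com.zbj.kafka.KafkaProducer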
