1.說明
此代碼實現是針對kafka_2.10的0.8.2.1版本Java代碼實現,消費者是針對多個Topic消費的多線程實現
2.安裝
參考:搭建Kafka簡單教程
3.導入依賴
此處只導入kafka的依賴,線程池用的是spring的ThreadPoolTaskExecutor線程池。
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.8.2.1</version>
</dependency>
4.配置
4.1 線程池的簡單配置
@Configuration
public class SpringAsyncConfig {

    /** Maximum number of threads the pool may grow to. */
    @Value(value = "${async.pool.max.size:80}")
    private int maxPoolSize;

    /** Capacity of the queue that holds tasks waiting for a free thread. */
    @Value(value = "${async.pool.queue.size:20}")
    private int queueSize;

    /** Number of core threads kept alive even when idle. */
    @Value(value = "${async.pool.core.size:5}")
    private int corePoolSize;

    // NOTE: the original version also declared `knowsCorePoolSize` bound to the
    // same property key `async.pool.core.size` but never used it — removed.

    /**
     * Builds the shared task executor used to run the Kafka consumer threads.
     *
     * <p>ThreadPoolTaskExecutor only grows beyond the core size once the queue
     * is full, so a small queue capacity makes the pool scale out earlier.
     *
     * @return a configured asynchronous task executor
     */
    @Bean(name = "asyncTaskExecutor")
    public AsyncTaskExecutor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(corePoolSize);
        executor.setMaxPoolSize(maxPoolSize);
        executor.setQueueCapacity(queueSize);
        // Let in-flight consumer work finish before the context shuts down.
        executor.setWaitForTasksToCompleteOnShutdown(true);
        return executor;
    }
}
4.2 Kafka的簡單配置
@Configuration
public class KafkaConfig {

    /**
     * Producer settings for Kafka 0.8.2.1.
     *
     * @return the producer {@link Properties}
     */
    public Properties producerConfig() {
        Properties props = new Properties();
        // Make sure the broker address is correct, especially for Kafka
        // installed on a Linux host — a wrong address means messages are lost.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("timeout.ms", 3000);
        props.put("metadata.fetch.timeout.ms", 3000);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // WARNING: acks=0 is fire-and-forget — the producer gets no delivery
        // acknowledgement. Use "1" or "all" if message loss is unacceptable.
        props.put("acks", "0");
        return props;
    }

    /**
     * Shared producer bean. Typed as {@code KafkaProducer<String, String>}
     * (the original used the raw type) to match the String serializers above.
     *
     * @return the singleton Kafka producer
     */
    @Bean
    public KafkaProducer<String, String> kafkaProducer() {
        return new KafkaProducer<>(producerConfig());
    }

    /**
     * High-level (ZooKeeper-based) consumer settings for Kafka 0.8.2.1.
     *
     * @return the consumer {@link Properties}
     */
    public Properties consumerConfig() {
        Properties props = new Properties();
        props.put("auto.offset.reset", "smallest");
        // Make sure the ZooKeeper address is correct, especially for Kafka
        // installed on a Linux host — a wrong address means no messages arrive.
        props.put("zookeeper.connect", "172.31.52.83:2181");
        props.put("group.id", "defaultGroup");
        props.put("zookeeper.session.timeout.ms", "10000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("partition.assignment.strategy", "range");
        return props;
    }

    /**
     * Consumer configuration bean consumed by {@code KafkaConsumerStart}.
     *
     * @return the high-level consumer configuration
     */
    @Bean
    public ConsumerConfig kafkaConsumer() {
        return new ConsumerConfig(consumerConfig());
    }
}
5.生產者
@Component
public class KafkaProducerClient {

    /** Shared producer configured in {@code KafkaConfig}. */
    @Autowired
    private KafkaProducer<String, String> kafkaProducer;

    /**
     * Asynchronously sends a message to the given topic.
     *
     * <p>Bug fix: this method was {@code private} in the original version and
     * therefore could never be called by other beans; it is now {@code public}.
     *
     * @param topic   destination topic name
     * @param message message payload
     */
    public void produce(String topic, String message) {
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, message);
        kafkaProducer.send(record, (metadata, e) -> {
            if (e != null) {
                // 消息發送失敗 — also surface the cause instead of discarding it.
                System.out.println("消息發送失敗");
                e.printStackTrace();
            }
        });
    }
}
6.消費者
可以有若干個Topic,這裏演示的是有兩個Topic
6.1 消費者啓動入口
/**
 * Consumer bootstrap class.
 *
 * <p>Implements {@link InitializingBean} so the consumer threads are started
 * as soon as the application context is ready, and {@link DisposableBean} so
 * the consumer connector is shut down cleanly on context close (the original
 * version never released the connector).
 */
@Component
public class KafkaConsumerStart implements InitializingBean, DisposableBean {

    /** Thread pool that runs one worker per topic stream. */
    @Autowired
    private AsyncTaskExecutor taskExecutor;

    /** High-level consumer configuration (see {@code KafkaConfig}). */
    @Autowired
    private ConsumerConfig consumerConfig;

    /**
     * Business-layer handler for consumed messages. It is injected here and
     * passed into each worker because the workers run on pool threads where
     * Spring injection is not available. (Concrete implementation omitted.)
     */
    @Autowired
    private TopicsHandleBiz topicsHandleBiz;

    /** Connector owning the ZooKeeper session and fetcher threads. */
    private ConsumerConnector consumer;

    @Override
    public void afterPropertiesSet() {
        try {
            consumer = Consumer.createJavaConsumerConnector(consumerConfig);
            // One stream (thread) per topic.
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            String testTopic1 = "testTopic1";
            String testTopic2 = "testTopic2";
            topicCountMap.put(testTopic1, 1);
            topicCountMap.put(testTopic2, 1);
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                    consumer.createMessageStreams(topicCountMap);
            // Start a worker thread for testTopic1.
            List<KafkaStream<byte[], byte[]>> streams1 = consumerMap.get(testTopic1);
            if (streams1.size() > 0) {
                taskExecutor.execute(new TestTopicHandle1(streams1.get(0), topicsHandleBiz));
            }
            // Start a worker thread for testTopic2.
            List<KafkaStream<byte[], byte[]>> streams2 = consumerMap.get(testTopic2);
            if (streams2.size() > 0) {
                taskExecutor.execute(new TestTopicHandle2(streams2.get(0), topicsHandleBiz));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Releases the consumer connector (ZooKeeper session, fetcher threads)
     * when the Spring context shuts down.
     */
    @Override
    public void destroy() {
        if (consumer != null) {
            consumer.shutdown();
        }
    }
}
6.2 線程處理testTopic1
/**
 * Worker that drains one {@code KafkaStream} for topic {@code testTopic1} and
 * hands each message to the business layer.
 */
public class TestTopicHandle1 implements Runnable {

    /** Business-layer handler, injected by the bootstrap class. */
    private TopicsHandleBiz topicsHandleBiz;

    /** Stream of raw messages for this topic. */
    private KafkaStream kafkaStream;

    /**
     * @param kafkaStream     stream to consume from
     * @param topicsHandleBiz handler invoked for every message
     */
    public TestTopicHandle1(KafkaStream kafkaStream, TopicsHandleBiz topicsHandleBiz) {
        // The original constructor declared `throws ClassNotFoundException`
        // although nothing here can throw it — removed.
        this.kafkaStream = kafkaStream;
        this.topicsHandleBiz = topicsHandleBiz;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
        // Blocks on hasNext() until the connector is shut down.
        while (it.hasNext()) {
            String message = new String(it.next().message());
            try {
                // Persist the testTopic1 message.
                // Bug fix: the original called TopicsHandleBiz.save(...) as a
                // static method on the class, ignoring the injected instance.
                topicsHandleBiz.save(message);
            } catch (Exception e) {
                // Keep consuming even if one message fails.
                e.printStackTrace();
            }
        }
    }
}
6.3 線程處理testTopic2
/**
 * Worker that drains one {@code KafkaStream} for topic {@code testTopic2} and
 * hands each message to the business layer.
 */
public class TestTopicHandle2 implements Runnable {

    /** Business-layer handler, injected by the bootstrap class. */
    private TopicsHandleBiz topicsHandleBiz;

    /** Stream of raw messages for this topic. */
    private KafkaStream kafkaStream;

    /**
     * @param kafkaStream     stream to consume from
     * @param topicsHandleBiz handler invoked for every message
     */
    public TestTopicHandle2(KafkaStream kafkaStream, TopicsHandleBiz topicsHandleBiz) {
        // The original constructor declared `throws ClassNotFoundException`
        // although nothing here can throw it — removed.
        this.kafkaStream = kafkaStream;
        this.topicsHandleBiz = topicsHandleBiz;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = kafkaStream.iterator();
        // Blocks on hasNext() until the connector is shut down.
        while (it.hasNext()) {
            String message = new String(it.next().message());
            try {
                // Persist the testTopic2 message.
                // Bug fix: the original called TopicsHandleBiz.save(...) as a
                // static method on the class, ignoring the injected instance.
                topicsHandleBiz.save(message);
            } catch (Exception e) {
                // Keep consuming even if one message fails.
                e.printStackTrace();
            }
        }
    }
}