上接: kafka 簡易發送/接收框架 之1
KafkaTemplate 模板類:
package org.test;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
/**
 * Kafka send/receive operation template.
 * <p>
 * Entry point for starting the Kafka receive-listener thread and for sending
 * messages; all send/receive operations should go through this template.
 *
 * @author guishuanglin 2019-09-5
 */
public class KafkaTemplate implements IKafkaOperation {
	private Log logger = LogFactory.getLog(this.getClass());
	private String fn = "Kafka操作模板";
	// Lazily-created singleton instance.
	private static KafkaTemplate template = null;
	// Producer-side configuration, built on first send.
	private MsgProducerConfig pConfig = null;
	// Shared producer; stays null until initProducer() succeeds.
	private KafkaProducer<String, String> producer = null;

	/**
	 * Returns the singleton instance used for message sending.
	 * <p>
	 * Bug fix: synchronized — the previous unsynchronized lazy init could
	 * create two instances (and later two KafkaProducers) under concurrent
	 * first access.
	 */
	public static synchronized KafkaTemplate getInstance() {
		if (template == null) {
			template = new KafkaTemplate();
		}
		return template;
	}

	//--------------------------------sendMessage-------------------------------------------
	/** Sends a message with an auto-generated 19-digit key. */
	@Override
	public boolean sendMessage(String topic, String message) {
		String key = MsgUtils.getMsgId19() + "";
		return this.sendMessage(topic, key, message);
	}

	/**
	 * Sends a message synchronously; blocks on the returned Future so broker
	 * errors and timeouts surface here instead of being lost.
	 *
	 * @return true when the broker acknowledged the record, false on producer
	 *         init failure or any send exception/timeout.
	 */
	@Override
	public boolean sendMessage(String topic, String key, String message) {
		// Bug fix: the init result used to be ignored, so a failed init left
		// producer == null and producer.send() below threw a NullPointerException.
		if (!initProducer()) {
			logger.error(fn + ", producer初始化失敗, 無法發送消息," + message);
			return false;
		}
		if (logger.isInfoEnabled()) {
			logger.info(fn + ",發送消息:topic ="+ topic +",key ="+ key +",value ="+message);
		}
		boolean br = false;
		try {
			ProducerRecord<String, String> pRecord = new ProducerRecord<String, String>(topic, key, message);
			Future<RecordMetadata> rm = producer.send(pRecord);
			rm.get(); // wait for the ack; errors/timeouts are raised here
			br = true;
		} catch (Exception e) {
			br = false;
			logger.error(fn + ", 發送數據時異常/超時," + message, e);
		}
		return br;
	}

	/** Sends with an auto-generated key and registers a reply callback. */
	@Override
	public boolean sendMessage(String topic, String message, IReceiveCallback callback) {
		String key = MsgUtils.getMsgId19() + "";
		return this.sendMessage(topic, key, message, callback);
	}

	/**
	 * Sends a message synchronously and registers {@code callback} so replies
	 * arriving on callback.getRecTopic()/getRecSubTopic() are routed to it.
	 */
	@Override
	public boolean sendMessage(String topic, String key, String message, IReceiveCallback callback) {
		if (!initProducer()) {
			logger.error(fn + ", producer初始化失敗, 無法發送消息," + message);
			return false;
		}
		if (logger.isInfoEnabled()) {
			logger.info(fn + ",發送消息:topic ="+ topic +",key ="+ key +",value ="+message);
		}
		boolean br = false;
		try {
			// Bug fix: also guard against a null callback itself; previously a
			// null callback caused an NPE on callback.getRecTopic().
			if (callback == null || callback.getRecTopic() == null || callback.getRecSubTopic() == null) {
				logger.error(fn + ", 發送消息異常: IReceiveCallback.getRecSubTopic 方法返回空");
				return br;
			}
			ReceiveProcessFactory.setReceiveCallback(callback, callback.getRecTopic(), callback.getRecSubTopic());
			ProducerRecord<String, String> pRecord = new ProducerRecord<String, String>(topic, key, message);
			Future<RecordMetadata> rm = producer.send(pRecord);
			rm.get(); // wait for the ack; errors/timeouts are raised here
			br = true;
		} catch (Exception e) {
			br = false;
			logger.error(fn + ", 發送數據時異常/超時," + message, e);
		}
		return br;
	}

	/** Sends a wrapped record (topic/key/value taken from the record). */
	@Override
	public boolean sendMessage(MsgRecord msgRecord) {
		return sendMessage(msgRecord.getTopic(), msgRecord.getKey(), msgRecord.getValue());
	}

	/** Sends a wrapped record and registers a reply callback. */
	@Override
	public boolean sendMessage(MsgRecord msgRecord, IReceiveCallback callback) {
		return sendMessage(msgRecord.getTopic(), msgRecord.getKey(), msgRecord.getValue(), callback);
	}

	//--------------------------------run Receive Listener-------------------------------------------
	/**
	 * Starts the receive-listener thread with the given consumer config and
	 * preprocessor.
	 *
	 * @throws RuntimeException if either argument is null.
	 */
	@Override
	public boolean runListener(AbstractConfig config, IReceivePreprocessor prepr) {
		if (config == null || prepr == null) {
			throw new RuntimeException(fn + ",接收消息參數不能爲空");
		}
		ReceiveKafkaThread rece = new ReceiveKafkaThread(config, prepr);
		new Thread(rece).start();
		return true;
	}

	/** Registers a receive callback for its topic/sub-topic pair. */
	@Override
	public void addReceiveCallback(IReceiveCallback callback) {
		ReceiveProcessFactory.setReceiveCallback(callback, callback.getRecTopic(), callback.getRecSubTopic());
	}

	/** Stops the receive-listener thread. */
	@Override
	public boolean closeListener() {
		return ReceiveKafkaThread.stop();
	}

	/** Closes the producer (if open) so the next send re-initializes it. */
	@Override
	public synchronized boolean closeProducer() {
		if (producer != null) {
			producer.close();
			producer = null;
		}
		return true;
	}

	//-------------------------------- private -------------------------------------------
	/**
	 * Lazily builds the shared producer from MsgProducerConfig.
	 * Synchronized so concurrent first sends cannot create two producers.
	 *
	 * @return true when a usable producer exists, false if the config check failed.
	 */
	private synchronized boolean initProducer() {
		if (producer != null) { return true; }
		pConfig = new MsgProducerConfig();
		if (!pConfig.checkConfig()) {
			return false;
		}
		logger.info("kafka發送配置, clientId =" + pConfig.getClientId() + ", servers =" + pConfig.getServers());
		producer = new KafkaProducer<String, String>(pConfig.getPropertiesConfig());
		return true;
	}
}
MsgConsumerConfig 接收端配置類:
package org.test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.smsc.calc.env.GlobalEnv;
/**
 * Kafka consumer configuration.
 * <p>
 * Note: adapt {@link #initConfig()} to however your project supplies
 * configuration values.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgConsumerConfig extends AbstractConfig {
	private Log logger = LogFactory.getLog(this.getClass());
	// Shared init flag for all instances; guarded by MsgConsumerConfig.class.
	protected static boolean isInit = false;
	// Optional settings (defaults below).
	private String enable_auto_commit = "true";       // auto-commit offsets
	private String auto_commit_interval_ms = "2000";  // auto-commit interval
	private String key_deserializer = "org.apache.kafka.common.serialization.StringDeserializer";
	private String value_deserializer = "org.apache.kafka.common.serialization.StringDeserializer";
	private String max_poll_records = "100";          // max records per poll()

	public MsgConsumerConfig() {
		this.initConfig();
	}

	public String getEnable_auto_commit() {
		return enable_auto_commit;
	}
	public void setEnable_auto_commit(String enable_auto_commit) {
		this.enable_auto_commit = enable_auto_commit;
	}
	public String getAuto_commit_interval_ms() {
		return auto_commit_interval_ms;
	}
	public void setAuto_commit_interval_ms(String auto_commit_interval_ms) {
		this.auto_commit_interval_ms = auto_commit_interval_ms;
	}
	public String getKey_deserializer() {
		return key_deserializer;
	}
	public void setKey_deserializer(String key_deserializer) {
		this.key_deserializer = key_deserializer;
	}
	public String getValue_deserializer() {
		return value_deserializer;
	}
	public void setValue_deserializer(String value_deserializer) {
		this.value_deserializer = value_deserializer;
	}
	public String getMax_poll_records() {
		return max_poll_records;
	}
	public void setMax_poll_records(String max_poll_records) {
		this.max_poll_records = max_poll_records;
	}

	/** Builds the consumer Properties; delegates to getMapConfig() to keep the two views identical. */
	@Override
	public Properties getPropertiesConfig() {
		Properties props = new Properties();
		props.putAll(this.getMapConfig());
		return props;
	}

	/** Builds the consumer config as a Map. */
	@Override
	public Map<String, Object> getMapConfig() {
		if (!isInit) {
			this.initConfig();
		}
		Map<String, Object> map = new HashMap<>();
		map.put("bootstrap.servers", this.getServers());
		map.put("group.id", this.getGroupId());
		map.put("client.id", this.getClientId());
		map.put("enable.auto.commit", this.getEnable_auto_commit());
		map.put("auto.commit.interval.ms", this.getAuto_commit_interval_ms());
		map.put("key.deserializer", this.getKey_deserializer());
		map.put("value.deserializer", this.getValue_deserializer());
		map.put("max.poll.records", this.getMax_poll_records());
		return map;
	}

	/**
	 * Validates the loaded config, filling safe defaults for missing values.
	 *
	 * @return true when usable.
	 * @throws RuntimeException when projectName is missing (no sensible default).
	 */
	@Override
	public boolean checkConfig() {
		if (!isInit) {
			this.initConfig();
		}
		boolean br = true;
		if (servers == null || servers.equals("")) {
			logger.warn("讀取kafka服務器配置爲空, 請檢查 文件是否配置 kafka.Servers 屬性");
			servers = "127.0.0.1:9092";
		}
		if (topics == null || topics.size() == 0) {
			logger.warn("讀取kafka主題配置爲空, 請檢查 文件是否配置 kafka.Topics 屬性");
			topics = Arrays.asList(MsgTopic.TOPIC_COMMAND_RECE, MsgTopic.TOPIC_COMMAND_SEND);
		}
		// client.id must differ between clients.
		if (clientId == null || clientId.equals("")) {
			logger.warn("讀取kafka客戶端名爲空, 請檢查 文件是否配置 kafka.Client 屬性");
			clientId = "consumer";
		}
		// Bug fix: append the "_<hostIP>" suffix only once — it used to be
		// re-appended on every checkConfig() call, so repeated calls made
		// clientId grow ("consumer_ip_ip_...").
		String hostSuffix = "_" + MsgUtils.getHostIP();
		if (!clientId.endsWith(hostSuffix)) {
			clientId = clientId + hostSuffix;
		}
		// group.id: normally client.id == group.id. Identical group ids matter
		// only for load balancing; load-balanced nodes must NOT share a group
		// id here, or messages may be missed.
		groupId = clientId;
		if (projectName == null || projectName.equals("")) {
			logger.error("讀取kafka項目名爲空, 請檢查 文件是否配置 projectName 屬性");
			throw new RuntimeException("讀取kafka項目名稱空, 請檢查配置文件 projectName 屬性");
		}
		return br;
	}

	/**
	 * Loads config from the external environment once.
	 * Synchronized on the class since isInit is static; adapt the property
	 * lookups if your project sources configuration differently.
	 */
	@Override
	public boolean initConfig() {
		synchronized (MsgConsumerConfig.class) {
			if (isInit) return true;
			String servers = GlobalEnv.getInitProperty("kafka.Servers");
			String topics = GlobalEnv.getInitProperty("kafka.Topics");
			String clientId = GlobalEnv.getInitProperty("kafka.Client");
			String projectName = GlobalEnv.getInitProperty("projectName");
			// Required values.
			this.setServers(servers);
			this.setTopics(topics);
			this.setClientId(clientId);
			this.setGroupId(clientId); // group.id mirrors client.id by default
			this.setProjectName(projectName);
			isInit = true;
			return true;
		}
	}
}
MsgProducerConfig 發送端配置類:
package org.test;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.smsc.calc.env.GlobalEnv;
/**
 * Kafka producer configuration.
 * <p>
 * Note: adapt {@link #initConfig()} to however your project supplies
 * configuration values.
 * <p>
 * NOTE(review): the {@code key_deserializer}/{@code value_deserializer} fields
 * actually hold SERIALIZER class names (this is the producer side); the getter
 * names are kept for backward compatibility.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgProducerConfig extends AbstractConfig {
	private Log logger = LogFactory.getLog(this.getClass());
	// Shared init flag for all instances; guarded by MsgProducerConfig.class.
	protected static boolean isInit = false;
	// Optional settings (defaults below).
	private String acks = "1";                         // 0 = no ack, 1 = leader ack, "all" = all brokers
	private Integer retries = 0;                       // no resends — resending can reorder messages
	private Integer batch_size = 16384;
	private Integer linger_ms = 0;                     // 0 = send immediately
	private Integer buffer_memory = 33554432;          // 32M
	private Integer request_timeout_ms = 6 * 1000;     // request timeout
	private Integer max_block_ms = 6 * 1000;           // send-block timeout; exceeded => exception
	private String key_deserializer = "org.apache.kafka.common.serialization.StringSerializer";
	private String value_deserializer = "org.apache.kafka.common.serialization.StringSerializer";

	public MsgProducerConfig() {
		this.initConfig();
	}

	public String getAcks() {
		return acks;
	}
	public void setAcks(String acks) {
		this.acks = acks;
	}
	public Integer getRetries() {
		return retries;
	}
	public void setRetries(Integer retries) {
		this.retries = retries;
	}
	public Integer getBatch_size() {
		return batch_size;
	}
	public void setBatch_size(Integer batch_size) {
		this.batch_size = batch_size;
	}
	public Integer getLinger_ms() {
		return linger_ms;
	}
	public void setLinger_ms(Integer linger_ms) {
		this.linger_ms = linger_ms;
	}
	public Integer getBuffer_memory() {
		return buffer_memory;
	}
	public void setBuffer_memory(Integer buffer_memory) {
		this.buffer_memory = buffer_memory;
	}
	public String getKey_deserializer() {
		return key_deserializer;
	}
	public void setKey_deserializer(String key_deserializer) {
		this.key_deserializer = key_deserializer;
	}
	public String getValue_deserializer() {
		return value_deserializer;
	}
	public void setValue_deserializer(String value_deserializer) {
		this.value_deserializer = value_deserializer;
	}
	public Integer getRequest_timeout_ms() {
		return request_timeout_ms;
	}
	public void setRequest_timeout_ms(Integer request_timeout_ms) {
		this.request_timeout_ms = request_timeout_ms;
	}
	public Integer getMax_block_ms() {
		return max_block_ms;
	}
	public void setMax_block_ms(Integer max_block_ms) {
		this.max_block_ms = max_block_ms;
	}

	/**
	 * Builds the producer Properties; delegates to getMapConfig() so the two
	 * views can no longer drift apart.
	 * Bug fix: this method used to hard-code {@code acks="all"}, silently
	 * ignoring the configured acks value (and disagreeing with getMapConfig()).
	 */
	@Override
	public Properties getPropertiesConfig() {
		Properties props = new Properties();
		props.putAll(this.getMapConfig());
		return props;
	}

	/**
	 * Builds the producer config as a Map.
	 * Bug fix: used to set consumer keys "key.deserializer"/"value.deserializer";
	 * a producer needs "key.serializer"/"value.serializer".
	 */
	@Override
	public Map<String, Object> getMapConfig() {
		if (!isInit) {
			this.initConfig();
		}
		Map<String, Object> map = new HashMap<>();
		map.put("bootstrap.servers", this.getServers());
		map.put("client.id", this.getClientId());
		map.put("acks", this.getAcks());
		map.put("retries", this.getRetries());
		map.put("batch.size", this.getBatch_size());
		map.put("linger.ms", this.getLinger_ms());
		map.put("request.timeout.ms", this.getRequest_timeout_ms());
		map.put("max.block.ms", this.getMax_block_ms());
		map.put("buffer.memory", this.getBuffer_memory());
		map.put("key.serializer", this.getKey_deserializer());
		map.put("value.serializer", this.getValue_deserializer());
		return map;
	}

	/**
	 * Validates the loaded config, filling safe defaults for missing values.
	 *
	 * @return true when usable.
	 * @throws RuntimeException when projectName is missing (no sensible default).
	 */
	@Override
	public boolean checkConfig() {
		if (!isInit) {
			this.initConfig();
		}
		boolean br = true;
		if (servers == null || servers.equals("")) {
			logger.warn("讀取kafka服務器配置爲空, 請檢查 文件是否配置 kafka.Servers 屬性");
			servers = "127.0.0.1:9092";
		}
		if (clientId == null || clientId.equals("")) {
			logger.warn("讀取kafka客戶端名爲空, 請檢查文件是否配置 kafka.Client 屬性");
			clientId = "producer";
		}
		// client.id uses [name_IP] to avoid collisions between hosts.
		// Bug fix: append the "_<hostIP>" suffix only once — it used to be
		// re-appended on every checkConfig() call, growing clientId.
		String hostSuffix = "_" + MsgUtils.getHostIP();
		if (!clientId.endsWith(hostSuffix)) {
			clientId = clientId + hostSuffix;
		}
		if (projectName == null || projectName.equals("")) {
			logger.error("讀取kafka項目名爲空, 請檢查 文件是否配置 projectName 屬性");
			throw new RuntimeException("讀取kafka項目名稱空, 請檢查配置文件 projectName 屬性");
		}
		return br;
	}

	/**
	 * Loads config from the external environment once.
	 * Synchronized on the class since isInit is static; adapt the property
	 * lookups if your project sources configuration differently.
	 */
	@Override
	public boolean initConfig() {
		synchronized (MsgProducerConfig.class) {
			if (isInit) return true;
			String servers = GlobalEnv.getInitProperty("kafka.Servers");
			String clientId = GlobalEnv.getInitProperty("kafka.Client");
			String projectName = GlobalEnv.getInitProperty("projectName");
			// Required values.
			this.setServers(servers);
			this.setClientId(clientId);
			this.setProjectName(projectName);
			isInit = true;
			return true;
		}
	}
}
MsgReceivePreprocessor 消息預處理:
package org.test;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Example implementation of the receive-side preprocessing hook.
 * <p>
 * Adapt this to your actual message formats. Reminder: the sub-topic is used
 * at callback time to look up the matching business callback — keep it
 * consistent with {@code IReceiveCallback.getRecSubTopic()}.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgReceivePreprocessor implements IReceivePreprocessor {
	private static Log logger = LogFactory.getLog(MsgReceivePreprocessor.class);

	/**
	 * Wraps a raw ConsumerRecord into a MsgRecord, resolving the sub-topic
	 * (default: sub-topic == topic) and, for command topics, the message id
	 * from the "seq" field.
	 */
	@Override
	public MsgRecord preprocessor(ConsumerRecord<String, String> inRecord, Map<String, Object> inMap) {
		String topic = inRecord.topic();
		MsgRecord outRecord = new MsgRecord(topic, inRecord.key(), inRecord.value());
		outRecord.setMsgTime(inRecord.timestamp());
		// Default sub-topic is the topic itself.
		String subTopic = topic;
		if (MsgTopic.TOPIC_DATA_UP.equals(topic)) {
			if (logger.isDebugEnabled()) {
				logger.debug("收到上報數據:key = "+inRecord.key()+", value = "+inRecord.value());
			}
			if (inMap.containsKey("alarm")) {
				subTopic = "alarm"; // auto-reported alarm data
			} else if (inMap.containsKey("power")) {
				subTopic = "power"; // auto-reported power data
			}
		} else if (MsgTopic.TOPIC_COMMAND_SEND.equals(topic)) {
			if (logger.isDebugEnabled()) {
				logger.debug("收到發送命令:key = "+inRecord.key()+", value = "+inRecord.value());
			}
			subTopic = commandSubTopic(inMap, topic);
			// Unique message id for request/reply correlation.
			outRecord.setMsgId(MsgUtils.getString(inMap.get("seq")));
		} else if (MsgTopic.TOPIC_COMMAND_RECE.equals(topic)) {
			if (logger.isDebugEnabled()) {
				logger.debug("收到命令回覆:key = "+inRecord.key()+", value = "+inRecord.value());
			}
			subTopic = commandSubTopic(inMap, topic);
			// Unique message id for request/reply correlation.
			outRecord.setMsgId(MsgUtils.getString(inMap.get("seq")));
		}
		outRecord.setSubTopic(subTopic);
		return outRecord;
	}

	/**
	 * Resolves the command sub-topic shared by the SEND and RECE command
	 * topics (the two branches used to be copy-pasted duplicates).
	 */
	private static String commandSubTopic(Map<String, Object> inMap, String defaultSubTopic) {
		if (inMap.containsKey("r-switch")) {
			return "r-switch"; // read-switch command
		}
		if (inMap.containsKey("w-switch")) {
			return "w-switch"; // write-switch command
		}
		if (inMap.containsKey("r-state")) {
			return "r-state"; // read online-state command
		}
		return defaultSubTopic;
	}
}
MsgRecord 自定義內部消息對象:
package org.test;
/**
 * Lightweight wrapper around the fields of one Kafka message, so business
 * code does not depend on Kafka classes directly.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgRecord {
	// Unique message id: required when a reply is expected. Reuse the
	// message's own id if it has one; otherwise MsgUtils can generate one.
	private String msgId;
	// Message body.
	private String topic;
	private String key;
	private String value;
	// Extra bookkeeping.
	private String subTopic; // sub-topic, used on the receive side
	private long msgTime;    // send/receive timestamp (millis)
	private int count;       // send attempts; e.g. 3 resends may count as a timeout (caller-defined)
	private int status;      // send status: 0 not sent, 1 ok, 2 timeout, 3 failed (caller-defined)

	/** Creates a record without a message id (msgTime = now). */
	public MsgRecord(String topic, String key, String value) {
		this(topic, key, value, null);
	}

	/** Creates a record with an explicit message id (msgTime = now). */
	public MsgRecord(String topic, String key, String value, String msgId) {
		this.topic = topic;
		this.key = key;
		this.value = value;
		this.msgId = msgId;
		this.msgTime = System.currentTimeMillis();
	}

	public String getTopic() {
		return topic;
	}

	public void setTopic(String topic) {
		this.topic = topic;
	}

	public String getKey() {
		return key;
	}

	public void setKey(String key) {
		this.key = key;
	}

	public String getValue() {
		return value;
	}

	public void setValue(String value) {
		this.value = value;
	}

	public String getMsgId() {
		return msgId;
	}

	public void setMsgId(String msgId) {
		this.msgId = msgId;
	}

	public String getSubTopic() {
		return subTopic;
	}

	public void setSubTopic(String subTopic) {
		this.subTopic = subTopic;
	}

	public long getMsgTime() {
		return msgTime;
	}

	public void setMsgTime(long msgTime) {
		this.msgTime = msgTime;
	}

	public int getCount() {
		return count;
	}

	public void setCount(int count) {
		this.count = count;
	}

	public int getStatus() {
		return status;
	}

	public void setStatus(int status) {
		this.status = status;
	}
}