接上: kafka 簡易發送/接收框架 代碼之2
MsgTopic 消息主題常量:
package org.test;
/**
 * Message topic constants, command sub-topic constants and shared
 * message-body field names for the simple Kafka send/receive framework.
 * Extend or adjust as the project requires.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgTopic {

    /** Constants holder - not instantiable. */
    private MsgTopic() {
    }

    // ==================== shared message-body fields =======================
    /** Project name. */
    public static final String MSG_PROJECT = "project";

    // ==================== topic constants =======================
    /** System master-data sharing: published to subsystems after master data changes. */
    public static final String TOPIC_SYS_ARCH = "sys-arch";
    /** Unsolicited data upload: some devices report data periodically on their own. */
    public static final String TOPIC_DATA_UP = "data-up";
    /** Command sending: switch commands, read-out commands, etc. */
    public static final String TOPIC_COMMAND_SEND = "command-send";
    /** Command result reply (returned data or command execution result). */
    public static final String TOPIC_COMMAND_RECE = "command-rece";

    // ==================== command sub-topic constants =======================
    /** [read] online state. */
    public static final String COMMAND_R_STATE = "r-state";
    /** [read] switch state. */
    public static final String COMMAND_R_SWITCH = "r-switch";
    /** [write] switch command. */
    public static final String COMMAND_W_SWITCH = "w-switch";
    /** [read] temperature data. */
    public static final String COMMAND_R_TEMPERATURE = "r-temperature";
    /**
     * [write] temperature data.
     * Fixed: was "r-temperature" (copy-paste error), which made the write
     * command indistinguishable from the read command.
     */
    public static final String COMMAND_W_TEMPERATURE = "w-temperature";

    // ==================== upload sub-topic constants =======================
    /** Alarm data upload. */
    public static final String DATA_UP_ALARM = "alarm";
    /** Power data upload. */
    public static final String DATA_UP_POWER = "power";
}
MsgUtils 消息內部工具, 爲了不引用其它項目的工具特copy了一些工具獨立使用:
package org.test;
import java.math.BigDecimal;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * Internal helper utilities, copied in so this module does not depend on
 * other projects' tool classes. For sending/receiving messages see
 * KafkaTemplate.
 *
 * @author guishuanglin 2019-09-5
 */
public class MsgUtils {

    /** Default number of decimal places for floating-point formatting. */
    private static final int scalev = 4;
    /** Rolling per-JVM sequence 1..999999; guarded by the synchronized getMsgId19(). */
    private static long serial = 1;
    /** Cached local host IP, resolved lazily. */
    private static String hostIp = null;

    /** Utility class - not instantiable. */
    private MsgUtils() {
    }

    /**
     * Generates a 19-digit internal id:
     * 10 digits epoch-seconds + 3 digits random + 6 digits local sequence.
     *
     * @return a positive 19-digit id; unique within this JVM for up to
     *         999999 calls per second, the random part reduces cross-JVM
     *         collisions
     */
    public synchronized static long getMsgId19() {
        long id = System.currentTimeMillis() / 1000;            // 10 digits
        // (int)(Math.random()*1000) is in [0,999]. The previous
        // Math.round(...) could return 1000 and overflow the 3-digit slot.
        int randomInt = (int) (Math.random() * 1000);
        id = id * 1000 + randomInt;                             // 13 digits
        if (serial > 999999) {
            serial = 1;
        }
        return id * 1000000 + serial++;                         // 19 digits
    }

    /**
     * Returns the local host IP address, cached after the first successful
     * lookup; falls back to "localhost" when resolution fails.
     */
    public static String getHostIP() {
        if (hostIp == null) {
            try {
                hostIp = InetAddress.getLocalHost().getHostAddress();
            } catch (UnknownHostException e) {
                hostIp = "localhost";
                e.printStackTrace();
            }
        }
        return hostIp;
    }

    /**
     * Converts an arbitrary value to its String form.
     * Double/Float and non-integer BigDecimal values are rendered with
     * {@code scalev} decimal places, HALF_UP rounding. Date objects get no
     * special handling (plain toString).
     *
     * @param data value to convert; may be null
     * @return the string form, or null for null/blank/"null"/"NULL" input
     */
    public static String getString(Object data) {
        if (data == null) {
            return null;
        }
        if (data instanceof String) {
            return trimEmpty((String) data);
        }
        if (data instanceof Double || data instanceof Float) {
            return BigDecimal.valueOf(((Number) data).doubleValue())
                    .setScale(scalev, BigDecimal.ROUND_HALF_UP).toString();
        }
        if (data instanceof BigDecimal) {
            BigDecimal bd = (BigDecimal) data;
            return bd.scale() == 0 ? bd.toString()
                    : bd.setScale(scalev, BigDecimal.ROUND_HALF_UP).toString();
        }
        // Integer/Long/Short/Byte and everything else: plain toString,
        // normalized through trimEmpty like the String branch.
        return trimEmpty(String.valueOf(data).trim());
    }

    /**
     * Converts an arbitrary value to a Long (fractional parts truncated).
     *
     * @param data value to convert; may be null
     * @return the Long value, or null for null/blank/unparsable input
     */
    public static Long getLong(Object data) {
        if (data == null) {
            return null;
        }
        if (data instanceof Long) {
            return (Long) data;
        }
        // Integer, Short, Double, Float, BigDecimal, Byte: all truncate
        // through Number.longValue(), exactly as the explicit branches did.
        if (data instanceof Number) {
            return Long.valueOf(((Number) data).longValue());
        }
        String s = String.valueOf(data).trim();
        if (s.equals("")) {
            return null;
        }
        try {
            return Long.valueOf(s);
        } catch (Exception e) {
            System.out.println("對象 "+s+" 轉換成 Long 數據錯誤.");
            return null;
        }
    }

    /**
     * Converts an arbitrary value to a BigDecimal with the given scale,
     * HALF_UP rounding. Long/Integer inputs keep their natural scale of 0.
     *
     * @param data  value to convert; may be null
     * @param scale decimal places for BigDecimal/Double/Float/String inputs
     * @return the BigDecimal, or null for null/blank/unparsable input
     */
    public static BigDecimal getBigDecimal(Object data, int scale) {
        if (data == null) {
            return null;
        }
        if (data instanceof BigDecimal) {
            return ((BigDecimal) data).setScale(scale, BigDecimal.ROUND_HALF_UP);
        }
        if (data instanceof Double || data instanceof Float) {
            return BigDecimal.valueOf(((Number) data).doubleValue())
                    .setScale(scale, BigDecimal.ROUND_HALF_UP);
        }
        if (data instanceof Long || data instanceof Integer) {
            return BigDecimal.valueOf(((Number) data).longValue());
        }
        String s = String.valueOf(data).trim();
        if (s.equals("")) {
            return null;
        }
        try {
            return new BigDecimal(s).setScale(scale, BigDecimal.ROUND_HALF_UP);
        } catch (Exception e) {
            System.out.println("對象 "+s+" 轉換成 BigDecimal 數據錯誤.");
            return null;
        }
    }

    /**
     * Trims a string and normalizes empty/"null"/"NULL" to null.
     */
    public static String trimEmpty(String o) {
        if (o == null) {
            return null;
        }
        String str = o.trim();
        if (str.equals("") || str.equals("null") || str.equals("NULL")) {
            return null;
        }
        return str;
    }

    /** Minimal smoke test. */
    public static void main(String[] args) {
        System.out.println(MsgUtils.getMsgId19());
    }
}
ReceiveKafkaThread 消息接收線程:
package org.test;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import com.alibaba.fastjson.JSON;
/**
 * Message receiving thread (runs single-threaded).<br>
 * Start it through KafkaTemplate.
 * <p>
 * NOTE: the state is intentionally static - this framework runs at most one
 * consumer thread per JVM. KafkaConsumer is not thread-safe, so every
 * consumer operation (poll and close) happens on the poll thread only.
 *
 * @author guishuanglin 2019-09-5
 *
 */
public class ReceiveKafkaThread implements Runnable {

    private Log logger = LogFactory.getLog(this.getClass());
    private String fn = "Kafka接收線程";
    // volatile: written by the controlling thread (stop()) and read by the
    // poll loop; without it the loop might never observe a stop request.
    private static volatile boolean isRuning = false;
    private static volatile boolean isStop = false;
    private static String projectName = null;
    /** Receive configuration. */
    private static AbstractConfig cConfig = null;
    /** The single consumer; created in init(), used and closed on the poll thread. */
    private static KafkaConsumer<String, String> consumer = null;
    /** Pre-processing hook applied to every received record. */
    private static IReceivePreprocessor preprocessor = null;
    // Counted down exactly once, after the poll loop exits and the consumer
    // is closed. stop() awaits this latch with a timeout instead of
    // replacing it (the old code created a fresh latch in stop() and then
    // awaited it, which deadlocked whenever the loop had already finished
    // or was never started).
    private static CountDownLatch stopSignal = new CountDownLatch(1);

    /**
     * @param config receive configuration
     * @param prepr  pre-processing logic applied to each received message
     */
    public ReceiveKafkaThread(AbstractConfig config, IReceivePreprocessor prepr) {
        super();
        cConfig = config;
        preprocessor = prepr;
    }

    @Override
    public void run() {
        if (isRuning) return;
        if (!init()) return;
        isRuning = true;
        try {
            while (!isStop) {
                try {
                    long tvb = System.currentTimeMillis();
                    ConsumerRecords<String, String> records = consumer.poll(200);
                    int count = records.count();
                    for (ConsumerRecord<String, String> record : records) {
                        processMsg(record, projectName);
                    }
                    if (logger.isDebugEnabled()) {
                        long tve = System.currentTimeMillis();
                        logger.debug("完成接收"+ count +"條消息, 耗時:"+ (tve - tvb) +" ms");
                    }
                } catch (Exception e) {
                    logger.error("讀取Kafka消息出錯: ", e);
                }
            }
        } finally {
            // Close on this thread: KafkaConsumer must only ever be touched
            // by a single thread. The original code leaked the consumer
            // (stop() merely nulled the reference without closing it).
            try {
                if (consumer != null) {
                    consumer.close();
                }
            } catch (Exception e) {
                logger.error("關閉Kafka消費者出錯: ", e);
            }
            isStop = true;
            isRuning = false;
            stopSignal.countDown();
        }
    }

    /**
     * Processes one Kafka record, on this single receive thread, so message
     * order is preserved.<br>
     * Do NOT add multithreading here: records are ordered and concurrent
     * handling could reorder them. The correct multi-threaded pattern is to
     * enqueue messages quickly inside the IMsgBusiness implementation and
     * let worker threads drain that queue.<br>
     */
    private void processMsg(ConsumerRecord<String, String> record, String projectName) {
        try {
            Map<String, Object> msgdata = JSON.parseObject(record.value());
            String project = MsgUtils.getString(msgdata.get(MsgTopic.MSG_PROJECT));
            if (project != null && project.equals(projectName)) {
                // pre-process the raw record into the framework's MsgRecord
                MsgRecord msgRecord = preprocessor.preprocessor(record, msgdata);
                String subTopic = msgRecord.getSubTopic();
                // the sub-topic selects which registered callback handles it
                IReceiveCallback back = ReceiveProcessFactory.getReceiveCallback(record.topic(), subTopic);
                if (back != null) {
                    back.receiveProcess(msgRecord);
                } else if (logger.isDebugEnabled()) {
                    logger.debug("收到未知主題消息:"+ record.toString());
                }
            } else if (logger.isDebugEnabled()) {
                logger.debug("收到未知項目消息:"+ record.toString());
            }
        } catch (Exception e) {
            logger.error("處理Kafka消息出錯: ", e);
        }
    }

    /**
     * Lazily creates the KafkaConsumer and subscribes it to the configured
     * topics. (The previous javadoc here was a copy-paste of isStop()'s.)
     *
     * @return true when the consumer is ready; false when configuration or
     *         the preprocessor is missing
     */
    private boolean init() {
        if (consumer != null) { return true; }
        if (cConfig == null) {
            logger.error(fn + ", 啓動消息監聽線程失敗: kafka 配置爲空, 請先設置 AbstractConfig 參數");
            return false;
        }
        if (!cConfig.checkConfig()) {
            return false;
        }
        if (preprocessor == null) {
            logger.error(fn + ", 啓動消息監聽線程失敗: 接收預處理對象爲空, 請先設置 IReceivePreprocessor 參數");
            return false;
        }
        projectName = cConfig.getProjectName();
        logger.info("kafka接收配置, clientId ="+cConfig.getClientId() +", groupId ="+cConfig.getGroupId()+", Servers ="+cConfig.getServers());
        KafkaConsumer<String, String> _consumer = new KafkaConsumer<String, String>( cConfig.getPropertiesConfig() );
        _consumer.subscribe(cConfig.getTopics());
        consumer = _consumer;
        return true;
    }

    /**
     * Whether the KafkaConsumer receive thread has stopped.
     * @return the stop flag
     */
    public static boolean isStop() {
        return isStop;
    }

    /**
     * Requests the receive loop to stop, waits (bounded) for it to close the
     * KafkaConsumer, then clears the static state.
     *
     * @return the stop flag (always true)
     */
    public static boolean stop() {
        isStop = true;
        try {
            // Bounded wait for the poll thread to finish and close the
            // consumer; also returns promptly when the loop already exited
            // or was never started.
            stopSignal.await(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        consumer = null;
        cConfig = null;
        preprocessor = null;
        isRuning = false;
        return isStop;
    }
}
ReceiveProcessFactory 接收回調處理業務工廠:
package org.test;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Registry of receive-callback handlers, keyed by "topic_subTopic".<br>
 * Register a callback before consuming if the message needs handling.<br>
 * <br>
 * Note: sending a message with a callback argument registers the callback
 * here automatically; an IReceiveCallback implementation may also register
 * itself explicitly.
 *
 * @author guishuanglin 2019-09-5
 */
public class ReceiveProcessFactory {

    private static final Map<String, IReceiveCallback> PROCESS_MAP =
            new ConcurrentHashMap<String, IReceiveCallback>();

    /** Registry holder - not instantiable. */
    private ReceiveProcessFactory() {
    }

    /**
     * Looks up the callback registered for the given topic and sub-topic.
     *
     * @return the registered callback, or null when none exists
     */
    public static IReceiveCallback getReceiveCallback(String topic, String subTopic) {
        return PROCESS_MAP.get(topic + "_" + subTopic);
    }

    /**
     * Registers a callback for the given topic and sub-topic; the first
     * registration wins, later ones are ignored (same policy as before).
     * putIfAbsent makes the check-then-put atomic - the original
     * containsKey()/put() pair could race under concurrent registration.
     */
    public static void setReceiveCallback(IReceiveCallback callback, String topic, String subTopic) {
        PROCESS_MAP.putIfAbsent(topic + "_" + subTopic, callback);
    }
}
kafka 簡易發送/接收框架 代碼全部完成 (此代碼已經過測試)
說明: 代碼可以免費用於商業用途, 但請保留作者信息.
kafka的安裝, 包的下載, windows版的運行, 請參考網上其它文檔.
提醒: kafka 的發送與回覆是異步方式的: 發送時用發送主題, 接收回覆時用接收主題, 這也是 kafka 速度快的原因之一. 當然, 現實中我們往往需要在發送後等待回覆, 這正是本框架定義發送/接收回調類的原因: 通過回調類即可把一次發送與其對應的接收配合起來處理, 而又不影響 kafka 的效率.