Spring Boot + Logback: sending logs to Kafka with a custom appender

The project resolves the Kafka address dynamically at runtime, and periodically pings Kafka to check whether it is online.

The approach is to capture the application's log output and forward it to Kafka. An ELK stack is deployed alongside: Logstash consumes from Kafka and writes each message straight into Elasticsearch.
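
For reference, the Logstash side can be as small as a kafka input feeding an elasticsearch output. Below is a minimal pipeline sketch; the broker address, topic name, and index pattern are assumptions and must match your own deployment:

input {
  kafka {
    bootstrap_servers => "192.168.1.80:9092"  # assumption: the broker used later in logback.xml
    topics => ["app-logs"]                    # assumption: the topic configured in logback.xml
    codec => "json"                           # the appender emits JSON via LogstashLayout
  }
}
output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]               # assumption: a local Elasticsearch node
    index => "app-logs-%{+YYYY.MM.dd}"
  }
}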

First, the project's Maven dependencies:

        <!-- Logback appender that ships logs to Kafka -->
        <dependency>
            <groupId>com.github.danielwegener</groupId>
            <artifactId>logback-kafka-appender</artifactId>
            <version>0.1.0</version>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-core</artifactId>
            <version>1.1.11</version>
        </dependency>
        <dependency>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
            <version>1.1.11</version>
        </dependency>
        <dependency>
            <groupId>net.logstash.logback</groupId>
            <artifactId>logstash-logback-encoder</artifactId>
            <version>4.11</version>
        </dependency>
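
The custom appender shown later uses org.apache.kafka.clients.producer directly, so the Kafka client library must be on the compile classpath. If your build does not pull it in transitively through logback-kafka-appender, add it explicitly; the version below is an assumption, pick one compatible with your broker:

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.11.0.0</version>
        </dependency>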

Three classes support the ping/telnet check:

Constants.java


/**
 * @ClassName Constants
 * @Author dlh
 * @Date 2019/11/13 18:02
 * @Version 1.0
 **/
public class Constants {
    /***
     * Delete status
     */
    public static enum DeleteStatus {
        NORMAL("0", "NORMAL", "normal"),
        DELETE("1", "DELETE", "deleted");
        private DeleteStatus(String value,String name,String desc){
            this.value=value;
            this.name=name;
            this.desc=desc;
        }
        private final String value;
        private final String name;
        private final String desc;

        public String getValue() {
            return value;
        }

        public String getName() {
            return name;
        }

        public String getDesc() {
            return desc;
        }
    }

    /***
     * Result code
     */
    public static enum ResultCode {
        /** Success */
        SUCCESS("200", "success"),
        /** No data */
        NULL_DATA("205", "no data"),
        /** Not logged in */
        NOT_LOGIN("400", "not logged in"),

        /** Exception occurred */
        EXCEPTION("401", "exception occurred"),

        /** System error */
        SYS_ERROR("402", "system error"),

        /** Invalid parameters */
        PARAMS_ERROR("403", "invalid parameters"),

        /** Not supported or deprecated */
        NOT_SUPPORTED("410", "not supported or deprecated"),

        /** Invalid AuthCode */
        INVALID_AUTHCODE("444", "invalid AuthCode"),

        /** Called too frequently */
        TOO_FREQUENT("445", "called too frequently"),

        /** Unknown error */
        UNKNOWN_ERROR("499", "unknown error");


        private ResultCode(String value, String msg){
            this.val = value;
            this.msg = msg;
        }

        public String val() {
            return val;
        }

        public String msg() {
            return msg;
        }

        private String val;
        private String msg;
    }
}

JsonResult.java

/**
 * @ClassName JsonResult
 * @Author dlh
 * @Date 2019/11/13 18:01
 * @Version 1.0
 **/

public class JsonResult {
    /**
     * Result code
     */
    private String code;
    /**
     * Result description
     */
    private String message;
    private Object data;

    public JsonResult() {
        this.setCode(Constants.ResultCode.SUCCESS);
        this.setMessage(Constants.ResultCode.SUCCESS.msg());

    }

    public JsonResult(Constants.ResultCode code) {
        this.setCode(code);
        this.setMessage(code.msg());
    }

    public JsonResult(Constants.ResultCode code, String message) {
        this.setCode(code);
        this.setMessage(message);
    }

    public JsonResult(Constants.ResultCode code, String message, Object data) {
        this.setCode(code);
        this.setMessage(message);
        this.setData(data);
    }

    public String getCode() {
        return code;
    }
    public void setCode(Constants.ResultCode code) {
        this.code = code.val();
        this.message = code.msg();
    }
    public String getMessage() {
        return message;
    }
    public void setMessage(String message) {
        this.message = message;
    }

    public Object getData() {
        return data;
    }

    public void setData(Object data) {
        this.data = data;
    }

    @Override
    public String toString() {
        return "JsonResult{" +
                "code='" + code + '\'' +
                ", message='" + message + '\'' +
                ", data=" + data +
                '}';
    }
}

PTUtil.java

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.UnknownHostException;

/**
 * @ClassName PTUtil
 * @Author dlh
 * @Date 2019/11/13 18:01
 * @Version 1.0
 **/

public class PTUtil {
    /***
     *  Ping check. Note that InetAddress.isReachable can only send a true ICMP
     *  echo when the JVM has sufficient privileges; otherwise it falls back to
     *  a TCP connection attempt on port 7 (echo).
     * @param hostname
     * @param timeout in milliseconds
     * @return
     */
    public static JsonResult pingResult(String hostname, Integer timeout){
        JsonResult jsonResult = new JsonResult();
        try {
            InetAddress address = InetAddress.getByName(hostname);
            boolean flag = address.isReachable(timeout);
            if (flag){
                jsonResult.setMessage("ping result: the address is reachable.");
            } else {
                jsonResult.setCode(Constants.ResultCode.EXCEPTION);
                jsonResult.setMessage("ping result: the address is unreachable.");
            }
        } catch (UnknownHostException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("ping result: UnknownHostException: " + e.getMessage());
        } catch (IOException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("ping result: IOException: " + e.getMessage());
        }
        return jsonResult;
    }
    /***
     *  Telnet check; hostname is expected in "host:port" form
     * @param hostname
     * @param timeout in milliseconds
     * @return
     */
    public static JsonResult telnetResult(String hostname, Integer timeout){
        JsonResult jsonResult = new JsonResult();
        String[] split = hostname.split(":");
        // try-with-resources ensures the socket is closed even when connect() throws
        try (Socket server = new Socket()) {
            InetSocketAddress address = new InetSocketAddress(split[0], Integer.parseInt(split[1]));
            server.connect(address, timeout);
            jsonResult.setMessage("telnet result: success!");
        } catch (UnknownHostException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("telnet result: UnknownHostException: " + e.getMessage());
        } catch (IOException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("telnet result: IOException: " + e.getMessage());
        }
        return jsonResult;
    }

    /***
     *  Telnet check with an explicit port
     * @param hostname
     * @param port
     * @param timeout in milliseconds
     * @return
     */
    public static JsonResult telnetResult(String hostname, Integer port, Integer timeout){
        JsonResult jsonResult = new JsonResult();
        // try-with-resources ensures the socket is closed even when connect() throws
        try (Socket server = new Socket()) {
            InetSocketAddress address = new InetSocketAddress(hostname, port);
            server.connect(address, timeout);
            jsonResult.setMessage("telnet result: success!");
        } catch (UnknownHostException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("telnet result: UnknownHostException: " + e.getMessage());
        } catch (IOException e) {
            jsonResult.setCode(Constants.ResultCode.EXCEPTION);
            jsonResult.setMessage("telnet result: IOException: " + e.getMessage());
        }
        return jsonResult;
    }

    public static void main(String[] args) {
        JsonResult jsonResult = telnetResult("192.168.1.80", 9092, 2000);
        System.out.println(jsonResult);
    }
}

The custom appender that writes log output to Kafka:

import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.Appender;
import ch.qos.logback.core.spi.AppenderAttachableImpl;
import com.github.danielwegener.logback.kafka.KafkaAppenderConfig;
import com.github.danielwegener.logback.kafka.delivery.FailedDeliveryCallback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import java.util.HashMap;
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * @ClassName KafkaAppenderAds
 * @Author dlh
 * @Date 2019/11/19 14:29
 * @Version 1.0
 **/

public class KafkaAppenderAds<E> extends KafkaAppenderConfig<E> {
    /**
     * Kafka clients uses this prefix for its slf4j logging.
     * This appender defers appends of any Kafka logs since it could cause harmful infinite recursion/self feeding effects.
     */
    private static final String KAFKA_LOGGER_PREFIX = "org.apache.kafka.clients";

    public static volatile KafkaAppenderAds.LazyProducer lazyProducer = null;
    private final AppenderAttachableImpl<E> aai = new AppenderAttachableImpl<E>();
    private final ConcurrentLinkedQueue<E> queue = new ConcurrentLinkedQueue<E>();
    final static String LOCAL = "local-log";

    final static String KAFKA = "kafka-log";

    private static volatile Boolean telnetKafkaSucc = false;
    private final FailedDeliveryCallback<E> failedDeliveryCallback = new FailedDeliveryCallback<E>() {
        @Override
        public void onFailedDelivery(E evt, Throwable throwable) {
            aai.appendLoopOnAppenders(evt);
        }
    };

    public KafkaAppenderAds() {
        // setting these as config values sidesteps an unnecessary warning (minor bug in KafkaProducer)
        addProducerConfigValue(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        addProducerConfigValue(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    }

    @Override
    public void doAppend(E e) {
        ensureDeferredAppends();
        if (e instanceof ILoggingEvent && ((ILoggingEvent)e).getLoggerName().startsWith(KAFKA_LOGGER_PREFIX)) {
            deferAppend(e);
        } else {
            super.doAppend(e);
        }
    }

    @Override
    public void start() {
        // only error free appenders should be activated
        if (!checkPrerequisites()) {
            return;
        }

        lazyProducer = new KafkaAppenderAds.LazyProducer();

        super.start();
    }

    @Override
    public void stop() {
        super.stop();
        if (lazyProducer != null && lazyProducer.isInitialized()) {
            try {
                lazyProducer.get().close();
            } catch (KafkaException e) {
                this.addWarn("Failed to shut down kafka producer: " + e.getMessage(), e);
            }
            lazyProducer = null;
        }
    }

    @Override
    public void addAppender(Appender<E> newAppender) {
        aai.addAppender(newAppender);
    }

    @Override
    public Iterator<Appender<E>> iteratorForAppenders() {
        return aai.iteratorForAppenders();
    }

    @Override
    public Appender<E> getAppender(String name) {
        return aai.getAppender(name);
    }

    @Override
    public boolean isAttached(Appender<E> appender) {
        return aai.isAttached(appender);
    }

    @Override
    public void detachAndStopAllAppenders() {
        aai.detachAndStopAllAppenders();
    }

    @Override
    public boolean detachAppender(Appender<E> appender) {
        return aai.detachAppender(appender);
    }

    @Override
    public boolean detachAppender(String name) {
        return aai.detachAppender(name);
    }


    @Override
    protected void append(E e) {
        if (telnetKafkaSucc == null){
            synchronized (this){
                if (telnetKafkaSucc == null){
                    telnetKafkaSucc = false;
                }
            }
        }
        // drop the event unless the periodic telnet check has confirmed kafka is reachable
        if (!telnetKafkaSucc){
            return;
        }
        // the kafka address is injected at runtime; nothing to do until it is known
        if (kafkaServerAddr == null || lazyProducer == null){
            return;
        }
        final byte[] payload = encoder.doEncode(e);
        final byte[] key = keyingStrategy.createKey(e);
        final ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[],byte[]>(topic, key, payload);
        final Producer<byte[], byte[]> producer = lazyProducer.get();
        if (producer != null) {
            deliveryStrategy.send(producer, record, e, failedDeliveryCallback);
        } else {
            // producer creation failed; hand the event to the fallback appenders
            failedDeliveryCallback.onFailedDelivery(e, null);
        }
    }


    public static void setTelnetKafkaSucc(boolean telnetKafkaSucc){
        KafkaAppenderAds.telnetKafkaSucc = telnetKafkaSucc;
    }

    public static Boolean getTelnetKafkaSucc(){
        return telnetKafkaSucc;
    }

    public static volatile String kafkaServerAddr;
    public static void setKafkaServerAddr(String kafkaServerAddr){
        KafkaAppenderAds.kafkaServerAddr = kafkaServerAddr;
    }

    protected Producer<byte[], byte[]> createProducer() {
        producerConfig.put("bootstrap.servers",KafkaAppenderAds.kafkaServerAddr);
        KafkaProducer<byte[], byte[]> kafkaProducer =
                new KafkaProducer<>(new HashMap<String, Object>(producerConfig));
        return kafkaProducer;
    }

    private void deferAppend(E event) {
        queue.add(event);
    }

    // drains queue events to super
    private void ensureDeferredAppends() {
        E event;

        while ((event = queue.poll()) != null) {
            super.doAppend(event);
        }
    }
    public static KafkaAppenderAds.LazyProducer getLazyProducer(){
        return lazyProducer;
    }
    /**
     * Lazy initializer for producer, patterned after commons-lang.
     *
     * @see <a href="https://commons.apache.org/proper/commons-lang/javadocs/api-3.4/org/apache/commons/lang3/concurrent/LazyInitializer.html">LazyInitializer</a>
     */
    public class LazyProducer {

        private volatile Producer<byte[], byte[]> producer;

        public Producer<byte[], byte[]> get() {
            Producer<byte[], byte[]> result = this.producer;
            if (result == null) {
                synchronized(this) {
                    result = this.producer;
                    if(result == null) {
                        this.producer = result = this.initialize();
                    }
                }
            }

            return result;
        }

        protected Producer<byte[], byte[]> initialize() {
            Producer<byte[], byte[]> producer = null;
            try {
                producer = createProducer();
            } catch (Exception e) {
                addError("error creating producer", e);
            }
            return producer;
        }

        public boolean isInitialized() { return producer != null; }

        public boolean closeProducer(){
            if (isInitialized()){
                this.producer.close();
                this.producer = null;
                return true;
            }
            return false;
        }
    }
}

The scheduled check:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;

/**
 * @ClassName ScheduledTelnet
 * @Author dlh
 * @Date 2019/11/14 16:07
 * @Version 1.0
 **/
@Component
public class ScheduledTelnetThread {

    private static final Logger LOGGER = LoggerFactory.getLogger(ScheduledTelnetThread.class);

    private static final String DEFAULT_IP = "127.0.0.1:8000";

    @Value("${spring.kafka.bootstrap-servers:127.0.0.1:8000}")
    String kafkaServerAddr;

    Integer timeOut = 2000;

    private static final String SUCCESS_CODE = "200";



    @PostConstruct
    public void init(){
        TelnetThread telnetThread = new TelnetThread();
        telnetThread.setDaemon(true);
        telnetThread.start();
        KafkaTemplateInit kafkaTemplateInit = new KafkaTemplateInit();
        kafkaTemplateInit.setDaemon(true);
        kafkaTemplateInit.start();
    }

    class KafkaTemplateInit extends Thread{
        @Override
        public void run(){
            int count = 0;
            LOGGER.info("KafkaTemplateInit Thread start...");
            while (true){
                count++;
                if (!DEFAULT_IP.equals(kafkaServerAddr)){
                    // a real address has been injected; hand it to the appender and stop polling
                    KafkaAppenderAds.setKafkaServerAddr(kafkaServerAddr);
                    LOGGER.info("resolved kafka address: {}", kafkaServerAddr);
                    break;
                }
                if (count >= 120){
                    // still the placeholder address after ~60s; give up instead of spinning
                    LOGGER.warn("kafka address not resolved after {} attempts, giving up", count);
                    break;
                }
                try {
                    sleep(500);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    }

    class TelnetThread extends Thread{
        @Override
        public void run(){
            LOGGER.info("TelnetThread start..");
            while (true){
                try {
                    if (kafkaServerAddr == null || DEFAULT_IP.equals(kafkaServerAddr)){
                        sleep(500);
                        continue;
                    }
                    telnet();
                    sleep(1000 * 60 * 2);
                }catch (Exception e){
                    e.printStackTrace();
                }
            }
        }

        private void telnet(){
            try {
                String[] addrSplit = kafkaServerAddr.split(":");
                String hostName = addrSplit[0];
                Integer port = Integer.parseInt(addrSplit[1]);
                JsonResult jsonResult = PTUtil.telnetResult(hostName, port, timeOut);
                LOGGER.info("telnet:{},result:{}", KafkaAppenderAds.getTelnetKafkaSucc(), jsonResult);
                boolean telnetRes = SUCCESS_CODE.equals(jsonResult.getCode());
                if (!telnetRes && KafkaAppenderAds.getLazyProducer() != null){
                    // kafka is unreachable: release the producer so it is recreated on recovery
                    KafkaAppenderAds.getLazyProducer().closeProducer();
                    System.out.println("kafka unreachable, producer closed");
                }
                if (KafkaAppenderAds.getTelnetKafkaSucc() == telnetRes){
                    return;
                }
                KafkaAppenderAds.setTelnetKafkaSucc(telnetRes);
            }catch (Exception e){
                e.printStackTrace();
            }
        }
    }
}
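
As a design note, the same health check could lean on Spring's scheduler instead of hand-rolled daemon threads. A minimal sketch, assuming @EnableScheduling is active and covering only the telnet part (the class and method names here are hypothetical):

import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class KafkaHealthCheck {  // hypothetical name

    @Value("${spring.kafka.bootstrap-servers:127.0.0.1:8000}")
    String kafkaServerAddr;

    // mirrors the two-minute interval of TelnetThread
    @Scheduled(fixedDelay = 2 * 60 * 1000)
    public void check() {
        String[] parts = kafkaServerAddr.split(":");
        JsonResult result = PTUtil.telnetResult(parts[0], Integer.parseInt(parts[1]), 2000);
        KafkaAppenderAds.setTelnetKafkaSucc("200".equals(result.getCode()));
    }
}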

logback.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <springProperty scope="context" name="server-name" source="spring.application.name"
                    defaultValue="default-server"/>
    <contextName>poll-server</contextName>
    <property name="LOG_PATH" value="/home" />
    <!-- system log directory -->
    <property name="APP_DIR" value="app-server" />
    <property name="project_name" value="app-server"/>
    <property name="log.pattern" value="%d{yyyy-MM-dd HH:mm:ss.SSS} ${LOG_LEVEL_PATTERN:-%5p} ${PID:-} [%thread]  [%-40.40logger{39}:%line] %m%n" />

    <!-- console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${log.pattern}</pattern>
        </encoder>
    </appender>

    <!-- ERROR-level logs to file -->
    <appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- path and name of the active log file -->
        <file>${LOG_PATH}/${APP_DIR}/${project_name}_log_error.log</file>
        <!-- rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/${APP_DIR}/error/${project_name}_log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>60</maxHistory>
            <totalSizeCap>20GB</totalSizeCap>
        </rollingPolicy>
        <!-- append to the existing file -->
        <append>true</append>
        <!-- log entry format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${log.pattern}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- this file accepts ERROR level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- WARN-level logs to file, rolled by date and size -->
    <appender name="FILE_WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- path and name of the active log file -->
        <file>${LOG_PATH}/${APP_DIR}/${project_name}_log_warn.log</file>
        <!-- rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/${APP_DIR}/warn/${project_name}_log-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>60</maxHistory>
            <totalSizeCap>20GB</totalSizeCap>
        </rollingPolicy>
        <!-- append to the existing file -->
        <append>true</append>
        <!-- log entry format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${log.pattern}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- this file accepts WARN level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>WARN</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- INFO-level logs to file, rolled by date and size -->
    <appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- path and name of the active log file -->
        <file>${LOG_PATH}/${APP_DIR}/${project_name}_log_info.log</file>
        <!-- rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/${APP_DIR}/info/${project_name}_log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>60</maxHistory>
            <totalSizeCap>20GB</totalSizeCap>
        </rollingPolicy>
        <!-- append to the existing file -->
        <append>true</append>
        <!-- log entry format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${log.pattern}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- this file accepts INFO level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- DEBUG-level logs to file, rolled by date and size -->
    <appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- path and name of the active log file -->
        <file>${LOG_PATH}/${APP_DIR}/${project_name}_log_debug.log</file>
        <!-- rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/${APP_DIR}/debug/${project_name}_log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>60</maxHistory>
            <totalSizeCap>20GB</totalSizeCap>
        </rollingPolicy>
        <!-- append to the existing file -->
        <append>true</append>
        <!-- log entry format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${log.pattern}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- this file accepts DEBUG level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>DEBUG</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="KafkaAppender"
              class="com.shanhe.api.config.log.KafkaAppenderAds">
        <encoder
                class="com.github.danielwegener.logback.kafka.encoding.LayoutKafkaMessageEncoder">
            <layout class="net.logstash.logback.layout.LogstashLayout">
                <!-- 是否包含上下文 -->
                <includeContext>true</includeContext>
                <!-- 是否包含日誌來源 -->
                <includeCallerData>true</includeCallerData>
                <!-- 自定義附加字段 -->
                <!--<customFields>{"system":"test"}</customFields>-->
                <!-- 自定義字段的簡稱 -->
                <fieldNames class="net.logstash.logback.fieldnames.ShortenedFieldNames" />
            </layout>
            <charset>UTF-8</charset>
        </encoder>
        <!--kafka topic 需要與配置文件裏面的topic一致 -->
        <topic>
            app-logs
        </topic>
        <keyingStrategy
                class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />
        <deliveryStrategy
                class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>
            bootstrap.servers=192.168.1.80:9092
        </producerConfig>
        <!-- this is the fallback appender if kafka is not available. -->
        <appender-ref ref="CONSOLE" />
    </appender>

    <!--log4jdbc -->
    <logger name="jdbc.sqltiming" level="Info"/>

    <logger name="org.mybatis" level="Info" />
    <logger name="org.apache.ibatis" level="Info" />
    <logger name="org.apache.ibatis.common.jdbc.ScriptRunner" level="Info" />
    <logger name="org.apache.ibatis.sqlmap.engine.impl.SqlMapClientDelegate" level="Info" />
    <logger name="java.sql.Connection" level="Info" />
    <logger name="java.sql.Statement" level="Info" />
    <logger name="java.sql.PreparedStatement" level="Info" />
    <logger name="java.sql.ResultSet" level="Info" />

    <logger name="org.springframework" level="Info" />
    <logger name="com.shanhe" level="Info" />

    <root level="Info">
        <appender-ref ref="FILE_ERROR" />
        <appender-ref ref="FILE_WARN" />
        <appender-ref ref="FILE_INFO" />
        <appender-ref ref="FILE_DEBUG" />
        <appender-ref ref="CONSOLE" />
        <appender-ref ref="KafkaAppender" />
    </root>
</configuration>

Finally, the YAML configuration:

spring:
  kafka:
    bootstrap-servers: 192.168.1.80:9092
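
To verify the whole pipeline end to end, it is enough to emit a log line once the application is up and watch for it in Kafka and Elasticsearch. A minimal sketch; the class and endpoint names are hypothetical:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class LogTestController {  // hypothetical test endpoint

    private static final Logger LOGGER = LoggerFactory.getLogger(LogTestController.class);

    @GetMapping("/log-test")
    public String logTest() {
        // should appear on the console, in the rolling files, and in the app-logs topic
        LOGGER.info("kafka appender smoke test");
        return "ok";
    }
}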