Writing large volumes of data to Kafka in batches with Java and consuming Kafka in real time with Spark Streaming to complete the data sink (Hive-to-Hive data migration/synchronization)

I recently ran into the following scenario:

There are two Hadoop clusters, and Hive data from one cluster needs to be transferred to the Hive on the other. The source Hive belongs to another company, so a certain degree of security and confidentiality is involved.

The general approach is:

Java reads the source Hive —> our company's Kafka —> Spark Streaming reads Kafka —> target Hive

Code examples:

Java reads the Hive table data from the other company:
package com.zhbr.dataImport.test;

import com.alibaba.fastjson.JSON;
import com.zhbr.dataImport.rdbms.ImportRDBMSData;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.sql.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * @ClassName GW_to_Kafka_test2
 * @Description reads the source Hive table over JDBC and writes it to Kafka in batches
 * @Author yanni
 * @Date 2020/3/25 9:07
 * @Version 1.0
 **/
public class GW_to_Kafka_test2 {

    private static String brokerList = "192.168.72.141:9092,192.168.72.142:9092,192.168.72.143:9092";

    // public static final String topic="topic-demo";
    private static String topic = "hive2kafka2";

    public static void main(String[] args) throws SQLException {
        //read from the source via a custom JDBC helper (a sketch of ImportRDBMSData follows this class)
        Connection conn  = ImportRDBMSData.getConn();
        Statement stmt  = ImportRDBMSData.getStatement(conn);
        String querySQL = "select * from lsb_copy";

        //execute the query
        ResultSet res = stmt.executeQuery(querySQL);

        //create the list that buffers one batch of rows
        ArrayList<Map<String, Object>> list = new ArrayList<Map<String, Object>>();

        //get the structure of the result set (metadata)
        ResultSetMetaData metaData = res.getMetaData();

        //number of columns in the ResultSet
        int columnCount = metaData.getColumnCount();

        //configure the producer client parameters
        //key/value serializers
        Properties properties = new Properties();
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        //producer memory buffer
        properties.put("buffer.memory", 67108864);
        //batch size
        properties.put("batch.size", 131072);
        //linger time before sending
        properties.put("linger.ms", 100);
        //maximum message size
        properties.put("max.request.size", 10485760);
        //retries on failure
        properties.put("retries", 3);
        properties.put("retry.backoff.ms", 20000);
        //ack level (1 means the partition leader has acknowledged the write)
        properties.put("acks", "1");
        properties.put("bootstrap.servers", brokerList);
        //compression
        properties.put("compression.type", "gzip");
        //create the KafkaProducer instance
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);

        try {
            // convert the ResultSet into a List<Map> structure
            // next() moves the cursor to the next row, making it the current row
            while (res.next()) {

                //create a map for the current row
                HashMap<String, Object> map = new HashMap<String, Object>();

                // put a column-name -> value entry into the map for every column of the current row
                for (int i = 1; i <= columnCount; i++) {
                    // column name of the i-th field of the current row
                    String allColumnName = metaData.getColumnName(i).toLowerCase();

                    // res.getObject(i) returns the value of the i-th field of the current row
                    Object columnValue = res.getObject(i);
                    map.put(allColumnName, columnValue);
                }

                //add the row to the batch
                list.add(map);

                //once the batch holds 5000 rows, send it as one message
                if (list.size() == 5000) {
                    String str = JSON.toJSONString(list);

                    //build the message to send
                    ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, str);

                    //hand the message to the producer (send is asynchronous)
                    kafkaProducer.send(record);
                    //log the hand-off
                    System.out.println("batchSize 5000 send success from producer");

                    //clear the batch
                    list.clear();
                }
            }

            //send the remaining rows (fewer than 5000)
            if (list.size() > 0) {
                String str = JSON.toJSONString(list);

                //build the message to send
                ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, str);

                //hand the message to the producer (send is asynchronous)
                kafkaProducer.send(record);
                //log the hand-off
                System.out.println("batchSize " + list.size() + " send success from producer");

                //clear the batch
                list.clear();
            }

        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            //close the producer client and release the JDBC resources
            kafkaProducer.close();
            ImportRDBMSData.closeAllConn(stmt, conn);
        }
    }
}
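
The ImportRDBMSData helper imported at the top of the class is not shown in the original post. For completeness, here is a minimal sketch of what it could look like, assuming the source Hive is reached through HiveServer2 over JDBC; the driver class, connection URL, and credentials are placeholders:

package com.zhbr.dataImport.rdbms;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Minimal sketch of the JDBC helper referenced above; the real implementation
 * is not part of the original post. URL, user and password are placeholders.
 */
public class ImportRDBMSData {

    private static final String DRIVER   = "org.apache.hive.jdbc.HiveDriver";
    private static final String URL      = "jdbc:hive2://source-hive-host:10000/default";
    private static final String USER     = "hive";
    private static final String PASSWORD = "";

    public static Connection getConn() throws SQLException {
        try {
            //register the Hive JDBC driver
            Class.forName(DRIVER);
        } catch (ClassNotFoundException e) {
            throw new SQLException("Hive JDBC driver not found on the classpath", e);
        }
        return DriverManager.getConnection(URL, USER, PASSWORD);
    }

    public static Statement getStatement(Connection conn) throws SQLException {
        return conn.createStatement();
    }

    public static void closeAllConn(Statement stmt, Connection conn) {
        //close the statement and the connection; just log any failure
        try {
            if (stmt != null) stmt.close();
            if (conn != null) conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}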

Writing in batches avoids the data loss and server crashes that performance problems would otherwise cause, and is generally enough to get large Hive tables written reliably.
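
One caveat: kafkaProducer.send() is asynchronous, so the println in the code above only means the batch was handed to the producer, not that the broker accepted it. If delivery needs to be confirmed, a callback can be attached to each send. A minimal sketch (the BatchSender class and its sendBatch helper are illustrative, not part of the original code):

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.List;
import java.util.Map;

public class BatchSender {

    /**
     * Serializes one batch to JSON, sends it, and logs the broker's answer in a callback.
     * Illustrative helper, not part of the original code.
     */
    public static void sendBatch(KafkaProducer<String, String> producer,
                                 String topic,
                                 List<Map<String, Object>> batch) {
        String payload = JSON.toJSONString(batch);
        ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, payload);

        //capture the size now: the caller clears the list right after this call
        final int rows = batch.size();

        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                //delivery failed even after the configured retries
                System.err.println("batch of " + rows + " rows failed: " + exception.getMessage());
            } else {
                //delivery acknowledged (with acks=1, the partition leader has written it)
                System.out.println("batch of " + rows + " rows sent to "
                        + metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
            }
        });
    }
}

The loop above could then call BatchSender.sendBatch(kafkaProducer, topic, list) in both places where it currently builds and sends the record.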


Spark Streaming consumes Kafka in real time and sinks the data to Hive (the example below writes to MySQL for testing; the Hive write is shown in the commented-out lines):
package com.zhbr.dataImport.test

import kafka.serializer.StringDecoder
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object Kafka_to_Hive {

    def main(args: Array[String]): Unit = {

      //create the SparkSession
      val spark = SparkSession.builder().appName(this.getClass.getSimpleName.filter(!_.equals('$')))
        .master("local[4]").config("spark.streaming.receiver.writeAheadLog.enable","true").getOrCreate()

      //get the SparkContext
      val sc = spark.sparkContext

      //set the log level
      sc.setLogLevel("WARN")

      val ssc: StreamingContext = new StreamingContext(sc,Seconds(5))

      //set a checkpoint directory; in production it usually points to HDFS, whose fault tolerance and replication keep the data from being lost
      ssc.checkpoint("./kafka-chk2")

      //set the Kafka parameters
      val kafkaParams=Map("metadata.broker.list"->"node01:9092,node02:9092,node03:9092","group.id"->"group1")

      //set the topics
      val topics=Set("hive2kafka2")

      //create the direct stream and get the data from Kafka
      val data: InputDStream[(String, String)] = KafkaUtils.createDirectStream[String,String,StringDecoder,StringDecoder](ssc,kafkaParams,topics)

      //the actual payload is the second element of the (key, value) tuple
      val realData: DStream[String] = data.map(x=>x._2)

      realData.map(record => record.toString).foreachRDD(rdd => {
        import spark.implicits._
        val df = spark.read.json(spark.createDataset(rdd))

        //write to MySQL
        df.write.mode(SaveMode.Append).format("jdbc")
                  .option(JDBCOptions.JDBC_URL,"jdbc:mysql://localhost:3306/test11")
                  .option("user","root")
                  .option("password","123")
                  .option(JDBCOptions.JDBC_TABLE_NAME,"lsb_copy")
                  .save()

        //write to Hive
        //df.createTempView("df_tmp")
        //spark.sql("insert into table df_copy select * from df_tmp")
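        //note: for the Hive write to work, the SparkSession above would need to be built with
        //.enableHiveSupport(), the target table (df_copy) has to exist already in the target Hive,
        //and createOrReplaceTempView is safer than createTempView, which throws once the view
        //already exists on the second micro-batch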
      })

      //start the streaming computation
      ssc.start()
      ssc.awaitTermination()
  }
}




Finally:

Suggestions for simpler and faster approaches are very welcome.
