flink-esSink

  • 1.所需依賴
 <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-connector-elasticsearch6_2.11</artifactId>
      <version>1.9.0</version>
    </dependency>
<!-- https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient -->
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.5</version>
</dependency>

  • 2.獲取esSink
 /**
    * Builds an {@code ElasticsearchSink} that writes {@code Tuple2<String, Long>}
    * records as documents into the "my_index" index (type "my_data").
    *
    * <p>Each tuple becomes a document with three string fields: "content" (f0),
    * "eventTime" (f1) and "processTime" (wall-clock time at write time).
    * NOTE(review): per the original author's note, the target index must already
    * exist on the cluster — this sink does not create it.
    */
   private static ElasticsearchSink<Tuple2<String, Long>> generateESSink() {
       // Connection endpoint of the (single-node) Elasticsearch cluster.
       List<HttpHost> hosts = Collections.singletonList(
               new HttpHost("192.168.19.10", 9200, "http")
       );
       // Per-record emitter: turn the tuple into an IndexRequest and hand it to the indexer.
       ElasticsearchSinkFunction<Tuple2<String, Long>> emitter = new ElasticsearchSinkFunction<Tuple2<String, Long>>() {
           @Override
           public void process(Tuple2<String, Long> record, RuntimeContext ctx, RequestIndexer indexer) {
               indexer.add(toIndexRequest(record));
           }
       };
       ElasticsearchSink.Builder<Tuple2<String, Long>> builder =
               new ElasticsearchSink.Builder<>(hosts, emitter);
       // Buffer up to 50 actions per bulk request; per the original note, the
       // default would flush every single record individually.
       builder.setBulkFlushMaxActions(50);
       return builder.build();
   }

 /**
    * Converts one tuple into an {@code IndexRequest} targeting "my_index"/"my_data".
    */
   private static IndexRequest toIndexRequest(Tuple2<String, Long> record) {
       HashMap<String, String> doc = new HashMap<>();
       doc.put("content", record.f0);
       doc.put("eventTime", record.f1.toString());
       doc.put("processTime", String.valueOf(System.currentTimeMillis()));
       return Requests.indexRequest()
               .index("my_index") // index must already exist
               .type("my_data")
               .source(doc);
   }
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章