pom:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.tzb.bigdata</groupId>
<artifactId>spark-test</artifactId>
<!--<packaging>pom</packaging>-->
<version>1.0</version>
<!--<modules>-->
<!--<module>hbase</module>-->
<!--</modules>-->
<properties>
<!-- the Spark/Kafka artifacts below are all _2.11 builds, so the Scala version must be 2.11.x -->
<scala.version>2.11.8</scala.version>
<hadoop.version>2.6.0</hadoop.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>2.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>2.1.1</version>
</dependency>
<!--<dependency>-->
<!--<groupId>org.apache.spark</groupId>-->
<!--<artifactId>spark-sql_2.10</artifactId>-->
<!--<version>1.6.0</version>-->
<!--</dependency>-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>2.1.1</version>
</dependency>
<dependency>
<groupId>com.typesafe.play</groupId>
<artifactId>play-mailer_2.11</artifactId>
<version>7.0.0</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.41</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>2.1.1</version>
</dependency>
<!--=========================spark-streaming-kafka===========================-->
<!--0.8 version-->
<!--<dependency>-->
<!--<groupId>org.apache.spark</groupId>-->
<!--<artifactId>spark-streaming-kafka-0-8_2.11</artifactId>-->
<!--<version>2.1.1</version>-->
<!--</dependency>-->
<!--0.10, the newer version-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
<version>2.3.0</version>
<exclusions>
<exclusion>
<artifactId>scala-library</artifactId>
<groupId>org.scala-lang</groupId>
</exclusion>
</exclusions>
</dependency>
<!--======================================================================-->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.11.0.2</version>
</dependency>
<!--<dependency>-->
<!--<groupId>org.scala-lang</groupId>-->
<!--<artifactId>scala-library</artifactId>-->
<!--<version>2.10.6</version>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.apache.hadoop</groupId>-->
<!--<artifactId>hadoop-common</artifactId>-->
<!--</dependency>-->
<!--Uncomment only when testing HBase; otherwise IDEA errors out when connecting to the test environment locally-->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>2.0.1</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>net.sf.json-lib</groupId>
<artifactId>json-lib</artifactId>
<version>2.4</version>
<classifier>jdk15</classifier>
</dependency>
<dependency>
<groupId>org.neo4j.driver</groupId>
<artifactId>neo4j-java-driver</artifactId>
<version>4.0.0</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.8.5</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<!-- Drop the scope so the default compile applies, which is effective for compile, test, and run -->
<!--<scope>test</scope>-->
</dependency>
<dependency>
<groupId>net.minidev</groupId>
<artifactId>json-smart</artifactId>
<version>2.3</version>
</dependency>
<!-- mail sending -->
<!--<dependency>-->
<!--<groupId>com.typesafe.play</groupId>-->
<!--<artifactId>play-mailer_2.11</artifactId>-->
<!--<version>7.0.0</version>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.apache.poi</groupId>-->
<!--<artifactId>poi</artifactId>-->
<!--<version>3.12</version>-->
<!--</dependency>-->
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
<version>2.10.1</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-catalyst -->
<!--<dependency>-->
<!--<groupId>org.apache.spark</groupId>-->
<!--<artifactId>spark-catalyst_2.11</artifactId>-->
<!--<version>2.3.0</version>-->
<!--<scope>test</scope>-->
<!--</dependency>-->
<!-- Chinese word segmenter -->
<dependency>
<groupId>com.huaban</groupId>
<artifactId>jieba-analysis</artifactId>
<version>1.0.2</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.68</version>
</dependency>
<!--es-->
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch-spark-20_2.11</artifactId>
<version>6.2.4</version>
</dependency>
<!--poi excel-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>3.12</version>
</dependency>
</dependencies>
<build>
<finalName>spark-test</finalName>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.2</version>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<!--<version>3.0.0</version>-->
<configuration>
<archive>
<manifest>
<mainClass>WordCount</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
</plugins>
</build>
</project>
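One note before the code: the streaming job imports two helpers, ConfigUtils and StringUtil, that never appear in this post. Below is a minimal sketch of what they might look like; the broker and ZooKeeper addresses are the test hosts mentioned in the comments (Kafka on 210, HBase ZooKeeper on 211), and everything else is an assumption:
package com.tzb.utils
// Hypothetical reconstruction: the real ConfigUtils/StringUtil are not included in the post.
object ConfigUtils {
  // assumed values, matching the test hosts referenced in the comments
  val brokers: String = "10.21.0.210:9092" // Kafka broker list
  val zkconnect: String = "10.21.0.211"    // ZooKeeper quorum host used by HBase
}
object StringUtil {
  // mirrors commons-lang StringUtils.isNotBlank: non-null and contains a non-whitespace character
  def isNotBlank(s: String): Boolean = s != null && s.trim.nonEmpty
}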
Straight to the code:
DataChangeStreaming:
package com.tzb.sparkstreaming.prod
import java.io.{FileNotFoundException, IOException}
import java.util
import com.alibaba.fastjson.{JSON, JSONObject}
import com.tzb.utils.{ConfigUtils, HBaseUtil, StringUtil}
import net.sf.json.JSONArray
import org.apache.hadoop.hbase.TableExistsException
import org.apache.hadoop.hbase.client._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.slf4j
import org.slf4j.LoggerFactory
import scala.collection.mutable.ArrayBuffer
/**
* <!-- Uncomment the HBase dependency only when testing HBase; otherwise IDEA errors out when connecting to the test environment locally -->
* Spark Streaming, Kafka integration version 0-10
* Note: this program is an example of wiring Spark Streaming together with Kafka and HBase. In the test environment, Kafka and the ZooKeeper it depends on run on host 210; HBase and the ZooKeeper it depends on run on host 211.
*
* Verified successfully both locally and on the 210 Linux test machine:
* open Kafka Tool and push data to a topic,
* then run the main method to start consuming.
* Example JSON message sent from Kafka Tool:
* {
* "tableName": "hbasetable6",
* "option": "put",
* "rowKey": "1001",
* "families": [
* "info1",
* "info2"],
* "cols_data": {
* "name":"tom",
* "age":"20"
* }
* }
* How to check a consumer group's offsets for a topic:
* https://blog.51cto.com/13639264/2135877
* [root@xg kafka_2.11-2.0.0]# bin/kafka-consumer-groups.sh --bootstrap-server 10.21.0.210:9092 --group testgroup --describe
* Consumer group 'testgroup' has no active members.
* TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID
* testTopic 0 8 9 1 - - -
*
* Reset a consumer group's offset on a topic (did not take effect??):
* bin/kafka-consumer-groups.sh --bootstrap-server 10.21.0.210:9092 --group testgroup --topic testTopic --execute --reset-offsets --to-offset 9
*
* Packaged and submitted successfully:
* spark-submit --master yarn-client --conf spark.driver.memory=2g --class com.tzb.sparkstreaming.prod.DataChangeStreaming --executor-memory 8G --num-executors 5 --executor-cores 2 /var/lib/hadoop-hdfs/spride_sqoop_beijing/bi_table/test/spark-test-jar-with-dependencies.jar >> /var/lib/hadoop-hdfs/spride_sqoop_beijing/bi_table/test/sparkstreaming_datachange.log
* For production, change the IPs/domains of Kafka, ZooKeeper, HBase, etc. in the code to the production ones, change spark-submit to spark-submit2, and append & to the command so the job keeps running in the background after the terminal is closed.
*
* How to stop the job:
* ps -ef | grep DataChangeStreaming, then kill the PID.
*/
object DataChangeStreaming {
// set the log level
Logger.getLogger("org.apache").setLevel(Level.ERROR)
val logger: slf4j.Logger = LoggerFactory.getLogger(this.getClass.getSimpleName)
def main(args: Array[String]): Unit = {
val sparkConf: SparkConf = new SparkConf()
.setAppName(this.getClass.getSimpleName)
.setMaster("local[*]")
val ssc: StreamingContext = new StreamingContext(sparkConf, Seconds(5))
//location strategy
val preferredHosts: LocationStrategy = LocationStrategies.PreferConsistent
//kafka topic
val topics = Array("testTopic")
val groupId = "testgroup"
val kafkaParams: Map[String, Object] = Map[String, Object](
"bootstrap.servers" -> ConfigUtils.brokers, //kafka producer 生產者地址
"key.deserializer" -> classOf[StringDeserializer].getName,
"value.deserializer" -> classOf[StringDeserializer].getName,
"group.id" -> groupId,
//latest, earliest, none
"auto.offset.reset" -> "earliest",
"enable.auto.commit" -> "false" // 不自動提交
)
val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
ssc,
preferredHosts,
ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
)
logger.info(s"開始消費 kafka --- 主題名:${topics(0)} --- 消費組:${groupId} --- brokers:${ConfigUtils.brokers}")
println(s"開始消費 kafka --- 主題名:${topics(0)} --- 消費組:${groupId} --- brokers:${ConfigUtils.brokers}")
stream.foreachRDD(fr => {
//grab this batch's offset ranges
val offsetRanges: Array[OffsetRange] = fr.asInstanceOf[HasOffsetRanges].offsetRanges
println("offsetRanges for this batch:" + offsetRanges.mkString(","))
//business logic starts here
fr.foreachPartition(it => {
//one HBase connection per partition
val connection: Connection = HBaseUtil.initHbase
println("created HBase Connection ==>" + connection)
var tableName = ""
try
it.foreach(record => {
val jsonString: String = record.value()
println("接受到的一條 json 消息 ==>" + jsonString)
val jSONObject: JSONObject = JSON.parseObject(jsonString)
tableName = jSONObject.getString("tableName")
val option: String = jSONObject.getString("option") //action flag: put or delete
val rowKey: String = jSONObject.getString("rowKey")
val families: String = jSONObject.getString("families")
val cols_data: String = jSONObject.getString("cols_data")
val familysArr = ArrayBuffer[String]()
if(families != null && StringUtil.isNotBlank(families)){
val familyjsonArr: JSONArray = JSONArray.fromObject(families)
val familyIter: util.Iterator[_] = familyjsonArr.iterator()
while (familyIter.hasNext) {
val family : String = familyIter.next()+""
familysArr += family
}
}
println("所有列族:familylist "+familysArr.mkString(","))
//創建新表 不存在就創建,存在就報錯拋異常,但是數據依然會插入
// HBaseUtil.createTable(connection, tableName)
// HBaseUtil.createTable(connection, tableName, Array[String]("info1", "info2")) //創建兩個列族
HBaseUtil.createTable(connection, tableName,familysArr) //創建n列族
println("tableName:" + tableName + " " + "rowKey:" + rowKey + " " + "option:" + option + "data:" + cols_data)
val dataObject: JSONObject = JSON.parseObject(cols_data)
if(dataObject !=null){
val keys: util.Set[String] = dataObject.keySet()
val columns = new ArrayBuffer[String]
val values = new ArrayBuffer[String]
import scala.collection.JavaConversions._
for (key <- keys) {
columns += key
values += dataObject.getString(key)
println(s"Columns: $key -> values: ${dataObject.getString(key)}")
}
//save to HBase
HBaseUtil.putData(connection, tableName, rowKey, columns.toArray, values.toArray, familysArr(0)) //write only into the first column family, familysArr(0)
logger.info(s"Save succeeded -> table: $tableName --> families: ${familysArr.mkString(",")} rowkey: $rowKey")
print(s"Save succeeded -> table: $tableName --> families: ${familysArr.mkString(",")} rowkey: $rowKey")
}
//delete branch
// if (option != null && HBaseUtil.OPTION_DELETE == option.toLowerCase) {
// //option is not null and equals 'delete': perform the delete
// HBaseUtil.deleteByRowKey(tableName, rowKey)
// } else {
//
// }
})
catch {
case e1: FileNotFoundException => {
println("Missing file exception")
}
case e2: TableExistsException => { //must be matched before IOException: TableExistsException is a subtype of IOException
println(s"Table already exists in HBase! Table name: ${tableName}")
}
case e3: IOException => {
println("IO Exception")
}
case e4: IllegalArgumentException => {
println("do something when illegal happened.")
}
case e5: Exception => e5.printStackTrace()
} finally {
connection.close()
}
})
//commit offsets manually; offsets are stored by Kafka, and duplicate consumption is still possible
stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
})
ssc.start()
ssc.awaitTermination()
}
}
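To exercise the stream without Kafka Tool, the sample message can also be pushed from code. Here is a minimal producer sketch using the kafka-clients dependency already in the pom; the topic, broker, and payload are taken from the comments above, while the TestProducer object itself is hypothetical and not part of the original project:
package com.tzb.sparkstreaming.prod
import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
object TestProducer { // hypothetical helper, not in the original code
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "10.21.0.210:9092") // same brokers the consumer reads from
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](props)
    // the same JSON shape DataChangeStreaming expects
    val msg = """{"tableName":"hbasetable6","option":"put","rowKey":"1001","families":["info1","info2"],"cols_data":{"name":"tom","age":"20"}}"""
    producer.send(new ProducerRecord[String, String]("testTopic", msg))
    producer.flush()
    producer.close()
  }
}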
HBaseUtil:
package com.tzb.utils
import java.io.IOException
import java.text.MessageFormat
import java.util
import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* <!-- Uncomment the HBase dependency only when testing HBase; otherwise IDEA errors out when connecting to the test environment locally -->
*/
object HBaseUtil {
private val logger: Logger = LoggerFactory.getLogger(this.getClass.getSimpleName)
private var configuration: Configuration = _
private var connection: Connection = _
final val OPTION_DELETE = "delete"
/**
* Initialize the configuration
*/
def init(): Unit = {
try
if (configuration == null) {
configuration = HBaseConfiguration.create()
configuration.set("hbase.zookeeper.quorum", ConfigUtils.zkconnect) //zk地址
configuration.set("hbase.zookeeper.property.clientPort", "2181")
}
catch {
case e: Exception =>
logger.error("HBase Configuration Initialization failure !")
throw new RuntimeException(e)
}
}
/**
* Connect to the cluster
*
* @return
*/
def initHbase: Connection = {
init()
try
if (connection == null || connection.isClosed) connection = ConnectionFactory.createConnection(configuration)
// System.out.println("---------- " + conn.hashCode());
catch {
case e: IOException =>
logger.error("HBase 建立鏈接失敗 ", e)
}
connection
}
/**
* List all HBase table names
*/
def list(): Unit = {
val admin: Admin = initHbase.getAdmin
val tableNames: Array[TableName] = admin.listTableNames
for (name <- tableNames) {
System.out.println(name.getNameAsString)
}
}
/**
* Create a table
*
* @param tableName table name
* @param cols column families
*/
def createTable(connection: Connection, tableName: String, cols: ArrayBuffer[String] = ArrayBuffer("information")): Unit = { //"information" is the default column family
try {
val tn: TableName = TableName.valueOf(tableName)
val admin: Admin = connection.getAdmin
val tableNames: Array[TableName] = admin.listTableNames
for (name <- tableNames) {
//System.out.println(name.getNameAsString)
if (tableName == name.getNameAsString) { //compare name strings, not TableName to String
logger.info(s"~~~ table $tableName already exists ~~~")
println(s"~~~ table $tableName already exists ~~~")
return
}
}
//if (admin.tableExists(tn)) {
// println("table already exists!")
//} else {
val hTableDescriptor: HTableDescriptor = new HTableDescriptor(tn)
for (col <- cols) {
val hColumnDescriptor = new HColumnDescriptor(col)
hTableDescriptor.addFamily(hColumnDescriptor)
}
admin.createTable(hTableDescriptor) //pass in the n column families
//}
}
catch {
case e: IOException =>
e.printStackTrace()
}
}
/**
* Delete a table
*
* @param tableName table name
* @return
*/
def deleteTable(tableName: String): Boolean = {
var admin: Admin = null
try {
admin = initHbase.getAdmin
admin.disableTable(TableName.valueOf(tableName))
admin.deleteTable(TableName.valueOf(tableName))
} catch {
case e: IOException =>
logger.error(MessageFormat.format("Failed to delete the specified table, tableName:{0}", tableName), e)
return false
} finally if (admin != null) admin.close()
true
}
/**
* Fetch the raw data
*
* @param tableName table name
*/
def getNoDealData(tableName: String): Unit = {
try {
val table: Table = initHbase.getTable(TableName.valueOf(tableName))
val scan = new Scan()
//<rowKey, row data>
val result = new mutable.HashMap[String, mutable.HashMap[String, String]]()
//scan the table
val rs: ResultScanner = table.getScanner(scan)
import scala.collection.JavaConversions._
for (r <- rs) { //each row
val columnMap = new mutable.HashMap[String, String]()
var rowKey: String = null
for (cell <- r.listCells) {
if (rowKey == null) rowKey = Bytes.toString(cell.getRowArray, cell.getRowOffset, cell.getRowLength)
columnMap.put(Bytes.toString(cell.getQualifierArray, cell.getQualifierOffset, cell.getQualifierLength), Bytes.toString(cell.getValueArray, cell.getValueOffset, cell.getValueLength))
}
if (rowKey != null) result.put(rowKey, columnMap)
}
result.foreach(println(_))
} catch {
case e: IOException =>
e.printStackTrace()
}
}
/**
* Insert data; if the given rowkey already exists, the old data is overwritten
*
* @param connection
* @param tableName
* @param rowKey
* @param columns
* @param values
* @param familyName
*/
def putData(connection: Connection, tableName: String, rowKey: String, columns: Array[String], values: Array[String], familyName: String = "information"): Unit = { //"information" is the default column family
try {
val table: Table = connection.getTable(TableName.valueOf(tableName))
//set the rowkey
val put = new Put(Bytes.toBytes(rowKey))
if (columns != null && values != null && columns.length == values.length) {
var i = 0
while (i < columns.length) {
if (columns(i) != null && values(i) != null) {
put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes(columns(i)), Bytes.toBytes(values(i)))
}
else {
throw new NullPointerException(MessageFormat.format("Column name and value must not be null, column:{0},value:{1}", columns(i), values(i)))
}
i += 1
}
}
table.put(put)
table.close()
} catch {
case e: Exception =>
logger.error(MessageFormat.format("爲表添加 or 更新數據失敗,tableName:{0},rowKey:{1},familyName:{2}", tableName, rowKey, familyName), e)
}
}
/**
* Delete an entire row by rowkey: all column families, all columns, all versions
*
* @param tableName table name
* @param rowKey rowkey
*/
def deleteByRowKey(tableName: String, rowKey: String): Boolean = {
var table: Table = null
try {
table = initHbase.getTable(TableName.valueOf(tableName))
val delete = new Delete(Bytes.toBytes(rowKey))
table.delete(delete)
} catch {
case e: IOException =>
logger.error(MessageFormat.format("Failed to delete the specified row, tableName:{0}", tableName), e)
return false
} finally if (table != null) table.close()
logger.info(s"Deleted -> table: $tableName -------------- rowkey: $rowKey")
true
}
def main(args: Array[String]): Unit = {
//HBaseUtil.list()
//HBaseUtil.createTable("pvuv",Array[String]{"information"})
//HBaseUtil.deleteTable("t_user_search_1")
//val connection: Connection = HBaseUtil.initHbase
//HBaseUtil.putData(connection,"www", "002", Array[String]{"url"}, Array[String]{"www.goole.com"});
//HBaseUtil.getNoDealData("www")
//parse the JSON data
//{
// "tableName": "pvuv",
// "pk": "001",
// "option": "put",
// "data": {
// "id": "21908627",
// "system_id": "10001",
// "user_id": "",
// "monitor_point": "10001",
// "client_ip": "183.15.177.28",
// "client_user_agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
// }
//}
val jsonString = "{\"tableName\":\"pvuv\",\"pk\":\"21908637\",\"option\":\"put\",\"data\":{\"id\":\"21908637\",\"system_id\":\"20001\",\"user_id\":\"\",\"monitor_point\":\"10001\",\"client_ip\":\"183.15.177.28\",\"client_user_agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\"}}"
val jSONObject: JSONObject = JSON.parseObject(jsonString)
//val tableName: String = jSONObject.getString("tableName")
//val rowKey: String = jSONObject.getString("pk")
//val option: String = jSONObject.getString("option")
//val familyName = "information"
val dataObject: JSONObject = JSON.parseObject(jSONObject.getString("data"))
val keys: util.Set[String] = dataObject.keySet()
val columns = new ArrayBuffer[String]
val values = new ArrayBuffer[String]
import scala.collection.JavaConversions._
for (key <- keys) {
columns += key
values += dataObject.getString(key)
//println(s"${key} -> ${dataObject.getString(key)}")
}
//HBaseUtil.createTable(connection, "t_pvuv_log")
//HBaseUtil.putData(connection,tableName,rowKey,columns.toArray,values.toArray)
//HBaseUtil.getNoDealData("t_monitor_system")
//HBaseUtil.deleteByRowKey("t_pvuv_log", "10690786")
}
}
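One more note on stopping the job: killing the process as described above can cut a batch off mid-write to HBase. Spark Streaming also supports draining in-flight batches on shutdown; a sketch of the standard config-based approach (this is stock Spark behavior, not something from the original code):
// Set on the SparkConf before creating the StreamingContext. With this flag, a plain
// `kill <pid>` (SIGTERM, not kill -9) lets Spark finish the batches it has already
// received before the process exits.
val sparkConf: SparkConf = new SparkConf()
  .setAppName("DataChangeStreaming")
  .set("spark.streaming.stopGracefullyOnShutdown", "true")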