// Using the XGBoost package with the Scala/Spark API (scala-spark版本xgboost包使用)

//  XGBOOST測試

//調用形式
//  /opt/app/spark-1.6.1/bin/spark-shell --master yarn-client --conf spark.executor.extraJavaOptions='-XX:PermSize=1024M' --driver-memory 6g --num-executors 80 
//(續上)  --executor-memory 6g --executor-cores 1 --jars /opt/app/spark-1.6.1/lib/xgboost4j-spark-0.5-jar-with-dependencies.jar  

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.RandomForest
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import ml.dmlc.xgboost4j.scala.spark.XGBoost

// Training-set columns: label first (order_cii_notcancelcii), then the 46 candidate features.
val trainColumns = Seq(
  "order_cii_notcancelcii", "city", "order_cii_ahead_1day", "order_cii_ahead_3days_avg",
  "order_cii_ahead_7days_avg", "order_cii_30days_avg", "order_cii_ahead_sameoneweek", "order_cii_ahead_sametwoweeks_avg",
  "star", "goldstar", "level", "ratingservice", "novoters", "week_day", "working_day", "cii_ahead_sameoneweek",
  "cii_ahead_sametwoweeks_avg", "cii_ahead_samethreeweeks_avg", "cii_ahead_samefourweeks_avg", "simple_estimate_constant",
  "cii_ahead_1day_avg", "cii_ahead_3days_avg", "cii_ahead_7days_avg", "order_ahead_lt_1days", "order_ahead_lt_2days",
  "order_ahead_lt_3days", "order_ahead_lt_7days", "order_ahead_lt_14days", "order_alldays", "click_ahead_1day",
  "click_ahead_2days", "click_ahead_3days", "click_ahead_7days", "click_ahead_14days", "browse_0day_uv",
  "browse_1day_uv", "browse_2day_uv", "browse_3day_uv", "browse_4day_uv", "browse_5day_uv",
  "browse_6day_uv", "browse_7_14day_uv", "browse_14daymore_uv", "order_cii_14days_avg", "order_cii_21days_avg",
  "order_cii_ahead_samethreeweeks_avg", "order_cii_ahead_samefourweeks_avg")
val df = sqlContext.sql("select * from databasename.tmp_ym_hotel_multiple_features_table_new_train")
// Project the hive table down to the label + feature columns, in a fixed order.
val data = df.select(trainColumns.map(df(_)): _*)


import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
// Convert each row to a LabeledPoint: column 0 is the label, columns 1..16 the features.
// NOTE(review): only 16 of the ~46 selected feature columns are used here — confirm
// this truncation is intentional (it must stay in sync with the test-set mapping below).
val trainData = data.map { row =>
  val target = row(0).toString.toDouble
  val features = Array.tabulate(16)(j => row(j + 1).toString.toDouble)
  LabeledPoint(target, Vectors.dense(features))
}


// XGBoost training configuration.
val numRound = 800 // number of boosting rounds
val paramMap = Map(
  "eta" -> 0.1f,               // learning-rate shrinkage applied to each new tree
  "max_depth" -> 6,            // maximum tree depth (default 6, range [1, inf))
  "silent" -> 0,               // 0 = print runtime messages, 1 = silent mode
  "objective" -> "reg:linear", // learning task / training objective
  "eval_metric" -> "rmse",     // evaluation metric for validation data
  "nthread" -> 1               // worker-side thread count (default: all available)
)

// Distributed training: 80 workers, all data held in memory (no external-memory cache).
val model  = XGBoost.train(trainData, paramMap, numRound, nWorkers = 80, useExternalMemory = false)

// Test set covering the 7 evaluation days (Oct 31 – Nov 6).
val sql_test = "select * from databasename.tmp_ym_hotel_multiple_features_table_test_7days"
val df1 = sqlContext.sql(sql_test)
// Same feature columns as training, but column 0 is the hotel id (masterhotel)
// instead of the label.
val testColumns = Seq(
  "masterhotel", "city", "order_cii_ahead_1day", "order_cii_ahead_3days_avg", "order_cii_ahead_7days_avg",
  "order_cii_30days_avg", "order_cii_ahead_sameoneweek", "order_cii_ahead_sametwoweeks_avg", "star", "goldstar",
  "level", "ratingservice", "novoters", "week_day", "working_day", "cii_ahead_sameoneweek", "cii_ahead_sametwoweeks_avg",
  "cii_ahead_samethreeweeks_avg", "cii_ahead_samefourweeks_avg", "simple_estimate_constant", "cii_ahead_1day_avg",
  "cii_ahead_3days_avg", "cii_ahead_7days_avg", "order_ahead_lt_1days", "order_ahead_lt_2days",
  "order_ahead_lt_3days", "order_ahead_lt_7days", "order_ahead_lt_14days", "order_alldays",
  "click_ahead_1day", "click_ahead_2days", "click_ahead_3days", "click_ahead_7days",
  "click_ahead_14days", "browse_0day_uv", "browse_1day_uv", "browse_2day_uv", "browse_3day_uv",
  "browse_4day_uv", "browse_5day_uv", "browse_6day_uv", "browse_7_14day_uv", "browse_14daymore_uv",
  "order_cii_14days_avg", "order_cii_21days_avg", "order_cii_ahead_samethreeweeks_avg", "order_cii_ahead_samefourweeks_avg")
val data1 = df1.select(testColumns.map(df1(_)): _*)


 
// Build the prediction feature vectors: columns 1..16 of each test row, matching the
// 16 features used at training time (column 0 is masterhotel and is skipped).
// Fix: the original also computed `val label = line(0).toString.toDouble` and never
// used it — a dead local that forced a needless Double parse per row; removed.
val testData = data1.map { row =>
  val features = Array.tabulate(16)(j => row(j + 1).toString.toDouble)
  Vectors.dense(features)
}
// Predict over the test vectors; collect() yields per-partition result arrays,
// hence the leading (0) index to take the first partition's predictions.
val predTrain = model.predict(testData)
val s = predTrain.collect()(0)


//s.length


//真實值
// Ground-truth frame: hotel id, actual (non-cancelled) order quantity, rank, date.
val data2=df1.select(df1("masterhotel"),df1("order_cii_notcancelcii"), df1("rank1"),df1("orderdate"))
// Fix: df1.select already returns a DataFrame, so the original `.toDF()` was a no-op; dropped.
val actual_frame=data2

// Row schema for the prediction-vs-actual comparison DataFrame.
case class resultset(
  masterhotel: Int,  // master hotel ID
  quantity: Double,  // actual (ground-truth) order quantity
  rank: Int,         // hotel ranking
  date: String,      // order date
  frcst_cii: Double  // forecast quantity
)

// Materialize actuals and predictions on the driver, then zip them positionally
// into resultset rows (prediction i corresponds to actual row i).
val ac_1 = actual_frame.collect()
val pr_1 = predTrain.collect()(0)

val output0 = ac_1.indices.map { i =>
  val row = ac_1(i)
  resultset(
    row(0).toString.toInt,      // masterhotel
    row(1).toString.toDouble,   // actual quantity
    row(2).toString.toInt,      // rank
    row(3).toString,            // date
    pr_1(i)(0).toString.toDouble // forecast
  )
}.toDF()


// Append an absolute-error column: |actual quantity - forecast quantity|.
val output=output0.withColumn("diff",abs($"quantity"-$"frcst_cii"))

// Compute MAE@100 (top-100 ranked hotels) and the overall MAE ("MAE@500"), per date.
val MAE100 = output.where($"rank" <= 100).groupBy("date").avg("diff")
val MAE500 = output.groupBy("date").avg("diff")
// Collect in chronological order for printing.
val mae100 = MAE100.orderBy("date").collect()
val mae500 = MAE500.orderBy("date").collect()


// Print the per-date MAE rows. The original relied on argument auto-tupling in
// println("MAE100", i); the explicit tuple below produces identical output.
mae100.foreach(row => println(("MAE100", row)))
mae500.foreach(row => println(("MAE500", row)))


// --- Trailing blog-page boilerplate (comment-section footer of the original post),
// --- kept as a comment so the file stays parseable:
// --- 發表評論 / 所有評論 / 還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布. / 相關文章