Local version:
package com.itheima.java_wordcount;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
/**
 * Date:2019/4/24
 * Author:Lynn.cn.Li
 * Desc: Spark word count in Java, local mode
 */
public class WordCountJava {
public static void main(String[] args) {
// 1. Create the SparkConf object and set the appName and the master URL
SparkConf sparkConf = new SparkConf().setAppName("LocalJavaWordCount").setMaster("local[2]");
// 2. Create the JavaSparkContext object
JavaSparkContext jsc = new JavaSparkContext(sparkConf);
// 3. Read the input data file
JavaRDD<String> textFileRDD = jsc.textFile("g://input/1.txt");
// 4. Split each line to get the individual words
JavaRDD<String> flatMapRDD = textFileRDD.flatMap(new FlatMapFunction<String, String>() {
public Iterator<String> call(String s) throws Exception {
// Split the line on spaces
String[] arr = s.split(" ");
return Arrays.asList(arr).iterator();
}
});
// 5. Map each word to a count of 1
JavaPairRDD<String, Integer> javaPairRDD = flatMapRDD.mapToPair(new PairFunction<String, String, Integer>() {
public Tuple2<String, Integer> call(String s) throws Exception {
return new Tuple2<String, Integer>(s, 1);
}
});
// 6. Add up the counts of identical words
JavaPairRDD<String, Integer> resultRDD = javaPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
/**
 * Detail: sorting by count
 * 1. Swap (word, count) into (count, word) so the count can be used as the sort key
 * 2. After sorting, swap (count, word) back into (word, count)
 */
JavaPairRDD<Integer, String> sortRDD = resultRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
public Tuple2<Integer, String> call(Tuple2<String, Integer> tuple2) throws Exception {
return new Tuple2<Integer, String>(tuple2._2, tuple2._1);
}
});
// Sort by key (the count) in descending order
JavaPairRDD<Integer, String> sortDescRDD = sortRDD.sortByKey(false);
// Swap the positions back again
JavaPairRDD<String, Integer> finalResultRDD = sortDescRDD.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
public Tuple2<String, Integer> call(Tuple2<Integer, String> tuple2) throws Exception {
return new Tuple2<String, Integer>(tuple2._2, tuple2._1);
}
});
// 7. Collect the result data to the driver
List<Tuple2<String, Integer>> wordData = finalResultRDD.collect();
// 8. Loop over the results and print them
for (Tuple2<String, Integer> tuple2 : wordData) {
System.out.println("word: " + tuple2._1 + " appears " + tuple2._2 + " times");
}
// 9. Stop the SparkContext
jsc.stop();
}
}
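For comparison, the same local word count can be written much more compactly with Java 8 lambdas. The following is a minimal sketch, assuming Spark 2.x (where FlatMapFunction returns an Iterator); the class name WordCountJavaLambda is illustrative, and the input path is the same one used above:

package com.itheima.java_wordcount;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
public class WordCountJavaLambda {
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("LocalJavaWordCountLambda").setMaster("local[2]");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        // Read the file, split into words, map each word to (word, 1), and sum the counts in one chain
        JavaPairRDD<String, Integer> counts = jsc.textFile("g://input/1.txt")
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<String, Integer>(word, 1))
                .reduceByKey((v1, v2) -> v1 + v2);
        // Same swap-sort-swap trick as above to order by count, descending
        counts.mapToPair(t -> new Tuple2<Integer, String>(t._2, t._1))
                .sortByKey(false)
                .mapToPair(t -> new Tuple2<String, Integer>(t._2, t._1))
                .collect()
                .forEach(t -> System.out.println("word: " + t._1 + " appears " + t._2 + " times"));
        jsc.stop();
    }
}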
Cluster-submission version:
package com.itheima.java_wordcount;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
/**
 * Date:2019/4/24
 * Author:Lynn.cn.Li
 * Desc: Spark word count in Java, submitted to a cluster
 */
public class WordCountJavaOnline {
public static void main(String[] args) {
// 1. Create the SparkConf object and set the appName; the master URL is supplied via spark-submit
SparkConf sparkConf = new SparkConf().setAppName("OnlineJavaWordCount");
// 2. Create the JavaSparkContext object
JavaSparkContext jsc = new JavaSparkContext(sparkConf);
// 3. Read the input data file
JavaRDD<String> textFileRDD = jsc.textFile(args[0]); // input path passed in as a program argument
// 4. Split each line to get the individual words
JavaRDD<String> flatMapRDD = textFileRDD.flatMap(new FlatMapFunction<String, String>() {
public Iterator<String> call(String s) throws Exception {
// Split the line on spaces
String[] arr = s.split(" ");
return Arrays.asList(arr).iterator();
}
});
// 5. Map each word to a count of 1
JavaPairRDD<String, Integer> javaPairRDD = flatMapRDD.mapToPair(new PairFunction<String, String, Integer>() {
public Tuple2<String, Integer> call(String s) throws Exception {
return new Tuple2<String, Integer>(s, 1);
}
});
// 6. Add up the counts of identical words
JavaPairRDD<String, Integer> resultRDD = javaPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
// 7. Save the result to the output path passed in as a program argument
resultRDD.saveAsTextFile(args[1]);
// 8. Stop the SparkContext
jsc.stop();
}
}
spark-submit script:
spark-submit --class com.itheima.java_wordcount.WordCountJavaOnline \
--master spark://node01:7077,node02:7077 \
--executor-memory 512m \
--total-executor-cores 2 \
/export/servers/sparkTestData/wordcount_java.jar \
/spark/wordcount/input2/1.txt \
/spark/wordcount/output4
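Assuming the input and output paths in the script point to HDFS, the result written by saveAsTextFile can be inspected after the job finishes, for example:

hdfs dfs -cat /spark/wordcount/output4/part-*

Note that the output directory (/spark/wordcount/output4 here) must not already exist before the job runs, otherwise saveAsTextFile will fail.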