- 首先你的類必須 extends Configured implements Tool,並且實現 Tool 接口的 run 方法。
- 下面我把代碼貼出來,如圖
最後在完成後不要忘記 job.setJarByClass(WordCountApp.class);寫這一句代碼,負責會報錯找不到類。package cmd; import java.io.IOException; import java.net.URI; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.Mapper.Context; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; public class WordCountApp extends Configured implements Tool{ public static String FILE_PATH=""; public static String OUT_PATH=""; @Override public int run(String[] args) throws Exception { FILE_PATH = args[0]; OUT_PATH = args[1]; Job job = new Job(new Configuration(), WordCountApp.class.getSimpleName()); job.setJarByClass(WordCountApp.class); final Configuration conf = new Configuration(); final FileSystem fileSystem = FileSystem.get(new URI(OUT_PATH), conf); if(fileSystem.exists(new Path(OUT_PATH))){ fileSystem.delete(new Path(OUT_PATH), true); } //1.1從哪裏讀取數據 FileInputFormat.setInputPaths(job, FILE_PATH); //把每一行數據解析成一個鍵值對 job.setInputFormatClass(TextInputFormat.class); //1.2自定義函數 job.setMapperClass(MyMapReduce.class); job.setMapOutputKeyClass(Text.class); job.setPartitionerClass(HashPartitioner.class); //1.3分區 job.setPartitionerClass(HashPartitioner.class); job.setNumReduceTasks(1); //1.4排序,分組 //1.5歸約 //2.1框架自己完成 //2.2自定義reduce函數 job.setReducerClass(MyReduce.class); job.setOutputKeyClass(Text.class); 
job.setOutputValueClass(LongWritable.class); //2.3寫入hdfs中去 FileOutputFormat.setOutputPath(job, new Path(OUT_PATH)); job.setOutputFormatClass(TextOutputFormat.class); job.waitForCompletion(true); return 0; } public static void main(String[] args) throws Exception { ToolRunner.run(new WordCountApp(), args); } static class MyMapReduce extends Mapper<LongWritable, Text, Text, LongWritable>{ protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException { String line = value.toString(); String[] splits = line.split("\t"); for(String word:splits){ context.write(new Text(word),new LongWritable(1)); } } } static class MyReduce extends Reducer<Text, LongWritable, Text, LongWritable>{ protected void reduce(Text key, Iterable<LongWritable> values, Context context ) throws IOException, InterruptedException { long sum = 0L; for(LongWritable value: values) { sum+=value.get(); } context.write(key, new LongWritable(sum)); } } }
- 然後把這段代碼打成jar包放到linux系統下,利用Hadoop的命令上傳到hdfs系統中。
- 最後執行命令:hadoop jar 打包的包名.jar 輸入文件路徑 輸出文件路徑(第二個參數是輸出目錄,不是輸入)。
hadoop程序在MyEclipse中打jar時要注意的事項
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.