Big Data Hadoop: A MapReduce TopN Case Study

1. Requirement

Process the input data and output the records of the 10 users with the highest total traffic usage.

(1) Input data (columns: phone number, upstream traffic, downstream traffic, total traffic)

13470253144	180	180	360
13509468723	7335	110349	117684
13560439638	918	4938	5856
13568436656	3597	25635	29232
13590439668	1116	954	2070
13630577991	6960	690	7650
13682846555	1938	2910	4848
13729199489	240	0	240
13736230513	2481	24681	27162
13768778790	120	120	240
13846544121	264	0	264
13956435636	132	1512	1644
13966251146	240	0	240
13975057813	11058	48243	59301
13992314666	3008	3720	6728
15043685818	3659	3538	7197
15910133277	3156	2936	6092
15959002129	1938	180	2118
18271575951	1527	2106	3633
18390173782	9531	2412	11943
84188413	4116	1432	5548

(2) Expected output (top 10 by total traffic, descending)

13509468723	7335	110349	117684
13975057813	11058	48243	59301
13568436656	3597	25635	29232
13736230513	2481	24681	27162
18390173782	9531	2412	11943
13630577991	6960	690	7650
15043685818	3659	3538	7197
13992314666	3008	3720	6728
15910133277	3156	2936	6092
13560439638	918	4938	5856

2. Requirement Analysis

[Figure: requirement-analysis diagram]
This TopN can be implemented entirely on the map side or on the reduce side; here we take the map-side approach. In the Mapper we create a TreeMap and, for every input record, insert the parsed data with the FlowBean object as key and the phone number as value. After each insertion we check whether the collection has grown past ten entries; if it has, we remove the element with the smallest total traffic, so the TreeMap never holds more than 10 elements. Finally, we write the surviving records out in the cleanup phase, which runs once after all map() calls have finished. Two points to note: FlowBean.compareTo sorts by total traffic in descending order, so the smallest element is always at lastKey(); and because a TreeMap keeps only one value per key, two records whose FlowBeans compare as equal (same total traffic) would overwrite each other, so a stricter implementation would add a tie-breaker to compareTo. The core eviction pattern is sketched below.
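The eviction pattern at the heart of this approach is independent of Hadoop. Here is a minimal standalone sketch of it (plain Java; the class name and user labels are illustrative, and the totals are taken from the sample input):

import java.util.Map;
import java.util.TreeMap;

public class TopNSketch {
	public static void main(String[] args) {
		// The comparator sorts keys in descending order, so lastKey() is
		// always the current minimum.
		TreeMap<Integer, String> top = new TreeMap<Integer, String>((a, b) -> Integer.compare(b, a));

		int[] totals = {360, 117684, 5856, 29232, 2070, 7650, 4848, 27162, 59301, 11943, 6728, 7197};
		for (int i = 0; i < totals.length; i++) {
			top.put(totals[i], "user" + i);
			if (top.size() > 10) {
				top.remove(top.lastKey()); // evict the smallest total
			}
		}

		// Prints the surviving 10 user/total pairs, largest total first.
		for (Map.Entry<Integer, String> e : top.entrySet()) {
			System.out.println(e.getValue() + "\t" + e.getKey());
		}
	}
}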

3. Implementation Code

The FlowBean class:

package com.mrreview.topn;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class FlowBean implements WritableComparable<FlowBean> {
	private int upFlow;
	private int downFlow;
	private int sumFlow;

	public FlowBean() {
		
	}
	
	public FlowBean(int upFlow, int downFlow) {
		this.upFlow = upFlow;
		this.downFlow = downFlow;
		this.sumFlow = upFlow + downFlow;
	}
	
	// Deserialization: the read order must match the write order below.
	@Override
	public void readFields(DataInput in) throws IOException {
		this.upFlow = in.readInt();
		this.downFlow = in.readInt();
		this.sumFlow = in.readInt();
	}

	// Serialization of the three traffic fields.
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeInt(upFlow);
		out.writeInt(downFlow);
		out.writeInt(sumFlow);
	}

	@Override
	public int compareTo(FlowBean o) {
		// Descending order of total traffic; Integer.compare avoids the int
		// overflow that negating a subtraction can produce.
		return Integer.compare(o.sumFlow, this.sumFlow);
	}

	public int getUpFlow() {
		return upFlow;
	}

	public void setUpFlow(int upFlow) {
		this.upFlow = upFlow;
	}

	public int getDownFlow() {
		return downFlow;
	}

	public void setDownFlow(int downFlow) {
		this.downFlow = downFlow;
	}

	public int getSumFlow() {
		return sumFlow;
	}

	public void setSumFlow(int sumFlow) {
		this.sumFlow = sumFlow;
	}

	@Override
	public String toString() {
		return upFlow + "\t" + downFlow + "\t" + sumFlow;
	}
}
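As a quick sanity check of the comparator, the following snippet (plain Java in the same package; the values and user labels are made up for illustration) shows that a TreeMap keyed by FlowBean iterates from the largest total traffic to the smallest:

package com.mrreview.topn;

import java.util.TreeMap;

public class FlowBeanOrderDemo {
	public static void main(String[] args) {
		TreeMap<FlowBean, String> m = new TreeMap<FlowBean, String>();
		m.put(new FlowBean(100, 200), "userA"); // total 300
		m.put(new FlowBean(500, 500), "userB"); // total 1000
		m.put(new FlowBean(10, 20), "userC");   // total 30

		// Prints userB (1000), then userA (300), then userC (30).
		for (FlowBean k : m.keySet()) {
			System.out.println(m.get(k) + "\t" + k);
		}
	}
}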

The Mapper class:

package com.mrreview.topn;

import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class TopNMapper extends Mapper<LongWritable, Text, Text, FlowBean>{
	
	// A TreeMap used as the top-N container (keys stay sorted automatically)
	private TreeMap<FlowBean, Text> flowMap = new TreeMap<FlowBean, Text>();
	private FlowBean kBean;
	private Text v;
	
	@Override
	protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, FlowBean>.Context context)
			throws IOException, InterruptedException {
		// Allocate fresh objects on every call: the TreeMap holds references
		// to them across map() invocations, so the usual Hadoop object reuse
		// would corrupt entries that are already stored.
		kBean = new FlowBean();
		v = new Text();
		// 1. Read one line
		String line = value.toString();
		
		// 2. Split the line into fields
		String[] fields = line.split("\t");
		
		// 3. Parse the fields and populate the key/value objects
		String phoneNum = fields[0];
		int upFlow = Integer.parseInt(fields[1]);
		int downFlow = Integer.parseInt(fields[2]);
		int sumFlow = Integer.parseInt(fields[3]);
		
		kBean.setUpFlow(upFlow);
		kBean.setDownFlow(downFlow);
		kBean.setSumFlow(sumFlow);
		
		v.set(phoneNum);
		
		// 4. Put the record into the TreeMap
		flowMap.put(kBean, v);
		
		// 5. Cap the TreeMap at 10 entries: once it exceeds 10, evict the record with the smallest total traffic
		if(flowMap.size() > 10) {
			flowMap.remove(flowMap.lastKey());
		}
	}
	
	@Override
	protected void cleanup(Mapper<LongWritable, Text, Text, FlowBean>.Context context)
			throws IOException, InterruptedException {
		// 6. Iterate over the collection and emit the records (phone number as key, FlowBean as value)
		Iterator<FlowBean> bean = flowMap.keySet().iterator();
		
		while(bean.hasNext()) {
			FlowBean k = bean.next();
			
			context.write(flowMap.get(k), k);
		}
	}
}
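One caveat worth noting: the TreeMap is local to each map task. If the input were large enough to be split across several mappers, each mapper would emit its own top 10, and a reducer (or a follow-up job) would be needed to merge those partial results into a global top 10. For the single small input file in this example, only one map task runs, so the map-side result is already the global answer.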

The Driver class:

package com.mrreview.topn;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TopNDriver {
	public static void main(String[] args) throws Exception {
		
		// Hard-coded local paths for Windows testing; these override any command-line arguments.
		args = new String[] {"D:\\hadoop-2.7.1\\winMR\\TopN\\input", "D:\\hadoop-2.7.1\\winMR\\TopN\\output"};
		
		Job job = Job.getInstance();
		job.setJarByClass(TopNDriver.class);
		job.setMapperClass(TopNMapper.class);
		
		// Map-only job: these are the types the mapper actually emits.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowBean.class);
		
		FileInputFormat.setInputPaths(job, args[0]);
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		
		// Map-only job: set the number of reduce tasks to 0
		job.setNumReduceTasks(0);
		
		job.waitForCompletion(true);
	}
}
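To submit the job to a real cluster instead of running it locally, the hard-coded args line would be removed (it overrides anything passed on the command line) and the packaged jar submitted in the usual way; the jar name and HDFS paths here are illustrative:

hadoop jar topn.jar com.mrreview.topn.TopNDriver /topn/input /topn/output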

4. Run Result

[Screenshot: run result]
