Implementing custom multi-file output in MapReduce

 

An ordinary MapReduce job has two phases, map and reduce. Without extra configuration, the results are written out as multiple files named part-000*, the number of output files equals the number of reducers, and the format of the file contents cannot be freely chosen. This makes further processing of the results inconvenient.
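
For example, with the default TextOutputFormat a job with three reducers leaves files along the lines of part-r-00000, part-r-00001 and part-r-00002 (part-00000 and so on with the old API) in the output directory, regardless of what the data looks like.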

       In Hadoop, the reduce side does support multiple outputs with controllable file names: extend the MultipleTextOutputFormat class and override its generateFileNameForKeyValue method. If all you need is control over the output file names, it is enough to implement your own LogNameMultipleTextOutputFormat class and call jobconf.setOutputFormat(LogNameMultipleTextOutputFormat.class);. But this approach only works with the old Hadoop API. If you want to use the new API, customize the format of the output records, or cover other requirements, you have to rewrite some of the Hadoop classes yourself.
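
For reference, a minimal sketch of that old-API approach might look like the following. The class name LogNameMultipleTextOutputFormat and the Text/NullWritable output types are illustrative assumptions; only MultipleTextOutputFormat, generateFileNameForKeyValue and JobConf.setOutputFormat come from the old org.apache.hadoop.mapred API.

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat;

public class LogNameMultipleTextOutputFormat extends MultipleTextOutputFormat<Text, NullWritable> {

    @Override
    protected String generateFileNameForKeyValue(Text key, NullWritable value, String name) {
        // Use the key itself as the leaf file name instead of the default part-000xx
        return key.toString();
    }
}

// In the old-API driver (MyDriver is a placeholder class name):
// JobConf jobconf = new JobConf(MyDriver.class);
// jobconf.setOutputFormat(LogNameMultipleTextOutputFormat.class);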

    First, you need to build your own MultipleOutputFormat class that extends FileOutputFormat (note that this is the FileOutputFormat from the org.apache.hadoop.mapreduce.lib.output package, i.e. the new API).

 

  

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;


/**
 * This abstract class extends FileOutputFormat, allowing the output data to be
 * written to different output files whose names are derived from each key/value pair.
 * Created on 2012-07-08
 * @author zhoulongliu
 * @param <K>
 * @param <V>
 */
public abstract class MultipleOutputFormat<K extends WritableComparable<?>, V extends Writable> extends
        FileOutputFormat<K, V> {


   // The calling code implements generateFileNameForKeyValue (declared below) to supply the output file names
    private MultiRecordWriter writer = null;


    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        if (writer == null) {
            writer = new MultiRecordWriter(job, getTaskOutputPath(job));
        }
        return writer;
    }


    /**
     * get task output path
     * @param conf
     * @return
     * @throws IOException
     */
    private Path getTaskOutputPath(TaskAttemptContext conf) throws IOException {
        Path workPath = null;
        OutputCommitter committer = super.getOutputCommitter(conf);
        if (committer instanceof FileOutputCommitter) {
            workPath = ((FileOutputCommitter) committer).getWorkPath();
        } else {
            Path outputPath = super.getOutputPath(conf);
            if (outputPath == null) {
                throw new IOException("Undefined job output-path");
            }
            workPath = outputPath;
        }
        return workPath;
    }


    /**
     * Determine the output file name (including any extension) from the given
     * key, value, and configuration. How the file name depends on the record
     * is entirely up to the implementation.
     * 
     * @param key the key of the output data
     * @param value the value of the output data
     * @param conf the configuration object
     * @return generated file name
     */
    protected abstract String generateFileNameForKeyValue(K key, V value, Configuration conf);


   /**
    * Inner class implementing the RecordWriter that performs the actual writes.
    * @author zhoulongliu
    *
    */
    public class MultiRecordWriter extends RecordWriter<K, V> {
        /** Cache of RecordWriters, one per output file name */
        private HashMap<String, RecordWriter<K, V>> recordWriters = null;
        private TaskAttemptContext job = null;
        /** Output directory */
        private Path workPath = null;


        public MultiRecordWriter(TaskAttemptContext job, Path workPath) {
            super();
            this.job = job;
            this.workPath = workPath;
            recordWriters = new HashMap<String, RecordWriter<K, V>>();
        }


        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            Iterator<RecordWriter<K, V>> values = this.recordWriters.values().iterator();
            while (values.hasNext()) {
                values.next().close(context);
            }
            this.recordWriters.clear();
        }


        @Override
        public void write(K key, V value) throws IOException, InterruptedException {
            // Determine the output file name for this record
            String baseName = generateFileNameForKeyValue(key, value, job.getConfiguration());
            // If there is no RecordWriter cached for this file name yet, create one; otherwise reuse it.
            RecordWriter<K, V> rw = this.recordWriters.get(baseName);
            if (rw == null) {
                rw = getBaseRecordWriter(job, baseName);
                this.recordWriters.put(baseName, rw);
            }
            rw.write(key, value);
        }


        // ${mapred.out.dir}/_temporary/_${taskid}/${nameWithExtension}
        private RecordWriter<K, V> getBaseRecordWriter(TaskAttemptContext job, String baseName) throws IOException,
                InterruptedException {
            Configuration conf = job.getConfiguration();
            // Check whether output compression is enabled
            boolean isCompressed = getCompressOutput(job);
            String keyValueSeparator = ",";
            RecordWriter<K, V> recordWriter = null;
            if (isCompressed) {
                Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
                CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
                Path file = new Path(workPath, baseName + codec.getDefaultExtension());
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // Use the custom LineRecordWriter defined below, wrapping the compressed stream
                recordWriter = new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                        keyValueSeparator);
            } else {
                Path file = new Path(workPath, baseName);
                FSDataOutputStream fileOut = file.getFileSystem(conf).create(file, false);
                // Use the custom LineRecordWriter defined below
                recordWriter = new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
            }
            return recordWriter;
        }
    }


}

  Next, you also need to write a custom LineRecordWriter that extends the RecordWriter class and defines the output record format.

 

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * 
 * A custom RecordWriter that writes each key/value pair as one line of text.
 * Created on 2012-07-08
 * @author zhoulongliu
 * @param <K>
 * @param <V>
 */
public class LineRecordWriter<K, V> extends RecordWriter<K, V> {

    private static final String utf8 = "UTF-8"; // character encoding for all output
    private static final byte[] newline;
    static {
        try {
            newline = "\n".getBytes(utf8); // record separator (newline)
        } catch (UnsupportedEncodingException uee) {
            throw new IllegalArgumentException("can't find " + utf8 + " encoding");
        }
    }
    protected DataOutputStream out;
    private final byte[] keyValueSeparator;

    // Constructor: takes the output stream and the key/value separator
    public LineRecordWriter(DataOutputStream out, String keyValueSeparator) {
        this.out = out;
        try {
            this.keyValueSeparator = keyValueSeparator.getBytes(utf8);
        } catch (UnsupportedEncodingException uee) {
            throw new IllegalArgumentException("can't find " + utf8 + " encoding");
        }
    }

    public LineRecordWriter(DataOutputStream out) {
        this(out, "\t");
    }

    private void writeObject(Object o) throws IOException {
        if (o instanceof Text) {
            Text to = (Text) o;
            out.write(to.getBytes(), 0, to.getLength());
        } else {
            out.write(o.toString().getBytes(utf8));
        }
    }
   
    /**
     * Write the MapReduce key and value to the output stream in the custom line format.
     */
    public synchronized void write(K key, V value) throws IOException {
        boolean nullKey = key == null || key instanceof NullWritable;
        boolean nullValue = value == null || value instanceof NullWritable;
        if (nullKey && nullValue) {
            return;
        }
        if (!nullKey) {
            writeObject(key);
        }
        if (!(nullKey || nullValue)) {
            out.write(keyValueSeparator);
        }
        if (!nullValue) {
            writeObject(value);
        }
        out.write(newline);
    }

    public synchronized void close(TaskAttemptContext context) throws IOException {
        out.close();
    }

}
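
This class is essentially a standalone copy of the LineRecordWriter nested inside Hadoop's built-in TextOutputFormat; extracting it lets MultiRecordWriter open one such writer per output file, and gives you a single place to change the separator or the line format.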

  Next, implement the generateFileNameForKeyValue method of the MultipleOutputFormat class above so that it returns the name of the file each record should go to. Here I split the key on commas and use one of its fields as the output file name (the second field, sp[1], in the code below); all records that share that field value are written to the same file, which is named after the value.

 

 public static class VVLogNameMultipleTextOutputFormat extends MultipleOutputFormat<Text, NullWritable> {
        
        @Override
        protected String generateFileNameForKeyValue(Text key, NullWritable value, Configuration conf) { 
            String sp[] = key.toString().split(",");
            String filename = sp[1];
            try {
                Long.parseLong(sp[1]);
            } catch (NumberFormatException e) {
                filename = "000000000000";
            }
            return filename;
        }


    }
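
With this implementation, a record whose key is, for example, uid123,20120708,... (a made-up key) is written to a file named 20120708, because the second comma-separated field parses as a long; if that field is not numeric, the record falls back to a file named 000000000000.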

   Finally, wire it up when configuring the job:

        Configuration conf = getConf();
        Job job = new Job(conf);
        job.setNumReduceTasks(12);
        ......
        job.setMapperClass(VVEtlMapper.class); 
        job.setReducerClass(EtlReducer.class);
        job.setOutputFormatClass(VVLogNameMultipleTextOutputFormat.class); // set the custom multi-file output format
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        FileOutputFormat.setCompressOutput(job, true); // enable compression of the output
        FileOutputFormat.setOutputCompressorClass(job, LzopCodec.class); // use the LZO codec for the compressed output
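
The VVEtlMapper and EtlReducer classes referenced above are not shown in the post. As a rough sketch only, a reducer compatible with VVLogNameMultipleTextOutputFormat might build its comma-separated output keys like this (the field layout, e.g. a numeric date as the second field, is an assumption for illustration, not part of the original code):

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class EtlReducer extends Reducer<Text, Text, Text, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        for (Text value : values) {
            // Emit a comma-separated key such as "uid123,20120708,..." so that the
            // second field (a numeric date here) becomes the output file name.
            context.write(new Text(key.toString() + "," + value.toString()), NullWritable.get());
        }
    }
}

For these types to line up, the job's output key/value classes would need to be Text and NullWritable.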

   OK, with that you have written a MapReduce job with custom multi-file output on the new Hadoop API.

 

 

http://blog.csdn.net/liuzhoulong/article/details/7743840
