mapReduce 輸出結果導入Oracle,無效字符錯誤

mapReduce 輸出結果導入Oracle,無效字符錯誤

使用map 讀取數據 ,然後輸入到Oracle,相信下面這段代碼大家都不陌生,但是一直報錯 無效字符。

 // Map-only job that reads text input and writes each record into Oracle
 // through DBOutputFormat. (Fixed from the original snippet: configureDB
 // was missing its trailing semicolon, and the setOutput call had an
 // illegal trailing comma before the closing parenthesis.)
 Job job = new Job(conf, "Query_Job");

 job.setJarByClass(ImportDriver.class);
 job.setMapperClass(ImportMapper.class);
 job.setOutputKeyClass(ActiveIpD.class);
 job.setOutputValueClass(Text.class);
 FileInputFormat.addInputPath(job, new Path(input));

 // Route the job's output to the database instead of HDFS.
 job.setOutputFormatClass(DBOutputFormat.class);
 // configureDB(Configuration, driverClass, dbUrl, userName, password)
 DBConfiguration.configureDB(job.getConfiguration(),
         driverClass, dbUrl, userName, password);
 // Target table plus one column name per field written by the mapper.
 DBOutputFormat.setOutput(job, "tableName", "id", "name");
 // Map-only: mapper output goes straight to the record writer.
 job.setNumReduceTasks(0);

後來觀察 DBOutputFormat 的源碼發現:constructQuery 方法在構造完 SQL 之後,在語句末尾加了一個分號;而 Oracle 的 JDBC 驅動不能識別以分號結尾的語句(ORA-00911 無效字符),導致報錯。

 // Upstream Hadoop implementation, quoted verbatim for reference.
 // Builds "INSERT INTO <table> (c1,...) VALUES (?,...);" -- note the
 // trailing ';' appended at the very end: Oracle rejects JDBC statements
 // that end with a semicolon (ORA-00911 "invalid character"), which is
 // the bug described in this article.
 public String constructQuery(String table, String[] fieldNames) {
    if(fieldNames == null) {
      throw new IllegalArgumentException("Field names may not be null");
    }

    StringBuilder query = new StringBuilder();
    query.append("INSERT INTO ").append(table);

    // Column list is only emitted when explicit field names were supplied.
    if (fieldNames.length > 0 && fieldNames[0] != null) {
      query.append(" (");
      for (int i = 0; i < fieldNames.length; i++) {
        query.append(fieldNames[i]);
        if (i != fieldNames.length - 1) {
          query.append(",");
        }
      }
      query.append(")");
    }
    query.append(" VALUES (");

    // One '?' placeholder per column, comma-separated.
    for (int i = 0; i < fieldNames.length; i++) {
      query.append("?");
      if(i != fieldNames.length - 1) {
        query.append(",");
      }
    }
    query.append(");"); // <-- the offending semicolon that Oracle rejects

    return query.toString();
  }

解決辦法:繼承 DBOutputFormat 並重寫該方法,去掉末尾的分號。

package com.boco.querymr.util;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;


/**
 * {@link DBOutputFormat} subclass whose generated {@code INSERT} statement
 * omits the trailing semicolon, which Oracle rejects with ORA-00911
 * ("invalid character").
 *
 * @param <K> key type written to the database
 * @param <V> value type (ignored by DBOutputFormat)
 */
public class MyDBOutputFormat<K extends DBWritable, V> extends DBOutputFormat<K, V> {
    private static final Log LOG = LogFactory.getLog(MyDBOutputFormat.class);

    /**
     * Builds the parameterized {@code INSERT} statement for the output table.
     * <p>
     * Identical to the parent implementation except that no trailing {@code ;}
     * is appended -- that semicolon is what made Oracle fail.
     *
     * @param table      name of the target table
     * @param fieldNames column names; one {@code ?} placeholder is emitted per entry
     * @return SQL of the form {@code INSERT INTO t (c1,c2) VALUES (?,?)}
     * @throws IllegalArgumentException if {@code fieldNames} is null
     */
    @Override
    public String constructQuery(String table, String[] fieldNames) {
        if (fieldNames == null) {
            throw new IllegalArgumentException("Field names may not be null");
        }

        StringBuilder query = new StringBuilder();
        query.append("INSERT INTO ").append(table);

        // Column list is only emitted when explicit field names were supplied
        // (mirrors the parent implementation's contract).
        if (fieldNames.length > 0 && fieldNames[0] != null) {
            query.append(" (");
            for (int i = 0; i < fieldNames.length; i++) {
                if (i > 0) {
                    query.append(",");
                }
                query.append(fieldNames[i]);
            }
            query.append(")");
        }

        query.append(" VALUES (");
        for (int i = 0; i < fieldNames.length; i++) {
            if (i > 0) {
                query.append(",");
            }
            query.append("?");
        }
        // Deliberately no trailing ';' here -- this is the whole fix.
        query.append(")");

        // Debug System.err.println removed; the log line is sufficient.
        LOG.info(query.toString());
        return query.toString();
    }

    /**
     * Configures {@code job} to write through this output format into
     * {@code tableName}.
     * <p>
     * NOTE(review): unlike {@link DBOutputFormat#setOutput}, this helper never
     * stores the output field names/count in the configuration -- confirm the
     * caller sets them separately, otherwise the record writer sees zero
     * output fields.
     *
     * @param job       job to configure
     * @param tableName target table name
     * @return the job's {@link DBConfiguration} with the table name applied
     * @throws IOException declared for parity with the parent API
     */
    private static DBConfiguration setOutput(Job job,
                                             String tableName) throws IOException {
        job.setOutputFormatClass(MyDBOutputFormat.class);
        // Speculative reduce attempts would insert duplicate rows.
        job.setReduceSpeculativeExecution(false);

        DBConfiguration dbConf = new DBConfiguration(job.getConfiguration());

        dbConf.setOutputTableName(tableName);
        return dbConf;
    }
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章