Monitoring remote cluster jobs via JobClient
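
This post shows how to use the old mapred JobClient API to connect to a remote YARN cluster, list the jobs that have not yet finished, and kill any job that has been running longer than a configurable timeout.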

pom.xml:

<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0</version>
    </dependency>
</dependencies>


<build>
    <resources>
        <resource>
            <directory>${project.basedir}/src/main/resources</directory>
        </resource>
    </resources>
    <plugins>
        <plugin>
            <!-- compiles the Java sources -->
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.2</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
                <encoding>UTF-8</encoding>
            </configuration>
            <executions>
                <execution>
                    <phase>compile</phase>
                    <goals>
                        <goal>compile</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
        <plugin>
            <!-- packages the code and all its dependencies into one runnable jar -->
            <artifactId>maven-assembly-plugin</artifactId>
            <version>2.6</version>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>jar-with-dependencies</descriptorRef>
                </descriptorRefs>
            </configuration>
        </plugin>
    </plugins>
</build>
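
Since the assembly plugin above is not bound to a lifecycle phase, a build along the lines of `mvn clean compile assembly:single` should produce a single `*-jar-with-dependencies.jar` that bundles the Hadoop client libraries and can be copied to the cluster.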

 

Java code:

    import java.io.FileWriter;
    import java.io.IOException;
    import java.util.Date;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.JobStatus;

    public class JobMonitor {

        public static void main(String[] args) throws IOException {
            // HADOOP_USER_NAME sets the Hadoop user; only this user's remote jobs are queried
            System.setProperty("HADOOP_USER_NAME", "xiaoyuefei");
            // Do not pass false to the constructor: new Configuration(false) skips the
            // default resources, and then the ResourceManager cannot be reached
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://***:8020");
            conf.set("mapreduce.framework.name", "yarn");
            // The ResourceManager address is an RPC host:port pair, not an http:// URL
            conf.set("yarn.resourcemanager.address", "***:8032");
            conf.set("mapreduce.app-submission.cross-platform", "true");
            // Important when running from a fat jar: pins the FileSystem implementation
            // so the classloader resolves hdfs:// URIs correctly
            conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
            int outtime = 120; // timeout in minutes
            JobClient jobClient = new JobClient(conf);
            System.out.println("jobClient : " + jobClient);
            try {
                // jobsToComplete() returns only jobs that are still pending or running
                JobStatus[] jobsToComplete = jobClient.jobsToComplete();
                for (JobStatus e : jobsToComplete) {
                    long startTime = e.getStartTime();
                    System.out.println(e.getJobName() + " start time : " + new Date(startTime));
                    if (System.currentTimeMillis() - startTime > outtime * 60 * 1000L) {
                        JobID jobID = e.getJobID();
                        jobClient.getJob(jobID).killJob();
                        System.out.println("********************************************************");
                        System.out.println("job killed : " + jobID + " at " + new Date());
                        System.out.println("********************************************************");
                        FileWriter fileWriter = new FileWriter("/data1/shell/job_control/job_monitor/logger.txt", true);
                        fileWriter.write("job killed : " + jobID + " at " + new Date() + "\t");
                        fileWriter.write(jobID + " start time is : " + new Date(startTime));
                        fileWriter.write("\n");
                        fileWriter.flush();
                        fileWriter.close();
                    }
                }
            } catch (IOException e2) {
                e2.printStackTrace();
            } finally {
                jobClient.close();
            }
        }
    }
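
jobsToComplete() only covers jobs that are still pending or running. If you also want to see what already finished, JobClient exposes getAllJobs(); a minimal sketch (it reuses the jobClient built above and belongs inside the same try block):

    // List every job the cluster still remembers, together with its run state.
    for (JobStatus s : jobClient.getAllJobs()) {
        String state;
        switch (s.getRunState()) {
            case JobStatus.PREP:      state = "PREP";      break;
            case JobStatus.RUNNING:   state = "RUNNING";   break;
            case JobStatus.SUCCEEDED: state = "SUCCEEDED"; break;
            case JobStatus.FAILED:    state = "FAILED";    break;
            case JobStatus.KILLED:    state = "KILLED";    break;
            default:                  state = "UNKNOWN";
        }
        System.out.println(s.getJobID() + "\t" + state + "\t" + s.getJobName());
    }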

Notes:

1. This code is used to kill jobs that have exceeded the timeout.

2. It can be executed with the hadoop jar command, as shown below.
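
For example (the jar name below is just whatever the assembly build produced, and the main class is assumed to be called JobMonitor as in the code above):

    hadoop jar job-monitor-jar-with-dependencies.jar JobMonitor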
