/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapred;

import java.io.File;
import java.io.IOException;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.SleepJob;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.ToolRunner;
33 | public class TestJobDirCleanup extends TestCase { |
---|
34 | //The testcase brings up a cluster with many trackers, and |
---|
35 | //runs a job with a single map and many reduces. The check is |
---|
36 | //to see whether the job directories are cleaned up at the |
---|
37 | //end of the job (indirectly testing whether all tasktrackers |
---|
38 | //got a KillJobAction). |
---|
39 | private static final Log LOG = |
---|
40 | LogFactory.getLog(TestEmptyJob.class.getName()); |
---|
41 | private void runSleepJob(JobConf conf) throws Exception { |
---|
42 | String[] args = { "-m", "1", "-r", "10", "-mt", "1000", "-rt", "10000" }; |
---|
43 | ToolRunner.run(conf, new SleepJob(), args); |
---|
44 | } |
---|
45 | public void testJobDirCleanup() throws IOException { |
---|
46 | String namenode = null; |
---|
47 | MiniDFSCluster dfs = null; |
---|
48 | MiniMRCluster mr = null; |
---|
49 | FileSystem fileSys = null; |
---|
50 | try { |
---|
51 | final int taskTrackers = 10; |
---|
52 | final int jobTrackerPort = 60050; |
---|
53 | Configuration conf = new Configuration(); |
---|
54 | JobConf mrConf = new JobConf(); |
---|
55 | mrConf.set("mapred.tasktracker.reduce.tasks.maximum", "1"); |
---|
56 | dfs = new MiniDFSCluster(conf, 1, true, null); |
---|
57 | fileSys = dfs.getFileSystem(); |
---|
58 | namenode = fileSys.getUri().toString(); |
---|
59 | mr = new MiniMRCluster(10, namenode, 3, |
---|
60 | null, null, mrConf); |
---|
61 | final String jobTrackerName = "localhost:" + mr.getJobTrackerPort(); |
---|
62 | JobConf jobConf = mr.createJobConf(); |
---|
63 | runSleepJob(jobConf); |
---|
64 | for(int i=0; i < taskTrackers; ++i) { |
---|
65 | String jobDirStr = mr.getTaskTrackerLocalDir(i)+ |
---|
66 | "/taskTracker/jobcache"; |
---|
67 | File jobDir = new File(jobDirStr); |
---|
68 | String[] contents = jobDir.list(); |
---|
69 | while (contents.length > 0) { |
---|
70 | try { |
---|
71 | Thread.sleep(1000); |
---|
72 | LOG.warn(jobDir +" not empty yet"); |
---|
73 | contents = jobDir.list(); |
---|
74 | } catch (InterruptedException ie){} |
---|
75 | } |
---|
76 | } |
---|
77 | } catch (Exception ee){ |
---|
78 | } finally { |
---|
79 | if (fileSys != null) { fileSys.close(); } |
---|
80 | if (dfs != null) { dfs.shutdown(); } |
---|
81 | if (mr != null) { mr.shutdown(); } |
---|
82 | } |
---|
83 | } |
---|
84 | } |
---|
85 | |
---|
86 | |
---|