source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/mapred/TestJobStatusPersistency.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 4.8 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapred;

import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Properties;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
public class TestJobStatusPersistency extends ClusterMapReduceTestCase {
  static final Path TEST_DIR =
    new Path(System.getProperty("test.build.data", "/tmp"),
             "job-status-persistence");

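  /**
   * Writes a small text input file and runs an identity map/reduce job
   * over it on the test cluster, returning the completed job's ID.
   */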
  private JobID runJob() throws Exception {
    OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
    Writer wr = new OutputStreamWriter(os);
    wr.write("hello1\n");
    wr.write("hello2\n");
    wr.write("hello3\n");
    wr.write("hello4\n");
    wr.close();

    JobConf conf = createJobConf();
    conf.setJobName("mr");

    conf.setInputFormat(TextInputFormat.class);

    conf.setMapOutputKeyClass(LongWritable.class);
    conf.setMapOutputValueClass(Text.class);

    conf.setOutputFormat(TextOutputFormat.class);
    conf.setOutputKeyClass(LongWritable.class);
    conf.setOutputValueClass(Text.class);

    conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
    conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);

    FileInputFormat.setInputPaths(conf, getInputDir());
    FileOutputFormat.setOutputPath(conf, getOutputDir());

    return JobClient.runJob(conf).getID();
  }

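  // Without persistence enabled (the default), the job status should be
  // gone after the JobTracker is restarted.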
  public void testNonPersistency() throws Exception {
    JobID jobId = runJob();
    JobClient jc = new JobClient(createJobConf());
    RunningJob rj = jc.getJob(jobId);
    assertNotNull(rj);
    stopCluster();
    startCluster(false, null);
    jc = new JobClient(createJobConf());
    rj = jc.getJob(jobId);
    assertNull(rj);
  }

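  // With mapred.job.tracker.persist.jobstatus.active enabled, the JobTracker
  // stores completed-job status for the configured number of hours, so the
  // job's status, counters and task events should survive a cluster restart.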
  public void testPersistency() throws Exception {
    Properties config = new Properties();
    config.setProperty("mapred.job.tracker.persist.jobstatus.active", "true");
    config.setProperty("mapred.job.tracker.persist.jobstatus.hours", "1");
    stopCluster();
    startCluster(false, config);
    JobID jobId = runJob();
    JobClient jc = new JobClient(createJobConf());
    RunningJob rj0 = jc.getJob(jobId);
    assertNotNull(rj0);
    boolean successful0 = rj0.isSuccessful();
    String jobName0 = rj0.getJobName();
    Counters counters0 = rj0.getCounters();
    TaskCompletionEvent[] events0 = rj0.getTaskCompletionEvents(0);

    // Restart the cluster; the persisted status should still be retrievable.
    stopCluster();
    startCluster(false, config);

    jc = new JobClient(createJobConf());
    RunningJob rj1 = jc.getJob(jobId);
    assertNotNull(rj1);
    assertEquals(successful0, rj1.isSuccessful());
    assertEquals(jobName0, rj1.getJobName());
    assertEquals(counters0.size(), rj1.getCounters().size());

    TaskCompletionEvent[] events1 = rj1.getTaskCompletionEvents(0);
    assertEquals(events0.length, events1.length);
    for (int i = 0; i < events0.length; i++) {
      assertEquals(events0[i].getTaskAttemptId(), events1[i].getTaskAttemptId());
      assertEquals(events0[i].getTaskStatus(), events1[i].getTaskStatus());
    }
  }

  /**
   * Test if the completed job status is persisted to localfs.
   */
  public void testLocalPersistency() throws Exception {
    FileSystem fs = FileSystem.getLocal(createJobConf());

    fs.delete(TEST_DIR, true);

    Properties config = new Properties();
    config.setProperty("mapred.job.tracker.persist.jobstatus.active", "true");
    config.setProperty("mapred.job.tracker.persist.jobstatus.hours", "1");
    config.setProperty("mapred.job.tracker.persist.jobstatus.dir",
                       fs.makeQualified(TEST_DIR).toString());
    stopCluster();
    startCluster(false, config);
    JobID jobId = runJob();
    JobClient jc = new JobClient(createJobConf());
    RunningJob rj = jc.getJob(jobId);
    assertNotNull(rj);

    // check if the local fs has the data
    Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info");
    assertTrue("Missing job info from the local fs", fs.exists(jobInfo));
    fs.delete(TEST_DIR, true);
  }
}
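Assuming this tree matches a stock Hadoop 0.20.x checkout, the test can typically be run on its own through the Ant build, e.g. ant test -Dtestcase=TestJobStatusPersistency from the hadoop-0.20.1 directory.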