source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 5.7 KB
Line 
1/**
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements.  See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership.  The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License.  You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18package org.apache.hadoop.mapred;
19
20import junit.framework.TestCase;
21import org.apache.hadoop.hdfs.MiniDFSCluster;
22import org.apache.hadoop.fs.FileSystem;
23import org.apache.hadoop.fs.Path;
24
25import java.io.IOException;
26import java.util.Map;
27import java.util.Properties;
28
29/**
30 * Test case to run a MapReduce job.
31 * <p/>
32 * It runs a 2 node cluster Hadoop with a 2 node DFS.
33 * <p/>
 * The JobConf to use must be obtained via the createJobConf() method.
35 * <p/>
36 * It creates a temporary directory -accessible via getTestRootDir()-
37 * for both input and output.
38 * <p/>
 * The input directory is accessible via getInputDir() and the output
40 * directory via getOutputDir()
41 * <p/>
 * The DFS filesystem is formatted before the testcase starts and after it ends.
43 */
44public abstract class ClusterMapReduceTestCase extends TestCase {
45  private MiniDFSCluster dfsCluster = null;
46  private MiniMRCluster mrCluster = null;
47
48  /**
49   * Creates Hadoop Cluster and DFS before a test case is run.
50   *
51   * @throws Exception
52   */
53  protected void setUp() throws Exception {
54    super.setUp();
55
56    startCluster(true, null);
57  }
58
59  /**
60   * Starts the cluster within a testcase.
61   * <p/>
62   * Note that the cluster is already started when the testcase method
63   * is invoked. This method is useful if as part of the testcase the
64   * cluster has to be shutdown and restarted again.
65   * <p/>
66   * If the cluster is already running this method does nothing.
67   *
68   * @param reformatDFS indicates if DFS has to be reformated
69   * @param props configuration properties to inject to the mini cluster
70   * @throws Exception if the cluster could not be started
71   */
72  protected synchronized void startCluster(boolean reformatDFS, Properties props)
73          throws Exception {
74    if (dfsCluster == null) {
75      JobConf conf = new JobConf();
76      if (props != null) {
77        for (Map.Entry entry : props.entrySet()) {
78          conf.set((String) entry.getKey(), (String) entry.getValue());
79        }
80      }
81      dfsCluster = new MiniDFSCluster(conf, 2, reformatDFS, null);
82
83      ConfigurableMiniMRCluster.setConfiguration(props);
84      //noinspection deprecation
85      mrCluster = new ConfigurableMiniMRCluster(2, getFileSystem().getName(), 1);
86    }
87  }
88
89  private static class ConfigurableMiniMRCluster extends MiniMRCluster {
90    private static Properties config;
91
92    public static void setConfiguration(Properties props) {
93      config = props;
94    }
95
96    public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode,
97                                     int numDir) throws Exception {
98      super(numTaskTrackers, namenode, numDir);
99    }
100
101    public JobConf createJobConf() {
102      JobConf conf = super.createJobConf();
103      if (config != null) {
104        for (Map.Entry entry : config.entrySet()) {
105          conf.set((String) entry.getKey(), (String) entry.getValue());
106        }
107      }
108      return conf;
109    }
110  }
111
112  /**
113   * Stops the cluster within a testcase.
114   * <p/>
115   * Note that the cluster is already started when the testcase method
116   * is invoked. This method is useful if as part of the testcase the
117   * cluster has to be shutdown.
118   * <p/>
119   * If the cluster is already stopped this method does nothing.
120   *
121   * @throws Exception if the cluster could not be stopped
122   */
123  protected void stopCluster() throws Exception {
124    if (mrCluster != null) {
125      mrCluster.shutdown();
126      mrCluster = null;
127    }
128    if (dfsCluster != null) {
129      dfsCluster.shutdown();
130      dfsCluster = null;
131    }
132  }
133
134  /**
135   * Destroys Hadoop Cluster and DFS after a test case is run.
136   *
137   * @throws Exception
138   */
139  protected void tearDown() throws Exception {
140    stopCluster();
141    super.tearDown();
142  }
143
144  /**
145   * Returns a preconfigured Filesystem instance for test cases to read and
146   * write files to it.
147   * <p/>
148   * TestCases should use this Filesystem instance.
149   *
150   * @return the filesystem used by Hadoop.
151   * @throws IOException
152   */
153  protected FileSystem getFileSystem() throws IOException {
154    return dfsCluster.getFileSystem();
155  }
156
157  protected MiniMRCluster getMRCluster() {
158    return mrCluster;
159  }
160
161  /**
162   * Returns the path to the root directory for the testcase.
163   *
164   * @return path to the root directory for the testcase.
165   */
166  protected Path getTestRootDir() {
167    return new Path("x").getParent();
168  }
169
170  /**
171   * Returns a path to the input directory for the testcase.
172   *
173   * @return path to the input directory for the tescase.
174   */
175  protected Path getInputDir() {
176    return new Path("input");
177  }
178
179  /**
180   * Returns a path to the output directory for the testcase.
181   *
182   * @return path to the output directory for the tescase.
183   */
184  protected Path getOutputDir() {
185    return new Path("output");
186  }
187
188  /**
189   * Returns a job configuration preconfigured to run against the Hadoop
190   * managed by the testcase.
191   *
192   * @return configuration that works on the testcase Hadoop instance
193   */
194  protected JobConf createJobConf() {
195    return mrCluster.createJobConf();
196  }
197
198}
Note: See TracBrowser for help on using the repository browser.