/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred;

import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.File;
import java.io.IOException;

29 | /** |
---|
30 | * Abstract Test case class to run MR in local or cluster mode and in local FS |
---|
31 | * or DFS. |
---|
32 | * |
---|
33 | * The Hadoop instance is started and stopped on each test method. |
---|
34 | * |
---|
35 | * If using DFS the filesystem is reformated at each start (test method). |
---|
36 | * |
---|
37 | * Job Configurations should be created using a configuration returned by the |
---|
38 | * 'createJobConf()' method. |
---|
39 | */ |
---|
40 | public abstract class HadoopTestCase extends TestCase { |
---|
41 | public static final int LOCAL_MR = 1; |
---|
42 | public static final int CLUSTER_MR = 2; |
---|
43 | public static final int LOCAL_FS = 4; |
---|
44 | public static final int DFS_FS = 8; |
---|
45 | |
---|
46 | private boolean localMR; |
---|
47 | private boolean localFS; |
---|
48 | |
---|
49 | private int taskTrackers; |
---|
50 | private int dataNodes; |
---|
51 | |
---|
52 | /** |
---|
53 | * Creates a testcase for local or cluster MR using DFS. |
---|
54 | * |
---|
55 | * The DFS will be formatted regardless if there was one or not before in the |
---|
56 | * given location. |
---|
57 | * |
---|
58 | * @param mrMode indicates if the MR should be local (LOCAL_MR) or cluster |
---|
59 | * (CLUSTER_MR) |
---|
60 | * @param fsMode indicates if the FS should be local (LOCAL_FS) or DFS (DFS_FS) |
---|
61 | * |
---|
62 | * local FS when using relative PATHs) |
---|
63 | * |
---|
64 | * @param taskTrackers number of task trackers to start when using cluster |
---|
65 | * |
---|
66 | * @param dataNodes number of data nodes to start when using DFS |
---|
67 | * |
---|
68 | * @throws IOException thrown if the base directory cannot be set. |
---|
69 | */ |
---|
70 | public HadoopTestCase(int mrMode, int fsMode, int taskTrackers, int dataNodes) |
---|
71 | throws IOException { |
---|
72 | if (mrMode != LOCAL_MR && mrMode != CLUSTER_MR) { |
---|
73 | throw new IllegalArgumentException( |
---|
74 | "Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR"); |
---|
75 | } |
---|
76 | if (fsMode != LOCAL_FS && fsMode != DFS_FS) { |
---|
77 | throw new IllegalArgumentException( |
---|
78 | "Invalid FileSystem mode, must be LOCAL_FS or DFS_FS"); |
---|
79 | } |
---|
80 | if (taskTrackers < 1) { |
---|
81 | throw new IllegalArgumentException( |
---|
82 | "Invalid taskTrackers value, must be greater than 0"); |
---|
83 | } |
---|
84 | if (dataNodes < 1) { |
---|
85 | throw new IllegalArgumentException( |
---|
86 | "Invalid dataNodes value, must be greater than 0"); |
---|
87 | } |
---|
88 | localMR = (mrMode == LOCAL_MR); |
---|
89 | localFS = (fsMode == LOCAL_FS); |
---|
90 | /* |
---|
91 | JobConf conf = new JobConf(); |
---|
92 | fsRoot = conf.get("hadoop.tmp.dir"); |
---|
93 | |
---|
94 | if (fsRoot == null) { |
---|
95 | throw new IllegalArgumentException( |
---|
96 | "hadoop.tmp.dir is not defined"); |
---|
97 | } |
---|
98 | |
---|
99 | fsRoot = fsRoot.replace(' ', '+') + "/fs"; |
---|
100 | |
---|
101 | File file = new File(fsRoot); |
---|
102 | if (!file.exists()) { |
---|
103 | if (!file.mkdirs()) { |
---|
104 | throw new RuntimeException("Could not create FS base path: " + file); |
---|
105 | } |
---|
106 | } |
---|
107 | */ |
---|
108 | this.taskTrackers = taskTrackers; |
---|
109 | this.dataNodes = dataNodes; |
---|
110 | } |
---|
111 | |
---|
112 | /** |
---|
113 | * Indicates if the MR is running in local or cluster mode. |
---|
114 | * |
---|
115 | * @return returns TRUE if the MR is running locally, FALSE if running in |
---|
116 | * cluster mode. |
---|
117 | */ |
---|
118 | public boolean isLocalMR() { |
---|
119 | return localMR; |
---|
120 | } |
---|
121 | |
---|
122 | /** |
---|
123 | * Indicates if the filesystem is local or DFS. |
---|
124 | * |
---|
125 | * @return returns TRUE if the filesystem is local, FALSE if it is DFS. |
---|
126 | */ |
---|
127 | public boolean isLocalFS() { |
---|
128 | return localFS; |
---|
129 | } |
---|
130 | |
---|
131 | |
---|
132 | private MiniDFSCluster dfsCluster = null; |
---|
133 | private MiniMRCluster mrCluster = null; |
---|
134 | private FileSystem fileSystem = null; |
---|
135 | |
---|
136 | /** |
---|
137 | * Creates Hadoop instance based on constructor configuration before |
---|
138 | * a test case is run. |
---|
139 | * |
---|
140 | * @throws Exception |
---|
141 | */ |
---|
142 | protected void setUp() throws Exception { |
---|
143 | super.setUp(); |
---|
144 | if (localFS) { |
---|
145 | fileSystem = FileSystem.getLocal(new JobConf()); |
---|
146 | } |
---|
147 | else { |
---|
148 | dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null); |
---|
149 | fileSystem = dfsCluster.getFileSystem(); |
---|
150 | } |
---|
151 | if (localMR) { |
---|
152 | } |
---|
153 | else { |
---|
154 | //noinspection deprecation |
---|
155 | mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getName(), 1); |
---|
156 | } |
---|
157 | } |
---|
158 | |
---|
159 | /** |
---|
160 | * Destroys Hadoop instance based on constructor configuration after |
---|
161 | * a test case is run. |
---|
162 | * |
---|
163 | * @throws Exception |
---|
164 | */ |
---|
165 | protected void tearDown() throws Exception { |
---|
166 | try { |
---|
167 | if (mrCluster != null) { |
---|
168 | mrCluster.shutdown(); |
---|
169 | } |
---|
170 | } |
---|
171 | catch (Exception ex) { |
---|
172 | System.out.println(ex); |
---|
173 | } |
---|
174 | try { |
---|
175 | if (dfsCluster != null) { |
---|
176 | dfsCluster.shutdown(); |
---|
177 | } |
---|
178 | } |
---|
179 | catch (Exception ex) { |
---|
180 | System.out.println(ex); |
---|
181 | } |
---|
182 | super.tearDown(); |
---|
183 | } |
---|
184 | |
---|
185 | /** |
---|
186 | * Returns the Filesystem in use. |
---|
187 | * |
---|
188 | * TestCases should use this Filesystem as it |
---|
189 | * is properly configured with the workingDir for relative PATHs. |
---|
190 | * |
---|
191 | * @return the filesystem used by Hadoop. |
---|
192 | */ |
---|
193 | protected FileSystem getFileSystem() { |
---|
194 | return fileSystem; |
---|
195 | } |
---|
196 | |
---|
197 | /** |
---|
198 | * Returns a job configuration preconfigured to run against the Hadoop |
---|
199 | * managed by the testcase. |
---|
200 | * @return configuration that works on the testcase Hadoop instance |
---|
201 | */ |
---|
202 | protected JobConf createJobConf() { |
---|
203 | return (localMR) ? new JobConf() : mrCluster.createJobConf(); |
---|
204 | } |
---|
205 | |
---|
206 | } |
---|