source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @ 120

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;

/**
 * This test ensures the appropriate response (success or failure) from
 * the system when it is started under various storage state and
 * version conditions.
 */
public class TestDFSStorageStateRecovery extends TestCase {

  private static final Log LOG =
    LogFactory.getLog("org.apache.hadoop.hdfs.TestDFSStorageStateRecovery");
  private Configuration conf = null;
  private int testCounter = 0;
  private MiniDFSCluster cluster = null;
  /**
   * The test case table.  Each row represents a test case.  This table is
   * taken from the table in Appendix A of the HDFS Upgrade Test Plan
   * (TestPlan-HdfsUpgrade.html) attached to
   * http://issues.apache.org/jira/browse/HADOOP-702
   * The column meanings are:
   *  0) current directory exists
   *  1) previous directory exists
   *  2) previous.tmp directory exists
   *  3) removed.tmp directory exists
   *  4) node should recover and start up
   *  5) current directory should exist after recovery but before startup
   *  6) previous directory should exist after recovery but before startup
   */
  static boolean[][] testCases = new boolean[][] {
    new boolean[] {true,  false, false, false, true,  true,  false}, // 1
    new boolean[] {true,  true,  false, false, true,  true,  true }, // 2
    new boolean[] {true,  false, true,  false, true,  true,  true }, // 3
    new boolean[] {true,  true,  true,  true,  false, false, false}, // 4
    new boolean[] {true,  true,  true,  false, false, false, false}, // 4
    new boolean[] {false, true,  true,  true,  false, false, false}, // 4
    new boolean[] {false, true,  true,  false, false, false, false}, // 4
    new boolean[] {false, false, false, false, false, false, false}, // 5
    new boolean[] {false, true,  false, false, false, false, false}, // 6
    new boolean[] {false, false, true,  false, true,  true,  false}, // 7
    new boolean[] {true,  false, false, true,  true,  true,  false}, // 8
    new boolean[] {true,  true,  false, true,  false, false, false}, // 9
    new boolean[] {true,  true,  true,  true,  false, false, false}, // 10
    new boolean[] {true,  false, true,  true,  false, false, false}, // 10
    new boolean[] {false, true,  true,  true,  false, false, false}, // 10
    new boolean[] {false, false, true,  true,  false, false, false}, // 10
    new boolean[] {false, false, false, true,  false, false, false}, // 11
    new boolean[] {false, true,  false, true,  true,  true,  true }, // 12
  };
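  // Reading row 1 above: only "current" exists on disk, the node is expected
  // to recover and start, and after recovery "current" should still exist
  // while "previous" should not.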

  /**
   * Writes an INFO log message containing the parameters. Only
   * the first 4 elements of the state array are included in the message.
   */
  void log(String label, int numDirs, int testCaseNum, boolean[] state) {
    LOG.info("============================================================");
    LOG.info("***TEST " + (testCounter++) + "*** "
             + label + ":"
             + " numDirs=" + numDirs
             + " testCase=" + testCaseNum
             + " current=" + state[0]
             + " previous=" + state[1]
             + " previous.tmp=" + state[2]
             + " removed.tmp=" + state[3]);
  }

  /**
   * Sets up the storage directories for the given node type, either
   * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
   * dfs.data.dir, the subdirectories represented by the first four elements
   * of the <code>state</code> array will be created and populated.
   * See UpgradeUtilities.createStorageDirs().
   *
   * @param nodeType
   *   the type of node that storage should be created for. Based on this
   *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
   * @param state
   *   a row from the testCases table which indicates which directories
   *   to set up for the node
   * @return file paths representing either dfs.name.dir or dfs.data.dir
   *   directories
   */
  String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
    String[] baseDirs = (nodeType == NAME_NODE ?
                         conf.getStrings("dfs.name.dir") :
                         conf.getStrings("dfs.data.dir"));
    UpgradeUtilities.createEmptyDirs(baseDirs);
    if (state[0])  // current
      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
    if (state[1])  // previous
      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
    if (state[2])  // previous.tmp
      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
    if (state[3])  // removed.tmp
      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
    return baseDirs;
  }
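  // Note: only state[0..3] (the on-disk directory flags) are consulted here;
  // state[4..6] encode expected outcomes and are checked in
  // testStorageStates() and checkResult().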

  /**
   * Verify that the current and/or previous directories exist as indicated
   * by the method parameters.  If the previous directory exists, verify that
   * it hasn't been modified by comparing the checksums of all its files
   * with their original checksums.  It is assumed that the server has
   * recovered.
   */
  void checkResult(NodeType nodeType, String[] baseDirs,
                   boolean currentShouldExist, boolean previousShouldExist)
    throws IOException
  {
    switch (nodeType) {
    case NAME_NODE:
      if (currentShouldExist) {
        for (int i = 0; i < baseDirs.length; i++) {
          assertTrue(new File(baseDirs[i], "current").isDirectory());
          assertTrue(new File(baseDirs[i], "current/VERSION").isFile());
          assertTrue(new File(baseDirs[i], "current/edits").isFile());
          assertTrue(new File(baseDirs[i], "current/fsimage").isFile());
          assertTrue(new File(baseDirs[i], "current/fstime").isFile());
        }
      }
      break;
    case DATA_NODE:
      if (currentShouldExist) {
        for (int i = 0; i < baseDirs.length; i++) {
          assertEquals(
              UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "current")),
              UpgradeUtilities.checksumMasterContents(nodeType));
        }
      }
      break;
    }
    if (previousShouldExist) {
      for (int i = 0; i < baseDirs.length; i++) {
        assertTrue(new File(baseDirs[i], "previous").isDirectory());
        assertEquals(
            UpgradeUtilities.checksumContents(nodeType, new File(baseDirs[i], "previous")),
            UpgradeUtilities.checksumMasterContents(nodeType));
      }
    }
  }
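  // The two node types are verified differently: NameNode recovery is checked
  // structurally (VERSION, edits, fsimage and fstime must exist), while
  // DataNode recovery is checked by comparing directory checksums against
  // UpgradeUtilities' reference ("master") copy.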

  /**
   * This test iterates over the testCases table and attempts
   * to start the NameNode and DataNode normally.
   */
  public void testStorageStates() throws Exception {
    String[] baseDirs;
    UpgradeUtilities.initialize();

    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new Configuration();
      conf.setInt("dfs.datanode.scan.period.hours", -1);
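      // A negative scan period disables the periodic DataNode block scanner,
      // presumably so background scanning cannot interfere with the
      // storage-state checks below.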
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      for (int i = 0; i < testCases.length; i++) {
        boolean[] testCase = testCases[i];
        boolean shouldRecover = testCase[4];
        boolean curAfterRecover = testCase[5];
        boolean prevAfterRecover = testCase[6];

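        // Phase 1: create the storage state for the NameNode and start it,
        // expecting either a successful recovery or a startup failure as
        // dictated by the test case.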
        log("NAME_NODE recovery", numDirs, i, testCase);
        baseDirs = createStorageState(NAME_NODE, testCase);
        if (shouldRecover) {
          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
          checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
          cluster.shutdown();
        } else {
          try {
            cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
            throw new AssertionError("NameNode should have failed to start");
          } catch (IOException expected) {
            // the exception is expected
            // check that the message says "not formatted"
            // when the storage directory is empty (case #5)
            if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
              assertTrue(expected.getLocalizedMessage().contains(
                  "NameNode is not formatted"));
            }
          }
        }

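        // Phase 2: with the NameNode in a known-good state, repeat the
        // exercise for the DataNode storage directories.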
        log("DATA_NODE recovery", numDirs, i, testCase);
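        // Reset the NameNode to a healthy current+previous layout so that
        // only the DataNode storage state varies in this phase.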
        createStorageState(NAME_NODE, new boolean[] {true, true, false, false});
        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
        baseDirs = createStorageState(DATA_NODE, testCase);
        if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
          // DataNode will create and format "current" if no directories exist
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
        } else {
          if (shouldRecover) {
            cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
            checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
          } else {
            try {
              cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
              throw new AssertionError("DataNode should have failed to start");
            } catch (Exception expected) {
              // expected
            }
          }
        }
        cluster.shutdown();
      } // end testCases loop
    } // end numDirs loop
  }

  protected void tearDown() throws Exception {
    LOG.info("Shutting down MiniDFSCluster");
    if (cluster != null) cluster.shutdown();
  }

  public static void main(String[] args) throws Exception {
    new TestDFSStorageStateRecovery().testStorageStates();
  }

}