source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit project

  • Property svn:executable set to *
File size: 11.2 KB
Line 
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
18package org.apache.hadoop.hdfs;
19
20import java.io.File;
21import java.io.IOException;
22import junit.framework.TestCase;
23import org.apache.commons.logging.Log;
24import org.apache.commons.logging.LogFactory;
25import org.apache.hadoop.conf.Configuration;
26import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
27import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
28
29import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
30import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
31
32import org.apache.hadoop.hdfs.server.common.HdfsConstants;
33import org.apache.hadoop.hdfs.server.common.Storage;
34import org.apache.hadoop.hdfs.server.common.StorageInfo;
35import org.apache.hadoop.fs.FileUtil;
36
37/**
38* This test ensures the appropriate response (successful or failure) from
39* the system when the system is upgraded under various storage state and
40* version conditions.
41*/
42public class TestDFSUpgrade extends TestCase {
43 
44  private static final Log LOG = LogFactory.getLog(
45                                                   "org.apache.hadoop.hdfs.TestDFSUpgrade");
46  private Configuration conf;
47  private int testCounter = 0;
48  private MiniDFSCluster cluster = null;
49   
50  /**
51   * Writes an INFO log message containing the parameters.
52   */
53  void log(String label, int numDirs) {
54    LOG.info("============================================================");
55    LOG.info("***TEST " + (testCounter++) + "*** " 
56             + label + ":"
57             + " numDirs="+numDirs);
58  }
59 
60  /**
61   * Verify that the current and previous directories exist.  Verify that
62   * previous hasn't been modified by comparing the checksum of all it's
63   * containing files with their original checksum.  It is assumed that
64   * the server has recovered and upgraded.
65   */
66  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
67    switch (nodeType) {
68    case NAME_NODE:
69      for (int i = 0; i < baseDirs.length; i++) {
70        assertTrue(new File(baseDirs[i],"current").isDirectory());
71        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
72        assertTrue(new File(baseDirs[i],"current/edits").isFile());
73        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
74        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
75      }
76      break;
77    case DATA_NODE:
78      for (int i = 0; i < baseDirs.length; i++) {
79        assertEquals(
80                     UpgradeUtilities.checksumContents(
81                                                       nodeType, new File(baseDirs[i],"current")),
82                     UpgradeUtilities.checksumMasterContents(nodeType));
83      }
84      break;
85    }
86    for (int i = 0; i < baseDirs.length; i++) {
87      assertTrue(new File(baseDirs[i],"previous").isDirectory());
88      assertEquals(
89                   UpgradeUtilities.checksumContents(
90                                                     nodeType, new File(baseDirs[i],"previous")),
91                   UpgradeUtilities.checksumMasterContents(nodeType));
92    }
93  }
94 
95  /**
96   * Attempts to start a NameNode with the given operation.  Starting
97   * the NameNode should throw an exception.
98   */
99  void startNameNodeShouldFail(StartupOption operation) {
100    try {
101      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
102      throw new AssertionError("NameNode should have failed to start");
103    } catch (Exception expected) {
104      // expected
105    }
106  }
107 
108  /**
109   * Attempts to start a DataNode with the given operation.  Starting
110   * the DataNode should throw an exception.
111   */
112  void startDataNodeShouldFail(StartupOption operation) {
113    try {
114      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
115      throw new AssertionError("DataNode should have failed to start");
116    } catch (Exception expected) {
117      // expected
118      assertFalse(cluster.isDataNodeUp());
119    }
120  }
121 
122  /**
123   * This test attempts to upgrade the NameNode and DataNode under
124   * a number of valid and invalid conditions.
125   */
126  public void testUpgrade() throws Exception {
127    File[] baseDirs;
128    UpgradeUtilities.initialize();
129   
130    for (int numDirs = 1; numDirs <= 2; numDirs++) {
131      conf = new Configuration();
132      conf.setInt("dfs.datanode.scan.period.hours", -1);     
133      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
134      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
135      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
136     
137      log("Normal NameNode upgrade", numDirs);
138      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
139      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
140      checkResult(NAME_NODE, nameNodeDirs);
141      cluster.shutdown();
142      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
143     
144      log("Normal DataNode upgrade", numDirs);
145      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
146      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
147      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
148      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
149      checkResult(DATA_NODE, dataNodeDirs);
150      cluster.shutdown();
151      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
152      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
153     
154      log("NameNode upgrade with existing previous dir", numDirs);
155      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
156      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
157      startNameNodeShouldFail(StartupOption.UPGRADE);
158      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
159     
160      log("DataNode upgrade with existing previous dir", numDirs);
161      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
162      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
163      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
164      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
165      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
166      checkResult(DATA_NODE, dataNodeDirs);
167      cluster.shutdown();
168      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
169      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
170
171      log("DataNode upgrade with future stored layout version in current", numDirs);
172      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
173      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
174      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
175      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
176                                         new StorageInfo(Integer.MIN_VALUE,
177                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
178                                                         UpgradeUtilities.getCurrentFsscTime(cluster)));
179      startDataNodeShouldFail(StartupOption.REGULAR);
180      cluster.shutdown();
181      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
182      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
183     
184      log("DataNode upgrade with newer fsscTime in current", numDirs);
185      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
186      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
187      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
188      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
189                                         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
190                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
191                                                         Long.MAX_VALUE));
192      startDataNodeShouldFail(StartupOption.REGULAR);
193      cluster.shutdown();
194      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
195      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
196
197      log("NameNode upgrade with no edits file", numDirs);
198      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
199      for (File f : baseDirs) { 
200        FileUtil.fullyDelete(new File(f,"edits"));
201      }
202      startNameNodeShouldFail(StartupOption.UPGRADE);
203      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
204     
205      log("NameNode upgrade with no image file", numDirs);
206      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
207      for (File f : baseDirs) { 
208        FileUtil.fullyDelete(new File(f,"fsimage")); 
209      }
210      startNameNodeShouldFail(StartupOption.UPGRADE);
211      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
212     
213      log("NameNode upgrade with corrupt version file", numDirs);
214      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
215      for (File f : baseDirs) { 
216        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
217      }
218      startNameNodeShouldFail(StartupOption.UPGRADE);
219      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
220     
221      log("NameNode upgrade with old layout version in current", numDirs);
222      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
223      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
224                                         new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
225                                                         UpgradeUtilities.getCurrentNamespaceID(null),
226                                                         UpgradeUtilities.getCurrentFsscTime(null)));
227      startNameNodeShouldFail(StartupOption.UPGRADE);
228      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
229     
230      log("NameNode upgrade with future layout version in current", numDirs);
231      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
232      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
233                                         new StorageInfo(Integer.MIN_VALUE,
234                                                         UpgradeUtilities.getCurrentNamespaceID(null),
235                                                         UpgradeUtilities.getCurrentFsscTime(null)));
236      startNameNodeShouldFail(StartupOption.UPGRADE);
237      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
238    } // end numDir loop
239  }
240 
241  protected void tearDown() throws Exception {
242    LOG.info("Shutting down MiniDFSCluster");
243    if (cluster != null) cluster.shutdown();
244  }
245   
246  public static void main(String[] args) throws Exception {
247    new TestDFSUpgrade().testUpgrade();
248  }
249 
250}
251
252
Note: See TracBrowser for help on using the repository browser.