/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.File;
import java.io.IOException;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;

import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.fs.FileUtil;
/**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is rolled back under various storage state and
 * version conditions.
 */
| 41 | public class TestDFSRollback extends TestCase { |
---|
| 42 | |
---|
| 43 | private static final Log LOG = LogFactory.getLog( |
---|
| 44 | "org.apache.hadoop.hdfs.TestDFSRollback"); |
---|
| 45 | private Configuration conf; |
---|
| 46 | private int testCounter = 0; |
---|
| 47 | private MiniDFSCluster cluster = null; |
---|
| 48 | |
---|
| 49 | /** |
---|
| 50 | * Writes an INFO log message containing the parameters. |
---|
| 51 | */ |
---|
| 52 | void log(String label, int numDirs) { |
---|
| 53 | LOG.info("============================================================"); |
---|
| 54 | LOG.info("***TEST " + (testCounter++) + "*** " |
---|
| 55 | + label + ":" |
---|
| 56 | + " numDirs="+numDirs); |
---|
| 57 | } |
---|
| 58 | |
---|
| 59 | /** |
---|
| 60 | * Verify that the new current directory is the old previous. |
---|
| 61 | * It is assumed that the server has recovered and rolled back. |
---|
| 62 | */ |
---|
| 63 | void checkResult(NodeType nodeType, String[] baseDirs) throws IOException { |
---|
| 64 | switch (nodeType) { |
---|
| 65 | case NAME_NODE: |
---|
| 66 | for (int i = 0; i < baseDirs.length; i++) { |
---|
| 67 | assertTrue(new File(baseDirs[i],"current").isDirectory()); |
---|
| 68 | assertTrue(new File(baseDirs[i],"current/VERSION").isFile()); |
---|
| 69 | assertTrue(new File(baseDirs[i],"current/edits").isFile()); |
---|
| 70 | assertTrue(new File(baseDirs[i],"current/fsimage").isFile()); |
---|
| 71 | assertTrue(new File(baseDirs[i],"current/fstime").isFile()); |
---|
| 72 | } |
---|
| 73 | break; |
---|
| 74 | case DATA_NODE: |
---|
| 75 | for (int i = 0; i < baseDirs.length; i++) { |
---|
| 76 | assertEquals( |
---|
| 77 | UpgradeUtilities.checksumContents( |
---|
| 78 | nodeType, new File(baseDirs[i],"current")), |
---|
| 79 | UpgradeUtilities.checksumMasterContents(nodeType)); |
---|
| 80 | } |
---|
| 81 | break; |
---|
| 82 | } |
---|
| 83 | for (int i = 0; i < baseDirs.length; i++) { |
---|
| 84 | assertFalse(new File(baseDirs[i],"previous").isDirectory()); |
---|
| 85 | } |
---|
| 86 | } |
---|
| 87 | |
---|
| 88 | /** |
---|
| 89 | * Attempts to start a NameNode with the given operation. Starting |
---|
| 90 | * the NameNode should throw an exception. |
---|
| 91 | */ |
---|
| 92 | void startNameNodeShouldFail(StartupOption operation) { |
---|
| 93 | try { |
---|
| 94 | cluster = new MiniDFSCluster(conf, 0, operation); // should fail |
---|
| 95 | throw new AssertionError("NameNode should have failed to start"); |
---|
| 96 | } catch (Exception expected) { |
---|
| 97 | // expected |
---|
| 98 | } |
---|
| 99 | } |
---|
| 100 | |
---|
| 101 | /** |
---|
| 102 | * Attempts to start a DataNode with the given operation. Starting |
---|
| 103 | * the DataNode should throw an exception. |
---|
| 104 | */ |
---|
| 105 | void startDataNodeShouldFail(StartupOption operation) { |
---|
| 106 | try { |
---|
| 107 | cluster.startDataNodes(conf, 1, false, operation, null); // should fail |
---|
| 108 | throw new AssertionError("DataNode should have failed to start"); |
---|
| 109 | } catch (Exception expected) { |
---|
| 110 | // expected |
---|
| 111 | assertFalse(cluster.isDataNodeUp()); |
---|
| 112 | } |
---|
| 113 | } |
---|
| 114 | |
---|
| 115 | /** |
---|
| 116 | * This test attempts to rollback the NameNode and DataNode under |
---|
| 117 | * a number of valid and invalid conditions. |
---|
| 118 | */ |
---|
| 119 | public void testRollback() throws Exception { |
---|
| 120 | File[] baseDirs; |
---|
| 121 | UpgradeUtilities.initialize(); |
---|
| 122 | |
---|
| 123 | for (int numDirs = 1; numDirs <= 2; numDirs++) { |
---|
| 124 | conf = new Configuration(); |
---|
| 125 | conf.setInt("dfs.datanode.scan.period.hours", -1); |
---|
| 126 | conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf); |
---|
| 127 | String[] nameNodeDirs = conf.getStrings("dfs.name.dir"); |
---|
| 128 | String[] dataNodeDirs = conf.getStrings("dfs.data.dir"); |
---|
| 129 | |
---|
| 130 | log("Normal NameNode rollback", numDirs); |
---|
| 131 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 132 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 133 | cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK); |
---|
| 134 | checkResult(NAME_NODE, nameNodeDirs); |
---|
| 135 | cluster.shutdown(); |
---|
| 136 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 137 | |
---|
| 138 | log("Normal DataNode rollback", numDirs); |
---|
| 139 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 140 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 141 | cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK); |
---|
| 142 | UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current"); |
---|
| 143 | UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous"); |
---|
| 144 | cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null); |
---|
| 145 | checkResult(DATA_NODE, dataNodeDirs); |
---|
| 146 | cluster.shutdown(); |
---|
| 147 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 148 | UpgradeUtilities.createEmptyDirs(dataNodeDirs); |
---|
| 149 | |
---|
| 150 | log("NameNode rollback without existing previous dir", numDirs); |
---|
| 151 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 152 | startNameNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 153 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 154 | |
---|
| 155 | log("DataNode rollback without existing previous dir", numDirs); |
---|
| 156 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 157 | cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE); |
---|
| 158 | UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current"); |
---|
| 159 | cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null); |
---|
| 160 | cluster.shutdown(); |
---|
| 161 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 162 | UpgradeUtilities.createEmptyDirs(dataNodeDirs); |
---|
| 163 | |
---|
| 164 | log("DataNode rollback with future stored layout version in previous", numDirs); |
---|
| 165 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 166 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 167 | cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK); |
---|
| 168 | UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current"); |
---|
| 169 | baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous"); |
---|
| 170 | UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs, |
---|
| 171 | new StorageInfo(Integer.MIN_VALUE, |
---|
| 172 | UpgradeUtilities.getCurrentNamespaceID(cluster), |
---|
| 173 | UpgradeUtilities.getCurrentFsscTime(cluster))); |
---|
| 174 | startDataNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 175 | cluster.shutdown(); |
---|
| 176 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 177 | UpgradeUtilities.createEmptyDirs(dataNodeDirs); |
---|
| 178 | |
---|
| 179 | log("DataNode rollback with newer fsscTime in previous", numDirs); |
---|
| 180 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 181 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 182 | cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK); |
---|
| 183 | UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current"); |
---|
| 184 | baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous"); |
---|
| 185 | UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs, |
---|
| 186 | new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(), |
---|
| 187 | UpgradeUtilities.getCurrentNamespaceID(cluster), |
---|
| 188 | Long.MAX_VALUE)); |
---|
| 189 | startDataNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 190 | cluster.shutdown(); |
---|
| 191 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 192 | UpgradeUtilities.createEmptyDirs(dataNodeDirs); |
---|
| 193 | |
---|
| 194 | log("NameNode rollback with no edits file", numDirs); |
---|
| 195 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 196 | baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 197 | for (File f : baseDirs) { |
---|
| 198 | FileUtil.fullyDelete(new File(f,"edits")); |
---|
| 199 | } |
---|
| 200 | startNameNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 201 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 202 | |
---|
| 203 | log("NameNode rollback with no image file", numDirs); |
---|
| 204 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 205 | baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 206 | for (File f : baseDirs) { |
---|
| 207 | FileUtil.fullyDelete(new File(f,"fsimage")); |
---|
| 208 | } |
---|
| 209 | startNameNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 210 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 211 | |
---|
| 212 | log("NameNode rollback with corrupt version file", numDirs); |
---|
| 213 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 214 | baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 215 | for (File f : baseDirs) { |
---|
| 216 | UpgradeUtilities.corruptFile(new File(f,"VERSION")); |
---|
| 217 | } |
---|
| 218 | startNameNodeShouldFail(StartupOption.ROLLBACK); |
---|
| 219 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 220 | |
---|
| 221 | log("NameNode rollback with old layout version in previous", numDirs); |
---|
| 222 | UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current"); |
---|
| 223 | baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous"); |
---|
| 224 | UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs, |
---|
| 225 | new StorageInfo(1, |
---|
| 226 | UpgradeUtilities.getCurrentNamespaceID(null), |
---|
| 227 | UpgradeUtilities.getCurrentFsscTime(null))); |
---|
| 228 | startNameNodeShouldFail(StartupOption.UPGRADE); |
---|
| 229 | UpgradeUtilities.createEmptyDirs(nameNodeDirs); |
---|
| 230 | } // end numDir loop |
---|
| 231 | } |
---|
| 232 | |
---|
| 233 | protected void tearDown() throws Exception { |
---|
| 234 | LOG.info("Shutting down MiniDFSCluster"); |
---|
| 235 | if (cluster != null) cluster.shutdown(); |
---|
| 236 | } |
---|
| 237 | |
---|
| 238 | public static void main(String[] args) throws Exception { |
---|
| 239 | new TestDFSRollback().testRollback(); |
---|
| 240 | } |
---|
| 241 | |
---|
| 242 | } |
---|
| 243 | |
---|
| 244 | |
---|