source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 14.7 KB
/*
 * UpgradeUtilities.java
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.Random;
import java.util.zip.CRC32;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;

/**
 * This class defines a number of static helper methods used by the
 * DFS Upgrade unit tests.  By default, a singleton master populated storage
 * directory is created for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  The master directories are lazily created.  They are then
 * copied by the createStorageDirs() method to create new storage
 * directories of the appropriate type (Namenode or Datanode).
 */
public class UpgradeUtilities {

  // Root scratch directory on the local filesystem
  private static File TEST_ROOT_DIR = new File(
      System.getProperty("test.build.data", "/tmp").replace(' ', '+'));
  // The singleton master storage directory for the Namenode
  private static File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
  // A checksum of the contents of the namenodeStorage directory
  private static long namenodeStorageChecksum;
  // The namespaceID of the namenodeStorage directory
  private static int namenodeStorageNamespaceID;
  // The fsscTime of the namenodeStorage directory
  private static long namenodeStorageFsscTime;
  // The singleton master storage directory for the Datanode
  private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
  // A checksum of the contents of the datanodeStorage directory
  private static long datanodeStorageChecksum;

  /**
   * Initialize the data structures used by this class.
   * IMPORTANT NOTE: This method must be called once before calling
   *                 any other public method on this class.
   * <p>
   * Creates a singleton master populated storage
   * directory for a Namenode (contains edits, fsimage,
   * version, and time files) and a Datanode (contains version and
   * block files).  This can be a lengthy operation.
   */
  public static void initialize() throws Exception {
    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
    Configuration config = new Configuration();
    config.set("dfs.name.dir", namenodeStorage.toString());
    config.set("dfs.data.dir", datanodeStorage.toString());
    MiniDFSCluster cluster = null;
    try {
      // format the data-node storage directory
      createEmptyDirs(new String[] {datanodeStorage.toString()});

      // format the NameNode, then start the NameNode and a DataNode
      NameNode.format(config);
      cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);

      NameNode namenode = cluster.getNameNode();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();

      FileSystem fs = FileSystem.get(config);
      Path baseDir = new Path("/TestUpgrade");
      fs.mkdirs(baseDir);

      // write some files
      int bufferSize = 4096;
      byte[] buffer = new byte[bufferSize];
      for (int i = 0; i < bufferSize; i++) {
        buffer[i] = (byte)('0' + i % 50);
      }
      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);

      // save the namespace image, then reopen the edit log
      namenode.getFSImage().saveFSImage();
      namenode.getFSImage().getEditLog().open();

      // write more files
      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
    } finally {
      // shutdown
      if (cluster != null) cluster.shutdown();
      FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
      FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
    }
    namenodeStorageChecksum = checksumContents(
        NAME_NODE, new File(namenodeStorage, "current"));
    datanodeStorageChecksum = checksumContents(
        DATA_NODE, new File(datanodeStorage, "current"));
  }
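
  // Illustrative usage sketch (not part of the original file): a DFS upgrade
  // test would typically call initialize() once, derive a Configuration, and
  // then clone the master storage into per-test directories. Only methods and
  // configuration keys that appear in this class are used here.
  //
  //   UpgradeUtilities.initialize();
  //   Configuration conf = UpgradeUtilities.initializeStorageStateConf(2, null);
  //   String[] nameDirs = conf.getStrings("dfs.name.dir");
  //   File[] dirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameDirs, "current");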

  // Private helper that writes the given buffer to a file on the given
  // file system, using replication 1 and a 1024-byte block size.
  private static void writeFile(FileSystem fs, Path path, byte[] buffer,
                                int bufferSize) throws IOException {
    OutputStream out = fs.create(path, true, bufferSize, (short) 1, 1024);
    out.write(buffer, 0, bufferSize);
    out.close();
  }

  /**
   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
   * directory entries. Also initialize dfs.blockreport.intervalMsec.
   */
  public static Configuration initializeStorageStateConf(int numDirs,
                                                         Configuration conf) {
    StringBuffer nameNodeDirs =
      new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
    StringBuffer dataNodeDirs =
      new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
    for (int i = 2; i <= numDirs; i++) {
      nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name" + i));
      dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data" + i));
    }
    if (conf == null) {
      conf = new Configuration();
    }
    conf.set("dfs.name.dir", nameNodeDirs.toString());
    conf.set("dfs.data.dir", dataNodeDirs.toString());
    conf.setInt("dfs.blockreport.intervalMsec", 10000);
    return conf;
  }
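
  // Example (illustrative): with numDirs == 2 and test.build.data left at its
  // default of /tmp, the returned configuration contains comma-separated lists:
  //   dfs.name.dir = /tmp/name1,/tmp/name2
  //   dfs.data.dir = /tmp/data1,/tmp/data2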

  /**
   * Create empty directories.  If a specified directory already exists
   * then it is first removed.
   */
  public static void createEmptyDirs(String[] dirs) throws IOException {
    for (String d : dirs) {
      File dir = new File(d);
      if (dir.exists()) {
        FileUtil.fullyDelete(dir);
      }
      dir.mkdirs();
    }
  }

  /**
   * Return the checksum for the singleton master storage directory
   * of the given node type.
   */
  public static long checksumMasterContents(NodeType nodeType) throws IOException {
    if (nodeType == NAME_NODE) {
      return namenodeStorageChecksum;
    } else {
      return datanodeStorageChecksum;
    }
  }

  /**
   * Compute the checksum of all the files in the specified directory.
   * The contents of subdirectories are not included. This method provides
   * an easy way to ensure equality between the contents of two directories.
   *
   * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
   *    This is because this file is changed every time
   *    the Datanode is started.
   * @param dir must be a directory. Subdirectories are ignored.
   *
   * @throws IllegalArgumentException if the specified directory is not a directory
   * @throws IOException if an IOException occurs while reading the files
   * @return the computed checksum value
   */
  public static long checksumContents(NodeType nodeType, File dir) throws IOException {
    if (!dir.isDirectory()) {
      throw new IllegalArgumentException(
          "Given argument is not a directory: " + dir);
    }
    // sort so the checksum does not depend on directory-listing order
    File[] list = dir.listFiles();
    Arrays.sort(list);
    CRC32 checksum = new CRC32();
    for (int i = 0; i < list.length; i++) {
      if (!list[i].isFile()) {
        continue;
      }
      // skip the VERSION file for DataNodes
      if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
        continue;
      }
      FileInputStream fis = null;
      try {
        fis = new FileInputStream(list[i]);
        byte[] buffer = new byte[1024];
        int bytesRead;
        while ((bytesRead = fis.read(buffer)) != -1) {
          checksum.update(buffer, 0, bytesRead);
        }
      } finally {
        if (fis != null) {
          fis.close();
        }
      }
    }
    return checksum.getValue();
  }
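
  // Usage sketch (illustrative; JUnit's assertEquals assumed): given storage
  // directories cloned with createStorageDirs(), here called dirs, a test can
  // verify each clone is bit-identical to the master by comparing checksums.
  //
  //   for (File dir : dirs) {
  //     assertEquals(UpgradeUtilities.checksumMasterContents(NAME_NODE),
  //                  UpgradeUtilities.checksumContents(NAME_NODE, dir));
  //   }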

  /**
   * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
   * of a populated DFS filesystem.
   *
   * This method creates and populates the directory specified by
   * <code>parent/dirName</code>, for each parent directory.
   * The contents of the new directories will be
   * appropriate for the given node type.  If the directory does not
   * exist, it will be created.  If the directory already exists, it
   * will first be deleted.
   *
   * By default, a singleton master populated storage
   * directory is created for a Namenode (contains edits, fsimage,
   * version, and time files) and a Datanode (contains version and
   * block files).  These directories are then
   * copied by this method to create new storage
   * directories of the appropriate type (Namenode or Datanode).
   *
   * @return the array of created directories
   */
  public static File[] createStorageDirs(NodeType nodeType, String[] parents,
                                         String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
      File newDir = new File(parents[i], dirName);
      createEmptyDirs(new String[] {newDir.toString()});
      LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
      switch (nodeType) {
      case NAME_NODE:
        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                                new Path(newDir.toString()),
                                false);
        Path newImgDir = new Path(newDir.getParent(), "image");
        if (!localFS.exists(newImgDir))
          localFS.copyToLocalFile(
              new Path(namenodeStorage.toString(), "image"),
              newImgDir,
              false);
        break;
      case DATA_NODE:
        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                                new Path(newDir.toString()),
                                false);
        Path newStorageFile = new Path(newDir.getParent(), "storage");
        if (!localFS.exists(newStorageFile))
          localFS.copyToLocalFile(
              new Path(datanodeStorage.toString(), "storage"),
              newStorageFile,
              false);
        break;
      }
      retVal[i] = newDir;
    }
    return retVal;
  }
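
  // Example (illustrative): given parents {"/tmp/name1", "/tmp/name2"}, the call
  //   createStorageDirs(NAME_NODE, parents, "current")
  // returns {/tmp/name1/current, /tmp/name2/current}, each holding a copy of
  // the master Namenode "current" directory (plus a sibling "image" directory).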

  /**
   * Create a <code>VERSION</code> file inside each of the specified parent
   * directories.  If such a file already exists, it will be overwritten.
   * The given version string will be written to the file as the layout
   * version. None of the parameters may be null.
   *
   * @param nodeType the type of node that owns the storage directories
   * @param parent the parent directories in which to create the files
   * @param version the layout version and other storage info to record
   *
   * @return the created version files
   */
  public static File[] createVersionFile(NodeType nodeType, File[] parent,
                                         StorageInfo version) throws IOException {
    Storage storage = null;
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
      File versionFile = new File(parent[i], "VERSION");
      FileUtil.fullyDelete(versionFile);
      switch (nodeType) {
      case NAME_NODE:
        storage = new FSImage(version);
        break;
      case DATA_NODE:
        storage = new DataStorage(version, "doNotCare");
        break;
      }
      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
      sd.write(versionFile);
      versionFiles[i] = versionFile;
    }
    return versionFiles;
  }
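
  // Usage sketch (illustrative; the layout-version constant is arbitrary):
  // a version-skew test can stamp cloned directories, here called dirs, with a
  // different layout version and assert that the node refuses to start on them.
  //
  //   StorageInfo oldVersion = new StorageInfo(
  //       -1,                                            // layoutVersion
  //       UpgradeUtilities.getCurrentNamespaceID(null),  // namespaceID
  //       UpgradeUtilities.getCurrentFsscTime(null));    // cTime
  //   UpgradeUtilities.createVersionFile(NAME_NODE, dirs, oldVersion);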

  /**
   * Corrupt the specified file.  Some random bytes within the file
   * will be changed to some random values.
   *
   * @throws IllegalArgumentException if the given file is not a file
   * @throws IOException if an IOException occurs while reading or writing the file
   */
  public static void corruptFile(File file) throws IOException {
    if (!file.isFile()) {
      throw new IllegalArgumentException(
          "Given argument is not a file: " + file);
    }
    RandomAccessFile raf = new RandomAccessFile(file, "rws");
    Random random = new Random();
    for (long i = 0; i < raf.length(); i++) {
      raf.seek(i);
      if (random.nextBoolean()) {
        raf.writeByte(random.nextInt());
      }
    }
    raf.close();
  }
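
  // Usage sketch (illustrative): corrupting the copied VERSION file of a cloned
  // storage directory lets a test check that storage-consistency validation
  // rejects the directory.
  //
  //   UpgradeUtilities.corruptFile(new File(dirs[0], "VERSION"));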

  /**
   * Return the layout version inherent in the current version
   * of the Namenode, whether it is running or not.
   */
  public static int getCurrentLayoutVersion() {
    return FSConstants.LAYOUT_VERSION;
  }

  /**
   * Return the namespace ID inherent in the currently running
   * Namenode.  If no Namenode is running, return the namespace ID of
   * the master Namenode storage directory.
   *
   * The UpgradeUtilities.initialize() method must be called once before
   * calling this method.
   */
  public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNode().versionRequest().getNamespaceID();
    }
    return namenodeStorageNamespaceID;
  }

  /**
   * Return the File System State Creation Timestamp (FSSCTime) inherent
   * in the currently running Namenode.  If no Namenode is running,
   * return the FSSCTime of the master Namenode storage directory.
   *
   * The UpgradeUtilities.initialize() method must be called once before
   * calling this method.
   */
  public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException {
    if (cluster != null) {
      return cluster.getNameNode().versionRequest().getCTime();
    }
    return namenodeStorageFsscTime;
  }
}