source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 10.4 KB
Line 
1package org.apache.hadoop.hdfs.server.namenode;
2
3import java.io.File;
4import java.io.IOException;
5import java.util.Iterator;
6import java.util.List;
7import java.util.Random;
8
9import junit.framework.TestCase;
10
11import org.apache.commons.logging.Log;
12import org.apache.commons.logging.LogFactory;
13import org.apache.hadoop.conf.Configuration;
14import org.apache.hadoop.fs.FSDataOutputStream;
15import org.apache.hadoop.fs.FileSystem;
16import org.apache.hadoop.fs.FileUtil;
17import org.apache.hadoop.fs.Path;
18import org.apache.hadoop.hdfs.MiniDFSCluster;
19import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
20import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
21import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
22import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
23import org.apache.hadoop.util.StringUtils;
24
25/**
26 * Startup and checkpoint tests
27 *
28 */
29public class TestStartup extends TestCase {
30  public static final String NAME_NODE_HOST = "localhost:";
31  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
32  private static final Log LOG =
33    LogFactory.getLog(TestStartup.class.getName());
34  private Configuration config;
35  private File hdfsDir=null;
36  static final long seed = 0xAAAAEEFL;
37  static final int blockSize = 4096;
38  static final int fileSize = 8192;
39  private long editsLength=0, fsimageLength=0;
40
41
42  private void writeFile(FileSystem fileSys, Path name, int repl)
43  throws IOException {
44    FSDataOutputStream stm = fileSys.create(name, true,
45        fileSys.getConf().getInt("io.file.buffer.size", 4096),
46        (short)repl, (long)blockSize);
47    byte[] buffer = new byte[fileSize];
48    Random rand = new Random(seed);
49    rand.nextBytes(buffer);
50    stm.write(buffer);
51    stm.close();
52  }
53
54
55  protected void setUp() throws Exception {
56    config = new Configuration();
57    String baseDir = System.getProperty("test.build.data", "/tmp");
58
59    hdfsDir = new File(baseDir, "dfs");
60    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
61      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
62    }
63    LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
64    config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
65    config.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
66
67    config.set("fs.checkpoint.dir",new File(hdfsDir, "secondary").getPath());
68    //config.set("fs.default.name", "hdfs://"+ NAME_NODE_HOST + "0");
69   
70    FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
71  }
72
73  /**
74   * clean up
75   */
76  public void tearDown() throws Exception {
77    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
78      throw new IOException("Could not delete hdfs directory in tearDown '" + hdfsDir + "'");
79    }   
80  }
81
82   /**
83   * start MiniDFScluster, create a file (to create edits) and do a checkpoint 
84   * @throws IOException
85   */
86  public void createCheckPoint() throws IOException {
87    LOG.info("--starting mini cluster");
88    // manage dirs parameter set to false
89    MiniDFSCluster cluster = null;
90    SecondaryNameNode sn = null;
91   
92    try {
93      cluster = new MiniDFSCluster(0, config, 1, true, false, false,  null, null, null, null);
94      cluster.waitActive();
95
96      LOG.info("--starting Secondary Node");
97
98      // start secondary node
99      sn = new SecondaryNameNode(config);
100      assertNotNull(sn);
101
102      // create a file
103      FileSystem fileSys = cluster.getFileSystem();
104      Path file1 = new Path("t1");
105      this.writeFile(fileSys, file1, 1);
106
107      LOG.info("--doing checkpoint");
108      sn.doCheckpoint();  // this shouldn't fail
109      LOG.info("--done checkpoint");
110    } catch (IOException e) {
111      fail(StringUtils.stringifyException(e));
112      System.err.println("checkpoint failed");
113      throw e;
114    }  finally {
115      if(sn!=null)
116        sn.shutdown();
117      if(cluster!=null) 
118        cluster.shutdown();
119      LOG.info("--file t1 created, cluster shutdown");
120    }
121  }
122
123  /*
124   * corrupt files by removing and recreating the directory
125   */
126  private void corruptNameNodeFiles() throws IOException {
127    // now corrupt/delete the directrory
128    List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(config);
129    List<File> nameEditsDirs = (List<File>)FSNamesystem.getNamespaceEditsDirs(config);
130
131    // get name dir and its length, then delete and recreate the directory
132    File dir = nameDirs.get(0); // has only one
133    this.fsimageLength = new File(new File(dir, "current"), 
134        NameNodeFile.IMAGE.getName()).length();
135
136    if(dir.exists() && !(FileUtil.fullyDelete(dir)))
137      throw new IOException("Cannot remove directory: " + dir);
138
139    LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength);
140
141    if (!dir.mkdirs())
142      throw new IOException("Cannot create directory " + dir);
143
144    dir = nameEditsDirs.get(0); //has only one
145
146    this.editsLength = new File(new File(dir, "current"), 
147        NameNodeFile.EDITS.getName()).length();
148
149    if(dir.exists() && !(FileUtil.fullyDelete(dir)))
150      throw new IOException("Cannot remove directory: " + dir);
151    if (!dir.mkdirs())
152      throw new IOException("Cannot create directory " + dir);
153
154    LOG.info("--removed dir and recreated "+dir + ";len was ="+ this.editsLength);
155
156
157  }
158
159  /**
160   * start with -importCheckpoint option and verify that the files are in separate directories and of the right length
161   * @throws IOException
162   */
163  private void checkNameNodeFiles() throws IOException{
164
165    // start namenode with import option
166    LOG.info("-- about to start DFS cluster");
167    MiniDFSCluster cluster = null;
168    try {
169      cluster = new MiniDFSCluster(0, config, 1, false, false, false,  StartupOption.IMPORT, null, null, null);
170      cluster.waitActive();
171      LOG.info("--NN started with checkpoint option");
172      NameNode nn = cluster.getNameNode();
173      assertNotNull(nn);       
174      // Verify that image file sizes did not change.
175      FSImage image = nn.getFSImage();
176      verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
177    } finally {
178      if(cluster != null)
179        cluster.shutdown();
180    }
181  }
182
183  /**
184   * verify that edits log and fsimage are in different directories and of a correct size
185   */
186  private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
187    StorageDirectory sd =null;
188    for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
189      sd = it.next();
190
191      if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
192        File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
193        LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
194        assertEquals(expectedImgSize, imf.length());   
195      } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
196        File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
197        LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length()  + "; expected = " + expectedEditsSize);
198        assertEquals(expectedEditsSize, edf.length()); 
199      } else {
200        fail("Image/Edits directories are not different");
201      }
202    }
203
204  }
205  /**
206   * secnn-6
207   * checkpoint for edits and image is the same directory
208   * @throws IOException
209   */
210  public void testChkpointStartup2() throws IOException{
211    LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
212    // different name dirs
213    config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
214    config.set("dfs.name.edits.dir", new File(hdfsDir, "edits").getPath());
215    // same checkpoint dirs
216    config.set("fs.checkpoint.edits.dir", new File(hdfsDir, "chkpt").getPath());
217    config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
218
219    createCheckPoint();
220
221    corruptNameNodeFiles();
222    checkNameNodeFiles();
223
224  }
225
226  /**
227   * seccn-8
228   * checkpoint for edits and image are different directories
229   * @throws IOException
230   */
231  public void testChkpointStartup1() throws IOException{
232    //setUpConfig();
233    LOG.info("--starting testStartup Recovery");
234    // different name dirs
235    config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
236    config.set("dfs.name.edits.dir", new File(hdfsDir, "edits").getPath());
237    // same checkpoint dirs
238    config.set("fs.checkpoint.edits.dir", new File(hdfsDir, "chkpt_edits").getPath());
239    config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
240
241    createCheckPoint();
242    corruptNameNodeFiles();
243    checkNameNodeFiles();
244  }
245
246  /**
247   * secnn-7
248   * secondary node copies fsimage and edits into correct separate directories.
249   * @throws IOException
250   */
251  public void testSNNStartup() throws IOException{
252    //setUpConfig();
253    LOG.info("--starting SecondNN startup test");
254    // different name dirs
255    config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
256    config.set("dfs.name.edits.dir", new File(hdfsDir, "name").getPath());
257    // same checkpoint dirs
258    config.set("fs.checkpoint.edits.dir", new File(hdfsDir, "chkpt_edits").getPath());
259    config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
260
261    LOG.info("--starting NN ");
262    MiniDFSCluster cluster = null;
263    SecondaryNameNode sn = null;
264    NameNode nn = null;
265    try {
266      cluster = new MiniDFSCluster(0, config, 1, true, false, false,  null, null, null, null);
267      cluster.waitActive();
268      nn = cluster.getNameNode();
269      assertNotNull(nn);
270
271      // start secondary node
272      LOG.info("--starting SecondNN");
273      sn = new SecondaryNameNode(config);
274      assertNotNull(sn);
275
276      LOG.info("--doing checkpoint");
277      sn.doCheckpoint();  // this shouldn't fail
278      LOG.info("--done checkpoint");
279
280
281
282      // now verify that image and edits are created in the different directories
283      FSImage image = nn.getFSImage();
284      StorageDirectory sd = image.getStorageDir(0); //only one
285      assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
286      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
287      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
288      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
289      LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());
290
291      FSImage chkpImage = sn.getFSImage();
292      verifyDifferentDirs(chkpImage, imf.length(), edf.length());
293
294    } catch (IOException e) {
295      fail(StringUtils.stringifyException(e));
296      System.err.println("checkpoint failed");
297      throw e;
298    } finally {
299      if(sn!=null)
300        sn.shutdown();
301      if(cluster!=null)
302        cluster.shutdown();
303    }
304  }
305}
Note: See TracBrowser for help on using the repository browser.