source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @ 120

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import junit.framework.TestCase;
import java.io.*;
import java.util.Collection;
import java.util.List;
import java.util.Iterator;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.ErrorSimulator;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

/**
 * This class tests the creation and validation of a checkpoint.
 */
public class TestCheckpoint extends TestCase {
  static final long seed = 0xDEADBEEFL;
  static final int blockSize = 4096;
  static final int fileSize = 8192;
  static final int numDatanodes = 3;
  short replication = 3;

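  /**
   * Write {@code fileSize} bytes of seeded pseudo-random data (so runs are
   * reproducible) to the given path with the requested replication and
   * {@code blockSize}-byte blocks.
   */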
  private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                            (short)repl, (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

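  /**
   * Verify that the file exists with the expected replication factor.
   */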
  private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    assertTrue(fileSys.exists(name));
    int replication = fileSys.getFileStatus(name).getReplication();
    assertEquals("replication for " + name, repl, replication);
    // We should probably test more of the file properties.
  }

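  /**
   * Delete the file and verify that it is gone.
   */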
  private void cleanupFile(FileSystem fileSys, Path name)
    throws IOException {
    assertTrue(fileSys.exists(name));
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  /**
   * Put back the old name dir.
   */
  private void resurrectNameDir(File namedir) 
    throws IOException {
    String parentdir = namedir.getParent();
    String name = namedir.getName();
    File oldname =  new File(parentdir, name + ".old");
    if (!oldname.renameTo(namedir)) {
      fail("Cannot rename " + oldname + " back to " + namedir);
    }
  }

  /**
   * Remove one name dir by renaming it aside.
   */
  private void removeOneNameDir(File namedir) 
    throws IOException {
    String parentdir = namedir.getParent();
    String name = namedir.getName();
    File newname =  new File(parentdir, name + ".old");
    if (!namedir.renameTo(newname)) {
      fail("Cannot rename " + namedir + " to " + newname);
    }
  }

  /*
   * Verify that the namenode does not start up if one name dir is bad.
   */
  private void testNamedirError(Configuration conf, Collection<File> namedirs) 
    throws IOException {
    System.out.println("Starting testNamedirError");
    MiniDFSCluster cluster = null;

    if (namedirs.size() <= 1) {
      return;
    }

    //
    // Remove one namedir & restart the cluster. This should fail.
    //
    File first = namedirs.iterator().next();
    removeOneNameDir(first);
    boolean started = true;
    try {
      cluster = new MiniDFSCluster(conf, 0, false, null);
      cluster.shutdown();
    } catch (Throwable t) {
      started = false; // expected: startup must fail with a bad name dir
    }
    assertFalse("NameNode started despite a missing name dir", started);
    resurrectNameDir(first); // put back namedir
  }

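  // ErrorSimulator fault-injection points exercised below (the slots are
  // allocated by testCheckpoint() via initializeErrorSimulationEvent(3)):
  //   0 - checkpoint fails after the edit log has been rolled
  //   1 - checkpoint fails after the new fsimage has been uploaded
  //   2 - transfer of the merged image back to the namenode fails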

  /*
   * Simulate the namenode crashing after rolling the edit log.
   */
  private void testSecondaryNamenodeError1(Configuration conf)
    throws IOException {
    System.out.println("Starting testSecondaryNamenodeError 1");
    Path file1 = new Path("checkpointxx.dat");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                false, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    try {
      assertTrue(!fileSys.exists(file1));
      //
      // Make the checkpoint fail after rolling the edit log.
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.setErrorSimulation(0);

      try {
        secondary.doCheckpoint();  // this should fail
        fail("Checkpoint succeeded unexpectedly");
      } catch (IOException e) {
        // expected
      }
      ErrorSimulator.clearErrorSimulation(0);
      secondary.shutdown();

      //
      // Create a new file
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that the file exists.
    // Then take another checkpoint to verify that the
    // namenode restart accounted for the rolled edit logs.
    //
    System.out.println("Starting testSecondaryNamenodeError 2");
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    // Also check that the edits file is empty here
    // and that temporary checkpoint files are gone.
    FSImage image = cluster.getNameNode().getFSImage();
    for (Iterator<StorageDirectory> it = 
             image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
      StorageDirectory sd = it.next();
      assertFalse(FSImage.getImageFile(sd, NameNodeFile.IMAGE_NEW).exists());
    }
    for (Iterator<StorageDirectory> it = 
            image.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      StorageDirectory sd = it.next();
      assertFalse(image.getEditNewFile(sd).exists());
      File edits = image.getEditFile(sd);
      assertTrue(edits.exists()); // edits should exist and be empty
      long editsLen = edits.length();
      // an "empty" edits file still holds the 4-byte layout-version stamp
      assertTrue(editsLen == Integer.SIZE/Byte.SIZE);
    }

    fileSys = cluster.getFileSystem();
    try {
      checkFile(fileSys, file1, replication);
      cleanupFile(fileSys, file1);
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  /*
   * Simulate a namenode crash after uploading the new image.
   */
  private void testSecondaryNamenodeError2(Configuration conf)
    throws IOException {
    System.out.println("Starting testSecondaryNamenodeError 21");
    Path file1 = new Path("checkpointyy.dat");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                false, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    try {
      assertTrue(!fileSys.exists(file1));
      //
      // Make the checkpoint fail after uploading the new fsimage.
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.setErrorSimulation(1);

      try {
        secondary.doCheckpoint();  // this should fail
        fail("Checkpoint succeeded unexpectedly");
      } catch (IOException e) {
        // expected
      }
      ErrorSimulator.clearErrorSimulation(1);
      secondary.shutdown();

      //
      // Create a new file
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that the file exists.
    // Then take another checkpoint to verify that the
    // namenode restart accounted for the rolled edit logs.
    //
    System.out.println("Starting testSecondaryNamenodeError 22");
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    try {
      checkFile(fileSys, file1, replication);
      cleanupFile(fileSys, file1);
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  /*
   * Simulate a secondary namenode crash after rolling the edit log.
   */
  private void testSecondaryNamenodeError3(Configuration conf)
    throws IOException {
    System.out.println("Starting testSecondaryNamenodeError 31");
    Path file1 = new Path("checkpointzz.dat");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                false, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    try {
      assertTrue(!fileSys.exists(file1));
      //
      // Make the checkpoint fail after rolling the edit log.
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.setErrorSimulation(0);

      try {
        secondary.doCheckpoint();  // this should fail
        fail("Checkpoint succeeded unexpectedly");
      } catch (IOException e) {
        // expected
      }
      ErrorSimulator.clearErrorSimulation(0);
      secondary.shutdown(); // secondary namenode crash!

      //
      // Start a new instance of the secondary and verify that
      // a new rollEditLog succeeds in spite of the fact that
      // edits.new already exists.
      //
      secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();  // this should work correctly
      secondary.shutdown();

      //
      // Create a new file
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that the file exists.
    // Then take another checkpoint to verify that the
    // namenode restart accounted for the twice-rolled edit logs.
    //
    System.out.println("Starting testSecondaryNamenodeError 32");
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    try {
      checkFile(fileSys, file1, replication);
      cleanupFile(fileSys, file1);
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  /**
   * Simulate a secondary namenode failure to transfer the image
   * back to the name-node; the simulated error truncates the
   * primary fsimage file.
   */
  void testSecondaryFailsToReturnImage(Configuration conf)
    throws IOException {
    System.out.println("Starting testSecondaryFailsToReturnImage");
    Path file1 = new Path("checkpointRI.dat");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
                                                false, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    FSImage image = cluster.getNameNode().getFSImage();
    try {
      assertTrue(!fileSys.exists(file1));
      StorageDirectory sd = null;
      for (Iterator<StorageDirectory> it = 
                image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();)
         sd = it.next();
      assertTrue(sd != null);
      long fsimageLength = FSImage.getImageFile(sd, NameNodeFile.IMAGE).length();
      //
      // Make the checkpoint
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      ErrorSimulator.setErrorSimulation(2);

      try {
        secondary.doCheckpoint();  // this should fail
        fail("Checkpoint succeeded unexpectedly");
      } catch (IOException e) {
        System.out.println("testSecondaryFailsToReturnImage: doCheckpoint() " +
            "failed predictably - " + e);
      }
      ErrorSimulator.clearErrorSimulation(2);

      // Verify that image file sizes did not change.
      for (Iterator<StorageDirectory> it = 
              image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
        assertTrue(FSImage.getImageFile(it.next(), 
                                NameNodeFile.IMAGE).length() == fsimageLength);
      }

      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  /**
   * Test different startup scenarios.
   * <p><ol>
   * <li> Start of primary name-node in secondary directory must succeed.
   * <li> Start of secondary node when the primary is already running in
   *      this directory must fail.
   * <li> Start of primary name-node if secondary node is already running in
   *      this directory must fail.
   * <li> Start of two secondary nodes in the same directory must fail.
   * <li> Import of a checkpoint must fail if the primary
   * directory contains a valid image.
   * <li> Import of the secondary image directory must succeed if the primary
   * directory does not exist.
   * <li> Recover a failed checkpoint for the secondary node.
   * <li> Complete a failed checkpoint for the secondary node.
   * </ol>
   */
  void testStartup(Configuration conf) throws IOException {
    System.out.println("Startup of the name-node in the checkpoint directory.");
    String primaryDirs = conf.get("dfs.name.dir");
    String primaryEditsDirs = conf.get("dfs.name.edits.dir");
    String checkpointDirs = conf.get("fs.checkpoint.dir");
    String checkpointEditsDirs = conf.get("fs.checkpoint.edits.dir");
    NameNode nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
                                 StartupOption.REGULAR);

    // Starting secondary node in the same directory as the primary
    System.out.println("Startup of secondary in the same dir as the primary.");
    SecondaryNameNode secondary = null;
    try {
      secondary = startSecondaryNameNode(conf);
      assertFalse(secondary.getFSImage().isLockSupported(0));
      secondary.shutdown();
    } catch (IOException e) { // expected to fail
      assertTrue(secondary == null);
    }
    nn.stop(); nn = null;

    // Starting primary node in the same directory as the secondary
    System.out.println("Startup of primary in the same dir as the secondary.");
    // secondary won't start without primary
    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                        StartupOption.REGULAR);
    boolean succeed = false;
    do {
      try {
        secondary = startSecondaryNameNode(conf);
        succeed = true;
      } catch(IOException ie) { // keep trying
        System.out.println("Try again: " + ie.getLocalizedMessage());
      }
    } while(!succeed);
    nn.stop(); nn = null;
    try {
      nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
                          StartupOption.REGULAR);
      assertFalse(nn.getFSImage().isLockSupported(0));
      nn.stop(); nn = null;
    } catch (IOException e) { // expected to fail
      assertTrue(nn == null);
    }

    // Try another secondary in the same directory
    System.out.println("Startup of two secondaries in the same dir.");
    // secondary won't start without primary
    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                        StartupOption.REGULAR);
    SecondaryNameNode secondary2 = null;
    try {
      secondary2 = startSecondaryNameNode(conf);
      assertFalse(secondary2.getFSImage().isLockSupported(0));
      secondary2.shutdown();
    } catch (IOException e) { // expected to fail
      assertTrue(secondary2 == null);
    }
    nn.stop(); nn = null;
    secondary.shutdown();

    // Import a checkpoint with an existing primary image.
    System.out.println("Import a checkpoint with existing primary image.");
    try {
      nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                          StartupOption.IMPORT);
      fail("Import should fail while a valid primary image exists");
    } catch (IOException e) { // expected to fail
      assertTrue(nn == null);
    }

    // Remove the current image and import a checkpoint.
    System.out.println("Remove current image and import a checkpoint.");
    List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(conf);
    List<File> nameEditsDirs = (List<File>)FSNamesystem.
                                  getNamespaceEditsDirs(conf);
    long fsimageLength = new File(new File(nameDirs.get(0), "current"), 
                                        NameNodeFile.IMAGE.getName()).length();
    for(File dir : nameDirs) {
      if(dir.exists())
        if(!(FileUtil.fullyDelete(dir)))
          throw new IOException("Cannot remove directory: " + dir);
      if (!dir.mkdirs())
        throw new IOException("Cannot create directory " + dir);
    }

    for(File dir : nameEditsDirs) {
      if(dir.exists())
        if(!(FileUtil.fullyDelete(dir)))
          throw new IOException("Cannot remove directory: " + dir);
      if (!dir.mkdirs())
        throw new IOException("Cannot create directory " + dir);
    }


    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                        StartupOption.IMPORT);
    // Verify that image file sizes did not change.
    FSImage image = nn.getFSImage();
    for (Iterator<StorageDirectory> it = 
            image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
      assertTrue(FSImage.getImageFile(it.next(), 
                          NameNodeFile.IMAGE).length() == fsimageLength);
    }
    nn.stop();

    // Recover a failed checkpoint: renaming "current" to
    // "lastcheckpoint.tmp" simulates a secondary that crashed mid-checkpoint.
    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
                        StartupOption.REGULAR);
    Collection<File> secondaryDirs = FSImage.getCheckpointDirs(conf, null);
    for(File dir : secondaryDirs) {
      Storage.rename(new File(dir, "current"), 
                     new File(dir, "lastcheckpoint.tmp"));
    }
    secondary = startSecondaryNameNode(conf);
    secondary.shutdown();
    for(File dir : secondaryDirs) {
      assertTrue(new File(dir, "current").exists()); 
      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
    }

    // Complete a failed checkpoint: a leftover "lastcheckpoint.tmp"
    // next to "current" simulates a crash after the new image was saved.
    for(File dir : secondaryDirs) {
      Storage.rename(new File(dir, "previous.checkpoint"), 
                     new File(dir, "lastcheckpoint.tmp"));
    }
    secondary = startSecondaryNameNode(conf);
    secondary.shutdown();
    for(File dir : secondaryDirs) {
      assertTrue(new File(dir, "current").exists()); 
      assertTrue(new File(dir, "previous.checkpoint").exists()); 
      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
    }
    nn.stop(); nn = null;

    // Check that everything starts ok now.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    cluster.shutdown();
  }

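  /**
   * Start a name-node on ephemeral ports with the given image and edits
   * directories and startup option; the node is expected to come up
   * in safe mode.
   */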
  NameNode startNameNode( Configuration conf,
                          String imageDirs,
                          String editsDirs,
                          StartupOption start) throws IOException {
    conf.set("fs.default.name", "hdfs://localhost:0");
    conf.set("dfs.http.address", "0.0.0.0:0"); 
    conf.set("dfs.name.dir", imageDirs);
    conf.set("dfs.name.edits.dir", editsDirs);
    String[] args = new String[]{start.getName()};
    NameNode nn = NameNode.createNameNode(args, conf);
    assertTrue(nn.isInSafeMode());
    return nn;
  }

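  /**
   * Start a secondary name-node on an ephemeral HTTP port.
   */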
  SecondaryNameNode startSecondaryNameNode(Configuration conf
                                          ) throws IOException {
    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
    return new SecondaryNameNode(conf);
  }

  /**
   * Tests checkpointing in HDFS.
   */
  public void testCheckpoint() throws IOException {
    Path file1 = new Path("checkpoint.dat");
    Path file2 = new Path("checkpoint2.dat");
    Collection<File> namedirs = null;

    Configuration conf = new Configuration();
    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
    replication = (short)conf.getInt("dfs.replication", 3); 
    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();

    try {
      //
      // verify that 'format' really blew away all pre-existing files
      //
      assertTrue(!fileSys.exists(file1));
      assertTrue(!fileSys.exists(file2));
      namedirs = cluster.getNameDirs();

      //
      // Create file1
      //
      writeFile(fileSys, file1, replication);
      checkFile(fileSys, file1, replication);

      //
      // Take a checkpoint
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      // allocate the three error-simulation slots used by the tests below
      ErrorSimulator.initializeErrorSimulationEvent(3);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that file1 still exists.
    //
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    try {
      // check that file1 still exists
      checkFile(fileSys, file1, replication);
      cleanupFile(fileSys, file1);

      // create new file file2
      writeFile(fileSys, file2, replication);
      checkFile(fileSys, file2, replication);

      //
      // Take a checkpoint
      //
      SecondaryNameNode secondary = startSecondaryNameNode(conf);
      secondary.doCheckpoint();
      secondary.shutdown();
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    //
    // Restart the cluster and verify that file2 exists and
    // file1 does not.
    //
    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
    cluster.waitActive();
    fileSys = cluster.getFileSystem();

    assertTrue(!fileSys.exists(file1));

    try {
      // verify that file2 exists
      checkFile(fileSys, file2, replication);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }

    // file2 is left behind.

    testSecondaryNamenodeError1(conf);
    testSecondaryNamenodeError2(conf);
    testSecondaryNamenodeError3(conf);
    testNamedirError(conf, namedirs);
    testSecondaryFailsToReturnImage(conf);
    testStartup(conf);
  }

  /**
   * Tests saving the namespace.
   */
  public void testSaveNamespace() throws IOException {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());

      // Saving the image without safe mode should fail
      DFSAdmin admin = new DFSAdmin(conf);
      String[] args = new String[]{"-saveNamespace"};
      try {
        admin.run(args);
      } catch(IOException eIO) {
        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
      } catch(Exception e) {
        throw new IOException(e);
      }
      // create new file
      Path file = new Path("namespace.dat");
      writeFile(fs, file, replication);
      checkFile(fs, file, replication);
      // verify that the edits file is NOT empty
      Collection<File> editsDirs = cluster.getNameEditsDirs();
      for(File ed : editsDirs) {
        assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
      }

      // Saving the image in safe mode should succeed
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      try {
        admin.run(args);
      } catch(Exception e) {
        throw new IOException(e);
      }
      // verify that the edits file is empty except for the
      // 4-byte layout-version stamp
      for(File ed : editsDirs) {
        assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
      }

      // restart the cluster and verify that the file exists
      cluster.shutdown();
      cluster = null;

      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
      cluster.waitActive();
      fs = (DistributedFileSystem)(cluster.getFileSystem());
      checkFile(fs, file, replication);
    } finally {
      if(fs != null) fs.close();
      if(cluster != null) cluster.shutdown();
    }
  }
}