source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 3.7 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.IOException;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.Level;

public class TestFileCreationDelete extends junit.framework.TestCase {
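  // Instance initializer: set namenode state-change, lease manager, and
  // namesystem log levels to ALL so the restarts below are easy to trace.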
  {
    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
  }

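  /**
   * Create a file whose parent directory is deleted while the file is still
   * open, restart the cluster so the persisted leases are reloaded, and
   * verify that the deleted file stays deleted while an unrelated open file
   * survives.
   */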
  public void testFileCreationDeleteParent() throws IOException {
    Configuration conf = new Configuration();
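    // Short IPC idle time and heartbeat intervals keep the mini cluster
    // responsive across the restarts below; append support is enabled so
    // the test can sync() streams that are left open.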
    final int MAX_IDLE_TIME = 2000; // 2s
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    conf.setBoolean("dfs.support.append", true);

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = null;
    try {
      cluster.waitActive();
      fs = cluster.getFileSystem();
      final int nnport = cluster.getNameNodePort();

      // create file1.
      Path dir = new Path("/foo");
      Path file1 = new Path(dir, "file1");
      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
      System.out.println("testFileCreationDeleteParent: "
          + "Created file " + file1);
      TestFileCreation.writeFile(stm1, 1000);
      stm1.sync();

      // create file2.
      Path file2 = new Path("/file2");
      FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
      System.out.println("testFileCreationDeleteParent: "
          + "Created file " + file2);
      TestFileCreation.writeFile(stm2, 1000);
      stm2.sync();

      // rm dir
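      // file1 is still open, so the namenode holds an active lease for it
      // when its parent directory is removed.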
      fs.delete(dir, true);

      // restart cluster with the same namenode port as before.
      // This ensures that leases are persisted in fsimage.
      cluster.shutdown();
      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
                                   null, null, null);
      cluster.waitActive();

      // restart cluster yet again. This triggers the code to read in
      // persistent leases from fsimage.
      cluster.shutdown();
      try {Thread.sleep(5000);} catch (InterruptedException e) {}
      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
                                   null, null, null);
      cluster.waitActive();
      fs = cluster.getFileSystem();

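      // file1's parent directory was deleted while file1 was under
      // construction, so replaying the persisted lease must not bring the
      // file back; file2 was never touched and must still exist.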
      assertFalse(fs.exists(file1));
      assertTrue(fs.exists(file2));
    } finally {
      // fs may still be null if the cluster failed to come up.
      if (fs != null) {
        fs.close();
      }
      cluster.shutdown();
    }
  }
}