source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 6.1 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import junit.framework.TestCase;
import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;


/**
 * This class tests that the name-node enforces the configured limit
 * on the total number of namespace objects (inodes and blocks).
 */
public class TestFileLimit extends TestCase {
  static final long seed = 0xDEADBEEFL;
  static final int blockSize = 8192;
  boolean simulatedStorage = false;

  // Each test file is written as a single 1024-byte burst, well under
  // one block, so every file consumes exactly two namespace objects:
  // one inode and one block.

  private static String TEST_ROOT_DIR =
    new Path(System.getProperty("test.build.data","/tmp"))
    .toString().replace(' ', '+');

  //
  // Creates a small test file: a single block holding 1024 random bytes.
  //
  private void createFile(FileSystem fileSys, Path name)
    throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                            (short)1, (long)blockSize);
    byte[] buffer = new byte[1024];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

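  // Poll the namesystem until the total object count (blocks plus
  // inodes) settles at the expected value; deletions and block reports
  // are processed asynchronously, so the counters lag briefly behind
  // the client-visible operations.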
  private void waitForLimit(FSNamesystem namesys, long num)
  {
    // poll until the total object count reaches the expected value
    while (true) {
      long total = namesys.getBlocksTotal() + namesys.dir.totalInodes();
      System.out.println("Waiting for total objects " + total +
                         " to reach " + num);
      if (total == num) {
        break;
      }
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // ignore the interrupt and re-check the count
      }
    }
  }

  /**
   * Test that the name-node enforces the configured limit on the
   * total number of namespace objects.
   */
  public void testFileLimit() throws IOException {
    Configuration conf = new Configuration();
    int maxObjects = 5;
    conf.setLong("dfs.max.objects", maxObjects);
    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
    conf.setInt("dfs.heartbeat.interval", 1);
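    // Frequent heartbeats and block reports make the namesystem's
    // object counts converge quickly. With a limit of 5 objects, the
    // root inode leaves room for exactly two files, since each file
    // costs one inode plus one block.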
    int currentNodes = 0;

    if (simulatedStorage) {
      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    FSNamesystem namesys = FSNamesystem.fsNamesystemObject;
    NameNode namenode = cluster.getNameNode();
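    // FSNamesystem.fsNamesystemObject is the 0.20-era static handle to
    // the live namesystem; it exposes the block and inode counters that
    // waitForLimit polls directly.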
    try {

      //
      // check that / exists
      //
      Path path = new Path("/");
      assertTrue("/ should be a directory",
                 fs.getFileStatus(path).isDir());
      currentNodes = 1;          // root inode

      // Verify that we can create the expected number of files, leaving
      // room for the root "/". Each file takes one inode and one block,
      // so maxObjects/2 files fit alongside the root inode.
      for (int i = 0; i < maxObjects/2; i++) {
        Path file = new Path("/filestatus" + i);
        createFile(fs, file);
        System.out.println("Created file " + file);
        currentNodes += 2;      // two more objects for this creation.
      }

      // verify that creating another file fails
      boolean hitException = false;
      try {
        Path file = new Path("/filestatus");
        createFile(fs, file);
        System.out.println("Created file " + file);
      } catch (IOException e) {
        hitException = true;
      }
      assertTrue("Was able to exceed file limit", hitException);

      // delete one file
      Path file0 = new Path("/filestatus0");
      fs.delete(file0, true);
      System.out.println("Deleted file " + file0);
      currentNodes -= 2;

      // wait for the object count to decrease
      waitForLimit(namesys, currentNodes);

      // now we should be able to create a new file
      createFile(fs, file0);
      System.out.println("Created file " + file0 + " again.");
      currentNodes += 2;

      // delete the file again
      file0 = new Path("/filestatus0");
      fs.delete(file0, true);
      System.out.println("Deleted file " + file0 + " again.");
      currentNodes -= 2;

      // wait for the object count to decrease
      waitForLimit(namesys, currentNodes);

      // create two directories in place of the file that we deleted
      Path dir = new Path("/dir0/dir1");
      fs.mkdirs(dir);
      System.out.println("Created directories " + dir);
      currentNodes += 2;
      waitForLimit(namesys, currentNodes);

      // verify that creating another directory fails
      hitException = false;
      try {
        fs.mkdirs(new Path("dir.fail"));
        System.out.println("Directory creation should not have succeeded.");
      } catch (IOException e) {
        hitException = true;
      }
      assertTrue("Was able to exceed dir limit", hitException);

    } finally {
      fs.close();
      cluster.shutdown();
    }
  }

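  // Re-run the whole scenario with SimulatedFSDataset standing in for
  // real datanode storage; the namespace accounting should be identical.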
  public void testFileLimitSimulated() throws IOException {
    simulatedStorage = true;
    testFileLimit();
    simulatedStorage = false;
  }
}
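
For context, here is a minimal standalone sketch of the same quota behaviour, built only from calls that appear in the test above. The class name FileLimitDemo and the directory paths are hypothetical, and it assumes, as the test demonstrates, that a creation fails with an IOException once the total object count would exceed dfs.max.objects:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical demo driver, not part of the Hadoop source tree.
public class FileLimitDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.setLong("dfs.max.objects", 5);   // cap the namespace at 5 objects
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    try {
      // Each directory costs one inode, and the root already counts as
      // one, so four more directories reach the limit of 5 exactly.
      fs.mkdirs(new Path("/a"));
      fs.mkdirs(new Path("/a/b"));
      fs.mkdirs(new Path("/a/b/c"));
      fs.mkdirs(new Path("/a/b/c/d"));
      try {
        fs.mkdirs(new Path("/a/b/c/d/e"));  // sixth object: expected to fail
        System.out.println("Limit was not enforced!");
      } catch (IOException e) {
        System.out.println("Quota enforced: " + e.getMessage());
      }
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
}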