source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestModTime.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 6.3 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
import java.net.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;

/**
 * This class tests that HDFS sets and updates the modification times
 * of files and directories correctly.
 * @author Dhruba Borthakur
 */
public class TestModTime extends TestCase {
  static final long seed = 0xDEADBEEFL;
  static final int blockSize = 8192;
  static final int fileSize = 16384;
  static final int numDatanodes = 6;
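  // Note: fileSize is twice blockSize, so each file written by writeFile() spans two HDFS blocks.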

  // Note: these fields are not used anywhere in this test.
  Random myrand = new Random();
  Path hostsFile;
  Path excludeFile;

  private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
    // create and write a file that contains two blocks of data
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                            (short)repl, (long)blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
    assertTrue(fileSys.exists(name));
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  private void printDatanodeReport(DatanodeInfo[] info) {
    System.out.println("-------------------------------------------------");
    for (int i = 0; i < info.length; i++) {
      System.out.println(info[i].getDatanodeReport());
      System.out.println();
    }
  }

  /**
   * Tests modification time in DFS.
   */
  public void testModTime() throws IOException {
    Configuration conf = new Configuration();

    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost",
                                                   cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = numDatanodes - 1;
    assertTrue(fileSys instanceof DistributedFileSystem);

    try {

     //
     // create file and record the mtime of the test file
     //
     System.out.println("Creating testdir1 and testdir1/test1.dat.");
     Path dir1 = new Path("testdir1");
     Path file1 = new Path(dir1, "test1.dat");
     writeFile(fileSys, file1, replicas);
     FileStatus stat = fileSys.getFileStatus(file1);
     long mtime1 = stat.getModificationTime();
     assertTrue(mtime1 != 0);
     //
     // record dir times
     //
     stat = fileSys.getFileStatus(dir1);
     long mdir1 = stat.getModificationTime();

     //
     // create second test file
     //
     System.out.println("Creating testdir1/test2.dat.");
     Path file2 = new Path(dir1, "test2.dat");
     writeFile(fileSys, file2, replicas);
     stat = fileSys.getFileStatus(file2);

     //
     // verify that the modification time of dir1 has been updated:
     // creating test2.dat inside it should make it at least as recent
     // as the previously recorded value.
     //
     stat = fileSys.getFileStatus(dir1);
     assertTrue(stat.getModificationTime() >= mdir1);
     mdir1 = stat.getModificationTime();
     //
     // create another directory
     //
     Path dir2 = (new Path("testdir2/")).makeQualified(fileSys);
     System.out.println("Creating testdir2 " + dir2);
     assertTrue(fileSys.mkdirs(dir2));
     stat = fileSys.getFileStatus(dir2);
     long mdir2 = stat.getModificationTime();
     //
     // rename file1 from testdir1 into testdir2
     //
     Path newfile = new Path(dir2, "testnew.dat");
     System.out.println("Moving " + file1 + " to " + newfile);
     fileSys.rename(file1, newfile);
     //
     // verify that modification time of file1 did not change.
     //
     stat = fileSys.getFileStatus(newfile);
     assertTrue(stat.getModificationTime() == mtime1);
     //
     // verify that the modification times of testdir1 and testdir2
     // have changed.
     //
     stat = fileSys.getFileStatus(dir1);
     assertTrue(stat.getModificationTime() != mdir1);
     mdir1 = stat.getModificationTime();

     stat = fileSys.getFileStatus(dir2);
     assertTrue(stat.getModificationTime() != mdir2);
     mdir2 = stat.getModificationTime();
     //
     // delete newfile
     //
     System.out.println("Deleting testdir2/testnew.dat.");
     assertTrue(fileSys.delete(newfile, true));
     //
     // verify that modification time of testdir1 has not changed.
     //
     stat = fileSys.getFileStatus(dir1);
     assertTrue(stat.getModificationTime() == mdir1);
     //
     // verify that modification time of testdir2 has changed.
     //
     stat = fileSys.getFileStatus(dir2);
     assertTrue(stat.getModificationTime() != mdir2);
     mdir2 = stat.getModificationTime();

     cleanupFile(fileSys, file2);
     cleanupFile(fileSys, dir1);
     cleanupFile(fileSys, dir2);
    } catch (IOException e) {
      info = client.datanodeReport(DatanodeReportType.ALL);
      printDatanodeReport(info);
      throw e;
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }

  // Allows the test to be run directly from the command line, outside a JUnit runner.
  public static void main(String[] args) throws Exception {
    new TestModTime().testModTime();
  }
}
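For completeness, the test can also be driven through the JUnit 3 text runner rather than the ad-hoc main() above. The driver below is a minimal sketch: the class name RunModTime is made up for illustration, and it assumes the compiled Hadoop 0.20.1 test classes and their dependencies are already on the classpath.

import junit.textui.TestRunner;
import org.apache.hadoop.hdfs.TestModTime;

/** Illustrative driver: runs every public test* method of TestModTime and prints a summary. */
public class RunModTime {
  public static void main(String[] args) {
    // TestRunner.run(Class) wraps the TestCase subclass in a TestSuite,
    // executes it, and reports failures and errors on standard output.
    TestRunner.run(TestModTime.class);
  }
}

Within the Hadoop 0.20 source tree, a single test like this is usually run through the build instead (for example, ant test -Dtestcase=TestModTime), which sets up that classpath automatically.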