source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 4.9 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * This class tests if FSOutputSummer works correctly.
 */
public class TestFSOutputSummer extends TestCase {
  private static final long seed = 0xDEADBEEFL;
  private static final int BYTES_PER_CHECKSUM = 10;
  private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM;
  private static final int HALF_CHUNK_SIZE = BYTES_PER_CHECKSUM/2;
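  // FILE_SIZE is one byte short of two full blocks, so the last checksum
  // chunk and the last block are both partial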
  private static final int FILE_SIZE = 2*BLOCK_SIZE-1;
  private static final short NUM_OF_DATANODES = 2;
  private byte[] expected = new byte[FILE_SIZE];
  private byte[] actual = new byte[FILE_SIZE];
  private FileSystem fileSys;

  /* create a file, write all data at once */
  private void writeFile1(Path name) throws Exception {
    FSDataOutputStream stm = fileSys.create(name, true,
               fileSys.getConf().getInt("io.file.buffer.size", 4096),
               NUM_OF_DATANODES, BLOCK_SIZE);
    stm.write(expected);
    stm.close();
    checkFile(name);
    cleanupFile(name);
  }

  /* create a file, write data chunk by chunk */
  private void writeFile2(Path name) throws Exception {
    FSDataOutputStream stm = fileSys.create(name, true,
               fileSys.getConf().getInt("io.file.buffer.size", 4096),
               NUM_OF_DATANODES, BLOCK_SIZE);
    int i=0;
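    // write one full checksum chunk per call while more than one chunk of data remains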
    for( ;i<FILE_SIZE-BYTES_PER_CHECKSUM; i+=BYTES_PER_CHECKSUM) {
      stm.write(expected, i, BYTES_PER_CHECKSUM);
    }
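    // the remaining FILE_SIZE-3*BYTES_PER_CHECKSUM bytes form a partial chunk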
    stm.write(expected, i, FILE_SIZE-3*BYTES_PER_CHECKSUM);
    stm.close();
    checkFile(name);
    cleanupFile(name);
  }

  /* create a file, write data in variable-sized amounts */
  private void writeFile3(Path name) throws Exception {
    FSDataOutputStream stm = fileSys.create(name, true,
        fileSys.getConf().getInt("io.file.buffer.size", 4096),
        NUM_OF_DATANODES, BLOCK_SIZE);
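    // writes start and end at arbitrary offsets, crossing chunk and block boundaries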
    stm.write(expected, 0, HALF_CHUNK_SIZE);
    stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM+2);
    stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+2, 2);
    stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+4, HALF_CHUNK_SIZE);
    stm.write(expected, BLOCK_SIZE+4, BYTES_PER_CHECKSUM-4);
    stm.write(expected, BLOCK_SIZE+BYTES_PER_CHECKSUM,
        FILE_SIZE-3*BYTES_PER_CHECKSUM);
    stm.close();
    checkFile(name);
    cleanupFile(name);
  }
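  /* verify that actual matches expected starting at offset from,
     zeroing actual as it is checked */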
  private void checkAndEraseData(byte[] actual, int from, byte[] expected,
      String message) throws Exception {
    for (int idx = 0; idx < actual.length; idx++) {
      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                        expected[from+idx]+" actual "+actual[idx],
                        expected[from+idx], actual[idx]);
      actual[idx] = 0;
    }
  }

  private void checkFile(Path name) throws Exception {
    FSDataInputStream stm = fileSys.open(name);
    // sanity check: read the whole file back and compare it to the expected data
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
  }

  private void cleanupFile(Path name) throws IOException {
    assertTrue(fileSys.exists(name));
    fileSys.delete(name, true);
    assertTrue(!fileSys.exists(name));
  }

  /**
   * Test write operations on an output stream in DFS.
   */
  public void testFSOutputSummer() throws Exception {
    Configuration conf = new Configuration();
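    // use a small block size and checksum chunk size so the 39-byte test file
    // spans several checksum chunks and two blocks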
    conf.setLong("dfs.block.size", BLOCK_SIZE);
    conf.setInt("io.bytes.per.checksum", BYTES_PER_CHECKSUM);
    conf.set("fs.hdfs.impl",
             "org.apache.hadoop.hdfs.ChecksumDistributedFileSystem");
    MiniDFSCluster cluster = new MiniDFSCluster(
        conf, NUM_OF_DATANODES, true, null);
    fileSys = cluster.getFileSystem();
    try {
      Path file = new Path("try.dat");
      Random rand = new Random(seed);
      rand.nextBytes(expected);
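      // write the same random data with each write pattern and verify the
      // file contents after each one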
      writeFile1(file);
      writeFile2(file);
      writeFile3(file);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }
}