source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/mapred/TestFileInputFormat.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit project

  • Property svn:executable set to *
File size: 3.4 KB
Line 
1/**
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements.  See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership.  The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License.  You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18package org.apache.hadoop.mapred;
19
20import java.io.DataOutputStream;
21
22import junit.framework.TestCase;
23
24import org.apache.hadoop.fs.BlockLocation;
25import org.apache.hadoop.fs.FileStatus;
26import org.apache.hadoop.fs.FileSystem;
27import org.apache.hadoop.fs.Path;
28import org.apache.hadoop.hdfs.MiniDFSCluster;
29
30public class TestFileInputFormat extends TestCase {
31
32  public void testLocality() throws Exception {
33    JobConf conf = new JobConf();
34    MiniDFSCluster dfs = null;
35    try {
36      dfs = new MiniDFSCluster(conf, 4, true,
37                               new String[]{"/rack0", "/rack0", 
38                                             "/rack1", "/rack1"},
39                               new String[]{"host0", "host1", 
40                                            "host2", "host3"});
41      FileSystem fs = dfs.getFileSystem();
42      System.out.println("FileSystem " + fs.getUri());
43      Path path = new Path("/foo/bar");
44      // create a multi-block file on hdfs
45      DataOutputStream out = fs.create(path, true, 4096, 
46                                       (short) 2, 512, null);
47      for(int i=0; i < 1000; ++i) {
48        out.writeChars("Hello\n");
49      }
50      out.close();
51      System.out.println("Wrote file");
52
53      // split it using a file input format
54      TextInputFormat.addInputPath(conf, path);
55      TextInputFormat inFormat = new TextInputFormat();
56      inFormat.configure(conf);
57      InputSplit[] splits = inFormat.getSplits(conf, 1);
58      FileStatus fileStatus = fs.getFileStatus(path);
59      BlockLocation[] locations = 
60        fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
61      System.out.println("Made splits");
62
63      // make sure that each split is a block and the locations match
64      for(int i=0; i < splits.length; ++i) {
65        FileSplit fileSplit = (FileSplit) splits[i];
66        System.out.println("File split: " + fileSplit);
67        for (String h: fileSplit.getLocations()) {
68          System.out.println("Location: " + h);
69        }
70        System.out.println("Block: " + locations[i]);
71        assertEquals(locations[i].getOffset(), fileSplit.getStart());
72        assertEquals(locations[i].getLength(), fileSplit.getLength());
73        String[] blockLocs = locations[i].getHosts();
74        String[] splitLocs = fileSplit.getLocations();
75        assertEquals(2, blockLocs.length);
76        assertEquals(2, splitLocs.length);
77        assertTrue((blockLocs[0].equals(splitLocs[0]) && 
78                    blockLocs[1].equals(splitLocs[1])) ||
79                   (blockLocs[1].equals(splitLocs[0]) &&
80                    blockLocs[0].equals(splitLocs[1])));
81      }
82    } finally {
83      if (dfs != null) {
84        dfs.shutdown();
85      }
86    }
87  }
88
89}
Note: See TracBrowser for help on using the repository browser.