source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 4.4 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

/**
 * This tests InterDatanodeProtocol for block handling.
 */
public class TestInterDatanodeProtocol extends junit.framework.TestCase {
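  /**
   * Check that the meta-data a datanode reports for block {@code b} over
   * {@link InterDatanodeProtocol} matches the expected block id and length,
   * and, when a scanner is given, the last scan time as well.
   */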
  public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
      DataBlockScanner scanner) throws IOException {
    BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(b);
    assertEquals(b.getBlockId(), metainfo.getBlockId());
    assertEquals(b.getNumBytes(), metainfo.getNumBytes());
    if (scanner != null) {
      assertEquals(scanner.getLastScanTime(b),
          metainfo.getLastScanTime());
    }
  }

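  /**
   * Ask the namenode for the block locations of {@code src} and return the
   * last block of the file.
   */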
  public static LocatedBlock getLastLocatedBlock(
      ClientProtocol namenode, String src
  ) throws IOException {
    //get block info for the last block
    LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    DataNode.LOG.info("blocks.size()=" + blocks.size());
    assertTrue(blocks.size() > 0);

    return blocks.get(blocks.size() - 1);
  }

  /**
   * This test first creates a file, then verifies the block meta-data
   * that a datanode reports for it, then updates the block with new
   * information and verifies the meta-data again.
   */
  public void testBlockMetaDataInfo() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;

    try {
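      //start a three-datanode mini cluster and wait until it is fully up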
      cluster = new MiniDFSCluster(conf, 3, true, null);
      cluster.waitActive();

      //create a file
      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
      String filestr = "/foo";
      Path filepath = new Path(filestr);
      DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
      assertTrue(dfs.getClient().exists(filestr));

      //get block info
      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().namenode, filestr);
      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      assertTrue(datanodeinfo.length > 0);

      //connect to a data node
      InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
          datanodeinfo[0], conf);
      DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
      assertTrue(datanode != null);

      //stop the block scanner so that lastScanTime stays fixed and can be compared
      datanode.blockScannerThread.interrupt();

      //verify BlockMetaDataInfo
      Block b = locatedblock.getBlock();
      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
      checkMetaInfo(b, idp, datanode.blockScanner);

      //verify updateBlock
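      //halving the length and bumping the generation stamp is the kind of
      //change block recovery would make; the datanode should report the
      //updated meta-data afterwards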
      Block newblock = new Block(
          b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
      idp.updateBlock(b, newblock, false);
      checkMetaInfo(newblock, idp, datanode.blockScanner);
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }
  }
}