source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 5.6 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
public class TestLeaseRecovery extends junit.framework.TestCase {
  static final int BLOCK_SIZE = 1024;
  static final short REPLICATION_NUM = (short)3;

  static void checkMetaInfo(Block b, InterDatanodeProtocol idp
      ) throws IOException {
    TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
  }

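  /** Returns the smallest of the given values; used to compute the expected
   * block length after the differently-truncated replicas are synchronized. */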
  static int min(Integer... x) {
    int m = x[0];
    for(int i = 1; i < x.length; i++) {
      if (x[i] < m) {
        m = x[i];
      }
    }
    return m;
  }

  /**
   * The following test first creates a file with a few blocks.
   * It randomly truncates the replica of the last block stored in each datanode.
   * Finally, it triggers block synchronization to synchronize all stored
   * replicas of the last block.
   */
  public void testBlockSynchronization() throws Exception {
    final int ORG_FILE_SIZE = 3000;
    Configuration conf = new Configuration();
    conf.setLong("dfs.block.size", BLOCK_SIZE);
    conf.setBoolean("dfs.support.append", true);
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster(conf, 5, true, null);
      cluster.waitActive();

      //create a file
      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
      String filestr = "/foo";
      Path filepath = new Path(filestr);
      DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
      assertTrue(dfs.dfs.exists(filestr));
      DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

      //get block info for the last block
      LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.namenode, filestr);
      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
      assertEquals(REPLICATION_NUM, datanodeinfos.length);

      //connect to data nodes
      InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
      DataNode[] datanodes = new DataNode[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        idps[i] = DataNode.createInterDataNodeProtocolProxy(datanodeinfos[i], conf);
        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
        assertTrue(datanodes[i] != null);
      }

      //verify BlockMetaDataInfo
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        checkMetaInfo(lastblock, idps[i]);
      }

      //setup random block sizes
      int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
      Integer[] newblocksizes = new Integer[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
      }
      DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

      //update blocks with random block sizes
      Block[] newblocks = new Block[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
            lastblock.getGenerationStamp());
        idps[i].updateBlock(lastblock, newblocks[i], false);
        checkMetaInfo(newblocks[i], idps[i]);
      }

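      //reopen the file for append so that the last block is under
      //construction again; block synchronization below then recovers it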
      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
      cluster.getNameNode().append(filestr, dfs.dfs.clientName);

      //block synchronization
      final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
      DataNode.LOG.info("primarydatanodeindex  =" + primarydatanodeindex);
      DataNode primary = datanodes[primarydatanodeindex];
      DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
      primary.recoverBlocks(new Block[]{lastblock}, new DatanodeInfo[][]{datanodeinfos}).join();

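      //after synchronization, every replica should agree on the minimum of
      //the truncated lengths and carry the new namenode generation stamp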
      BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
      int minsize = min(newblocksizes);
      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
      lastblock.setGenerationStamp(currentGS);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);
        assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
        assertEquals(minsize, updatedmetainfo[i].getNumBytes());
        assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
      }
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }
  }
}