source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/TestDatanodeReport.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 3.0 KB
Line 
1/**
2* Licensed to the Apache Software Foundation (ASF) under one
3* or more contributor license agreements.  See the NOTICE file
4* distributed with this work for additional information
5* regarding copyright ownership.  The ASF licenses this file
6* to you under the Apache License, Version 2.0 (the
7* "License"); you may not use this file except in compliance
8* with the License.  You may obtain a copy of the License at
9*
10*     http://www.apache.org/licenses/LICENSE-2.0
11*
12* Unless required by applicable law or agreed to in writing, software
13* distributed under the License is distributed on an "AS IS" BASIS,
14* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15* See the License for the specific language governing permissions and
16* limitations under the License.
17*/
18package org.apache.hadoop.hdfs;
19
20import java.net.InetSocketAddress;
21import java.util.ArrayList;
22
23import junit.framework.TestCase;
24import org.apache.hadoop.conf.Configuration;
25
26import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
27import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
28import org.apache.hadoop.hdfs.server.datanode.DataNode;
29
30/**
31 * This test ensures the all types of data node report work correctly.
32 */
33public class TestDatanodeReport extends TestCase {
34  final static private Configuration conf = new Configuration();
35  final static private int NUM_OF_DATANODES = 4;
36   
37  /**
38   * This test attempts to different types of datanode report.
39   */
40  public void testDatanodeReport() throws Exception {
41    conf.setInt(
42        "heartbeat.recheck.interval", 500); // 0.5s
43    MiniDFSCluster cluster = 
44      new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
45    try {
46      //wait until the cluster is up
47      cluster.waitActive();
48
49      InetSocketAddress addr = new InetSocketAddress("localhost",
50          cluster.getNameNodePort());
51      DFSClient client = new DFSClient(addr, conf);
52
53      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
54                   NUM_OF_DATANODES);
55      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
56                   NUM_OF_DATANODES);
57      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
58
59      // bring down one datanode
60      ArrayList<DataNode> datanodes = cluster.getDataNodes();
61      datanodes.remove(datanodes.size()-1).shutdown();
62
63      DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
64      while (nodeInfo.length != 1) {
65        try {
66          Thread.sleep(500);
67        } catch (Exception e) {
68        }
69        nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
70      }
71
72      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
73                   NUM_OF_DATANODES-1);
74      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
75                   NUM_OF_DATANODES);
76    }finally {
77      cluster.shutdown();
78    }
79  }
80 
81  public static void main(String[] args) throws Exception {
82    new TestDatanodeReport().testDatanodeReport();
83  }
84 
85}
86
87
Note: See TracBrowser for help on using the repository browser.