source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @ 120

Last change on this file since 120 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 5.4 KB
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * This tests the capacity and usage report that the namenode
 * aggregates from its datanodes.
 */
public class TestNamenodeCapacityReport extends TestCase {
  private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class);

  /**
   * Starts a single-datanode MiniDFSCluster and verifies that the
   * capacity, DFS used, non-DFS used, and remaining figures reported
   * for the datanode and aggregated by the namenode are consistent,
   * taking the configured reserved space into account.
   */
  public void testVolumeSize() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;

    // Reserve a fixed amount of space on each volume
    long reserved = 10000;
    conf.setLong("dfs.datanode.du.reserved", reserved);

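    // With dfs.datanode.du.reserved set, each data volume should report
    //   capacity = disk capacity - reserved
    // which the assertions below verify at both the datanode and the
    // cluster level.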
    try {
      cluster = new MiniDFSCluster(conf, 1, true, null);
      cluster.waitActive();

      FSNamesystem namesystem = cluster.getNameNode().namesystem;

      // Ensure the data reported for each data node is right
      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
      namesystem.DFSNodesStatus(live, dead);
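      // The cluster was started with a single datanode, so exactly one
      // live node should be reported.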

      assertTrue(live.size() == 1);

      long used, remaining, configCapacity, nonDFSUsed;
      float percentUsed, percentRemaining;

      for (final DatanodeDescriptor datanode : live) {
        used = datanode.getDfsUsed();
        remaining = datanode.getRemaining();
        nonDFSUsed = datanode.getNonDfsUsed();
        configCapacity = datanode.getCapacity();
        percentUsed = datanode.getDfsUsedPercent();
        percentRemaining = datanode.getRemainingPercent();

        LOG.info("Datanode configCapacity " + configCapacity
            + " used " + used + " non DFS used " + nonDFSUsed
            + " remaining " + remaining + " percentUsed " + percentUsed
            + " percentRemaining " + percentRemaining);

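        // A node's configured capacity should break down exactly into
        // DFS used, non-DFS used, and remaining space, and the percentages
        // should be derived from those same figures.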
        assertTrue(configCapacity == (used + remaining + nonDFSUsed));
        assertTrue(percentUsed == ((100.0f * (float)used)/(float)configCapacity));
        assertTrue(percentRemaining == ((100.0f * (float)remaining)/(float)configCapacity));
      }

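      // DF runs the platform's df utility on the given directory to
      // obtain the capacity of the underlying disk.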
      DF df = new DF(new File(cluster.getDataDirectory()), conf);

      //
      // The datanode in the MiniDFSCluster currently creates two data
      // directories on the same disk, and each directory reports a
      // capacity equal to the capacity of that disk. The capacity
      // reported by the datanode is therefore twice the disk capacity,
      // so the disk capacity and the reserved space are both multiplied
      // by two below to match.
      //
      int numOfDataDirs = 2;

      long diskCapacity = numOfDataDirs * df.getCapacity();
      reserved *= numOfDataDirs;

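      // Example with hypothetical numbers: on a 100 GB disk the doubling
      // gives diskCapacity = 200 GB and reserved = 20000 bytes, so the
      // namenode is expected to report configCapacity = 200 GB - 20000 bytes.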
      configCapacity = namesystem.getCapacityTotal();
      used = namesystem.getCapacityUsed();
      nonDFSUsed = namesystem.getCapacityUsedNonDFS();
      remaining = namesystem.getCapacityRemaining();
      percentUsed = namesystem.getCapacityUsedPercent();
      percentRemaining = namesystem.getCapacityRemainingPercent();

      LOG.info("Data node directory " + cluster.getDataDirectory());

      LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "
          + configCapacity + " reserved " + reserved + " used " + used
          + " remaining " + remaining + " nonDFSUsed " + nonDFSUsed
          + " percentUsed " + percentUsed
          + " percentRemaining " + percentRemaining);

      // Ensure the total capacity reported excludes the reserved space
      assertTrue(configCapacity == diskCapacity - reserved);

      // Ensure the total capacity breaks down into used, remaining and
      // non-DFS used space
      assertTrue(configCapacity == (used + remaining + nonDFSUsed));

      // Ensure percent used is calculated from used and configured capacity
      assertTrue(percentUsed == ((float)used * 100.0f)/(float)configCapacity);

      // Ensure percent remaining is calculated from remaining and configured
      // capacity
      assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
    }
    finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}