/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 | package org.apache.hadoop.hdfs.server.namenode.metrics; |
---|
19 | |
---|
20 | import java.io.IOException; |
---|
21 | import java.util.Random; |
---|
22 | |
---|
23 | import junit.framework.TestCase; |
---|
24 | |
---|
25 | import org.apache.hadoop.conf.Configuration; |
---|
26 | import org.apache.hadoop.fs.Path; |
---|
27 | import org.apache.hadoop.hdfs.DFSTestUtil; |
---|
28 | import org.apache.hadoop.hdfs.DistributedFileSystem; |
---|
29 | import org.apache.hadoop.hdfs.MiniDFSCluster; |
---|
30 | import org.apache.hadoop.hdfs.protocol.LocatedBlock; |
---|
31 | import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; |
---|
32 | |
---|
33 | /** |
---|
34 | * Test for metrics published by the Namenode |
---|
35 | */ |
---|
36 | public class TestNameNodeMetrics extends TestCase { |
---|
37 | private static final Configuration CONF = new Configuration(); |
---|
38 | static { |
---|
39 | CONF.setLong("dfs.block.size", 100); |
---|
40 | CONF.setInt("io.bytes.per.checksum", 1); |
---|
41 | CONF.setLong("dfs.heartbeat.interval", 1L); |
---|
42 | CONF.setInt("dfs.replication.interval", 1); |
---|
43 | } |
---|
44 | |
---|
45 | private MiniDFSCluster cluster; |
---|
46 | private FSNamesystemMetrics metrics; |
---|
47 | private DistributedFileSystem fs; |
---|
48 | private Random rand = new Random(); |
---|
49 | private FSNamesystem namesystem; |
---|
50 | |
---|
51 | @Override |
---|
52 | protected void setUp() throws Exception { |
---|
53 | cluster = new MiniDFSCluster(CONF, 3, true, null); |
---|
54 | cluster.waitActive(); |
---|
55 | namesystem = cluster.getNameNode().getNamesystem(); |
---|
56 | fs = (DistributedFileSystem) cluster.getFileSystem(); |
---|
57 | metrics = namesystem.getFSNamesystemMetrics(); |
---|
58 | } |
---|
59 | |
---|
60 | @Override |
---|
61 | protected void tearDown() throws Exception { |
---|
62 | cluster.shutdown(); |
---|
63 | } |
---|
64 | |
---|
65 | /** create a file with a length of <code>fileLen</code> */ |
---|
66 | private void createFile(String fileName, long fileLen, short replicas) throws IOException { |
---|
67 | Path filePath = new Path(fileName); |
---|
68 | DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong()); |
---|
69 | } |
---|
70 | |
---|
71 | private void updateMetrics() throws Exception { |
---|
72 | // Wait for metrics update (corresponds to dfs.replication.interval |
---|
73 | // for some block related metrics to get updated) |
---|
74 | Thread.sleep(1000); |
---|
75 | metrics.doUpdates(null); |
---|
76 | } |
---|
77 | |
---|
78 | /** Test metrics associated with addition of a file */ |
---|
79 | public void testFileAdd() throws Exception { |
---|
80 | // Add files with 100 blocks |
---|
81 | final String file = "/tmp/t"; |
---|
82 | createFile(file, 3200, (short)3); |
---|
83 | final int blockCount = 32; |
---|
84 | int blockCapacity = namesystem.getBlockCapacity(); |
---|
85 | updateMetrics(); |
---|
86 | assertEquals(blockCapacity, metrics.blockCapacity.get()); |
---|
87 | |
---|
88 | // Blocks are stored in a hashmap. Compute its capacity, which |
---|
89 | // doubles every time the number of entries reach the threshold. |
---|
90 | int threshold = (int)(blockCapacity * FSNamesystem.DEFAULT_MAP_LOAD_FACTOR); |
---|
91 | while (threshold < blockCount) { |
---|
92 | blockCapacity <<= 1; |
---|
93 | } |
---|
94 | updateMetrics(); |
---|
95 | assertEquals(3, metrics.filesTotal.get()); |
---|
96 | assertEquals(blockCount, metrics.blocksTotal.get()); |
---|
97 | assertEquals(blockCapacity, metrics.blockCapacity.get()); |
---|
98 | fs.delete(new Path(file), true); |
---|
99 | } |
---|
100 | |
---|
101 | /** Corrupt a block and ensure metrics reflects it */ |
---|
102 | public void testCorruptBlock() throws Exception { |
---|
103 | // Create a file with single block with two replicas |
---|
104 | String file = "/tmp/t"; |
---|
105 | createFile(file, 100, (short)2); |
---|
106 | |
---|
107 | // Corrupt first replica of the block |
---|
108 | LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0); |
---|
109 | namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]); |
---|
110 | updateMetrics(); |
---|
111 | assertEquals(1, metrics.corruptBlocks.get()); |
---|
112 | assertEquals(1, metrics.pendingReplicationBlocks.get()); |
---|
113 | assertEquals(1, metrics.scheduledReplicationBlocks.get()); |
---|
114 | fs.delete(new Path(file), true); |
---|
115 | updateMetrics(); |
---|
116 | assertEquals(0, metrics.corruptBlocks.get()); |
---|
117 | assertEquals(0, metrics.pendingReplicationBlocks.get()); |
---|
118 | assertEquals(0, metrics.scheduledReplicationBlocks.get()); |
---|
119 | } |
---|
120 | |
---|
121 | /** Create excess blocks by reducing the replication factor for |
---|
122 | * for a file and ensure metrics reflects it |
---|
123 | */ |
---|
124 | public void testExcessBlocks() throws Exception { |
---|
125 | String file = "/tmp/t"; |
---|
126 | createFile(file, 100, (short)2); |
---|
127 | int totalBlocks = 1; |
---|
128 | namesystem.setReplication(file, (short)1); |
---|
129 | updateMetrics(); |
---|
130 | assertEquals(totalBlocks, metrics.excessBlocks.get()); |
---|
131 | assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get()); |
---|
132 | fs.delete(new Path(file), true); |
---|
133 | } |
---|
134 | |
---|
135 | /** Test to ensure metrics reflects missing blocks */ |
---|
136 | public void testMissingBlock() throws Exception { |
---|
137 | // Create a file with single block with two replicas |
---|
138 | String file = "/tmp/t"; |
---|
139 | createFile(file, 100, (short)1); |
---|
140 | |
---|
141 | // Corrupt the only replica of the block to result in a missing block |
---|
142 | LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0); |
---|
143 | namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]); |
---|
144 | updateMetrics(); |
---|
145 | assertEquals(1, metrics.underReplicatedBlocks.get()); |
---|
146 | assertEquals(1, metrics.missingBlocks.get()); |
---|
147 | fs.delete(new Path(file), true); |
---|
148 | updateMetrics(); |
---|
149 | assertEquals(0, metrics.underReplicatedBlocks.get()); |
---|
150 | } |
---|
151 | } |
---|