/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import java.io.*;
import java.util.ArrayList;

import junit.framework.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

/**
 * A JUnit test for corrupted file handling.
 */
public class TestFileCorruption extends TestCase {

  public TestFileCorruption(String testName) {
    super(testName);
  }

  protected void setUp() throws Exception {
  }

  protected void tearDown() throws Exception {
  }

  /** check if DFS can handle corrupted blocks properly */
  public void testFileCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 3, true, null);
      FileSystem fs = cluster.getFileSystem();
      util.createFiles(fs, "/srcdat");
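      // Each datanode in the MiniDFSCluster keeps its blocks under two
      // storage directories; data5/current belongs to the third datanode,
      // so removing its block files destroys one replica of each block
      // stored there. The files must still check out via the remaining
      // replicas.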
      // Now deliberately remove the blocks
      File data_dir = new File(System.getProperty("test.build.data"),
          "dfs/data/data5/current");
      assertTrue("data directory does not exist", data_dir.exists());
      File[] blocks = data_dir.listFiles();
      assertTrue("Blocks do not exist in data-dir",
          (blocks != null) && (blocks.length > 0));
      for (int idx = 0; idx < blocks.length; idx++) {
        if (!blocks[idx].getName().startsWith("blk_")) {
          continue;
        }
        System.out.println("Deliberately removing file " + blocks[idx].getName());
        assertTrue("Cannot remove file.", blocks[idx].delete());
      }
      assertTrue("Corrupted replicas not handled properly.",
          util.checkFiles(fs, "/srcdat"));
      util.cleanup(fs, "/srcdat");
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

  /** check if local FS can handle corrupted blocks properly */
  public void testLocalFileCorruption() throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
    FileSystem fs = FileSystem.getLocal(conf);
    DataOutputStream dos = fs.create(file);
    dos.writeBytes("original bytes");
    dos.close();
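    // Now deliberately corrupt the file: rewriting it through a raw
    // FileOutputStream bypasses the checksummed local filesystem, so the
    // hidden .crc sidecar still describes the original contents.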
    dos = new DataOutputStream(new FileOutputStream(file.toString()));
    dos.writeBytes("corruption");
    dos.close();
    // Now attempt to read the file; the checksum mismatch must surface as
    // a ChecksumException rather than an NPE or a silent success
    DataInputStream dis = fs.open(file, 512);
    try {
      System.out.println("A ChecksumException is expected to be logged.");
      dis.readByte();
      fail("Expected a ChecksumException reading corrupted data");
    } catch (ChecksumException ignore) {
      // expected; let any NPE propagate and fail the test
    } finally {
      dis.close();
    }
    fs.delete(file, true);
  }

  /** Test the case that a replica is reported corrupt while it is not
   * in blocksMap. Make sure that an ArrayIndexOutOfBoundsException is
   * not thrown. See HADOOP-4351.
   */
  public void testArrayOutOfBoundsException() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      cluster.waitActive();

      FileSystem fs = cluster.getFileSystem();
      final Path FILE_PATH = new Path("/tmp.txt");
      final long FILE_LEN = 1L;
      DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);

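      // Get the block: with replication 2 on a two-datanode cluster the
      // replica kept by the first datanode may live in either of its two
      // storage directories, so check data1 first and fall back to data2.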
      File dataDir = new File(cluster.getDataDirectory(),
          "data1/current");
      Block blk = getBlock(dataDir);
      if (blk == null) {
        blk = getBlock(new File(cluster.getDataDirectory(),
            "data2/current"));
      }
      assertNotNull("Block file not found in data1 or data2", blk);

      // start a third datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);

      // report a corrupted block from the third datanode, which holds no
      // replica of the block
      cluster.getNameNode().namesystem.markBlockAsCorrupt(blk,
          new DatanodeInfo(dataNode.dnRegistration));

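      // The file should still open normally after the bogus corruption
      // report is processed.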
      fs.open(FILE_PATH);

      //clean up
      fs.delete(FILE_PATH, false);
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }

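  /** Find the first block file in the given storage directory and
   * reconstruct its Block (id, length, and generation stamp taken from
   * the matching .meta file), or return null if no block file exists. */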
  private Block getBlock(File dataDir) {
    assertTrue("data directory does not exist", dataDir.exists());
    File[] blocks = dataDir.listFiles();
    assertTrue("Blocks do not exist in dataDir",
        (blocks != null) && (blocks.length > 0));

    File blockFile = null;
    for (int idx = 0; idx < blocks.length; idx++) {
      String name = blocks[idx].getName();
      if (name.startsWith("blk_") && !name.endsWith(".meta")) {
        blockFile = blocks[idx];
        break;
      }
    }
    if (blockFile == null) {
      return null;
    }
    String blockFileName = blockFile.getName();
    long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
    // the generation stamp is encoded in the meta file name:
    // blk_<id>_<genstamp>.meta
    long blockTimeStamp = GenerationStamp.WILDCARD_STAMP;
    for (int idx = 0; idx < blocks.length; idx++) {
      String fileName = blocks[idx].getName();
      if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
        int startIndex = blockFileName.length() + 1;
        int endIndex = fileName.length() - ".meta".length();
        blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
        break;
      }
    }
    // use the block file's length, not the meta file's
    return new Block(blockId, blockFile.length(), blockTimeStamp);
  }
}