source: proiecte/HadoopJUnit/hadoop-0.20.1/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @ 120

Last changed in r120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit project

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.DataOutputStream;
import java.io.File;
import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.io.Text;

import junit.framework.TestCase;
/** Tests that a datanode correctly handles errors during block read/write. */
public class TestDiskError extends TestCase {
  public void testShutdown() throws Exception {
    if (System.getProperty("os.name").startsWith("Windows")) {
      /**
       * This test depends on the OS not allowing file creation in a
       * directory that lacks write permission for the user. That is
       * apparently not the case on Windows (at least under Cygwin), and
       * possibly on AIX, so the test is disabled on Windows.
       */
      return;
    }
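    // The read-only trick used below relies on POSIX semantics: creating a
    // file in a directory without write permission fails. A minimal
    // standalone illustration (hypothetical, not part of this test):
    //
    //   File dir = new File("/tmp/ro-dir");      // assumed scratch path
    //   dir.mkdirs();
    //   dir.setReadOnly();
    //   new File(dir, "child").createNewFile();  // throws IOException
    //                                            // (permission denied) on POSIX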
    // bring up a cluster of 3 datanodes
    Configuration conf = new Configuration();
    conf.setLong("dfs.block.size", 512L);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    final int dnIndex = 0;
    // MiniDFSCluster gives datanode i the two storage directories
    // data(2*i+1) and data(2*i+2); in-flight blocks are staged under tmp/
    String dataDir = cluster.getDataDirectory();
    File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "tmp");
    File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "tmp");
    try {
      // make both data directories of the first datanode read-only
      assertTrue(dir1.setReadOnly());
      assertTrue(dir2.setReadOnly());

      // create files until the first datanode goes down
      DataNode dn = cluster.getDataNodes().get(dnIndex);
      for (int i=0; DataNode.isDatanodeUp(dn); i++) {
        Path fileName = new Path("/test.txt"+i);
        DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
        DFSTestUtil.waitReplication(fs, fileName, (short)2);
        fs.delete(fileName, true);
      }
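      // Each write to the read-only tmp/ directories fails with an
      // IOException; the datanode's disk checker then marks both volumes as
      // failed and, left with no usable volume, shuts the node down, which
      // ends the loop via isDatanodeUp().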
    } finally {
      // restore the old permissions
      dir1.setWritable(true);
      dir2.setWritable(true);
      cluster.shutdown();
    }
  }

  public void testReplicationError() throws Exception {
    // bring up a cluster of 1 datanode
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    try {
      // create a file with a replication factor of 1
      final Path fileName = new Path("/test.txt");
      final int fileLen = 1;
      DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)1);

      // get the block belonging to the created file
      LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
          fileName.toString(), 0, (long)fileLen);
      assertEquals(1, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(0);

      // bring up a second datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      cluster.waitActive();
      final int sndNode = 1;
      DataNode datanode = cluster.getDataNodes().get(sndNode);

      // open a raw socket to the second datanode and hand-craft an
      // OP_WRITE_BLOCK request header
      InetSocketAddress target = datanode.getSelfAddr();
      Socket s = new Socket(target.getAddress(), target.getPort());
      DataOutputStream out = new DataOutputStream(s.getOutputStream());
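      // The fields below follow the 0.20 data-transfer wire format that the
      // receiving DataXceiver expects for a write request:
      //   short  DATA_TRANSFER_VERSION
      //   byte   OP_WRITE_BLOCK
      //   long   block id, long generation stamp
      //   int    pipeline size, boolean recovery flag
      //   Text   client name, boolean "has source node" flag
      //   int    number of downstream targets
      //   byte   checksum type, int bytes per checksum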
      out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
      out.write(DataTransferProtocol.OP_WRITE_BLOCK);
      out.writeLong(block.getBlock().getBlockId());
      out.writeLong(block.getBlock().getGenerationStamp());
      out.writeInt(1);                 // pipeline size
      out.writeBoolean(false);         // recovery flag
      Text.writeString(out, "");       // client name
      out.writeBoolean(false);         // not sending source node information
      out.writeInt(0);                 // number of downstream targets

      // write checksum header
      out.writeByte(1);                // checksum type (CRC32)
      out.writeInt(512);               // bytes per checksum

      out.flush();

      // close the connection before sending the contents of the block
      out.close();

      // the temporary block & meta files should be deleted
      String dataDir = cluster.getDataDirectory();
      File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "tmp");
      File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "tmp");
      while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
        Thread.sleep(100);
      }

      // then increase the file's replication factor
      fs.setReplication(fileName, (short)2);
      // replication should succeed
      DFSTestUtil.waitReplication(fs, fileName, (short)2);

      // clean up the file
      fs.delete(fileName, false);
    } finally {
      cluster.shutdown();
    }
  }
}
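// To run just this test against the 0.20 build (assuming the stock Ant
// targets in this branch's build.xml), something like the following should
// work from the source root:
//
//   ant test-core -Dtestcase=TestDiskError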