source: proiecte/HadoopJUnit/hadoop-0.20.1/src/hdfs/hdfs-default.xml @ 141

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Do not modify this file directly.  Instead, copy entries that you -->
<!-- wish to modify from this file into hdfs-site.xml and change them -->
<!-- there.  If hdfs-site.xml does not already exist, create it.      -->

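<!-- Illustrative sketch (not part of the shipped defaults): a minimal
     hdfs-site.xml overriding one entry copied from this file, as the note
     above describes. The value 2 is only a sample.

<?xml version="1.0"?>
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>
-->
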
<configuration>

<property>
  <name>dfs.namenode.logging.level</name>
  <value>info</value>
  <description>The logging level for dfs namenode. Other values are "dir"
  (trace namespace mutations), "block" (trace block under/over replications
  and block creations/deletions), or "all".</description>
</property>

<property>
  <name>dfs.secondary.http.address</name>
  <value>0.0.0.0:50090</value>
  <description>
    The secondary namenode http server address and port.
    If the port is 0 then the server will start on a free port.
  </description>
</property>

<property>
  <name>dfs.datanode.address</name>
  <value>0.0.0.0:50010</value>
  <description>
    The address on which the datanode server will listen.
    If the port is 0 then the server will start on a free port.
  </description>
</property>

<property>
  <name>dfs.datanode.http.address</name>
  <value>0.0.0.0:50075</value>
  <description>
    The datanode http server address and port.
    If the port is 0 then the server will start on a free port.
  </description>
</property>

<property>
  <name>dfs.datanode.ipc.address</name>
  <value>0.0.0.0:50020</value>
  <description>
    The datanode ipc server address and port.
    If the port is 0 then the server will start on a free port.
  </description>
</property>

<property>
  <name>dfs.datanode.handler.count</name>
  <value>3</value>
  <description>The number of server threads for the datanode.</description>
</property>

<property>
  <name>dfs.http.address</name>
  <value>0.0.0.0:50070</value>
  <description>
    The address and the base port on which the dfs namenode web ui will listen.
    If the port is 0 then the server will start on a free port.
  </description>
</property>

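<!-- Example (hypothetical values): the address properties above all use the
     same host:port convention, with port 0 requesting any free port. Binding
     a datanode to one specific interface might look like:

  <property>
    <name>dfs.datanode.address</name>
    <value>10.0.0.5:50010</value>
  </property>
-->
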
<property>
  <name>dfs.https.enable</name>
  <value>false</value>
  <description>Decides whether HTTPS (SSL) is supported on HDFS.
  </description>
</property>

<property>
  <name>dfs.https.need.client.auth</name>
  <value>false</value>
  <description>Whether SSL client certificate authentication is required.
  </description>
</property>

<property>
  <name>dfs.https.server.keystore.resource</name>
  <value>ssl-server.xml</value>
  <description>Resource file from which ssl server keystore
  information will be extracted.
  </description>
</property>

<property>
  <name>dfs.https.client.keystore.resource</name>
  <value>ssl-client.xml</value>
  <description>Resource file from which ssl client keystore
  information will be extracted.
  </description>
</property>

<property>
  <name>dfs.datanode.https.address</name>
  <value>0.0.0.0:50475</value>
  <description>The datanode https server address and port.</description>
</property>

<property>
  <name>dfs.https.address</name>
  <value>0.0.0.0:50470</value>
  <description>The namenode https server address and port.</description>
</property>

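<!-- Sketch: reading the properties above together, enabling HTTPS would mean
     flipping dfs.https.enable and ensuring the keystore resource file named
     by dfs.https.server.keystore.resource (ssl-server.xml by default) is
     available on the classpath. Only the toggle is shown here:

  <property>
    <name>dfs.https.enable</name>
    <value>true</value>
  </property>
-->
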
<property>
  <name>dfs.datanode.dns.interface</name>
  <value>default</value>
  <description>The name of the Network Interface from which a data node should
  report its IP address.
  </description>
</property>

<property>
  <name>dfs.datanode.dns.nameserver</name>
  <value>default</value>
  <description>The host name or IP address of the name server (DNS)
  which a DataNode should use to determine the host name used by the
  NameNode for communication and display purposes.
  </description>
</property>

<property>
  <name>dfs.replication.considerLoad</name>
  <value>true</value>
  <description>Decides whether chooseTarget considers the target's load.
  </description>
</property>

<property>
  <name>dfs.default.chunk.view.size</name>
  <value>32768</value>
  <description>The number of bytes of a file to view in the browser.
  </description>
</property>

<property>
  <name>dfs.datanode.du.reserved</name>
  <value>0</value>
  <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
  </description>
</property>

<property>
  <name>dfs.name.dir</name>
  <value>${hadoop.tmp.dir}/dfs/name</value>
  <description>Determines where on the local filesystem the DFS name node
      should store the name table (fsimage).  If this is a comma-delimited list
      of directories then the name table is replicated in all of the
      directories, for redundancy. </description>
</property>

<property>
  <name>dfs.name.edits.dir</name>
  <value>${dfs.name.dir}</value>
  <description>Determines where on the local filesystem the DFS name node
      should store the transaction (edits) file. If this is a comma-delimited list
      of directories then the transaction file is replicated in all of the
      directories, for redundancy. The default value is the same as dfs.name.dir.
  </description>
</property>
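
<!-- Example (hypothetical paths): a comma-delimited dfs.name.dir keeps a
     redundant copy of the fsimage in each listed directory, ideally on
     separate disks.

  <property>
    <name>dfs.name.dir</name>
    <value>/disk1/dfs/name,/disk2/dfs/name</value>
  </property>
-->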

<property>
  <name>dfs.web.ugi</name>
  <value>webuser,webgroup</value>
  <description>The user account used by the web interface.
    Syntax: USERNAME,GROUP1,GROUP2, ...
  </description>
</property>

<property>
  <name>dfs.permissions</name>
  <value>true</value>
  <description>
    If "true", enable permission checking in HDFS.
    If "false", permission checking is turned off,
    but all other behavior is unchanged.
    Switching from one parameter value to the other does not change the mode,
    owner or group of files or directories.
  </description>
</property>

<property>
  <name>dfs.permissions.supergroup</name>
  <value>supergroup</value>
  <description>The name of the group of super-users.</description>
</property>

<property>
  <name>dfs.data.dir</name>
  <value>${hadoop.tmp.dir}/dfs/data</value>
  <description>Determines where on the local filesystem a DFS data node
  should store its blocks.  If this is a comma-delimited
  list of directories, then data will be stored in all named
  directories, typically on different devices.
  Directories that do not exist are ignored.
  </description>
</property>

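<!-- Example (hypothetical paths): spreading dfs.data.dir across devices;
     per the description above, blocks go to all named directories and any
     directory that does not exist is ignored.

  <property>
    <name>dfs.data.dir</name>
    <value>/disk1/dfs/data,/disk2/dfs/data</value>
  </property>
-->
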
<property>
  <name>dfs.replication</name>
  <value>3</value>
  <description>Default block replication.
  The actual number of replications can be specified when the file is created.
  The default is used if replication is not specified at create time.
  </description>
</property>

<property>
  <name>dfs.replication.max</name>
  <value>512</value>
  <description>Maximum block replication.
  </description>
</property>

<property>
  <name>dfs.replication.min</name>
  <value>1</value>
  <description>Minimum block replication.
  </description>
</property>

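<!-- Worked note: with the defaults above, every new file gets 3 replicas
     unless the client requests otherwise at create time, and a requested
     value presumably has to fall between dfs.replication.min (1) and
     dfs.replication.max (512). A single-node test setup might lower the
     default, e.g.:

  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
-->
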
<property>
  <name>dfs.block.size</name>
  <value>67108864</value>
  <description>The default block size for new files.</description>
</property>

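<!-- Worked arithmetic: 67108864 bytes = 64 * 1024 * 1024, i.e. 64 MB.
     Using 128 MB blocks for new files would look like, for example:

  <property>
    <name>dfs.block.size</name>
    <value>134217728</value>
  </property>
-->
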
<property>
  <name>dfs.df.interval</name>
  <value>60000</value>
  <description>Disk usage statistics refresh interval in msec.</description>
</property>

<property>
  <name>dfs.client.block.write.retries</name>
  <value>3</value>
  <description>The number of retries for writing blocks to the data nodes,
  before we signal failure to the application.
  </description>
</property>

<property>
  <name>dfs.blockreport.intervalMsec</name>
  <value>3600000</value>
  <description>Determines block reporting interval in milliseconds.</description>
</property>

<property>
  <name>dfs.blockreport.initialDelay</name>
  <value>0</value>
  <description>Delay for first block report in seconds.</description>
</property>

256
257<property>
258  <name>dfs.heartbeat.interval</name>
259  <value>3</value>
260  <description>Determines datanode heartbeat interval in seconds.</description>
261</property>
262
263<property>
264  <name>dfs.namenode.handler.count</name>
265  <value>10</value>
266  <description>The number of server threads for the namenode.</description>
267</property>
268
<property>
  <name>dfs.safemode.threshold.pct</name>
  <value>0.999f</value>
  <description>
    Specifies the percentage of blocks that should satisfy
    the minimal replication requirement defined by dfs.replication.min.
    Values less than or equal to 0 mean not to start in safe mode.
    Values greater than 1 will make safe mode permanent.
  </description>
</property>

<property>
  <name>dfs.safemode.extension</name>
  <value>30000</value>
  <description>
    Determines extension of safe mode in milliseconds
    after the threshold level is reached.
  </description>
</property>

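<!-- Worked example, reading the two descriptions above together: with
     dfs.safemode.threshold.pct = 0.999, dfs.replication.min = 1 and
     dfs.safemode.extension = 30000, the namenode stays in safe mode until
     99.9% of blocks have at least one reported replica, then waits a
     further 30 seconds before leaving it.
-->
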
<property>
  <name>dfs.balance.bandwidthPerSec</name>
  <value>1048576</value>
  <description>
        Specifies the maximum amount of bandwidth that each datanode
        can utilize for balancing purposes, in terms of
        the number of bytes per second.
  </description>
</property>

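<!-- Worked arithmetic: 1048576 bytes/s = 1 MB/s per datanode. An otherwise
     idle cluster might allow 10 MB/s, for example:

  <property>
    <name>dfs.balance.bandwidthPerSec</name>
    <value>10485760</value>
  </property>
-->
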
<property>
  <name>dfs.hosts</name>
  <value></value>
  <description>Names a file that contains a list of hosts that are
  permitted to connect to the namenode. The full pathname of the file
  must be specified.  If the value is empty, all hosts are
  permitted.</description>
</property>

<property>
  <name>dfs.hosts.exclude</name>
  <value></value>
  <description>Names a file that contains a list of hosts that are
  not permitted to connect to the namenode.  The full pathname of the
  file must be specified.  If the value is empty, no hosts are
  excluded.</description>
</property>

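<!-- Example (hypothetical paths): per the descriptions above, both
     properties take the full pathname of a file listing hosts, one per line.

  <property>
    <name>dfs.hosts</name>
    <value>/etc/hadoop/dfs.include</value>
  </property>
  <property>
    <name>dfs.hosts.exclude</name>
    <value>/etc/hadoop/dfs.exclude</value>
  </property>
-->
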
<property>
  <name>dfs.max.objects</name>
  <value>0</value>
  <description>The maximum number of files, directories and blocks
  dfs supports. A value of zero indicates no limit to the number
  of objects that dfs supports.
  </description>
</property>

<property>
  <name>dfs.namenode.decommission.interval</name>
  <value>30</value>
  <description>Namenode periodicity in seconds to check if decommission is
  complete.</description>
</property>

<property>
  <name>dfs.namenode.decommission.nodes.per.interval</name>
  <value>5</value>
  <description>The number of nodes the namenode checks for decommission
  completeness in each dfs.namenode.decommission.interval.</description>
</property>

<property>
  <name>dfs.replication.interval</name>
  <value>3</value>
  <description>The periodicity in seconds with which the namenode computes
  replication work for datanodes. </description>
</property>

<property>
  <name>dfs.access.time.precision</name>
  <value>3600000</value>
  <description>The access time for an HDFS file is precise up to this value.
               The default value is 1 hour. Setting a value of 0 disables
               access times for HDFS.
  </description>
</property>

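<!-- Worked note: 3600000 ms = 1 hour, so by default an access time is only
     refreshed when it is more than an hour stale. Per the description above,
     access times are disabled entirely with:

  <property>
    <name>dfs.access.time.precision</name>
    <value>0</value>
  </property>
-->
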
<property>
  <name>dfs.support.append</name>
  <value>false</value>
  <description>Does HDFS allow appends to files?
               This is currently set to false because there are bugs in the
               "append code" and it is not supported in any production cluster.
  </description>
</property>

</configuration>