source: proiecte/HadoopJUnit/hadoop-0.20.1/src/core/core-default.xml @ 141

Last change on this file since 141 was 120, checked in by (none), 14 years ago

Added the main files for the Hadoop JUnit Project

  • Property svn:executable set to *
File size: 13.0 KB
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Do not modify this file directly.  Instead, copy entries that you -->
<!-- wish to modify from this file into core-site.xml and change them -->
<!-- there.  If core-site.xml does not already exist, create it.      -->

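<!-- Illustrative sketch (not part of the shipped defaults): to override a
     property, place it in conf/core-site.xml rather than editing this file.
     For example, assuming a site-specific temporary directory of
     /data/hadoop/tmp (a hypothetical path), core-site.xml would contain:

       <configuration>
         <property>
           <name>hadoop.tmp.dir</name>
           <value>/data/hadoop/tmp</value>
         </property>
       </configuration>

     Values set in core-site.xml take precedence over the defaults below. -->
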
<configuration>

<!-- global properties -->

<property>
  <name>hadoop.tmp.dir</name>
  <value>/tmp/hadoop-${user.name}</value>
  <description>A base for other temporary directories.</description>
</property>

<property>
  <name>hadoop.native.lib</name>
  <value>true</value>
  <description>Should native hadoop libraries, if present, be used.</description>
</property>

<property>
  <name>hadoop.http.filter.initializers</name>
  <value></value>
  <description>A comma-separated list of class names. Each class in the list
  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
  Filter will be initialized. Then, the Filter will be applied to all
  user-facing jsp and servlet web pages.  The ordering of the list defines the
  ordering of the filters.</description>
</property>

<property>
  <name>hadoop.security.authorization</name>
  <value>false</value>
  <description>Is service-level authorization enabled?</description>
</property>

<!-- logging properties -->

<property>
  <name>hadoop.logfile.size</name>
  <value>10000000</value>
  <description>The maximum size of each log file.</description>
</property>

<property>
  <name>hadoop.logfile.count</name>
  <value>10</value>
  <description>The maximum number of log files.</description>
</property>

<!-- i/o properties -->
<property>
  <name>io.file.buffer.size</name>
  <value>4096</value>
  <description>The size of the buffer for use in sequence files.
  The size of this buffer should probably be a multiple of the hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>
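
<!-- Illustrative sketch only: io.file.buffer.size is commonly raised in
     core-site.xml on real clusters. Keeping it a multiple of the 4096-byte
     page size, an example override (65536 = 16 x 4096; an example value, not a
     recommendation from this file) would be:

       <property>
         <name>io.file.buffer.size</name>
         <value>65536</value>
       </property>
-->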

<property>
  <name>io.bytes.per.checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum.  Must not be larger than
  io.file.buffer.size.</description>
</property>

<property>
  <name>io.skip.checksum.errors</name>
  <value>false</value>
  <description>If true, when a checksum error is encountered while
  reading a sequence file, entries are skipped, instead of throwing an
  exception.</description>
</property>

<property>
  <name>io.compression.codecs</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
  <description>A list of the compression codec classes that can be used
               for compression/decompression.</description>
</property>

<property>
  <name>io.serializations</name>
  <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
  <description>A list of serialization classes that can be used for
  obtaining serializers and deserializers.</description>
</property>

<!-- file system properties -->

<property>
  <name>fs.default.name</name>
  <value>file:///</value>
  <description>The name of the default file system.  A URI whose
  scheme and authority determine the FileSystem implementation.  The
  uri's scheme determines the config property (fs.SCHEME.impl) naming
  the FileSystem implementation class.  The uri's authority is used to
  determine the host, port, etc. for a filesystem.</description>
</property>
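
<!-- Illustrative sketch only: on a cluster the default filesystem typically
     points at the HDFS namenode rather than the local file: scheme. The host
     name and port below are placeholders, not defaults shipped with Hadoop:

       <property>
         <name>fs.default.name</name>
         <value>hdfs://namenode.example.com:9000</value>
       </property>
-->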

<property>
  <name>fs.trash.interval</name>
  <value>0</value>
  <description>Number of minutes between trash checkpoints.
  If zero, the trash feature is disabled.
  </description>
</property>
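
<!-- Illustrative sketch only: setting fs.trash.interval to 1440 (24 hours x
     60 minutes) keeps deleted files recoverable for roughly a day before the
     trash checkpoint is purged. The value 1440 is an example, not a default:

       <property>
         <name>fs.trash.interval</name>
         <value>1440</value>
       </property>
-->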

<property>
  <name>fs.file.impl</name>
  <value>org.apache.hadoop.fs.LocalFileSystem</value>
  <description>The FileSystem for file: uris.</description>
</property>

<property>
  <name>fs.hdfs.impl</name>
  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
  <description>The FileSystem for hdfs: uris.</description>
</property>

<property>
  <name>fs.s3.impl</name>
  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
  <description>The FileSystem for s3: uris.</description>
</property>

<property>
  <name>fs.s3n.impl</name>
  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
  <description>The FileSystem for s3n: (Native S3) uris.</description>
</property>

<property>
  <name>fs.kfs.impl</name>
  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
  <description>The FileSystem for kfs: uris.</description>
</property>

<property>
  <name>fs.hftp.impl</name>
  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
</property>

<property>
  <name>fs.hsftp.impl</name>
  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
</property>

<property>
  <name>fs.ftp.impl</name>
  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
  <description>The FileSystem for ftp: uris.</description>
</property>

<property>
  <name>fs.ramfs.impl</name>
  <value>org.apache.hadoop.fs.InMemoryFileSystem</value>
  <description>The FileSystem for ramfs: uris.</description>
</property>

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.fs.HarFileSystem</value>
  <description>The filesystem for Hadoop archives.</description>
</property>

<property>
  <name>fs.checkpoint.dir</name>
  <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
  <description>Determines where on the local filesystem the DFS secondary
      name node should store the temporary images to merge.
      If this is a comma-delimited list of directories then the image is
      replicated in all of the directories for redundancy.
  </description>
</property>

<property>
  <name>fs.checkpoint.edits.dir</name>
  <value>${fs.checkpoint.dir}</value>
  <description>Determines where on the local filesystem the DFS secondary
      name node should store the temporary edits to merge.
      If this is a comma-delimited list of directories then the edits are
      replicated in all of the directories for redundancy.
      The default value is the same as fs.checkpoint.dir.
  </description>
</property>
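
<!-- Illustrative sketch only: fs.checkpoint.dir accepts a comma-delimited list
     so the checkpoint image is replicated across more than one disk. The paths
     below are hypothetical:

       <property>
         <name>fs.checkpoint.dir</name>
         <value>/disk1/dfs/namesecondary,/disk2/dfs/namesecondary</value>
       </property>
-->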

<property>
  <name>fs.checkpoint.period</name>
  <value>3600</value>
  <description>The number of seconds between two periodic checkpoints.
  </description>
</property>

<property>
  <name>fs.checkpoint.size</name>
  <value>67108864</value>
  <description>The size of the current edit log (in bytes) that triggers
       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
  </description>
</property>
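
<!-- Note (not part of the original descriptions): with the defaults above, the
     secondary name node checkpoints when 3600 seconds (one hour) have elapsed
     or when the edit log reaches 67108864 bytes (64 MB), whichever comes
     first. -->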

<property>
  <name>fs.s3.block.size</name>
  <value>67108864</value>
  <description>Block size to use when writing files to S3.</description>
</property>

<property>
  <name>fs.s3.buffer.dir</name>
  <value>${hadoop.tmp.dir}/s3</value>
  <description>Determines where on the local filesystem the S3 filesystem
  should store files before sending them to S3
  (or after retrieving them from S3).
  </description>
</property>

<property>
  <name>fs.s3.maxRetries</name>
  <value>4</value>
  <description>The maximum number of retries for reading or writing files to S3
  before we signal failure to the application.
  </description>
</property>

<property>
  <name>fs.s3.sleepTimeSeconds</name>
  <value>10</value>
  <description>The number of seconds to sleep between each S3 retry.
  </description>
</property>

<property>
  <name>local.cache.size</name>
  <value>10737418240</value>
  <description>The limit on the size of cache you want to keep, set by default
  to 10GB. This will act as a soft limit on the cache directory for out-of-band data.
  </description>
</property>

<property>
  <name>io.seqfile.compress.blocksize</name>
  <value>1000000</value>
  <description>The minimum block size for compression in block-compressed
          SequenceFiles.
  </description>
</property>

<property>
  <name>io.seqfile.lazydecompress</name>
  <value>true</value>
  <description>Should values of block-compressed SequenceFiles be decompressed
          only when necessary.
  </description>
</property>

<property>
  <name>io.seqfile.sorter.recordlimit</name>
  <value>1000000</value>
  <description>The limit on the number of records to be kept in memory in a spill
          in SequenceFiles.Sorter.
  </description>
</property>

<property>
  <name>io.mapfile.bloom.size</name>
  <value>1048576</value>
  <description>The size of the BloomFilters used in a BloomMapFile. Each time this many
  keys have been appended, the next BloomFilter is created (inside a DynamicBloomFilter).
  Larger values minimize the number of filters, which slightly increases performance,
  but may waste too much space if the total number of keys is usually much smaller
  than this number.
  </description>
</property>

<property>
  <name>io.mapfile.bloom.error.rate</name>
  <value>0.005</value>
  <description>The rate of false positives in the BloomFilters used in a BloomMapFile.
  As this value decreases, the size of the BloomFilters increases exponentially. This
  value is the probability of encountering false positives (default is 0.5%).
  </description>
</property>

<property>
  <name>hadoop.util.hash.type</name>
  <value>murmur</value>
  <description>The default implementation of Hash. Currently this can take one of
  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
  </description>
</property>


<!-- ipc properties -->

<property>
  <name>ipc.client.idlethreshold</name>
  <value>4000</value>
  <description>Defines the threshold number of connections after which
               connections will be inspected for idleness.
  </description>
</property>

<property>
  <name>ipc.client.kill.max</name>
  <value>10</value>
  <description>Defines the maximum number of clients to disconnect in one go.
  </description>
</property>

<property>
  <name>ipc.client.connection.maxidletime</name>
  <value>10000</value>
  <description>The maximum time in msec after which a client will bring down the
               connection to the server.
  </description>
</property>

<property>
  <name>ipc.client.connect.max.retries</name>
  <value>10</value>
  <description>Indicates the number of retries a client will make to establish
               a server connection.
  </description>
</property>

<property>
  <name>ipc.server.listen.queue.size</name>
  <value>128</value>
  <description>Indicates the length of the listen queue for servers accepting
               client connections.
  </description>
</property>

<property>
  <name>ipc.server.tcpnodelay</name>
  <value>false</value>
  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
  the server. Setting to true disables the algorithm and may decrease latency
  at the cost of more, smaller packets.
  </description>
</property>

<property>
  <name>ipc.client.tcpnodelay</name>
  <value>false</value>
  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
  the client. Setting to true disables the algorithm and may decrease latency
  at the cost of more, smaller packets.
  </description>
</property>


<!-- Web Interface Configuration -->

<property>
  <name>webinterface.private.actions</name>
  <value>false</value>
  <description> If set to true, the web interfaces of the JobTracker (JT) and
                NameNode (NN) may contain actions, such as kill job or delete
                file, that should not be exposed to the public. Enable this
                option if the interfaces are reachable only by those who have
                the right authorization.
  </description>
</property>

<!-- Proxy Configuration -->

<property>
  <name>hadoop.rpc.socket.factory.class.default</name>
  <value>org.apache.hadoop.net.StandardSocketFactory</value>
  <description> Default SocketFactory to use. This parameter is expected to be
    formatted as "package.FactoryClassName".
  </description>
</property>

<property>
  <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
  <value></value>
  <description> SocketFactory to use to connect to a DFS. If null or empty, use
    hadoop.rpc.socket.factory.class.default. This socket factory is also used by
    DFSClient to create sockets to DataNodes.
  </description>
</property>

<property>
  <name>hadoop.socks.server</name>
  <value></value>
  <description> Address (host:port) of the SOCKS server to be used by the
    SocksSocketFactory.
  </description>
</property>
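
<!-- Illustrative sketch only: to route client RPC traffic through a SOCKS
     proxy, the socket factory and proxy address can be overridden in
     core-site.xml. org.apache.hadoop.net.SocksSocketFactory is assumed to be
     the SOCKS-capable factory in this release, and the host:port below is a
     placeholder (1080 is the conventional SOCKS port):

       <property>
         <name>hadoop.rpc.socket.factory.class.default</name>
         <value>org.apache.hadoop.net.SocksSocketFactory</value>
       </property>
       <property>
         <name>hadoop.socks.server</name>
         <value>socks-gateway.example.com:1080</value>
       </property>
-->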

<!-- Rack Configuration -->

<property>
  <name>topology.node.switch.mapping.impl</name>
  <value>org.apache.hadoop.net.ScriptBasedMapping</value>
  <description> The default implementation of the DNSToSwitchMapping. It
    invokes a script specified in topology.script.file.name to resolve
    node names. If the value for topology.script.file.name is not set, the
    default value of DEFAULT_RACK is returned for all node names.
  </description>
</property>

<property>
  <name>topology.script.file.name</name>
  <value></value>
  <description> The script name that should be invoked to resolve DNS names to
    NetworkTopology names. Example: the script would take host.foo.bar as an
    argument, and return /rack1 as the output.
  </description>
</property>

<property>
  <name>topology.script.number.args</name>
  <value>100</value>
  <description> The max number of args that the script configured with
    topology.script.file.name should be run with. Each arg is an
    IP address.
  </description>
</property>
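
<!-- Illustrative sketch only: a rack-aware setup typically points
     topology.script.file.name at a site-provided executable that maps each
     host name or IP address passed as an argument to a rack path such as
     /rack1. The script path below is hypothetical:

       <property>
         <name>topology.script.file.name</name>
         <value>/etc/hadoop/topology.sh</value>
       </property>
-->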

</configuration>