1 | /** |
---|
2 | * Licensed to the Apache Software Foundation (ASF) under one |
---|
3 | * or more contributor license agreements. See the NOTICE file |
---|
4 | * distributed with this work for additional information |
---|
5 | * regarding copyright ownership. The ASF licenses this file |
---|
6 | * to you under the Apache License, Version 2.0 (the |
---|
7 | * "License"); you may not use this file except in compliance |
---|
8 | * with the License. You may obtain a copy of the License at |
---|
9 | * |
---|
10 | * http://www.apache.org/licenses/LICENSE-2.0 |
---|
11 | * |
---|
12 | * Unless required by applicable law or agreed to in writing, software |
---|
13 | * distributed under the License is distributed on an "AS IS" BASIS, |
---|
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
---|
15 | * See the License for the specific language governing permissions and |
---|
16 | * limitations under the License. |
---|
17 | */ |
---|
18 | |
---|
19 | #include "hdfs.h" |
---|
20 | #include "hdfsJniHelper.h" |
---|
21 | |
---|
22 | |
---|
/* Some frequently used Java class paths (JNI internal form, '/'-separated) */
#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_ISTRM    "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_OSTRM    "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_STAT     "org/apache/hadoop/fs/FileStatus"
#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
#define HADOOP_UNIX_USER_GROUP_INFO "org/apache/hadoop/security/UnixUserGroupInformation"
#define HADOOP_USER_GROUP_INFO "org/apache/hadoop/security/UserGroupInformation"
#define JAVA_NET_ISA    "java/net/InetSocketAddress"
#define JAVA_NET_URI    "java/net/URI"
#define JAVA_STRING     "java/lang/String"

/* JNI type descriptor for a void return */
#define JAVA_VOID       "V"

/* Macros for constructing method signatures */
#define JPARAM(X)           "L" X ";"       /* object parameter descriptor */
#define JARRPARAM(X)        "[L" X ";"      /* object-array parameter descriptor */
#define JMETHOD1(X, R)      "(" X ")" R     /* (arg) -> R signature */
#define JMETHOD2(X, Y, R)   "(" X Y ")" R   /* (arg, arg) -> R signature */
#define JMETHOD3(X, Y, Z, R)   "(" X Y Z")" R   /* (arg, arg, arg) -> R signature */
---|
48 | |
---|
49 | |
---|
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;    /* The JNIEnv* belonging to the mapped thread */
} hdfsJniEnv;
---|
58 | |
---|
59 | |
---|
60 | |
---|
61 | /** |
---|
62 | * Helper function to destroy a local reference of java.lang.Object |
---|
63 | * @param env: The JNIEnv pointer. |
---|
64 | * @param jFile: The local reference of java.lang.Object object |
---|
65 | * @return None. |
---|
66 | */ |
---|
67 | static void destroyLocalReference(JNIEnv *env, jobject jObject) |
---|
68 | { |
---|
69 | if (jObject) |
---|
70 | (*env)->DeleteLocalRef(env, jObject); |
---|
71 | } |
---|
72 | |
---|
73 | |
---|
74 | /** |
---|
75 | * Helper function to create a org.apache.hadoop.fs.Path object. |
---|
76 | * @param env: The JNIEnv pointer. |
---|
77 | * @param path: The file-path for which to construct org.apache.hadoop.fs.Path |
---|
78 | * object. |
---|
79 | * @return Returns a jobject on success and NULL on error. |
---|
80 | */ |
---|
81 | static jobject constructNewObjectOfPath(JNIEnv *env, const char *path) |
---|
82 | { |
---|
83 | //Construct a java.lang.String object |
---|
84 | jstring jPathString = (*env)->NewStringUTF(env, path); |
---|
85 | |
---|
86 | //Construct the org.apache.hadoop.fs.Path object |
---|
87 | jobject jPath = |
---|
88 | constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path", |
---|
89 | "(Ljava/lang/String;)V", jPathString); |
---|
90 | if (jPath == NULL) { |
---|
91 | fprintf(stderr, "Can't construct instance of class " |
---|
92 | "org.apache.hadoop.fs.Path for %s\n", path); |
---|
93 | errno = EINTERNAL; |
---|
94 | return NULL; |
---|
95 | } |
---|
96 | |
---|
97 | // Destroy the local reference to the java.lang.String object |
---|
98 | destroyLocalReference(env, jPathString); |
---|
99 | |
---|
100 | return jPath; |
---|
101 | } |
---|
102 | |
---|
103 | |
---|
/**
 * Helper function to translate an exception into a meaningful errno value.
 * @param exc: The exception.
 * @param env: The JNIEnv Pointer.
 * @param method: The name of the method that threw the exception. This
 * may be format string to be used in conjuction with additional arguments.
 * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
 */
static int errnoFromException(jthrowable exc, JNIEnv *env,
                              const char *method, ...)
{
    va_list ap;
    int errnum = 0;
    char *excClass = NULL;

    // No exception object at all: fall through to the generic report.
    if (exc == NULL)
        goto default_error;

    // classNameOfObject returns a malloc'd dotted class name (freed below).
    if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
        errnum = EINTERNAL;
        goto done;
    }

    // Map well-known Hadoop/Java exception classes onto POSIX errno values.
    if (!strcmp(excClass, "org.apache.hadoop.security."
                "AccessControlException")) {
        errnum = EACCES;    // permission denied
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.hdfs.protocol."
                "QuotaExceededException")) {
        errnum = EDQUOT;    // disk quota exceeded
        goto done;
    }

    if (!strcmp(excClass, "java.io.FileNotFoundException")) {
        errnum = ENOENT;    // no such file or directory
        goto done;
    }

    //TODO: interpret more exceptions; maybe examine exc.getMessage()

default_error:

    //Can't tell what went wrong, so just punt
    // (prints the pending exception's stack trace, then names the failing call)
    (*env)->ExceptionDescribe(env);
    fprintf(stderr, "Call to ");
    va_start(ap, method);
    vfprintf(stderr, method, ap);
    va_end(ap);
    fprintf(stderr, " failed!\n");
    errnum = EINTERNAL;

done:

    // Always clear any pending exception before returning to C code;
    // further JNI calls are invalid while an exception is pending.
    (*env)->ExceptionClear(env);

    if (excClass != NULL)
        free(excClass);

    return errnum;
}
---|
166 | |
---|
167 | |
---|
168 | |
---|
169 | |
---|
170 | hdfsFS hdfsConnect(const char* host, tPort port) { |
---|
171 | // conect with NULL as user name/groups |
---|
172 | return hdfsConnectAsUser(host, port, NULL, NULL, 0); |
---|
173 | } |
---|
174 | |
---|
175 | |
---|
/**
 * Connect to an hdfs file system as a specific user (see hdfs.h).
 * @param host  NULL -> LocalFileSystem; "default" with port 0 -> the
 *              configured default FileSystem; otherwise hdfs://host:port.
 * @param port  Namenode port (ignored for the NULL / "default" cases).
 * @param user  Optional user name; when non-NULL, groups must be non-empty.
 * @param groups / groups_size  Group names stored alongside the user.
 * @return A global reference usable as hdfsFS, or NULL on error (errno set).
 */
hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user , const char **groups, int groups_size )
{
    // JAVA EQUIVALENT:
    //  FileSystem fs = FileSystem.get(new Configuration());
    //  return fs;

    JNIEnv *env = 0;
    jobject jConfiguration = NULL;
    jobject jFS = NULL;
    jobject jURI = NULL;
    jstring jURIString = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    char *cURI = 0;
    jobject gFsRef = NULL;


    //Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();
    if (env == NULL) {
      errno = EINTERNAL;
      return NULL;
    }

    //Create the org.apache.hadoop.conf.Configuration object
    jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");

    if (jConfiguration == NULL) {
      fprintf(stderr, "Can't construct instance of class "
              "org.apache.hadoop.conf.Configuration\n");
      errno = EINTERNAL;
      return NULL;
    }

    // Optional impersonation: record user/groups in the Configuration
    if (user != NULL) {

      if (groups == NULL || groups_size <= 0) {
        fprintf(stderr, "ERROR: groups must not be empty/null\n");
        errno = EINVAL;
        // NOTE(review): jConfiguration's local ref leaks on this path
        return NULL;
      }

      jstring jUserString = (*env)->NewStringUTF(env, user);
      jarray jGroups = constructNewArrayString(env, &jExc, groups, groups_size);
      if (jGroups == NULL) {
        errno = EINTERNAL;
        fprintf(stderr, "ERROR: could not construct groups array\n");
        // NOTE(review): jConfiguration and jUserString leak on this path
        return NULL;
      }

      jobject jUgi;
      // jUgi = new UnixUserGroupInformation(user, groups)
      if ((jUgi = constructNewObjectOfClass(env, &jExc, HADOOP_UNIX_USER_GROUP_INFO, JMETHOD2(JPARAM(JAVA_STRING), JARRPARAM(JAVA_STRING), JAVA_VOID), jUserString, jGroups)) == NULL) {
        fprintf(stderr,"failed to construct hadoop user unix group info object\n");
        errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                   "init");
        destroyLocalReference(env, jConfiguration);
        destroyLocalReference(env, jUserString);
        if (jGroups != NULL) {
          destroyLocalReference(env, jGroups);
        }
        return NULL;
      }
#define USE_UUGI
#ifdef USE_UUGI

      // UnixUserGroupInformation.UGI_PROPERTY_NAME
      jstring jAttrString = (*env)->NewStringUTF(env,"hadoop.job.ugi");

      // UnixUserGroupInformation.saveToConf(conf, "hadoop.job.ugi", ugi)
      if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_UNIX_USER_GROUP_INFO, "saveToConf",
                       JMETHOD3(JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING), JPARAM(HADOOP_UNIX_USER_GROUP_INFO), JAVA_VOID),
                       jConfiguration, jAttrString, jUgi) != 0) {
        // NOTE(review): HADOOP_FSPERM here looks like a copy/paste slip in
        // the error message (the failing class is UnixUserGroupInformation)
        // -- confirm intended class name
        errno = errnoFromException(jExc, env, HADOOP_FSPERM,
                                   "init");
        destroyLocalReference(env, jConfiguration);
        destroyLocalReference(env, jUserString);
        if (jGroups != NULL) {
          destroyLocalReference(env, jGroups);
        }
        destroyLocalReference(env, jUgi);
        return NULL;
      }

      // NOTE(review): jAttrString's local reference is never released here
      destroyLocalReference(env, jUserString);
      destroyLocalReference(env, jGroups);
      destroyLocalReference(env, jUgi);
    }
#else

    // what does "current" mean in the context of libhdfs ? does it mean for the last hdfs connection we used?
    // that's why this code cannot be activated. We know the above use of the conf object should work well with
    // multiple connections.
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_USER_GROUP_INFO, "setCurrentUGI",
                     JMETHOD1(JPARAM(HADOOP_USER_GROUP_INFO), JAVA_VOID),
                     jUgi) != 0) {
      errno = errnoFromException(jExc, env, HADOOP_USER_GROUP_INFO,
                                 "setCurrentUGI");
      destroyLocalReference(env, jConfiguration);
      destroyLocalReference(env, jUserString);
      if (jGroups != NULL) {
        destroyLocalReference(env, jGroups);
      }
      destroyLocalReference(env, jUgi);
      return NULL;
    }

    destroyLocalReference(env, jUserString);
    destroyLocalReference(env, jGroups);
    destroyLocalReference(env, jUgi);
    }
#endif
    //Check what type of FileSystem the caller wants...
    if (host == NULL) {
        // fs = FileSytem::getLocal(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_LOCALFS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::getLocal");
            goto done;
        }
        jFS = jVal.l;
    }
    else if (!strcmp(host, "default") && port == 0) {
        //fs = FileSystem::get(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_FS, "get",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_FS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get");
            goto done;
        }
        jFS = jVal.l;
    }
    else {
        // fs = FileSystem::get(URI, conf);
        // "hdfs://" (7) + host + ":" (1) + up to 5 port digits + NUL
        // fits within strlen(host)+16 since tPort is at most 65535
        cURI = malloc(strlen(host)+16);
        sprintf(cURI, "hdfs://%s:%d", host, (int)(port));

        jURIString = (*env)->NewStringUTF(env, cURI);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
                         "create", "(Ljava/lang/String;)Ljava/net/URI;",
                         jURIString) != 0) {
            errno = errnoFromException(jExc, env, "java.net.URI::create");
            goto done;
        }
        jURI = jVal.l;

        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
                         JMETHOD2(JPARAM(JAVA_NET_URI),
                                  JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)),
                         jURI, jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "Filesystem::get(URI, Configuration)");
            goto done;
        }

        jFS = jVal.l;
    }

done:

    // Release unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jURIString);
    destroyLocalReference(env, jURI);

    if (cURI) free(cURI);

    /* Create a global reference for this fs (survives this JNI frame;
       released in hdfsDisconnect) */
    if (jFS) {
        gFsRef = (*env)->NewGlobalRef(env, jFS);
        destroyLocalReference(env, jFS);
    }

    return gFsRef;
}
---|
356 | |
---|
357 | |
---|
358 | |
---|
359 | int hdfsDisconnect(hdfsFS fs) |
---|
360 | { |
---|
361 | // JAVA EQUIVALENT: |
---|
362 | // fs.close() |
---|
363 | |
---|
364 | //Get the JNIEnv* corresponding to current thread |
---|
365 | JNIEnv* env = getJNIEnv(); |
---|
366 | |
---|
367 | if (env == NULL) { |
---|
368 | errno = EINTERNAL; |
---|
369 | return -2; |
---|
370 | } |
---|
371 | |
---|
372 | //Parameters |
---|
373 | jobject jFS = (jobject)fs; |
---|
374 | |
---|
375 | //Caught exception |
---|
376 | jthrowable jExc = NULL; |
---|
377 | |
---|
378 | //Sanity check |
---|
379 | if (fs == NULL) { |
---|
380 | errno = EBADF; |
---|
381 | return -1; |
---|
382 | } |
---|
383 | |
---|
384 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
385 | "close", "()V") != 0) { |
---|
386 | errno = errnoFromException(jExc, env, "Filesystem::close"); |
---|
387 | return -1; |
---|
388 | } |
---|
389 | |
---|
390 | //Release unnecessary references |
---|
391 | (*env)->DeleteGlobalRef(env, fs); |
---|
392 | |
---|
393 | return 0; |
---|
394 | } |
---|
395 | |
---|
396 | |
---|
397 | |
---|
398 | hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags, |
---|
399 | int bufferSize, short replication, tSize blockSize) |
---|
400 | { |
---|
401 | /* |
---|
402 | JAVA EQUIVALENT: |
---|
403 | File f = new File(path); |
---|
404 | FSData{Input|Output}Stream f{is|os} = fs.create(f); |
---|
405 | return f{is|os}; |
---|
406 | */ |
---|
407 | /* Get the JNIEnv* corresponding to current thread */ |
---|
408 | JNIEnv* env = getJNIEnv(); |
---|
409 | |
---|
410 | if (env == NULL) { |
---|
411 | errno = EINTERNAL; |
---|
412 | return NULL; |
---|
413 | } |
---|
414 | |
---|
415 | jobject jFS = (jobject)fs; |
---|
416 | |
---|
417 | if (flags & O_RDWR) { |
---|
418 | fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n"); |
---|
419 | errno = ENOTSUP; |
---|
420 | return NULL; |
---|
421 | } |
---|
422 | |
---|
423 | if ((flags & O_CREAT) && (flags & O_EXCL)) { |
---|
424 | fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n"); |
---|
425 | } |
---|
426 | |
---|
427 | /* The hadoop java api/signature */ |
---|
428 | const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create"; |
---|
429 | const char* signature = ((flags & O_WRONLY) == 0) ? |
---|
430 | JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) : |
---|
431 | (flags & O_APPEND) ? |
---|
432 | JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) : |
---|
433 | JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM)); |
---|
434 | |
---|
435 | /* Return value */ |
---|
436 | hdfsFile file = NULL; |
---|
437 | |
---|
438 | /* Create an object of org.apache.hadoop.fs.Path */ |
---|
439 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
440 | if (jPath == NULL) { |
---|
441 | return NULL; |
---|
442 | } |
---|
443 | |
---|
444 | /* Get the Configuration object from the FileSystem object */ |
---|
445 | jvalue jVal; |
---|
446 | jobject jConfiguration = NULL; |
---|
447 | jthrowable jExc = NULL; |
---|
448 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
449 | "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) { |
---|
450 | errno = errnoFromException(jExc, env, "get configuration object " |
---|
451 | "from filesystem"); |
---|
452 | destroyLocalReference(env, jPath); |
---|
453 | return NULL; |
---|
454 | } |
---|
455 | jConfiguration = jVal.l; |
---|
456 | |
---|
457 | jint jBufferSize = bufferSize; |
---|
458 | jshort jReplication = replication; |
---|
459 | jlong jBlockSize = blockSize; |
---|
460 | jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size"); |
---|
461 | jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication"); |
---|
462 | jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size"); |
---|
463 | |
---|
464 | |
---|
465 | //bufferSize |
---|
466 | if (!bufferSize) { |
---|
467 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration, |
---|
468 | HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I", |
---|
469 | jStrBufferSize, 4096) != 0) { |
---|
470 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
471 | "Configuration::getInt"); |
---|
472 | goto done; |
---|
473 | } |
---|
474 | jBufferSize = jVal.i; |
---|
475 | } |
---|
476 | |
---|
477 | if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) { |
---|
478 | //replication |
---|
479 | |
---|
480 | if (!replication) { |
---|
481 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration, |
---|
482 | HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I", |
---|
483 | jStrReplication, 1) != 0) { |
---|
484 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
485 | "Configuration::getInt"); |
---|
486 | goto done; |
---|
487 | } |
---|
488 | jReplication = jVal.i; |
---|
489 | } |
---|
490 | |
---|
491 | //blockSize |
---|
492 | if (!blockSize) { |
---|
493 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration, |
---|
494 | HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J", |
---|
495 | jStrBlockSize, 67108864)) { |
---|
496 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
497 | "FileSystem::%s(%s)", method, |
---|
498 | signature); |
---|
499 | goto done; |
---|
500 | } |
---|
501 | jBlockSize = jVal.j; |
---|
502 | } |
---|
503 | } |
---|
504 | |
---|
505 | /* Create and return either the FSDataInputStream or |
---|
506 | FSDataOutputStream references jobject jStream */ |
---|
507 | |
---|
508 | // READ? |
---|
509 | if ((flags & O_WRONLY) == 0) { |
---|
510 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
511 | method, signature, jPath, jBufferSize)) { |
---|
512 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
513 | "FileSystem::%s(%s)", method, |
---|
514 | signature); |
---|
515 | goto done; |
---|
516 | } |
---|
517 | } else if ((flags & O_WRONLY) && (flags & O_APPEND)) { |
---|
518 | // WRITE/APPEND? |
---|
519 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
520 | method, signature, jPath)) { |
---|
521 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
522 | "FileSystem::%s(%s)", method, |
---|
523 | signature); |
---|
524 | goto done; |
---|
525 | } |
---|
526 | } else { |
---|
527 | // WRITE/CREATE |
---|
528 | jboolean jOverWrite = 1; |
---|
529 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
530 | method, signature, jPath, jOverWrite, |
---|
531 | jBufferSize, jReplication, jBlockSize)) { |
---|
532 | errno = errnoFromException(jExc, env, "org.apache.hadoop.conf." |
---|
533 | "FileSystem::%s(%s)", method, |
---|
534 | signature); |
---|
535 | goto done; |
---|
536 | } |
---|
537 | } |
---|
538 | |
---|
539 | file = malloc(sizeof(struct hdfsFile_internal)); |
---|
540 | if (!file) { |
---|
541 | errno = ENOMEM; |
---|
542 | return NULL; |
---|
543 | } |
---|
544 | file->file = (*env)->NewGlobalRef(env, jVal.l); |
---|
545 | file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT); |
---|
546 | |
---|
547 | destroyLocalReference(env, jVal.l); |
---|
548 | |
---|
549 | done: |
---|
550 | |
---|
551 | //Delete unnecessary local references |
---|
552 | destroyLocalReference(env, jStrBufferSize); |
---|
553 | destroyLocalReference(env, jStrReplication); |
---|
554 | destroyLocalReference(env, jStrBlockSize); |
---|
555 | destroyLocalReference(env, jConfiguration); |
---|
556 | destroyLocalReference(env, jPath); |
---|
557 | |
---|
558 | return file; |
---|
559 | } |
---|
560 | |
---|
561 | |
---|
562 | |
---|
563 | int hdfsCloseFile(hdfsFS fs, hdfsFile file) |
---|
564 | { |
---|
565 | // JAVA EQUIVALENT: |
---|
566 | // file.close |
---|
567 | |
---|
568 | //Get the JNIEnv* corresponding to current thread |
---|
569 | JNIEnv* env = getJNIEnv(); |
---|
570 | |
---|
571 | if (env == NULL) { |
---|
572 | errno = EINTERNAL; |
---|
573 | return -2; |
---|
574 | } |
---|
575 | |
---|
576 | //Parameters |
---|
577 | jobject jStream = (jobject)(file ? file->file : NULL); |
---|
578 | |
---|
579 | //Caught exception |
---|
580 | jthrowable jExc = NULL; |
---|
581 | |
---|
582 | //Sanity check |
---|
583 | if (!file || file->type == UNINITIALIZED) { |
---|
584 | errno = EBADF; |
---|
585 | return -1; |
---|
586 | } |
---|
587 | |
---|
588 | //The interface whose 'close' method to be called |
---|
589 | const char* interface = (file->type == INPUT) ? |
---|
590 | HADOOP_ISTRM : HADOOP_OSTRM; |
---|
591 | |
---|
592 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface, |
---|
593 | "close", "()V") != 0) { |
---|
594 | errno = errnoFromException(jExc, env, "%s::close", interface); |
---|
595 | return -1; |
---|
596 | } |
---|
597 | |
---|
598 | //De-allocate memory |
---|
599 | free(file); |
---|
600 | (*env)->DeleteGlobalRef(env, jStream); |
---|
601 | |
---|
602 | return 0; |
---|
603 | } |
---|
604 | |
---|
605 | |
---|
606 | |
---|
607 | int hdfsExists(hdfsFS fs, const char *path) |
---|
608 | { |
---|
609 | JNIEnv *env = getJNIEnv(); |
---|
610 | if (env == NULL) { |
---|
611 | errno = EINTERNAL; |
---|
612 | return -2; |
---|
613 | } |
---|
614 | |
---|
615 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
616 | jvalue jVal; |
---|
617 | jthrowable jExc = NULL; |
---|
618 | jobject jFS = (jobject)fs; |
---|
619 | |
---|
620 | if (jPath == NULL) { |
---|
621 | return -1; |
---|
622 | } |
---|
623 | |
---|
624 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
625 | "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), |
---|
626 | jPath) != 0) { |
---|
627 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
628 | "FileSystem::exists"); |
---|
629 | return -1; |
---|
630 | } |
---|
631 | |
---|
632 | return jVal.z ? 0 : -1; |
---|
633 | } |
---|
634 | |
---|
635 | |
---|
636 | |
---|
637 | tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length) |
---|
638 | { |
---|
639 | // JAVA EQUIVALENT: |
---|
640 | // byte [] bR = new byte[length]; |
---|
641 | // fis.read(bR); |
---|
642 | |
---|
643 | //Get the JNIEnv* corresponding to current thread |
---|
644 | JNIEnv* env = getJNIEnv(); |
---|
645 | if (env == NULL) { |
---|
646 | errno = EINTERNAL; |
---|
647 | return -1; |
---|
648 | } |
---|
649 | |
---|
650 | //Parameters |
---|
651 | jobject jInputStream = (jobject)(f ? f->file : NULL); |
---|
652 | |
---|
653 | jbyteArray jbRarray; |
---|
654 | jint noReadBytes = 0; |
---|
655 | jvalue jVal; |
---|
656 | jthrowable jExc = NULL; |
---|
657 | |
---|
658 | //Sanity check |
---|
659 | if (!f || f->type == UNINITIALIZED) { |
---|
660 | errno = EBADF; |
---|
661 | return -1; |
---|
662 | } |
---|
663 | |
---|
664 | //Error checking... make sure that this file is 'readable' |
---|
665 | if (f->type != INPUT) { |
---|
666 | fprintf(stderr, "Cannot read from a non-InputStream object!\n"); |
---|
667 | errno = EINVAL; |
---|
668 | return -1; |
---|
669 | } |
---|
670 | |
---|
671 | //Read the requisite bytes |
---|
672 | jbRarray = (*env)->NewByteArray(env, length); |
---|
673 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM, |
---|
674 | "read", "([B)I", jbRarray) != 0) { |
---|
675 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
676 | "FSDataInputStream::read"); |
---|
677 | noReadBytes = -1; |
---|
678 | } |
---|
679 | else { |
---|
680 | noReadBytes = jVal.i; |
---|
681 | if (noReadBytes > 0) { |
---|
682 | (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer); |
---|
683 | } else { |
---|
684 | //This is a valid case: there aren't any bytes left to read! |
---|
685 | if (noReadBytes == 0 || noReadBytes < -1) { |
---|
686 | fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes); |
---|
687 | } |
---|
688 | noReadBytes = 0; |
---|
689 | } |
---|
690 | errno = 0; |
---|
691 | } |
---|
692 | |
---|
693 | destroyLocalReference(env, jbRarray); |
---|
694 | |
---|
695 | return noReadBytes; |
---|
696 | } |
---|
697 | |
---|
698 | |
---|
699 | |
---|
700 | tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position, |
---|
701 | void* buffer, tSize length) |
---|
702 | { |
---|
703 | // JAVA EQUIVALENT: |
---|
704 | // byte [] bR = new byte[length]; |
---|
705 | // fis.read(pos, bR, 0, length); |
---|
706 | |
---|
707 | //Get the JNIEnv* corresponding to current thread |
---|
708 | JNIEnv* env = getJNIEnv(); |
---|
709 | if (env == NULL) { |
---|
710 | errno = EINTERNAL; |
---|
711 | return -1; |
---|
712 | } |
---|
713 | |
---|
714 | //Parameters |
---|
715 | jobject jInputStream = (jobject)(f ? f->file : NULL); |
---|
716 | |
---|
717 | jbyteArray jbRarray; |
---|
718 | jint noReadBytes = 0; |
---|
719 | jvalue jVal; |
---|
720 | jthrowable jExc = NULL; |
---|
721 | |
---|
722 | //Sanity check |
---|
723 | if (!f || f->type == UNINITIALIZED) { |
---|
724 | errno = EBADF; |
---|
725 | return -1; |
---|
726 | } |
---|
727 | |
---|
728 | //Error checking... make sure that this file is 'readable' |
---|
729 | if (f->type != INPUT) { |
---|
730 | fprintf(stderr, "Cannot read from a non-InputStream object!\n"); |
---|
731 | errno = EINVAL; |
---|
732 | return -1; |
---|
733 | } |
---|
734 | |
---|
735 | //Read the requisite bytes |
---|
736 | jbRarray = (*env)->NewByteArray(env, length); |
---|
737 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM, |
---|
738 | "read", "(J[BII)I", position, jbRarray, 0, length) != 0) { |
---|
739 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
740 | "FSDataInputStream::read"); |
---|
741 | noReadBytes = -1; |
---|
742 | } |
---|
743 | else { |
---|
744 | noReadBytes = jVal.i; |
---|
745 | if (noReadBytes > 0) { |
---|
746 | (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer); |
---|
747 | } else { |
---|
748 | //This is a valid case: there aren't any bytes left to read! |
---|
749 | if (noReadBytes == 0 || noReadBytes < -1) { |
---|
750 | fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes); |
---|
751 | } |
---|
752 | noReadBytes = 0; |
---|
753 | } |
---|
754 | errno = 0; |
---|
755 | } |
---|
756 | destroyLocalReference(env, jbRarray); |
---|
757 | |
---|
758 | return noReadBytes; |
---|
759 | } |
---|
760 | |
---|
761 | |
---|
762 | |
---|
763 | tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length) |
---|
764 | { |
---|
765 | // JAVA EQUIVALENT |
---|
766 | // byte b[] = str.getBytes(); |
---|
767 | // fso.write(b); |
---|
768 | |
---|
769 | //Get the JNIEnv* corresponding to current thread |
---|
770 | JNIEnv* env = getJNIEnv(); |
---|
771 | if (env == NULL) { |
---|
772 | errno = EINTERNAL; |
---|
773 | return -1; |
---|
774 | } |
---|
775 | |
---|
776 | //Parameters |
---|
777 | jobject jOutputStream = (jobject)(f ? f->file : 0); |
---|
778 | jbyteArray jbWarray; |
---|
779 | |
---|
780 | //Caught exception |
---|
781 | jthrowable jExc = NULL; |
---|
782 | |
---|
783 | //Sanity check |
---|
784 | if (!f || f->type == UNINITIALIZED) { |
---|
785 | errno = EBADF; |
---|
786 | return -1; |
---|
787 | } |
---|
788 | |
---|
789 | if (length < 0) { |
---|
790 | errno = EINVAL; |
---|
791 | return -1; |
---|
792 | } |
---|
793 | |
---|
794 | //Error checking... make sure that this file is 'writable' |
---|
795 | if (f->type != OUTPUT) { |
---|
796 | fprintf(stderr, "Cannot write into a non-OutputStream object!\n"); |
---|
797 | errno = EINVAL; |
---|
798 | return -1; |
---|
799 | } |
---|
800 | |
---|
801 | // 'length' equals 'zero' is a valid use-case according to Posix! |
---|
802 | if (length != 0) { |
---|
803 | //Write the requisite bytes into the file |
---|
804 | jbWarray = (*env)->NewByteArray(env, length); |
---|
805 | (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer); |
---|
806 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream, |
---|
807 | HADOOP_OSTRM, "write", |
---|
808 | "([B)V", jbWarray) != 0) { |
---|
809 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
810 | "FSDataOutputStream::write"); |
---|
811 | length = -1; |
---|
812 | } |
---|
813 | destroyLocalReference(env, jbWarray); |
---|
814 | } |
---|
815 | |
---|
816 | //Return no. of bytes succesfully written (libc way) |
---|
817 | //i.e. 'length' itself! ;-) |
---|
818 | return length; |
---|
819 | } |
---|
820 | |
---|
821 | |
---|
822 | |
---|
823 | int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos) |
---|
824 | { |
---|
825 | // JAVA EQUIVALENT |
---|
826 | // fis.seek(pos); |
---|
827 | |
---|
828 | //Get the JNIEnv* corresponding to current thread |
---|
829 | JNIEnv* env = getJNIEnv(); |
---|
830 | if (env == NULL) { |
---|
831 | errno = EINTERNAL; |
---|
832 | return -1; |
---|
833 | } |
---|
834 | |
---|
835 | //Parameters |
---|
836 | jobject jInputStream = (jobject)(f ? f->file : 0); |
---|
837 | |
---|
838 | //Caught exception |
---|
839 | jthrowable jExc = NULL; |
---|
840 | |
---|
841 | //Sanity check |
---|
842 | if (!f || f->type != INPUT) { |
---|
843 | errno = EBADF; |
---|
844 | return -1; |
---|
845 | } |
---|
846 | |
---|
847 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM, |
---|
848 | "seek", "(J)V", desiredPos) != 0) { |
---|
849 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
850 | "FSDataInputStream::seek"); |
---|
851 | return -1; |
---|
852 | } |
---|
853 | |
---|
854 | return 0; |
---|
855 | } |
---|
856 | |
---|
857 | |
---|
858 | |
---|
859 | tOffset hdfsTell(hdfsFS fs, hdfsFile f) |
---|
860 | { |
---|
861 | // JAVA EQUIVALENT |
---|
862 | // pos = f.getPos(); |
---|
863 | |
---|
864 | //Get the JNIEnv* corresponding to current thread |
---|
865 | JNIEnv* env = getJNIEnv(); |
---|
866 | if (env == NULL) { |
---|
867 | errno = EINTERNAL; |
---|
868 | return -1; |
---|
869 | } |
---|
870 | |
---|
871 | //Parameters |
---|
872 | jobject jStream = (jobject)(f ? f->file : 0); |
---|
873 | |
---|
874 | //Sanity check |
---|
875 | if (!f || f->type == UNINITIALIZED) { |
---|
876 | errno = EBADF; |
---|
877 | return -1; |
---|
878 | } |
---|
879 | |
---|
880 | const char* interface = (f->type == INPUT) ? |
---|
881 | HADOOP_ISTRM : HADOOP_OSTRM; |
---|
882 | |
---|
883 | jlong currentPos = -1; |
---|
884 | jvalue jVal; |
---|
885 | jthrowable jExc = NULL; |
---|
886 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream, |
---|
887 | interface, "getPos", "()J") != 0) { |
---|
888 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
889 | "FSDataInputStream::getPos"); |
---|
890 | return -1; |
---|
891 | } |
---|
892 | currentPos = jVal.j; |
---|
893 | |
---|
894 | return (tOffset)currentPos; |
---|
895 | } |
---|
896 | |
---|
897 | |
---|
898 | |
---|
899 | int hdfsFlush(hdfsFS fs, hdfsFile f) |
---|
900 | { |
---|
901 | // JAVA EQUIVALENT |
---|
902 | // fos.flush(); |
---|
903 | |
---|
904 | //Get the JNIEnv* corresponding to current thread |
---|
905 | JNIEnv* env = getJNIEnv(); |
---|
906 | if (env == NULL) { |
---|
907 | errno = EINTERNAL; |
---|
908 | return -1; |
---|
909 | } |
---|
910 | |
---|
911 | //Parameters |
---|
912 | jobject jOutputStream = (jobject)(f ? f->file : 0); |
---|
913 | |
---|
914 | //Caught exception |
---|
915 | jthrowable jExc = NULL; |
---|
916 | |
---|
917 | //Sanity check |
---|
918 | if (!f || f->type != OUTPUT) { |
---|
919 | errno = EBADF; |
---|
920 | return -1; |
---|
921 | } |
---|
922 | |
---|
923 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream, |
---|
924 | HADOOP_OSTRM, "flush", "()V") != 0) { |
---|
925 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
926 | "FSDataInputStream::flush"); |
---|
927 | return -1; |
---|
928 | } |
---|
929 | |
---|
930 | return 0; |
---|
931 | } |
---|
932 | |
---|
933 | |
---|
934 | |
---|
935 | int hdfsAvailable(hdfsFS fs, hdfsFile f) |
---|
936 | { |
---|
937 | // JAVA EQUIVALENT |
---|
938 | // fis.available(); |
---|
939 | |
---|
940 | //Get the JNIEnv* corresponding to current thread |
---|
941 | JNIEnv* env = getJNIEnv(); |
---|
942 | if (env == NULL) { |
---|
943 | errno = EINTERNAL; |
---|
944 | return -1; |
---|
945 | } |
---|
946 | |
---|
947 | //Parameters |
---|
948 | jobject jInputStream = (jobject)(f ? f->file : 0); |
---|
949 | |
---|
950 | //Caught exception |
---|
951 | jthrowable jExc = NULL; |
---|
952 | |
---|
953 | //Sanity check |
---|
954 | if (!f || f->type != INPUT) { |
---|
955 | errno = EBADF; |
---|
956 | return -1; |
---|
957 | } |
---|
958 | |
---|
959 | jint available = -1; |
---|
960 | jvalue jVal; |
---|
961 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, |
---|
962 | HADOOP_ISTRM, "available", "()I") != 0) { |
---|
963 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
964 | "FSDataInputStream::available"); |
---|
965 | return -1; |
---|
966 | } |
---|
967 | available = jVal.i; |
---|
968 | |
---|
969 | return available; |
---|
970 | } |
---|
971 | |
---|
972 | |
---|
973 | |
---|
974 | int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst) |
---|
975 | { |
---|
976 | //JAVA EQUIVALENT |
---|
977 | // FileUtil::copy(srcFS, srcPath, dstFS, dstPath, |
---|
978 | // deleteSource = false, conf) |
---|
979 | |
---|
980 | //Get the JNIEnv* corresponding to current thread |
---|
981 | JNIEnv* env = getJNIEnv(); |
---|
982 | if (env == NULL) { |
---|
983 | errno = EINTERNAL; |
---|
984 | return -1; |
---|
985 | } |
---|
986 | |
---|
987 | //Parameters |
---|
988 | jobject jSrcFS = (jobject)srcFS; |
---|
989 | jobject jDstFS = (jobject)dstFS; |
---|
990 | jobject jSrcPath = NULL; |
---|
991 | jobject jDstPath = NULL; |
---|
992 | |
---|
993 | jSrcPath = constructNewObjectOfPath(env, src); |
---|
994 | if (jSrcPath == NULL) { |
---|
995 | return -1; |
---|
996 | } |
---|
997 | |
---|
998 | jDstPath = constructNewObjectOfPath(env, dst); |
---|
999 | if (jDstPath == NULL) { |
---|
1000 | destroyLocalReference(env, jSrcPath); |
---|
1001 | return -1; |
---|
1002 | } |
---|
1003 | |
---|
1004 | int retval = 0; |
---|
1005 | |
---|
1006 | //Create the org.apache.hadoop.conf.Configuration object |
---|
1007 | jobject jConfiguration = |
---|
1008 | constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V"); |
---|
1009 | if (jConfiguration == NULL) { |
---|
1010 | fprintf(stderr, "Can't construct instance of class " |
---|
1011 | "org.apache.hadoop.conf.Configuration\n"); |
---|
1012 | errno = EINTERNAL; |
---|
1013 | destroyLocalReference(env, jSrcPath); |
---|
1014 | destroyLocalReference(env, jDstPath); |
---|
1015 | return -1; |
---|
1016 | } |
---|
1017 | |
---|
1018 | //FileUtil::copy |
---|
1019 | jboolean deleteSource = 0; //Only copy |
---|
1020 | jvalue jVal; |
---|
1021 | jthrowable jExc = NULL; |
---|
1022 | if (invokeMethod(env, &jVal, &jExc, STATIC, |
---|
1023 | NULL, "org/apache/hadoop/fs/FileUtil", "copy", |
---|
1024 | "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z", |
---|
1025 | jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource, |
---|
1026 | jConfiguration) != 0) { |
---|
1027 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1028 | "FileUtil::copy"); |
---|
1029 | retval = -1; |
---|
1030 | goto done; |
---|
1031 | } |
---|
1032 | |
---|
1033 | done: |
---|
1034 | |
---|
1035 | //Delete unnecessary local references |
---|
1036 | destroyLocalReference(env, jConfiguration); |
---|
1037 | destroyLocalReference(env, jSrcPath); |
---|
1038 | destroyLocalReference(env, jDstPath); |
---|
1039 | |
---|
1040 | return retval; |
---|
1041 | } |
---|
1042 | |
---|
1043 | |
---|
1044 | |
---|
1045 | int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst) |
---|
1046 | { |
---|
1047 | //JAVA EQUIVALENT |
---|
1048 | // FileUtil::copy(srcFS, srcPath, dstFS, dstPath, |
---|
1049 | // deleteSource = true, conf) |
---|
1050 | |
---|
1051 | //Get the JNIEnv* corresponding to current thread |
---|
1052 | JNIEnv* env = getJNIEnv(); |
---|
1053 | if (env == NULL) { |
---|
1054 | errno = EINTERNAL; |
---|
1055 | return -1; |
---|
1056 | } |
---|
1057 | |
---|
1058 | |
---|
1059 | //Parameters |
---|
1060 | jobject jSrcFS = (jobject)srcFS; |
---|
1061 | jobject jDstFS = (jobject)dstFS; |
---|
1062 | |
---|
1063 | jobject jSrcPath = NULL; |
---|
1064 | jobject jDstPath = NULL; |
---|
1065 | |
---|
1066 | jSrcPath = constructNewObjectOfPath(env, src); |
---|
1067 | if (jSrcPath == NULL) { |
---|
1068 | return -1; |
---|
1069 | } |
---|
1070 | |
---|
1071 | jDstPath = constructNewObjectOfPath(env, dst); |
---|
1072 | if (jDstPath == NULL) { |
---|
1073 | destroyLocalReference(env, jSrcPath); |
---|
1074 | return -1; |
---|
1075 | } |
---|
1076 | |
---|
1077 | int retval = 0; |
---|
1078 | |
---|
1079 | //Create the org.apache.hadoop.conf.Configuration object |
---|
1080 | jobject jConfiguration = |
---|
1081 | constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V"); |
---|
1082 | if (jConfiguration == NULL) { |
---|
1083 | fprintf(stderr, "Can't construct instance of class " |
---|
1084 | "org.apache.hadoop.conf.Configuration\n"); |
---|
1085 | errno = EINTERNAL; |
---|
1086 | destroyLocalReference(env, jSrcPath); |
---|
1087 | destroyLocalReference(env, jDstPath); |
---|
1088 | return -1; |
---|
1089 | } |
---|
1090 | |
---|
1091 | //FileUtil::copy |
---|
1092 | jboolean deleteSource = 1; //Delete src after copy |
---|
1093 | jvalue jVal; |
---|
1094 | jthrowable jExc = NULL; |
---|
1095 | if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, |
---|
1096 | "org/apache/hadoop/fs/FileUtil", "copy", |
---|
1097 | "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z", |
---|
1098 | jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource, |
---|
1099 | jConfiguration) != 0) { |
---|
1100 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1101 | "FileUtil::copy(move)"); |
---|
1102 | retval = -1; |
---|
1103 | goto done; |
---|
1104 | } |
---|
1105 | |
---|
1106 | done: |
---|
1107 | |
---|
1108 | //Delete unnecessary local references |
---|
1109 | destroyLocalReference(env, jConfiguration); |
---|
1110 | destroyLocalReference(env, jSrcPath); |
---|
1111 | destroyLocalReference(env, jDstPath); |
---|
1112 | |
---|
1113 | return retval; |
---|
1114 | } |
---|
1115 | |
---|
1116 | |
---|
1117 | |
---|
1118 | int hdfsDelete(hdfsFS fs, const char* path) |
---|
1119 | { |
---|
1120 | // JAVA EQUIVALENT: |
---|
1121 | // File f = new File(path); |
---|
1122 | // bool retval = fs.delete(f); |
---|
1123 | |
---|
1124 | //Get the JNIEnv* corresponding to current thread |
---|
1125 | JNIEnv* env = getJNIEnv(); |
---|
1126 | if (env == NULL) { |
---|
1127 | errno = EINTERNAL; |
---|
1128 | return -1; |
---|
1129 | } |
---|
1130 | |
---|
1131 | jobject jFS = (jobject)fs; |
---|
1132 | |
---|
1133 | //Create an object of java.io.File |
---|
1134 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1135 | if (jPath == NULL) { |
---|
1136 | return -1; |
---|
1137 | } |
---|
1138 | |
---|
1139 | //Delete the file |
---|
1140 | jvalue jVal; |
---|
1141 | jthrowable jExc = NULL; |
---|
1142 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1143 | "delete", "(Lorg/apache/hadoop/fs/Path;)Z", |
---|
1144 | jPath) != 0) { |
---|
1145 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1146 | "FileSystem::delete"); |
---|
1147 | return -1; |
---|
1148 | } |
---|
1149 | |
---|
1150 | //Delete unnecessary local references |
---|
1151 | destroyLocalReference(env, jPath); |
---|
1152 | |
---|
1153 | return (jVal.z) ? 0 : -1; |
---|
1154 | } |
---|
1155 | |
---|
1156 | |
---|
1157 | |
---|
1158 | int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath) |
---|
1159 | { |
---|
1160 | // JAVA EQUIVALENT: |
---|
1161 | // Path old = new Path(oldPath); |
---|
1162 | // Path new = new Path(newPath); |
---|
1163 | // fs.rename(old, new); |
---|
1164 | |
---|
1165 | //Get the JNIEnv* corresponding to current thread |
---|
1166 | JNIEnv* env = getJNIEnv(); |
---|
1167 | if (env == NULL) { |
---|
1168 | errno = EINTERNAL; |
---|
1169 | return -1; |
---|
1170 | } |
---|
1171 | |
---|
1172 | jobject jFS = (jobject)fs; |
---|
1173 | |
---|
1174 | //Create objects of org.apache.hadoop.fs.Path |
---|
1175 | jobject jOldPath = NULL; |
---|
1176 | jobject jNewPath = NULL; |
---|
1177 | |
---|
1178 | jOldPath = constructNewObjectOfPath(env, oldPath); |
---|
1179 | if (jOldPath == NULL) { |
---|
1180 | return -1; |
---|
1181 | } |
---|
1182 | |
---|
1183 | jNewPath = constructNewObjectOfPath(env, newPath); |
---|
1184 | if (jNewPath == NULL) { |
---|
1185 | destroyLocalReference(env, jOldPath); |
---|
1186 | return -1; |
---|
1187 | } |
---|
1188 | |
---|
1189 | //Rename the file |
---|
1190 | jvalue jVal; |
---|
1191 | jthrowable jExc = NULL; |
---|
1192 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename", |
---|
1193 | JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"), |
---|
1194 | jOldPath, jNewPath) != 0) { |
---|
1195 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1196 | "FileSystem::rename"); |
---|
1197 | return -1; |
---|
1198 | } |
---|
1199 | |
---|
1200 | //Delete unnecessary local references |
---|
1201 | destroyLocalReference(env, jOldPath); |
---|
1202 | destroyLocalReference(env, jNewPath); |
---|
1203 | |
---|
1204 | return (jVal.z) ? 0 : -1; |
---|
1205 | } |
---|
1206 | |
---|
1207 | |
---|
1208 | |
---|
1209 | char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize) |
---|
1210 | { |
---|
1211 | // JAVA EQUIVALENT: |
---|
1212 | // Path p = fs.getWorkingDirectory(); |
---|
1213 | // return p.toString() |
---|
1214 | |
---|
1215 | //Get the JNIEnv* corresponding to current thread |
---|
1216 | JNIEnv* env = getJNIEnv(); |
---|
1217 | if (env == NULL) { |
---|
1218 | errno = EINTERNAL; |
---|
1219 | return NULL; |
---|
1220 | } |
---|
1221 | |
---|
1222 | jobject jFS = (jobject)fs; |
---|
1223 | jobject jPath = NULL; |
---|
1224 | jvalue jVal; |
---|
1225 | jthrowable jExc = NULL; |
---|
1226 | |
---|
1227 | //FileSystem::getWorkingDirectory() |
---|
1228 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, |
---|
1229 | HADOOP_FS, "getWorkingDirectory", |
---|
1230 | "()Lorg/apache/hadoop/fs/Path;") != 0 || |
---|
1231 | jVal.l == NULL) { |
---|
1232 | errno = errnoFromException(jExc, env, "FileSystem::" |
---|
1233 | "getWorkingDirectory"); |
---|
1234 | return NULL; |
---|
1235 | } |
---|
1236 | jPath = jVal.l; |
---|
1237 | |
---|
1238 | //Path::toString() |
---|
1239 | jstring jPathString; |
---|
1240 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, |
---|
1241 | "org/apache/hadoop/fs/Path", "toString", |
---|
1242 | "()Ljava/lang/String;") != 0) { |
---|
1243 | errno = errnoFromException(jExc, env, "Path::toString"); |
---|
1244 | destroyLocalReference(env, jPath); |
---|
1245 | return NULL; |
---|
1246 | } |
---|
1247 | jPathString = jVal.l; |
---|
1248 | |
---|
1249 | const char *jPathChars = (const char*) |
---|
1250 | ((*env)->GetStringUTFChars(env, jPathString, NULL)); |
---|
1251 | |
---|
1252 | //Copy to user-provided buffer |
---|
1253 | strncpy(buffer, jPathChars, bufferSize); |
---|
1254 | |
---|
1255 | //Delete unnecessary local references |
---|
1256 | (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars); |
---|
1257 | |
---|
1258 | destroyLocalReference(env, jPathString); |
---|
1259 | destroyLocalReference(env, jPath); |
---|
1260 | |
---|
1261 | return buffer; |
---|
1262 | } |
---|
1263 | |
---|
1264 | |
---|
1265 | |
---|
1266 | int hdfsSetWorkingDirectory(hdfsFS fs, const char* path) |
---|
1267 | { |
---|
1268 | // JAVA EQUIVALENT: |
---|
1269 | // fs.setWorkingDirectory(Path(path)); |
---|
1270 | |
---|
1271 | //Get the JNIEnv* corresponding to current thread |
---|
1272 | JNIEnv* env = getJNIEnv(); |
---|
1273 | if (env == NULL) { |
---|
1274 | errno = EINTERNAL; |
---|
1275 | return -1; |
---|
1276 | } |
---|
1277 | |
---|
1278 | jobject jFS = (jobject)fs; |
---|
1279 | int retval = 0; |
---|
1280 | jthrowable jExc = NULL; |
---|
1281 | |
---|
1282 | //Create an object of org.apache.hadoop.fs.Path |
---|
1283 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1284 | if (jPath == NULL) { |
---|
1285 | return -1; |
---|
1286 | } |
---|
1287 | |
---|
1288 | //FileSystem::setWorkingDirectory() |
---|
1289 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1290 | "setWorkingDirectory", |
---|
1291 | "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) { |
---|
1292 | errno = errnoFromException(jExc, env, "FileSystem::" |
---|
1293 | "setWorkingDirectory"); |
---|
1294 | retval = -1; |
---|
1295 | } |
---|
1296 | |
---|
1297 | //Delete unnecessary local references |
---|
1298 | destroyLocalReference(env, jPath); |
---|
1299 | |
---|
1300 | return retval; |
---|
1301 | } |
---|
1302 | |
---|
1303 | |
---|
1304 | |
---|
1305 | int hdfsCreateDirectory(hdfsFS fs, const char* path) |
---|
1306 | { |
---|
1307 | // JAVA EQUIVALENT: |
---|
1308 | // fs.mkdirs(new Path(path)); |
---|
1309 | |
---|
1310 | //Get the JNIEnv* corresponding to current thread |
---|
1311 | JNIEnv* env = getJNIEnv(); |
---|
1312 | if (env == NULL) { |
---|
1313 | errno = EINTERNAL; |
---|
1314 | return -1; |
---|
1315 | } |
---|
1316 | |
---|
1317 | jobject jFS = (jobject)fs; |
---|
1318 | |
---|
1319 | //Create an object of org.apache.hadoop.fs.Path |
---|
1320 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1321 | if (jPath == NULL) { |
---|
1322 | return -1; |
---|
1323 | } |
---|
1324 | |
---|
1325 | //Create the directory |
---|
1326 | jvalue jVal; |
---|
1327 | jVal.z = 0; |
---|
1328 | jthrowable jExc = NULL; |
---|
1329 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1330 | "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z", |
---|
1331 | jPath) != 0) { |
---|
1332 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1333 | "FileSystem::mkdirs"); |
---|
1334 | goto done; |
---|
1335 | } |
---|
1336 | |
---|
1337 | done: |
---|
1338 | |
---|
1339 | //Delete unnecessary local references |
---|
1340 | destroyLocalReference(env, jPath); |
---|
1341 | |
---|
1342 | return (jVal.z) ? 0 : -1; |
---|
1343 | } |
---|
1344 | |
---|
1345 | |
---|
1346 | int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication) |
---|
1347 | { |
---|
1348 | // JAVA EQUIVALENT: |
---|
1349 | // fs.setReplication(new Path(path), replication); |
---|
1350 | |
---|
1351 | //Get the JNIEnv* corresponding to current thread |
---|
1352 | JNIEnv* env = getJNIEnv(); |
---|
1353 | if (env == NULL) { |
---|
1354 | errno = EINTERNAL; |
---|
1355 | return -1; |
---|
1356 | } |
---|
1357 | |
---|
1358 | jobject jFS = (jobject)fs; |
---|
1359 | |
---|
1360 | //Create an object of org.apache.hadoop.fs.Path |
---|
1361 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1362 | if (jPath == NULL) { |
---|
1363 | return -1; |
---|
1364 | } |
---|
1365 | |
---|
1366 | //Create the directory |
---|
1367 | jvalue jVal; |
---|
1368 | jthrowable jExc = NULL; |
---|
1369 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1370 | "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z", |
---|
1371 | jPath, replication) != 0) { |
---|
1372 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1373 | "FileSystem::setReplication"); |
---|
1374 | goto done; |
---|
1375 | } |
---|
1376 | |
---|
1377 | done: |
---|
1378 | |
---|
1379 | //Delete unnecessary local references |
---|
1380 | destroyLocalReference(env, jPath); |
---|
1381 | |
---|
1382 | return (jVal.z) ? 0 : -1; |
---|
1383 | } |
---|
1384 | |
---|
/**
 * Change the owner and/or group of a path.
 * JAVA EQUIVALENT: fs.setOwner(path, owner, group)
 *
 * @param fs    filesystem handle
 * @param path  path whose ownership to change
 * @param owner new owner, or NULL to leave the owner unchanged
 * @param group new group, or NULL to leave the group unchanged
 * @return 0 on success, -1 on error (errno set: EINTERNAL, EINVAL,
 *         or the errno derived from the Java exception)
 */
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    // JAVA EQUIVALENT:
    //  fs.setOwner(path, owner, group)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //At least one of owner/group must be given, otherwise the call
    //would be a no-op.
    if (owner == NULL && group == NULL) {
        fprintf(stderr, "Both owner and group cannot be null in chown");
        errno = EINVAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Wrap 'path' in an org.apache.hadoop.fs.Path object
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //NOTE(review): NewStringUTF is invoked even when owner or group is
    //NULL — presumably that yields a NULL jstring (Java null), which
    //setOwner accepts; confirm on the target JVM.  The results are not
    //checked for allocation failure either.
    jstring jOwnerString = (*env)->NewStringUTF(env, owner);
    jstring jGroupString = (*env)->NewStringUTF(env, group);

    //Invoke FileSystem.setOwner(Path, String, String)
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
                     jPath, jOwnerString, jGroupString) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setOwner");
        ret = -1;
        goto done;
    }

done:
    //Release local JNI references on all exit paths
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jOwnerString);
    destroyLocalReference(env, jGroupString);

    return ret;
}
---|
1432 | |
---|
1433 | int hdfsChmod(hdfsFS fs, const char* path, short mode) |
---|
1434 | { |
---|
1435 | // JAVA EQUIVALENT: |
---|
1436 | // fs.setPermission(path, FsPermission) |
---|
1437 | |
---|
1438 | //Get the JNIEnv* corresponding to current thread |
---|
1439 | JNIEnv* env = getJNIEnv(); |
---|
1440 | if (env == NULL) { |
---|
1441 | errno = EINTERNAL; |
---|
1442 | return -1; |
---|
1443 | } |
---|
1444 | |
---|
1445 | jobject jFS = (jobject)fs; |
---|
1446 | |
---|
1447 | // construct jPerm = FsPermission.createImmutable(short mode); |
---|
1448 | |
---|
1449 | jshort jmode = mode; |
---|
1450 | |
---|
1451 | jobject jPermObj = |
---|
1452 | constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode); |
---|
1453 | if (jPermObj == NULL) { |
---|
1454 | return -2; |
---|
1455 | } |
---|
1456 | |
---|
1457 | //Create an object of org.apache.hadoop.fs.Path |
---|
1458 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1459 | if (jPath == NULL) { |
---|
1460 | return -3; |
---|
1461 | } |
---|
1462 | |
---|
1463 | //Create the directory |
---|
1464 | int ret = 0; |
---|
1465 | jthrowable jExc = NULL; |
---|
1466 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1467 | "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID), |
---|
1468 | jPath, jPermObj) != 0) { |
---|
1469 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1470 | "FileSystem::setPermission"); |
---|
1471 | ret = -1; |
---|
1472 | goto done; |
---|
1473 | } |
---|
1474 | |
---|
1475 | done: |
---|
1476 | destroyLocalReference(env, jPath); |
---|
1477 | destroyLocalReference(env, jPermObj); |
---|
1478 | |
---|
1479 | return ret; |
---|
1480 | } |
---|
1481 | |
---|
1482 | int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime) |
---|
1483 | { |
---|
1484 | // JAVA EQUIVALENT: |
---|
1485 | // fs.setTimes(src, mtime, atime) |
---|
1486 | |
---|
1487 | //Get the JNIEnv* corresponding to current thread |
---|
1488 | JNIEnv* env = getJNIEnv(); |
---|
1489 | if (env == NULL) { |
---|
1490 | errno = EINTERNAL; |
---|
1491 | return -1; |
---|
1492 | } |
---|
1493 | |
---|
1494 | jobject jFS = (jobject)fs; |
---|
1495 | |
---|
1496 | //Create an object of org.apache.hadoop.fs.Path |
---|
1497 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1498 | if (jPath == NULL) { |
---|
1499 | fprintf(stderr, "could not construct path object\n"); |
---|
1500 | return -2; |
---|
1501 | } |
---|
1502 | |
---|
1503 | jlong jmtime = mtime * (jlong)1000; |
---|
1504 | jlong jatime = atime * (jlong)1000; |
---|
1505 | |
---|
1506 | int ret = 0; |
---|
1507 | jthrowable jExc = NULL; |
---|
1508 | if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1509 | "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID), |
---|
1510 | jPath, jmtime, jatime) != 0) { |
---|
1511 | fprintf(stderr, "call to setTime failed\n"); |
---|
1512 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1513 | "FileSystem::setTimes"); |
---|
1514 | ret = -1; |
---|
1515 | goto done; |
---|
1516 | } |
---|
1517 | |
---|
1518 | done: |
---|
1519 | destroyLocalReference(env, jPath); |
---|
1520 | return ret; |
---|
1521 | } |
---|
1522 | |
---|
1523 | |
---|
1524 | |
---|
1525 | |
---|
/**
 * Return the hostnames storing each block of a file region.
 * JAVA EQUIVALENT:
 *   fs.getFileBlockLocations(fs.getFileStatus(new Path(path)),
 *                            start, length)
 *
 * @param fs     filesystem handle
 * @param path   file to query
 * @param start  starting byte offset of the region
 * @param length length in bytes of the region
 * @return a NULL-terminated array of NULL-terminated arrays of
 *         strdup'd hostname strings (caller frees via hdfsFreeHosts),
 *         or NULL on error (errno set).
 *
 * NOTE(review): on the mid-loop getHosts error path, already-populated
 * rows of 'blockHosts' are not freed before returning NULL — the
 * partially-built structure leaks.  Documented here rather than
 * restructured, since the cleanup paths are intricate.
 */
char***
hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileBlockLoctions(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    //First fetch the FileStatus, which getFileBlockLocations requires
    jvalue jFSVal;
    jthrowable jFSExc = NULL;
    if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileStatus",
                     "(Lorg/apache/hadoop/fs/Path;)"
                     "Lorg/apache/hadoop/fs/FileStatus;",
                     jPath) != 0) {
        errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jobject jFileStatus = jFSVal.l;

    //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
    char*** blockHosts = NULL;
    jobjectArray jBlockLocations;;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileBlockLocations",
                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
                     "[Lorg/apache/hadoop/fs/BlockLocation;",
                     jFileStatus, start, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileBlockLocations");
        destroyLocalReference(env, jPath);
        destroyLocalReference(env, jFileStatus);
        return NULL;
    }
    jBlockLocations = jVal.l;

    //Figure out no of entries in jBlockLocations
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);

    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    if (jNumFileBlocks == 0) {
        //No blocks in the region: return an empty (but non-NULL) list
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i=0; i < jNumFileBlocks; ++i) {
        jobject jFileBlock =
            (*env)->GetObjectArrayElement(env, jBlockLocations, i);

        //BlockLocation::getHosts returns a String[] of hostnames
        jvalue jVal;
        jobjectArray jFileBlockHosts;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
                         "getHosts", "()[Ljava/lang/String;") ||
                jVal.l == NULL) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "BlockLocation::getHosts");
            destroyLocalReference(env, jPath);
            destroyLocalReference(env, jFileStatus);
            destroyLocalReference(env, jBlockLocations);
            //NOTE(review): blockHosts rows built so far leak here
            return NULL;
        }

        jFileBlockHosts = jVal.l;
        //Figure out no of hosts in jFileBlockHosts
        //Allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
        if (blockHosts[i] == NULL) {
            //Unwind rows allocated so far before bailing out
            int x = 0;
            for (x=0; x < i; ++x) {
                free(blockHosts[x]);
            }
            free(blockHosts);
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        const char *hostName;
        for (j=0; j < jNumBlockHosts; ++j) {
            jstring jHost =
                (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);

            //Duplicate into C memory so the JNI string can be released
            hostName =
                (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
            blockHosts[i][j] = strdup(hostName);

            (*env)->ReleaseStringUTFChars(env, jHost, hostName);
            destroyLocalReference(env, jHost);
        }

        destroyLocalReference(env, jFileBlockHosts);
    }

done:

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jFileStatus);
    destroyLocalReference(env, jBlockLocations);

    return blockHosts;
}
---|
1656 | |
---|
1657 | |
---|
1658 | void hdfsFreeHosts(char ***blockHosts) |
---|
1659 | { |
---|
1660 | int i, j; |
---|
1661 | for (i=0; blockHosts[i]; i++) { |
---|
1662 | for (j=0; blockHosts[i][j]; j++) { |
---|
1663 | free(blockHosts[i][j]); |
---|
1664 | } |
---|
1665 | free(blockHosts[i]); |
---|
1666 | } |
---|
1667 | free(blockHosts); |
---|
1668 | } |
---|
1669 | |
---|
1670 | |
---|
1671 | tOffset hdfsGetDefaultBlockSize(hdfsFS fs) |
---|
1672 | { |
---|
1673 | // JAVA EQUIVALENT: |
---|
1674 | // fs.getDefaultBlockSize(); |
---|
1675 | |
---|
1676 | //Get the JNIEnv* corresponding to current thread |
---|
1677 | JNIEnv* env = getJNIEnv(); |
---|
1678 | if (env == NULL) { |
---|
1679 | errno = EINTERNAL; |
---|
1680 | return -1; |
---|
1681 | } |
---|
1682 | |
---|
1683 | jobject jFS = (jobject)fs; |
---|
1684 | |
---|
1685 | //FileSystem::getDefaultBlockSize() |
---|
1686 | tOffset blockSize = -1; |
---|
1687 | jvalue jVal; |
---|
1688 | jthrowable jExc = NULL; |
---|
1689 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1690 | "getDefaultBlockSize", "()J") != 0) { |
---|
1691 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1692 | "FileSystem::getDefaultBlockSize"); |
---|
1693 | return -1; |
---|
1694 | } |
---|
1695 | blockSize = jVal.j; |
---|
1696 | |
---|
1697 | return blockSize; |
---|
1698 | } |
---|
1699 | |
---|
1700 | |
---|
1701 | |
---|
1702 | tOffset hdfsGetCapacity(hdfsFS fs) |
---|
1703 | { |
---|
1704 | // JAVA EQUIVALENT: |
---|
1705 | // fs.getRawCapacity(); |
---|
1706 | |
---|
1707 | //Get the JNIEnv* corresponding to current thread |
---|
1708 | JNIEnv* env = getJNIEnv(); |
---|
1709 | if (env == NULL) { |
---|
1710 | errno = EINTERNAL; |
---|
1711 | return -1; |
---|
1712 | } |
---|
1713 | |
---|
1714 | jobject jFS = (jobject)fs; |
---|
1715 | |
---|
1716 | if (!((*env)->IsInstanceOf(env, jFS, |
---|
1717 | globalClassReference(HADOOP_DFS, env)))) { |
---|
1718 | fprintf(stderr, "hdfsGetCapacity works only on a " |
---|
1719 | "DistributedFileSystem!\n"); |
---|
1720 | return -1; |
---|
1721 | } |
---|
1722 | |
---|
1723 | //FileSystem::getRawCapacity() |
---|
1724 | jvalue jVal; |
---|
1725 | jthrowable jExc = NULL; |
---|
1726 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, |
---|
1727 | "getRawCapacity", "()J") != 0) { |
---|
1728 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1729 | "FileSystem::getRawCapacity"); |
---|
1730 | return -1; |
---|
1731 | } |
---|
1732 | |
---|
1733 | return jVal.j; |
---|
1734 | } |
---|
1735 | |
---|
1736 | |
---|
1737 | |
---|
1738 | tOffset hdfsGetUsed(hdfsFS fs) |
---|
1739 | { |
---|
1740 | // JAVA EQUIVALENT: |
---|
1741 | // fs.getRawUsed(); |
---|
1742 | |
---|
1743 | //Get the JNIEnv* corresponding to current thread |
---|
1744 | JNIEnv* env = getJNIEnv(); |
---|
1745 | if (env == NULL) { |
---|
1746 | errno = EINTERNAL; |
---|
1747 | return -1; |
---|
1748 | } |
---|
1749 | |
---|
1750 | jobject jFS = (jobject)fs; |
---|
1751 | |
---|
1752 | if (!((*env)->IsInstanceOf(env, jFS, |
---|
1753 | globalClassReference(HADOOP_DFS, env)))) { |
---|
1754 | fprintf(stderr, "hdfsGetUsed works only on a " |
---|
1755 | "DistributedFileSystem!\n"); |
---|
1756 | return -1; |
---|
1757 | } |
---|
1758 | |
---|
1759 | //FileSystem::getRawUsed() |
---|
1760 | jvalue jVal; |
---|
1761 | jthrowable jExc = NULL; |
---|
1762 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, |
---|
1763 | "getRawUsed", "()J") != 0) { |
---|
1764 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1765 | "FileSystem::getRawUsed"); |
---|
1766 | return -1; |
---|
1767 | } |
---|
1768 | |
---|
1769 | return jVal.j; |
---|
1770 | } |
---|
1771 | |
---|
1772 | |
---|
1773 | |
---|
1774 | static int |
---|
1775 | getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo) |
---|
1776 | { |
---|
1777 | jvalue jVal; |
---|
1778 | jthrowable jExc = NULL; |
---|
1779 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1780 | HADOOP_STAT, "isDir", "()Z") != 0) { |
---|
1781 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1782 | "FileStatus::isDir"); |
---|
1783 | return -1; |
---|
1784 | } |
---|
1785 | fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile; |
---|
1786 | |
---|
1787 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1788 | HADOOP_STAT, "getReplication", "()S") != 0) { |
---|
1789 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1790 | "FileStatus::getReplication"); |
---|
1791 | return -1; |
---|
1792 | } |
---|
1793 | fileInfo->mReplication = jVal.s; |
---|
1794 | |
---|
1795 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1796 | HADOOP_STAT, "getBlockSize", "()J") != 0) { |
---|
1797 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1798 | "FileStatus::getBlockSize"); |
---|
1799 | return -1; |
---|
1800 | } |
---|
1801 | fileInfo->mBlockSize = jVal.j; |
---|
1802 | |
---|
1803 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1804 | HADOOP_STAT, "getModificationTime", "()J") != 0) { |
---|
1805 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1806 | "FileStatus::getModificationTime"); |
---|
1807 | return -1; |
---|
1808 | } |
---|
1809 | fileInfo->mLastMod = (tTime) (jVal.j / 1000); |
---|
1810 | |
---|
1811 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1812 | HADOOP_STAT, "getAccessTime", "()J") != 0) { |
---|
1813 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1814 | "FileStatus::getAccessTime"); |
---|
1815 | return -1; |
---|
1816 | } |
---|
1817 | fileInfo->mLastAccess = (tTime) (jVal.j / 1000); |
---|
1818 | |
---|
1819 | |
---|
1820 | if (fileInfo->mKind == kObjectKindFile) { |
---|
1821 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, |
---|
1822 | HADOOP_STAT, "getLen", "()J") != 0) { |
---|
1823 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1824 | "FileStatus::getLen"); |
---|
1825 | return -1; |
---|
1826 | } |
---|
1827 | fileInfo->mSize = jVal.j; |
---|
1828 | } |
---|
1829 | |
---|
1830 | jobject jPath; |
---|
1831 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT, |
---|
1832 | "getPath", "()Lorg/apache/hadoop/fs/Path;") || |
---|
1833 | jVal.l == NULL) { |
---|
1834 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1835 | "Path::getPath"); |
---|
1836 | return -1; |
---|
1837 | } |
---|
1838 | jPath = jVal.l; |
---|
1839 | |
---|
1840 | jstring jPathName; |
---|
1841 | const char *cPathName; |
---|
1842 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH, |
---|
1843 | "toString", "()Ljava/lang/String;")) { |
---|
1844 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1845 | "Path::toString"); |
---|
1846 | destroyLocalReference(env, jPath); |
---|
1847 | return -1; |
---|
1848 | } |
---|
1849 | jPathName = jVal.l; |
---|
1850 | cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL)); |
---|
1851 | fileInfo->mName = strdup(cPathName); |
---|
1852 | (*env)->ReleaseStringUTFChars(env, jPathName, cPathName); |
---|
1853 | destroyLocalReference(env, jPath); |
---|
1854 | destroyLocalReference(env, jPathName); |
---|
1855 | jstring jUserName; |
---|
1856 | const char* cUserName; |
---|
1857 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT, |
---|
1858 | "getOwner", "()Ljava/lang/String;")) { |
---|
1859 | fprintf(stderr, "Call to org.apache.hadoop.fs." |
---|
1860 | "FileStatus::getOwner failed!\n"); |
---|
1861 | errno = EINTERNAL; |
---|
1862 | return -1; |
---|
1863 | } |
---|
1864 | jUserName = jVal.l; |
---|
1865 | cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL)); |
---|
1866 | fileInfo->mOwner = strdup(cUserName); |
---|
1867 | (*env)->ReleaseStringUTFChars(env, jUserName, cUserName); |
---|
1868 | destroyLocalReference(env, jUserName); |
---|
1869 | |
---|
1870 | jstring jGroupName; |
---|
1871 | const char* cGroupName; |
---|
1872 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT, |
---|
1873 | "getGroup", "()Ljava/lang/String;")) { |
---|
1874 | fprintf(stderr, "Call to org.apache.hadoop.fs." |
---|
1875 | "FileStatus::getGroup failed!\n"); |
---|
1876 | errno = EINTERNAL; |
---|
1877 | return -1; |
---|
1878 | } |
---|
1879 | jGroupName = jVal.l; |
---|
1880 | cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL)); |
---|
1881 | fileInfo->mGroup = strdup(cGroupName); |
---|
1882 | (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName); |
---|
1883 | destroyLocalReference(env, jGroupName); |
---|
1884 | |
---|
1885 | jobject jPermission; |
---|
1886 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT, |
---|
1887 | "getPermission", "()Lorg/apache/hadoop/fs/permission/FsPermission;") || |
---|
1888 | jVal.l == NULL) { |
---|
1889 | fprintf(stderr, "Call to org.apache.hadoop.fs." |
---|
1890 | "FileStatus::getPermission failed!\n"); |
---|
1891 | errno = EINTERNAL; |
---|
1892 | return -1; |
---|
1893 | } |
---|
1894 | jPermission = jVal.l; |
---|
1895 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM, |
---|
1896 | "toShort", "()S") != 0) { |
---|
1897 | fprintf(stderr, "Call to org.apache.hadoop.fs.permission." |
---|
1898 | "FsPermission::toShort failed!\n"); |
---|
1899 | errno = EINTERNAL; |
---|
1900 | return -1; |
---|
1901 | } |
---|
1902 | fileInfo->mPermissions = jVal.s; |
---|
1903 | destroyLocalReference(env, jPermission); |
---|
1904 | |
---|
1905 | return 0; |
---|
1906 | } |
---|
1907 | |
---|
1908 | static int |
---|
1909 | getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo) |
---|
1910 | { |
---|
1911 | // JAVA EQUIVALENT: |
---|
1912 | // fs.isDirectory(f) |
---|
1913 | // fs.getModificationTime() |
---|
1914 | // fs.getAccessTime() |
---|
1915 | // fs.getLength(f) |
---|
1916 | // f.getPath() |
---|
1917 | // f.getOwner() |
---|
1918 | // f.getGroup() |
---|
1919 | // f.getPermission().toShort() |
---|
1920 | |
---|
1921 | jobject jStat; |
---|
1922 | jvalue jVal; |
---|
1923 | jthrowable jExc = NULL; |
---|
1924 | |
---|
1925 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1926 | "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), |
---|
1927 | jPath) != 0) { |
---|
1928 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1929 | "FileSystem::exists"); |
---|
1930 | return -1; |
---|
1931 | } |
---|
1932 | |
---|
1933 | if (jVal.z == 0) { |
---|
1934 | errno = ENOENT; |
---|
1935 | return -1; |
---|
1936 | } |
---|
1937 | |
---|
1938 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, |
---|
1939 | "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)), |
---|
1940 | jPath) != 0) { |
---|
1941 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1942 | "FileSystem::getFileStatus"); |
---|
1943 | return -1; |
---|
1944 | } |
---|
1945 | jStat = jVal.l; |
---|
1946 | int ret = getFileInfoFromStat(env, jStat, fileInfo); |
---|
1947 | destroyLocalReference(env, jStat); |
---|
1948 | return ret; |
---|
1949 | } |
---|
1950 | |
---|
1951 | |
---|
1952 | |
---|
1953 | hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries) |
---|
1954 | { |
---|
1955 | // JAVA EQUIVALENT: |
---|
1956 | // Path p(path); |
---|
1957 | // Path []pathList = fs.listPaths(p) |
---|
1958 | // foreach path in pathList |
---|
1959 | // getFileInfo(path) |
---|
1960 | |
---|
1961 | //Get the JNIEnv* corresponding to current thread |
---|
1962 | JNIEnv* env = getJNIEnv(); |
---|
1963 | if (env == NULL) { |
---|
1964 | errno = EINTERNAL; |
---|
1965 | return NULL; |
---|
1966 | } |
---|
1967 | |
---|
1968 | jobject jFS = (jobject)fs; |
---|
1969 | |
---|
1970 | //Create an object of org.apache.hadoop.fs.Path |
---|
1971 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
1972 | if (jPath == NULL) { |
---|
1973 | return NULL; |
---|
1974 | } |
---|
1975 | |
---|
1976 | hdfsFileInfo *pathList = 0; |
---|
1977 | |
---|
1978 | jobjectArray jPathList = NULL; |
---|
1979 | jvalue jVal; |
---|
1980 | jthrowable jExc = NULL; |
---|
1981 | if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus", |
---|
1982 | JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)), |
---|
1983 | jPath) != 0) { |
---|
1984 | errno = errnoFromException(jExc, env, "org.apache.hadoop.fs." |
---|
1985 | "FileSystem::listStatus"); |
---|
1986 | destroyLocalReference(env, jPath); |
---|
1987 | return NULL; |
---|
1988 | } |
---|
1989 | jPathList = jVal.l; |
---|
1990 | |
---|
1991 | //Figure out no of entries in that directory |
---|
1992 | jsize jPathListSize = (*env)->GetArrayLength(env, jPathList); |
---|
1993 | *numEntries = jPathListSize; |
---|
1994 | if (jPathListSize == 0) { |
---|
1995 | errno = 0; |
---|
1996 | goto done; |
---|
1997 | } |
---|
1998 | |
---|
1999 | //Allocate memory |
---|
2000 | pathList = calloc(jPathListSize, sizeof(hdfsFileInfo)); |
---|
2001 | if (pathList == NULL) { |
---|
2002 | errno = ENOMEM; |
---|
2003 | goto done; |
---|
2004 | } |
---|
2005 | |
---|
2006 | //Save path information in pathList |
---|
2007 | jsize i; |
---|
2008 | jobject tmpStat; |
---|
2009 | for (i=0; i < jPathListSize; ++i) { |
---|
2010 | tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i); |
---|
2011 | if (getFileInfoFromStat(env, tmpStat, &pathList[i])) { |
---|
2012 | hdfsFreeFileInfo(pathList, jPathListSize); |
---|
2013 | destroyLocalReference(env, tmpStat); |
---|
2014 | pathList = NULL; |
---|
2015 | goto done; |
---|
2016 | } |
---|
2017 | destroyLocalReference(env, tmpStat); |
---|
2018 | } |
---|
2019 | |
---|
2020 | done: |
---|
2021 | |
---|
2022 | //Delete unnecessary local references |
---|
2023 | destroyLocalReference(env, jPath); |
---|
2024 | destroyLocalReference(env, jPathList); |
---|
2025 | |
---|
2026 | return pathList; |
---|
2027 | } |
---|
2028 | |
---|
2029 | |
---|
2030 | |
---|
2031 | hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path) |
---|
2032 | { |
---|
2033 | // JAVA EQUIVALENT: |
---|
2034 | // File f(path); |
---|
2035 | // fs.isDirectory(f) |
---|
2036 | // fs.lastModified() ?? |
---|
2037 | // fs.getLength(f) |
---|
2038 | // f.getPath() |
---|
2039 | |
---|
2040 | //Get the JNIEnv* corresponding to current thread |
---|
2041 | JNIEnv* env = getJNIEnv(); |
---|
2042 | if (env == NULL) { |
---|
2043 | errno = EINTERNAL; |
---|
2044 | return NULL; |
---|
2045 | } |
---|
2046 | |
---|
2047 | jobject jFS = (jobject)fs; |
---|
2048 | |
---|
2049 | //Create an object of org.apache.hadoop.fs.Path |
---|
2050 | jobject jPath = constructNewObjectOfPath(env, path); |
---|
2051 | if (jPath == NULL) { |
---|
2052 | return NULL; |
---|
2053 | } |
---|
2054 | |
---|
2055 | hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo)); |
---|
2056 | if (getFileInfo(env, jFS, jPath, fileInfo)) { |
---|
2057 | hdfsFreeFileInfo(fileInfo, 1); |
---|
2058 | fileInfo = NULL; |
---|
2059 | goto done; |
---|
2060 | } |
---|
2061 | |
---|
2062 | done: |
---|
2063 | |
---|
2064 | //Delete unnecessary local references |
---|
2065 | destroyLocalReference(env, jPath); |
---|
2066 | |
---|
2067 | return fileInfo; |
---|
2068 | } |
---|
2069 | |
---|
2070 | |
---|
2071 | |
---|
2072 | void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries) |
---|
2073 | { |
---|
2074 | //Free the mName |
---|
2075 | int i; |
---|
2076 | for (i=0; i < numEntries; ++i) { |
---|
2077 | if (hdfsFileInfo[i].mName) { |
---|
2078 | free(hdfsFileInfo[i].mName); |
---|
2079 | } |
---|
2080 | } |
---|
2081 | |
---|
2082 | //Free entire block |
---|
2083 | free(hdfsFileInfo); |
---|
2084 | } |
---|
2085 | |
---|
2086 | |
---|
2087 | |
---|
2088 | |
---|
2089 | /** |
---|
2090 | * vim: ts=4: sw=4: et: |
---|
2091 | */ |
---|