summaryrefslogtreecommitdiffstats
path: root/src/H5FDhdfs.c
diff options
context:
space:
mode:
authorJacob Smith <jake.smith@hdfgroup.org>2019-08-14 20:36:45 (GMT)
committerJacob Smith <jake.smith@hdfgroup.org>2019-08-14 20:36:45 (GMT)
commit2e5cd3d2b2d953420252c80ab5e32ac95ac0c5b3 (patch)
treea79af758bc878d55a958bedae77d919e03a9516f /src/H5FDhdfs.c
parent2b72832d97496290f74ccaffcda41c3e596f8f49 (diff)
downloadhdf5-2e5cd3d2b2d953420252c80ab5e32ac95ac0c5b3.zip
hdf5-2e5cd3d2b2d953420252c80ab5e32ac95ac0c5b3.tar.gz
hdf5-2e5cd3d2b2d953420252c80ab5e32ac95ac0c5b3.tar.bz2
Fix compiler warnings with disabled ROS3 and HDFS VFDs.
Fix misuse of [HD]strncpy and [HD]snprintf. Minor formatting changes. Fix test cleanup issue with java groups example.
Diffstat (limited to 'src/H5FDhdfs.c')
-rw-r--r--src/H5FDhdfs.c484
1 files changed, 290 insertions, 194 deletions
diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c
index e3e11b2..83d0202 100644
--- a/src/H5FDhdfs.c
+++ b/src/H5FDhdfs.c
@@ -130,6 +130,8 @@ typedef struct {
#endif /* HDFS_STATS */
+#ifdef H5_HAVE_LIBHDFS
+
/* "unique" identifier for `hdfs_t` structures.
* Randomly generated by unweighted dice rolls.
*/
@@ -143,7 +145,7 @@ typedef struct {
* Purpose:
*
* Contain/retain information associated with a file hosted on Hadoop
- * Distributed File System (HDFS). Instantiated and populated via
+ * Distributed File System (HDFS). Instantiated and populated via
* `H5FD_hdfs_handle_open()` and cleaned up via `H5FD_hdfs_handle_close()`.
*
*
@@ -179,14 +181,11 @@ typedef struct {
*/
typedef struct {
unsigned long magic;
-#ifdef H5_HAVE_LIBHDFS
hdfsFS filesystem;
hdfsFileInfo *fileinfo;
- hdfsFile file;
-#endif
+ hdfsFile file;
} hdfs_t;
-#ifdef H5_HAVE_LIBHDFS
/*--------------------------------------------------------------------------
* Function: H5FD_hdfs_handle_open
@@ -274,7 +273,7 @@ H5FD_hdfs_handle_open(
"hdfsGetPathInfo failed")
}
handle->file = hdfsOpenFile(
- handle->filesystem,
+ handle->filesystem,
path,
O_RDONLY,
stream_buffer_size,
@@ -292,18 +291,18 @@ done:
/* error; clean up */
HDassert(handle->magic == HDFS_HDFST_MAGIC);
handle->magic++;
- if (handle->file != NULL) {
+ if (handle->file != NULL) {
if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) {
- HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
+ HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
"unable to close hdfs file handle")
}
}
- if (handle->fileinfo != NULL) {
+ if (handle->fileinfo != NULL) {
hdfsFreeFileInfo(handle->fileinfo, 1);
}
- if (handle->filesystem != NULL) {
+ if (handle->filesystem != NULL) {
if (FAIL == (hdfsDisconnect(handle->filesystem))) {
- HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
+ HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
"unable to disconnect from hdfs")
}
}
@@ -351,18 +350,18 @@ H5FD_hdfs_handle_close(hdfs_t *handle)
}
handle->magic++;
- if (handle->file != NULL) {
+ if (handle->file != NULL) {
if (FAIL == (hdfsCloseFile(handle->filesystem, handle->file))) {
- HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
+ HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
"unable to close hdfs file handle")
}
}
- if (handle->fileinfo != NULL) {
+ if (handle->fileinfo != NULL) {
hdfsFreeFileInfo(handle->fileinfo, 1);
}
- if (handle->filesystem != NULL) {
+ if (handle->filesystem != NULL) {
if (FAIL == (hdfsDisconnect(handle->filesystem))) {
- HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
+ HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
"unable to disconnect hdfs file system")
}
}
@@ -373,6 +372,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5FD_hdfs_close() */
+
#endif /* H5_HAVE_LIBHDFS */
@@ -382,9 +382,9 @@ done:
*
* Purpose:
*
- * H5FD_hdfs_t is a structure used to store all information needed to
+ * H5FD_hdfs_t is a structure used to store all information needed to
* maintain R/O access to a single HDF5 file in an HDFS file system.
- * This structure is created when such a file is "opened" and
+ * This structure is created when such a file is "opened" and
* discarded when it is "closed".
*
*
@@ -396,7 +396,7 @@ done:
*
* `fa` (H5FD_hdfs_fapl_t)
*
- * Instance of `H5FD_hdfs_fapl_t` containing the HDFS configuration data
+ * Instance of `H5FD_hdfs_fapl_t` containing the HDFS configuration data
* needed to "open" the HDF5 file.
*
* `eoa` (haddr_t)
@@ -405,9 +405,9 @@ done:
* equal the file size.
*
* `hdfs_handle` (hdfs_t *)
- *
+ *
* Instance of HDFS Request handle associated with the target resource.
- * Responsible for communicating with remote host and presenting file
+ * Responsible for communicating with remote host and presenting file
* contents as indistinguishable from a file on the local filesystem.
*
* *** present only if HDFS_SATS is flagged to enable stats collection ***
@@ -456,10 +456,13 @@ typedef struct H5FD_hdfs_t {
* ADDR_OVERFLOW: Checks whether a file address of type `haddr_t'
* is too large to be represented by the second argument
* of the file seek function.
+ * Only included if HDFS code should compile.
*
*/
#define MAXADDR (((haddr_t)1<<(8*sizeof(HDoff_t)-1))-1)
+#ifdef H5_HAVE_LIBHDFS
#define ADDR_OVERFLOW(A) (HADDR_UNDEF==(A) || ((A) & ~(haddr_t)MAXADDR))
+#endif /* H5_HAVE_LIBHDFS */
/* Prototypes */
static herr_t H5FD_hdfs_term(void);
@@ -474,13 +477,13 @@ static herr_t H5FD_hdfs_query(const H5FD_t *_f1, unsigned long *flags);
static haddr_t H5FD_hdfs_get_eoa(const H5FD_t *_file, H5FD_mem_t type);
static herr_t H5FD_hdfs_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr);
static haddr_t H5FD_hdfs_get_eof(const H5FD_t *_file, H5FD_mem_t type);
-static herr_t H5FD_hdfs_get_handle(H5FD_t *_file, hid_t fapl,
+static herr_t H5FD_hdfs_get_handle(H5FD_t *_file, hid_t fapl,
void** file_handle);
-static herr_t H5FD_hdfs_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id,
+static herr_t H5FD_hdfs_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id,
haddr_t addr, size_t size, void *buf);
-static herr_t H5FD_hdfs_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id,
+static herr_t H5FD_hdfs_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id,
haddr_t addr, size_t size, const void *buf);
-static herr_t H5FD_hdfs_truncate(H5FD_t *_file, hid_t dxpl_id,
+static herr_t H5FD_hdfs_truncate(H5FD_t *_file, hid_t dxpl_id,
hbool_t closing);
static herr_t H5FD_hdfs_lock(H5FD_t *_file, hbool_t rw);
static herr_t H5FD_hdfs_unlock(H5FD_t *_file);
@@ -521,8 +524,10 @@ static const H5FD_class_t H5FD_hdfs_g = {
H5FD_FLMAP_DICHOTOMY /* fl_map */
};
+#ifdef H5_HAVE_LIBHDFS
/* Declare a free list to manage the H5FD_hdfs_t struct */
H5FL_DEFINE_STATIC(H5FD_hdfs_t);
+#endif /* H5_HAVE_LIBHDFS */
/*-------------------------------------------------------------------------
@@ -545,7 +550,7 @@ H5FD__init_package(void)
FUNC_ENTER_STATIC
if (H5FD_hdfs_init() < 0) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL,
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL,
"unable to initialize hdfs VFD")
}
@@ -643,8 +648,8 @@ H5FD_hdfs_term(void)
* Function: H5Pset_fapl_hdfs
*
* Purpose: Modify the file access property list to use the H5FD_HDFS
- * driver defined in this source file. All driver specfic
- * properties are passed in as a pointer to a suitably
+ * driver defined in this source file. All driver specfic
+ * properties are passed in as a pointer to a suitably
* initialized instance of H5FD_hdfs_fapl_t
*
* Return: SUCCEED/FAIL
@@ -658,7 +663,7 @@ H5FD_hdfs_term(void)
*-------------------------------------------------------------------------
*/
herr_t
-H5Pset_fapl_hdfs(hid_t fapl_id,
+H5Pset_fapl_hdfs(hid_t fapl_id,
H5FD_hdfs_fapl_t *fa)
{
H5P_genplist_t *plist = NULL; /* Property list pointer */
@@ -674,7 +679,7 @@ H5Pset_fapl_hdfs(hid_t fapl_id,
#endif
plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
- if (plist == NULL) {
+ if (plist == NULL) {
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \
"not a file access property list")
}
@@ -699,12 +704,12 @@ done:
* contains internally consistant data. Return SUCCEED if so,
* and FAIL otherwise.
*
- * Note the difference between internally consistant and
- * correct. As we will have to try to access the target
+ * Note the difference between internally consistant and
+ * correct. As we will have to try to access the target
* object to determine whether the supplied data is correct,
* we will settle for internal consistancy at this point
*
- * Return: SUCCEED if instance of H5FD_hdfs_fapl_t contains internally
+ * Return: SUCCEED if instance of H5FD_hdfs_fapl_t contains internally
* consistant data, FAIL otherwise.
*
* Programmer: Jacob Smith
@@ -761,7 +766,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5Pget_fapl_hdfs(hid_t fapl_id,
+H5Pget_fapl_hdfs(hid_t fapl_id,
H5FD_hdfs_fapl_t *fa_out)
{
const H5FD_hdfs_fapl_t *fa = NULL;
@@ -832,7 +837,7 @@ H5FD_hdfs_fapl_get(H5FD_t *_file)
fa = (H5FD_hdfs_fapl_t *)H5MM_calloc(sizeof(H5FD_hdfs_fapl_t));
if (fa == NULL) {
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed")
}
@@ -878,7 +883,7 @@ H5FD_hdfs_fapl_copy(const void *_old_fa)
new_fa = (H5FD_hdfs_fapl_t *)H5MM_malloc(sizeof(H5FD_hdfs_fapl_t));
if (new_fa == NULL) {
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed")
}
@@ -916,7 +921,7 @@ H5FD_hdfs_fapl_free(void *_fa)
FUNC_ENTER_NOAPI_NOINIT_NOERR
- HDassert(fa != NULL); /* sanity check */
+ HDassert(fa != NULL); /* sanity check */
H5MM_xfree(fa);
@@ -994,11 +999,11 @@ done:
* Create and/or opens a file as an HDF5 file.
*
* Any flag except H5F_ACC_RDONLY will cause an error.
- *
+ *
* Return:
*
- * Success: A pointer to a new file data structure.
- * The public fields will be initialized by the caller, which is
+ * Success: A pointer to a new file data structure.
+ * The public fields will be initialized by the caller, which is
* always H5FD_open().
*
* Failure: NULL
@@ -1010,26 +1015,21 @@ done:
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
static H5FD_t *
H5FD_hdfs_open(
- const char *path,
- unsigned flags,
- hid_t fapl_id,
+ const char *path,
+ unsigned flags,
+ hid_t fapl_id,
haddr_t maxaddr)
{
H5FD_t *ret_value = NULL;
-#ifdef H5_HAVE_LIBHDFS
H5FD_hdfs_t *file = NULL;
hdfs_t *handle = NULL;
H5FD_hdfs_fapl_t fa;
-#endif
FUNC_ENTER_NOAPI_NOINIT
-#ifndef H5_HAVE_LIBHDFS
- HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, NULL,
- "Illegal open of unsupported virtual file (hdfs)");
-#else
#if HDFS_DEBUG
HDfprintf(stdout, "H5FD_hdfs_open() called.\n");
#endif /* HDFS_DEBUG */
@@ -1078,11 +1078,11 @@ H5FD_hdfs_open(
HDassert(handle->magic == HDFS_HDFST_MAGIC);
- /* create new file struct
+ /* create new file struct
*/
file = H5FL_CALLOC(H5FD_hdfs_t);
if (file == NULL) {
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"unable to allocate file struct")
}
file->hdfs_handle = handle;
@@ -1090,33 +1090,53 @@ H5FD_hdfs_open(
#if HDFS_STATS
if (FAIL == hdfs_reset_stats(file)) {
- HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL,
+ HGOTO_ERROR(H5E_INTERNAL, H5E_UNINITIALIZED, NULL,
"unable to reset file statistics")
}
#endif /* HDFS_STATS */
ret_value = (H5FD_t*)file;
-#endif /* H5_HAVE_LIBHDFS */
done:
-#ifdef H5_HAVE_LIBHDFS
if (ret_value == NULL) {
- if (handle != NULL) {
+ if (handle != NULL) {
if (FAIL == H5FD_hdfs_handle_close(handle)) {
- HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
+ HDONE_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, NULL,
"unable to close HDFS file handle")
}
}
if (file != NULL) {
file = H5FL_FREE(H5FD_hdfs_t, file);
}
- } /* if null return value (error) */
-#endif /* H5_HAVE_LIBHDFS */
+ } /* end if null return value (error) */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_hdfs_open() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static H5FD_t *
+H5FD_hdfs_open(
+ const char H5_ATTR_UNUSED *path,
+ unsigned H5_ATTR_UNUSED flags,
+ hid_t H5_ATTR_UNUSED fapl_id,
+ haddr_t H5_ATTR_UNUSED maxaddr)
+{
+ H5FD_t *ret_value = NULL;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, NULL,
+ "Illegal open of unsupported virtual file (hdfs)");
+done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5FD_hdfs_open() */
+#endif /* H5_HAVE_LIBHDFS */
+
#if HDFS_STATS
/*----------------------------------------------------------------------------
@@ -1124,7 +1144,7 @@ done:
* Function: hdfs_fprint_stats()
*
* Purpose:
- *
+ *
* Tabulate and pretty-print statistics for this virtual file.
*
* Should be called upon file close.
@@ -1145,17 +1165,17 @@ done:
* - max (largest size read)
* 2. tabulation of "bins", sepraring reads into exponentially-larger
* ranges of size.
- * - columns for number of reads, total bytes, and average size, with
+ * - columns for number of reads, total bytes, and average size, with
* separate sub-colums for raw- and metadata reads.
* - each row represents one bin, identified by the top of its range
- *
+ *
* Bin ranges can be modified with pound-defines at the top of this file.
*
* Bins without any reads in their bounds are not printed.
*
* An "overflow" bin is also present, to catch "big" reads.
*
- * Output for all bins (and range ceiling and average size report)
+ * Output for all bins (and range ceiling and average size report)
* is divied by powers of 1024. By corollary, four digits before the decimal
* is valid.
*
@@ -1216,36 +1236,6 @@ hdfs_fprint_stats(
"hdfs handle has invalid magic")
}
- /* TODO: See what libhdfs exposes to us. */
-
-#if 0
- if (file->s3r_handle->purl == NULL) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "parsed url structure cannot be null")
- }
- purl = file->s3r_handle->purl;
-
- /******************
- * PRINT FILENAME *
- ******************/
-
- HDfprintf(stream, "stats for %s://%s", purl->scheme, purl->host);
- if (purl->port != NULL && purl->port[0] != '\0') {
- HDfprintf(stream, ":%s", purl->port);
- }
- if (purl->query != NULL && purl->query[0] != '\0') {
- if (purl->path != NULL && purl->path[0] != '\0') {
- HDfprintf(stream, "/%s", purl->path);
- } else {
- HDfprintf(stream, "/");
- }
- HDfprintf(stream, "?%s", purl->query);
- } else if (purl->path != NULL && purl->path[0] != '\0') {
- HDfprintf(stream, "/%s", purl->path);
- }
- HDfprintf(stream, "\n");
-#endif
-
/*******************
* AGGREGATE STATS *
*******************/
@@ -1254,18 +1244,22 @@ hdfs_fprint_stats(
const hdfs_statsbin *r = &file->raw[i];
const hdfs_statsbin *m = &file->meta[i];
- if (m->min < min_meta) min_meta = m->min;
- if (r->min < min_raw) min_raw = r->min;
- if (m->max > max_meta) max_meta = m->max;
- if (r->max > max_raw) max_raw = r->max;
+ if (m->min < min_meta) { min_meta = m->min; }
+ if (r->min < min_raw) { min_raw = r->min; }
+ if (m->max > max_meta) { max_meta = m->max; }
+ if (r->max > max_raw) { max_raw = r->max; }
count_raw += r->count;
count_meta += m->count;
bytes_raw += r->bytes;
bytes_meta += m->bytes;
}
- if (count_raw > 0) average_raw = (double)bytes_raw / (double)count_raw;
- if (count_meta > 0) average_meta = (double)bytes_meta / (double)count_meta;
+ if (count_raw > 0) {
+ average_raw = (double)bytes_raw / (double)count_raw;
+ }
+ if (count_meta > 0) {
+ average_meta = (double)bytes_meta / (double)count_meta;
+ }
/******************
* PRINT OVERVIEW *
@@ -1342,9 +1336,9 @@ hdfs_fprint_stats(
* PRINT INDIVIDUAL BIN STATS *
******************************/
- HDfprintf(stream,
+ HDfprintf(stream,
"BINS # of reads total bytes average size\n");
- HDfprintf(stream,
+ HDfprintf(stream,
" up-to meta raw meta raw meta raw\n");
for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) {
@@ -1414,7 +1408,7 @@ hdfs_fprint_stats(
HDassert(suffix_i < sizeof(suffixes));
HDfprintf(
- stream,
+ stream,
" %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n",
re_dub, suffixes[suffix_i], /* bin ceiling */
m->count, /* metadata reads */
@@ -1428,7 +1422,6 @@ hdfs_fprint_stats(
done:
FUNC_LEAVE_NOAPI(ret_value);
-
} /* hdfs_fprint_stats */
#endif /* HDFS_STATS */
@@ -1442,7 +1435,7 @@ done:
* Close an HDF5 file.
*
* Return:
- *
+ *
* SUCCEED/FAIL
*
* Programmer: Jacob Smith
@@ -1452,35 +1445,31 @@ done:
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static herr_t
H5FD_hdfs_close(H5FD_t *_file)
{
herr_t ret_value = SUCCEED;
-#ifdef H5_HAVE_LIBHDFS
H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file;
-#endif
FUNC_ENTER_NOAPI_NOINIT
-#ifndef H5_HAVE_LIBHDFS
- HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
- "Illegal close of unsupported Virtual File (hdfs)")
-#else
#if HDFS_DEBUG
HDfprintf(stdout, "H5FD_hdfs_close() called.\n");
#endif
- /* Sanity checks
+ /* Sanity checks
*/
HDassert(file != NULL);
HDassert(file->hdfs_handle != NULL);
HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
- /* Close the underlying request handle
+ /* Close the underlying request handle
*/
- if (file->hdfs_handle != NULL) {
+ if (file->hdfs_handle != NULL) {
if (FAIL == H5FD_hdfs_handle_close(file->hdfs_handle)) {
- HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
+ HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
"unable to close HDFS file handle")
}
}
@@ -1488,21 +1477,37 @@ H5FD_hdfs_close(H5FD_t *_file)
#if HDFS_STATS
/* TODO: mechanism to re-target stats printout */
if (FAIL == hdfs_fprint_stats(stdout, file)) {
- HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL,
+ HGOTO_ERROR(H5E_INTERNAL, H5E_ERROR, FAIL,
"problem while writing file statistics")
}
#endif /* HDFS_STATS */
- /* Release the file info
+ /* Release the file info
*/
file = H5FL_FREE(H5FD_hdfs_t, file);
-#endif /* H5_HAVE_LIBHDFS */
done:
FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_hdfs_close() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static herr_t
+H5FD_hdfs_close(H5FD_t H5_ATTR_UNUSED *_file)
+{
+ herr_t ret_value = SUCCEED;
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTCLOSEFILE, FAIL,
+ "Illegal close of unsupported Virtual File (hdfs)")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FD_hdfs_close() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
@@ -1527,21 +1532,21 @@ done:
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static int
-H5FD_hdfs_cmp(const H5FD_t *_f1,
- const H5FD_t *_f2)
+H5FD_hdfs_cmp(
+ const H5FD_t *_f1,
+ const H5FD_t *_f2)
{
int ret_value = 0;
-#ifdef H5_HAVE_LIBHDFS
const H5FD_hdfs_t *f1 = (const H5FD_hdfs_t *)_f1;
const H5FD_hdfs_t *f2 = (const H5FD_hdfs_t *)_f2;
hdfsFileInfo *finfo1 = NULL;
hdfsFileInfo *finfo2 = NULL;
-#endif /* H5_HAVE_LIBHDFS */
FUNC_ENTER_NOAPI_NOINIT_NOERR
-#ifdef H5_HAVE_LIBHDFS
#if HDFS_DEBUG
HDfprintf(stdout, "H5FD_hdfs_cmp() called.\n");
#endif /* HDFS_DEBUG */
@@ -1556,23 +1561,37 @@ H5FD_hdfs_cmp(const H5FD_t *_f1,
HDassert(finfo1 != NULL);
HDassert(finfo2 != NULL);
- if (finfo1->mKind != finfo2->mKind) HGOTO_DONE(-1);
- if (finfo1->mName != finfo2->mName) HGOTO_DONE(-1);
- if (finfo1->mLastMod != finfo2->mLastMod) HGOTO_DONE(-1);
- if (finfo1->mSize != finfo2->mSize) HGOTO_DONE(-1);
- if (finfo1->mReplication != finfo2->mReplication) HGOTO_DONE(-1);
- if (finfo1->mBlockSize != finfo2->mBlockSize) HGOTO_DONE(-1);
- if (strcmp(finfo1->mOwner, finfo2->mOwner)) HGOTO_DONE(-1);
- if (strcmp(finfo1->mGroup, finfo2->mGroup)) HGOTO_DONE(-1);
- if (finfo1->mPermissions != finfo2->mPermissions) HGOTO_DONE(-1);
- if (finfo1->mLastAccess != finfo2->mLastAccess) HGOTO_DONE(-1);
-#endif /* H5_HAVE_LIBHDFS */
+ if (finfo1->mKind != finfo2->mKind) { HGOTO_DONE(-1); }
+ if (finfo1->mName != finfo2->mName) { HGOTO_DONE(-1); }
+ if (finfo1->mLastMod != finfo2->mLastMod) { HGOTO_DONE(-1); }
+ if (finfo1->mSize != finfo2->mSize) { HGOTO_DONE(-1); }
+ if (finfo1->mReplication != finfo2->mReplication) { HGOTO_DONE(-1); }
+ if (finfo1->mBlockSize != finfo2->mBlockSize) { HGOTO_DONE(-1); }
+ if (strcmp(finfo1->mOwner, finfo2->mOwner)) { HGOTO_DONE(-1); }
+ if (strcmp(finfo1->mGroup, finfo2->mGroup)) { HGOTO_DONE(-1); }
+ if (finfo1->mPermissions != finfo2->mPermissions) { HGOTO_DONE(-1); }
+ if (finfo1->mLastAccess != finfo2->mLastAccess) { HGOTO_DONE(-1); }
done:
FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD_hdfs_cmp() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static int
+H5FD_hdfs_cmp(
+ const H5FD_t H5_ATTR_UNUSED *_f1,
+ const H5FD_t H5_ATTR_UNUSED *_f2)
+{
+ int ret_value = 0;
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5FD_hdfs_cmp() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
* Function: H5FD_hdfs_query
@@ -1580,7 +1599,7 @@ done:
* Purpose: Set the flags that this VFL driver is capable of supporting.
* (listed in H5FDpublic.h)
*
- * Note that since the HDFS VFD is read only, most flags
+ * Note that since the HDFS VFD is read only, most flags
* are irrelevant.
*
* The term "set" is highly misleading...
@@ -1595,7 +1614,7 @@ done:
*/
static herr_t
H5FD_hdfs_query(
- const H5FD_t H5_ATTR_UNUSED *_file,
+ const H5FD_t H5_ATTR_UNUSED *_file,
unsigned long *flags) /* out variable */
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1606,11 +1625,10 @@ H5FD_hdfs_query(
if (flags) {
*flags = 0;
- *flags |= H5FD_FEAT_DATA_SIEVE;
+ *flags |= H5FD_FEAT_DATA_SIEVE;
}
FUNC_LEAVE_NOAPI(SUCCEED)
-
} /* H5FD_hdfs_query() */
@@ -1635,14 +1653,14 @@ H5FD_hdfs_query(
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static haddr_t
H5FD_hdfs_get_eoa(
- const H5FD_t *_file,
+ const H5FD_t *_file,
H5FD_mem_t H5_ATTR_UNUSED type)
{
-#ifdef H5_HAVE_LIBHDFS
const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file;
-#endif /* H5_HAVE_LIBHDFS */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1650,14 +1668,27 @@ H5FD_hdfs_get_eoa(
HDfprintf(stdout, "H5FD_hdfs_get_eoa() called.\n");
#endif
-#ifdef H5_HAVE_LIBHDFS
FUNC_LEAVE_NOAPI(file->eoa)
-#else
- FUNC_LEAVE_NOAPI(0)
-#endif /* H5_HAVE_LIBHDFS */
+} /* end H5FD_hdfs_get_eoa() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static haddr_t
+H5FD_hdfs_get_eoa(
+ const H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+#if HDFS_DEBUG
+ HDfprintf(stdout, "H5FD_hdfs_get_eoa() called.\n");
+#endif
+
+ FUNC_LEAVE_NOAPI(0)
} /* end H5FD_hdfs_get_eoa() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
@@ -1678,15 +1709,15 @@ H5FD_hdfs_get_eoa(
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static herr_t
H5FD_hdfs_set_eoa(
- H5FD_t *_file,
- H5FD_mem_t H5_ATTR_UNUSED type,
+ H5FD_t *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type,
haddr_t addr)
{
-#ifdef H5_HAVE_LIBHDFS
H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file;
-#endif /* H5_HAVE_LIBHDFS */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1694,16 +1725,30 @@ H5FD_hdfs_set_eoa(
HDfprintf(stdout, "H5FD_hdfs_set_eoa() called.\n");
#endif
-#ifdef H5_HAVE_LIBHDFS
file->eoa = addr;
FUNC_LEAVE_NOAPI(SUCCEED)
-#else
- FUNC_LEAVE_NOAPI(FAIL)
-#endif /* H5_HAVE_LIBHDFS */
+} /* H5FD_hdfs_set_eoa() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static herr_t
+H5FD_hdfs_set_eoa(
+ H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type,
+ haddr_t H5_ATTR_UNUSED addr)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+#if HDFS_DEBUG
+ HDfprintf(stdout, "H5FD_hdfs_set_eoa() called.\n");
+#endif
+
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5FD_hdfs_set_eoa() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
@@ -1715,7 +1760,7 @@ H5FD_hdfs_set_eoa(
*
* Return:
*
- * EOF: the first address past the end of the "file", either the
+ * EOF: the first address past the end of the "file", either the
* filesystem file or the HDF5 file.
*
* Programmer: Jacob Smith
@@ -1723,14 +1768,14 @@ H5FD_hdfs_set_eoa(
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static haddr_t
H5FD_hdfs_get_eof(
- const H5FD_t *_file,
+ const H5FD_t *_file,
H5FD_mem_t H5_ATTR_UNUSED type)
{
-#ifdef H5_HAVE_LIBHDFS
const H5FD_hdfs_t *file = (const H5FD_hdfs_t *)_file;
-#endif /* H5_HAVE_LIBHDFS */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1738,17 +1783,30 @@ H5FD_hdfs_get_eof(
HDfprintf(stdout, "H5FD_hdfs_get_eof() called.\n");
#endif
-#ifdef H5_HAVE_LIBHDFS
HDassert(file->hdfs_handle != NULL);
HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
FUNC_LEAVE_NOAPI((size_t) file->hdfs_handle->fileinfo->mSize)
-#else
- FUNC_LEAVE_NOAPI((size_t)0)
-#endif /* H5_HAVE_LIBHDFS */
+} /* end H5FD_hdfs_get_eof() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static haddr_t
+H5FD_hdfs_get_eof(
+ const H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+#if HDFS_DEBUG
+ HDfprintf(stdout, "H5FD_hdfs_get_eof() called.\n");
+#endif
+
+ FUNC_LEAVE_NOAPI((size_t)0)
} /* end H5FD_hdfs_get_eof() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
@@ -1769,16 +1827,16 @@ H5FD_hdfs_get_eof(
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static herr_t
H5FD_hdfs_get_handle(
- H5FD_t *_file,
- hid_t H5_ATTR_UNUSED fapl,
+ H5FD_t *_file,
+ hid_t H5_ATTR_UNUSED fapl,
void **file_handle)
{
herr_t ret_value = SUCCEED;
-#ifdef H5_HAVE_LIBHDFS
H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file;
-#endif
FUNC_ENTER_NOAPI_NOINIT
@@ -1786,29 +1844,48 @@ H5FD_hdfs_get_handle(
HDfprintf(stdout, "H5FD_hdfs_get_handle() called.\n");
#endif /* HDFS_DEBUG */
-#ifdef H5_HAVE_LIBHDFS
if (!file_handle) {
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"file handle not valid")
}
*file_handle = file->hdfs_handle;
-#else
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_hdfs_get_handle() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static herr_t
+H5FD_hdfs_get_handle(
+ H5FD_t H5_ATTR_UNUSED *_file,
+ hid_t H5_ATTR_UNUSED fapl,
+ void H5_ATTR_UNUSED **file_handle)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+ HDfprintf(stdout, "H5FD_hdfs_get_handle() called.\n");
+#endif /* HDFS_DEBUG */
+
HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
"Illegal get-handle of unsupported virtual file (hdfs)");
-#endif /* H5_HAVE_LIBHDFS */
done:
FUNC_LEAVE_NOAPI(ret_value)
-
} /* end H5FD_hdfs_get_handle() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
* Function: H5FD_hdfs_read()
*
- * Purpose:
+ * Purpose:
*
* Reads SIZE bytes of data from FILE beginning at address ADDR
* into buffer BUF according to data transfer properties in DXPL_ID.
@@ -1828,20 +1905,20 @@ done:
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_LIBHDFS
+
static herr_t
H5FD_hdfs_read(
- H5FD_t *_file,
- H5FD_mem_t H5_ATTR_UNUSED type,
+ H5FD_t *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type,
hid_t H5_ATTR_UNUSED dxpl_id,
haddr_t addr, /* start offset */
size_t size, /* length of read */
void *buf) /* out */
{
herr_t ret_value = SUCCEED;
-#if H5_HAVE_LIBHDFS
H5FD_hdfs_t *file = (H5FD_hdfs_t *)_file;
size_t filesize = 0;
-#endif /* H5_HAVE_LIBHDFS */
#if HDFS_STATS
/* working variables for storing stats */
hdfs_statsbin *bin = NULL;
@@ -1854,10 +1931,6 @@ H5FD_hdfs_read(
HDfprintf(stdout, "H5FD_hdfs_read() called.\n");
#endif /* HDFS_DEBUG */
-#ifndef H5_HAVE_LIBHDFS
- HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
- "Illegal get-handle of unsupported virtual file (hdfs)");
-#else
HDassert(file != NULL);
HDassert(file->hdfs_handle != NULL);
HDassert(file->hdfs_handle->magic == HDFS_HDFST_MAGIC);
@@ -1894,37 +1967,64 @@ H5FD_hdfs_read(
? &file->raw[bin_i]
: &file->meta[bin_i];
- /* Store collected stats in appropriate bin
+ /* Store collected stats in appropriate bin
*/
if (bin->count == 0) {
bin->min = size;
bin->max = size;
- } else {
- if (size < bin->min) bin->min = size;
- if (size > bin->max) bin->max = size;
+ }
+ else {
+ if (size < bin->min) { bin->min = size; }
+ if (size > bin->max) { bin->max = size; }
}
bin->count++;
bin->bytes += (unsigned long long)size;
#endif /* HDFS_STATS */
-#endif /* H5_HAVE_LIBHDFS */
done:
FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_hdfs_read() */
+
+#else /* H5_HAVE_LIBHDFS not defined */
+
+static herr_t
+H5FD_hdfs_read(
+ H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type,
+ hid_t H5_ATTR_UNUSED dxpl_id,
+ haddr_t H5_ATTR_UNUSED addr,
+ size_t H5_ATTR_UNUSED size,
+ void H5_ATTR_UNUSED *buf)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#if HDFS_DEBUG
+ HDfprintf(stdout, "H5FD_hdfs_read() called.\n");
+#endif /* HDFS_DEBUG */
+
+ HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL,
+ "Illegal get-handle of unsupported virtual file (hdfs)");
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FD_hdfs_read() */
+#endif /* H5_HAVE_LIBHDFS */
+
/*-------------------------------------------------------------------------
*
* Function: H5FD_hdfs_write()
*
- * Purpose:
+ * Purpose:
*
* Write bytes to file.
* UNSUPPORTED IN READ-ONLY HDFS VFD.
*
- * Return:
+ * Return:
*
* FAIL (Not possible with Read-Only S3 file.)
*
@@ -1937,11 +2037,11 @@ done:
*/
static herr_t
H5FD_hdfs_write(
- H5FD_t H5_ATTR_UNUSED *_file,
- H5FD_mem_t H5_ATTR_UNUSED type,
+ H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_mem_t H5_ATTR_UNUSED type,
hid_t H5_ATTR_UNUSED dxpl_id,
- haddr_t H5_ATTR_UNUSED addr,
- size_t H5_ATTR_UNUSED size,
+ haddr_t H5_ATTR_UNUSED addr,
+ size_t H5_ATTR_UNUSED size,
const void H5_ATTR_UNUSED *buf)
{
herr_t ret_value = FAIL;
@@ -1957,7 +2057,6 @@ H5FD_hdfs_write(
done:
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5FD_hdfs_write() */
@@ -1985,8 +2084,8 @@ done:
*/
static herr_t
H5FD_hdfs_truncate(
- H5FD_t H5_ATTR_UNUSED *_file,
- hid_t H5_ATTR_UNUSED dxpl_id,
+ H5FD_t H5_ATTR_UNUSED *_file,
+ hid_t H5_ATTR_UNUSED dxpl_id,
hbool_t H5_ATTR_UNUSED closing)
{
herr_t ret_value = SUCCEED;
@@ -2002,7 +2101,6 @@ H5FD_hdfs_truncate(
done:
FUNC_LEAVE_NOAPI(ret_value)
-
} /* end H5FD_hdfs_truncate() */
@@ -2031,12 +2129,11 @@ done:
*/
static herr_t
H5FD_hdfs_lock(
- H5FD_t H5_ATTR_UNUSED *_file,
+ H5FD_t H5_ATTR_UNUSED *_file,
hbool_t H5_ATTR_UNUSED rw)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
FUNC_LEAVE_NOAPI(SUCCEED)
-
} /* end H5FD_hdfs_lock() */
@@ -2065,6 +2162,5 @@ H5FD_hdfs_unlock(H5FD_t H5_ATTR_UNUSED *_file)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
FUNC_LEAVE_NOAPI(SUCCEED)
-
} /* end H5FD_hdfs_unlock() */