author     Dana Robinson <43805+derobins@users.noreply.github.com>  2023-06-28 15:31:32 (GMT)
committer  GitHub <noreply@github.com>  2023-06-28 15:31:32 (GMT)
commit     187ea8a9ae1405b5b46ca47a32fb1fb9e2686c98 (patch)
tree       baffa167d0796786241aef6b0ce76d4adec3b66e /src/H5FDhdfs.c
parent     7a44581a84778a1346a2fd5b6cca7d9db905a321 (diff)
Rename HD(f)printf() to (f)printf() (#3194)
Diffstat (limited to 'src/H5FDhdfs.c')
-rw-r--r--  src/H5FDhdfs.c  92
1 file changed, 46 insertions, 46 deletions
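
This commit drops the HD prefix from the (f)printf calls throughout the HDFS VFD. As a rough sketch (the wrapper definition lives in HDF5's private headers and is restated here as an assumption), the HD-prefixed name was a plain pass-through to the C library call, so the rename is mechanical:

/* Hypothetical sketch of the retired wrapper; HDF5 historically defines
 * HD-prefixed aliases for C standard library calls in its private headers. */
#ifndef HDfprintf
#define HDfprintf fprintf /* assumed: a plain alias with no added behavior */
#endif

/* Before this commit, debug tracing in H5FDhdfs.c went through the wrapper: */
#if HDFS_DEBUG
    HDfprintf(stdout, "called %s.\n", __func__);
#endif

/* After this commit, the same call site uses the C library function directly: */
#if HDFS_DEBUG
    fprintf(stdout, "called %s.\n", __func__);
#endif

Every hunk below applies this one-token substitution; format strings and arguments are unchanged.
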
diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c
index b461bd4..00b9b92 100644
--- a/src/H5FDhdfs.c
+++ b/src/H5FDhdfs.c
@@ -347,7 +347,7 @@ H5FD_hdfs_init(void)
FUNC_ENTER_NOAPI(H5I_INVALID_HID)
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (H5I_VFL != H5I_get_type(H5FD_HDFS_g))
@@ -388,7 +388,7 @@ H5FD__hdfs_term(void)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
/* Reset VFL ID */
@@ -422,7 +422,7 @@ H5FD__hdfs_handle_open(const char *path, const char *namenode_name, const int32_
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (path == NULL || path[0] == '\0')
@@ -507,7 +507,7 @@ H5FD__hdfs_handle_close(hdfs_t *handle)
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (handle == NULL)
@@ -598,7 +598,7 @@ H5Pset_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa)
assert(fa != NULL);
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
plist = H5P_object_verify(fapl_id, H5P_FILE_ACCESS);
@@ -639,7 +639,7 @@ H5Pget_fapl_hdfs(hid_t fapl_id, H5FD_hdfs_fapl_t *fa_dst /*out*/)
H5TRACE2("e", "ix", fapl_id, fa_dst);
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (fa_dst == NULL)
@@ -796,7 +796,7 @@ hdfs__reset_stats(H5FD_hdfs_t *file)
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (file == NULL)
@@ -854,7 +854,7 @@ H5FD__hdfs_open(const char *path, unsigned flags, hid_t fapl_id, haddr_t maxaddr
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif /* HDFS_DEBUG */
/* Sanity check on file offsets */
@@ -1025,10 +1025,10 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file)
* PRINT OVERVIEW *
******************/
- HDfprintf(stream, "TOTAL READS: %llu (%llu meta, %llu raw)\n", count_raw + count_meta, count_meta,
- count_raw);
- HDfprintf(stream, "TOTAL BYTES: %llu (%llu meta, %llu raw)\n", bytes_raw + bytes_meta, bytes_meta,
- bytes_raw);
+ fprintf(stream, "TOTAL READS: %llu (%llu meta, %llu raw)\n", count_raw + count_meta, count_meta,
+ count_raw);
+ fprintf(stream, "TOTAL BYTES: %llu (%llu meta, %llu raw)\n", bytes_raw + bytes_meta, bytes_meta,
+ bytes_raw);
if (count_raw + count_meta == 0)
goto done;
@@ -1037,60 +1037,60 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file)
* PRINT AGGREGATE STATS *
*************************/
- HDfprintf(stream, "SIZES meta raw\n");
- HDfprintf(stream, " min ");
+ fprintf(stream, "SIZES meta raw\n");
+ fprintf(stream, " min ");
if (count_meta == 0)
- HDfprintf(stream, " 0.000 ");
+ fprintf(stream, " 0.000 ");
else {
re_dub = (double)min_meta;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
}
if (count_raw == 0)
- HDfprintf(stream, " 0.000 \n");
+ fprintf(stream, " 0.000 \n");
else {
re_dub = (double)min_raw;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
}
- HDfprintf(stream, " avg ");
+ fprintf(stream, " avg ");
re_dub = (double)average_meta;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
re_dub = (double)average_raw;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
- HDfprintf(stream, " max ");
+ fprintf(stream, " max ");
re_dub = (double)max_meta;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c ", re_dub, suffixes[suffix_i]);
re_dub = (double)max_raw;
for (suffix_i = 0; re_dub >= 1024.0; suffix_i++)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
+ fprintf(stream, "%8.3lf%c\n", re_dub, suffixes[suffix_i]);
/******************************
* PRINT INDIVIDUAL BIN STATS *
******************************/
- HDfprintf(stream, "BINS # of reads total bytes average size\n");
- HDfprintf(stream, " up-to meta raw meta raw meta raw\n");
+ fprintf(stream, "BINS # of reads total bytes average size\n");
+ fprintf(stream, " up-to meta raw meta raw meta raw\n");
for (i = 0; i <= HDFS_STATS_BIN_COUNT; i++) {
const hdfs_statsbin *m;
@@ -1114,10 +1114,10 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file)
if (i == HDFS_STATS_BIN_COUNT) {
range_end = hdfs_stats_boundaries[i - 1];
- HDfprintf(stream, ">");
+ fprintf(stream, ">");
}
else
- HDfprintf(stream, " ");
+ fprintf(stream, " ");
bm_val = (double)m->bytes;
for (suffix_i = 0; bm_val >= 1024.0; suffix_i++)
@@ -1150,14 +1150,14 @@ hdfs__fprint_stats(FILE *stream, const H5FD_hdfs_t *file)
re_dub /= 1024.0;
assert(suffix_i < sizeof(suffixes));
- HDfprintf(stream, " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n", re_dub,
- suffixes[suffix_i], /* bin ceiling */
- m->count, /* metadata reads */
- r->count, /* raw data reads */
- bm_val, bm_suffix, /* metadata bytes */
- br_val, br_suffix, /* raw data bytes */
- am_val, am_suffix, /* metadata average */
- ar_val, ar_suffix); /* raw data average */
+ fprintf(stream, " %8.3f%c %7d %7d %8.3f%c %8.3f%c %8.3f%c %8.3f%c\n", re_dub,
+ suffixes[suffix_i], /* bin ceiling */
+ m->count, /* metadata reads */
+ r->count, /* raw data reads */
+ bm_val, bm_suffix, /* metadata bytes */
+ br_val, br_suffix, /* raw data bytes */
+ am_val, am_suffix, /* metadata average */
+ ar_val, ar_suffix); /* raw data average */
HDfflush(stream);
}
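
The stats printer changed above scales raw byte counts into human-readable units by repeatedly dividing by 1024 and indexing into a suffix table. A minimal standalone sketch of that pattern, separate from the diff (the function name and suffix table here are illustrative, not taken from the file):

#include <assert.h>
#include <stdio.h>

/* Print a byte count scaled to the largest unit that keeps the value below
 * 1024, mirroring the re_dub / suffixes[] loop used in hdfs__fprint_stats(). */
static void
print_scaled(FILE *stream, unsigned long long bytes)
{
    const char suffixes[] = {' ', 'K', 'M', 'G', 'T', 'P'};
    double     val        = (double)bytes;
    size_t     i;

    for (i = 0; val >= 1024.0; i++)
        val /= 1024.0;
    assert(i < sizeof(suffixes));
    fprintf(stream, "%8.3lf%c", val, suffixes[i]);
}
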
@@ -1192,7 +1192,7 @@ H5FD__hdfs_close(H5FD_t *_file)
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
/* Sanity checks */
@@ -1248,7 +1248,7 @@ H5FD__hdfs_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif /* HDFS_DEBUG */
assert(f1->hdfs_handle != NULL);
@@ -1321,7 +1321,7 @@ H5FD__hdfs_query(const H5FD_t H5_ATTR_UNUSED *_file, unsigned long *flags)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
if (flags) {
@@ -1359,7 +1359,7 @@ H5FD__hdfs_get_eoa(const H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
FUNC_LEAVE_NOAPI(file->eoa)
@@ -1390,7 +1390,7 @@ H5FD__hdfs_set_eoa(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
file->eoa = addr;
@@ -1424,7 +1424,7 @@ H5FD__hdfs_get_eof(const H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type)
FUNC_ENTER_PACKAGE_NOERR
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
assert(file->hdfs_handle != NULL);
@@ -1459,7 +1459,7 @@ H5FD__hdfs_get_handle(H5FD_t *_file, hid_t H5_ATTR_UNUSED fapl, void **file_hand
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif /* HDFS_DEBUG */
if (!file_handle)
@@ -1509,7 +1509,7 @@ H5FD__hdfs_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif /* HDFS_DEBUG */
assert(file != NULL);
@@ -1581,7 +1581,7 @@ H5FD__hdfs_write(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, h
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, "cannot write to read-only file")
@@ -1619,7 +1619,7 @@ H5FD__hdfs_truncate(H5FD_t H5_ATTR_UNUSED *_file, hid_t H5_ATTR_UNUSED dxpl_id,
FUNC_ENTER_PACKAGE
#if HDFS_DEBUG
- HDfprintf(stdout, "called %s.\n", __func__);
+ fprintf(stdout, "called %s.\n", __func__);
#endif
HGOTO_ERROR(H5E_VFL, H5E_UNSUPPORTED, FAIL, "cannot truncate read-only file")