Diffstat (limited to 'tools')
29 files changed, 109 insertions, 109 deletions
diff --git a/tools/lib/h5diff_util.c b/tools/lib/h5diff_util.c
index ade7dcd..0c20447 100644
--- a/tools/lib/h5diff_util.c
+++ b/tools/lib/h5diff_util.c
@@ -322,9 +322,9 @@ void
 print_found(hsize_t nfound)
 {
     if (g_Parallel)
-        parallel_print("%" H5_PRINTF_LL_WIDTH "u differences found\n", (unsigned long long)nfound);
+        parallel_print("%" PRIuHSIZE " differences found\n", nfound);
     else
-        HDfprintf(stdout, "%Hu differences found\n", nfound);
+        HDfprintf(stdout, "%" PRIuHSIZE " differences found\n", nfound);
 }
 
 /*-----------------------------------------------------------------
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index 34b70ac..56ffd27 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -81,8 +81,8 @@ h5tool_format_t h5tools_dataformat = {
     1, /*skip_first */
 
-    1,                       /*obj_hidefileno */
-    " " H5_PRINTF_HADDR_FMT, /*obj_format */
+    1,              /*obj_hidefileno */
+    " %" PRIuHADDR, /*obj_format */
 
     1,             /*dset_hidefileno */
     "DATASET %s ", /*dset_format */
@@ -3331,7 +3331,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
             if (HADDR_UNDEF == ioffset)
                 h5tools_str_append(&buffer, "OFFSET HADDR_UNDEF");
             else
-                h5tools_str_append(&buffer, "OFFSET " H5_PRINTF_HADDR_FMT, ioffset);
+                h5tools_str_append(&buffer, "OFFSET %" PRIuHADDR, ioffset);
             h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
                                    (hsize_t)0);
         }
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index 9a5e297..34b3c10 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -256,7 +256,7 @@ usage(const char *prog)
                    " <cred> :: \"(<aws-region>,<access-id>,<access-key>)\"\n");
     PRINTVALSTREAM(rawoutstream, " If absent or <cred> -> \"(,,)\", no authentication.\n");
-    PRINTVALSTREAM(rawoutstream, " Has no effect is filedriver is not `ros3'.\n");
+    PRINTVALSTREAM(rawoutstream, " Has no effect if filedriver is not \"ros3\".\n");
     PRINTVALSTREAM(rawoutstream,
                    " --hdfs-attrs=<attrs> Supply configuration information for HDFS file access.\n");
     PRINTVALSTREAM(rawoutstream, " For use with \"--filedriver=hdfs\"\n");
@@ -338,7 +338,7 @@ usage(const char *prog)
     PRINTVALSTREAM(rawoutstream, " -X S, --xml-ns=S (XML Schema) Use qualified names n the XML\n");
     PRINTVALSTREAM(rawoutstream, " \":\": no namespace, default: \"hdf5:\"\n");
     PRINTVALSTREAM(rawoutstream,
-                   " E.g., to dump a file called `-f', use h5dump -- -f\n");
+                   " E.g., to dump a file called \"-f\", use h5dump -- -f\n");
     PRINTVALSTREAM(rawoutstream, "\n");
     PRINTVALSTREAM(rawoutstream, "--------------- Subsetting Options ---------------\n");
     PRINTVALSTREAM(rawoutstream, " Subsetting is available by using the following options with a dataset\n");
@@ -1132,7 +1132,7 @@ parse_start:
             case 'M':
                 if (!last_was_dset) {
-                    error_msg("option `-%c' can only be used after --dataset option\n", opt);
+                    error_msg("option \"-%c\" can only be used after --dataset option\n", opt);
                     goto error;
                 }
                 if (parse_mask_list(opt_arg) != SUCCEED) {
@@ -1205,7 +1205,7 @@ parse_start:
                 struct subset_t *s;
 
                 if (!last_was_dset) {
-                    error_msg("option `-%c' can only be used after --dataset option\n", opt);
+                    error_msg("option \"-%c\" can only be used after --dataset option\n", opt);
                     goto error;
                 }
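The h5diff_util.c and h5tools_dump.c hunks above show the heart of this patch: HDF5's private printf extensions (the "%Hu" conversion understood only by the HDfprintf() wrapper, and the configure-detected H5_PRINTF_HADDR_FMT macro) are replaced with C99 <inttypes.h>-style format macros, so the strings work with any plain stdio function. A minimal sketch of the new pattern, assuming an HDF5 build whose H5public.h defines PRIuHSIZE and PRIuHADDR (true for the release line this patch targets):

#include <stdio.h>
#include "hdf5.h" /* hsize_t, haddr_t, PRIuHSIZE, PRIuHADDR */

int
main(void)
{
    hsize_t nfound = 42;   /* e.g., h5diff's difference counter */
    haddr_t offset = 4096; /* e.g., a file address in h5dump output */

    /* No custom wrapper needed: plain fprintf() accepts these strings. */
    fprintf(stdout, "%" PRIuHSIZE " differences found\n", nfound);
    fprintf(stdout, "OFFSET %" PRIuHADDR "\n", offset);
    return 0;
}

Because each macro expands to a string literal ("llu", "zu", etc., depending on how hsize_t is typedef'd), adjacent-string concatenation stitches it into one format string at compile time, exactly like PRIu64 from <inttypes.h>.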
fs_persist ? "TRUE" : "FALSE"); indentation(dump_indent + COL); - PRINTSTREAM(rawoutstream, "%s %Hu\n", "FREE_SPACE_SECTION_THRESHOLD", fs_threshold); + PRINTSTREAM(rawoutstream, "%s %" PRIuHSIZE "\n", "FREE_SPACE_SECTION_THRESHOLD", fs_threshold); indentation(dump_indent + COL); - PRINTSTREAM(rawoutstream, "%s %Hu\n", "FILE_SPACE_PAGE_SIZE", fsp_size); + PRINTSTREAM(rawoutstream, "%s %" PRIuHSIZE "\n", "FILE_SPACE_PAGE_SIZE", fsp_size); /*------------------------------------------------------------------------- * USER_BLOCK @@ -1263,7 +1263,7 @@ dump_fcpl(hid_t fid) indentation(dump_indent + COL); PRINTSTREAM(rawoutstream, "USER_BLOCK %s\n", BEGIN); indentation(dump_indent + COL + COL); - PRINTSTREAM(rawoutstream, "%s %Hu\n", "USERBLOCK_SIZE", userblock); + PRINTSTREAM(rawoutstream, "%s %" PRIuHSIZE "\n", "USERBLOCK_SIZE", userblock); indentation(dump_indent + COL); PRINTSTREAM(rawoutstream, "%s\n", END); diff --git a/tools/src/h5dump/h5dump_xml.c b/tools/src/h5dump/h5dump_xml.c index e4bbb9b..0457d32 100644 --- a/tools/src/h5dump/h5dump_xml.c +++ b/tools/src/h5dump/h5dump_xml.c @@ -93,8 +93,8 @@ static h5tool_format_t xml_dataformat = { 1, /*skip_first */ - 1, /*obj_hidefileno */ - " " H5_PRINTF_HADDR_FMT, /*obj_format */ + 1, /*obj_hidefileno */ + " %" PRIuHADDR, /*obj_format */ 1, /*dset_hidefileno */ "DATASET %s ", /*dset_format */ diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c index 14de44b..7a7887f 100644 --- a/tools/src/h5import/h5import.c +++ b/tools/src/h5import/h5import.c @@ -946,7 +946,7 @@ readFloatData(FILE *strm, struct Input *in) * * Return: 0, ok, -1 no * - * Programmer: Pedro Vicente, pvn@hdfgroup.org + * Programmer: Pedro Vicente * * Date: July, 26, 2007 * diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c index 68a8647..9ebd4d5 100644 --- a/tools/src/h5ls/h5ls.c +++ b/tools/src/h5ls/h5ls.c @@ -95,8 +95,8 @@ static h5tool_format_t ls_dataformat = { 0, /*skip_first */ - 0, /*obj_hidefileno */ - "-%lu:" H5_PRINTF_HADDR_FMT, /*obj_format */ + 0, /*obj_hidefileno */ + "-%lu:%" PRIuHADDR, /*obj_format */ 0, /*dset_hidefileno */ "DSET-%s ", /*dset_format */ @@ -965,7 +965,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind) /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size" *strangely, unless use another pointer "copy".*/ copy = value + i * dst_size; - h5tools_str_append(buffer, "%" H5_PRINTF_LL_WIDTH "d", *((long long *)((void *)copy))); + h5tools_str_append(buffer, "%lld", *((long long *)((void *)copy))); } } @@ -1908,10 +1908,9 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name) print_string(&buffer, f_name, TRUE); } else { - h5tools_str_append(&buffer, - " #%03d %10" H5_PRINTF_LL_WIDTH - "u %10" H5_PRINTF_LL_WIDTH "u %10" H5_PRINTF_LL_WIDTH "u ", - i, total, (hsize_t)f_offset, f_size); + h5tools_str_append( + &buffer, " #%03d %10" PRIuHSIZE " %10" PRIuHSIZE " %10" PRIuHSIZE " ", + i, total, (hsize_t)f_offset, f_size); print_string(&buffer, f_name, TRUE); } h5tools_str_append(&buffer, "\n"); diff --git a/tools/src/h5repack/h5repack.c b/tools/src/h5repack/h5repack.c index 395b66b..a1764df 100644 --- a/tools/src/h5repack/h5repack.c +++ b/tools/src/h5repack/h5repack.c @@ -718,7 +718,7 @@ check_options(pack_opt_t *options) } if (options->ublock_filename == NULL && options->ublock_size != 0) - H5TOOLS_GOTO_ERROR((-1), "file name missing for user block", options->ublock_filename); + H5TOOLS_GOTO_ERROR((-1), "file name missing for user block"); 
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index f834706..12a408e 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -1196,7 +1196,7 @@ print_file_info(const iter_t *iter)
     HDprintf("\t# of unique links: %lu\n", iter->uniq_links);
     HDprintf("\t# of unique other: %lu\n", iter->uniq_others);
     HDprintf("\tMax. # of links to object: %lu\n", iter->max_links);
-    HDfprintf(stdout, "\tMax. # of objects in group: %Hu\n", iter->max_fanout);
+    HDfprintf(stdout, "\tMax. # of objects in group: %" PRIuHSIZE "\n", iter->max_fanout);
 
     return 0;
 } /* print_file_info() */
@@ -1219,40 +1219,40 @@ static herr_t
 print_file_metadata(const iter_t *iter)
 {
     HDfprintf(stdout, "File space information for file metadata (in bytes):\n");
-    HDfprintf(stdout, "\tSuperblock: %Hu\n", iter->super_size);
-    HDfprintf(stdout, "\tSuperblock extension: %Hu\n", iter->super_ext_size);
-    HDfprintf(stdout, "\tUser block: %Hu\n", iter->ublk_size);
+    HDfprintf(stdout, "\tSuperblock: %" PRIuHSIZE "\n", iter->super_size);
+    HDfprintf(stdout, "\tSuperblock extension: %" PRIuHSIZE "\n", iter->super_ext_size);
+    HDfprintf(stdout, "\tUser block: %" PRIuHSIZE "\n", iter->ublk_size);
 
     HDfprintf(stdout, "\tObject headers: (total/unused)\n");
-    HDfprintf(stdout, "\t\tGroups: %Hu/%Hu\n", iter->group_ohdr_info.total_size,
+    HDfprintf(stdout, "\t\tGroups: %" PRIuHSIZE "/%" PRIuHSIZE "\n", iter->group_ohdr_info.total_size,
               iter->group_ohdr_info.free_size);
-    HDfprintf(stdout, "\t\tDatasets(exclude compact data): %Hu/%Hu\n", iter->dset_ohdr_info.total_size,
-              iter->dset_ohdr_info.free_size);
-    HDfprintf(stdout, "\t\tDatatypes: %Hu/%Hu\n", iter->dtype_ohdr_info.total_size,
+    HDfprintf(stdout, "\t\tDatasets(exclude compact data): %" PRIuHSIZE "/%" PRIuHSIZE "\n",
+              iter->dset_ohdr_info.total_size, iter->dset_ohdr_info.free_size);
+    HDfprintf(stdout, "\t\tDatatypes: %" PRIuHSIZE "/%" PRIuHSIZE "\n", iter->dtype_ohdr_info.total_size,
               iter->dtype_ohdr_info.free_size);
 
     HDfprintf(stdout, "\tGroups:\n");
-    HDfprintf(stdout, "\t\tB-tree/List: %Hu\n", iter->groups_btree_storage_size);
-    HDfprintf(stdout, "\t\tHeap: %Hu\n", iter->groups_heap_storage_size);
+    HDfprintf(stdout, "\t\tB-tree/List: %" PRIuHSIZE "\n", iter->groups_btree_storage_size);
+    HDfprintf(stdout, "\t\tHeap: %" PRIuHSIZE "\n", iter->groups_heap_storage_size);
 
     HDfprintf(stdout, "\tAttributes:\n");
-    HDfprintf(stdout, "\t\tB-tree/List: %Hu\n", iter->attrs_btree_storage_size);
-    HDfprintf(stdout, "\t\tHeap: %Hu\n", iter->attrs_heap_storage_size);
+    HDfprintf(stdout, "\t\tB-tree/List: %" PRIuHSIZE "\n", iter->attrs_btree_storage_size);
+    HDfprintf(stdout, "\t\tHeap: %" PRIuHSIZE "\n", iter->attrs_heap_storage_size);
 
     HDfprintf(stdout, "\tChunked datasets:\n");
-    HDfprintf(stdout, "\t\tIndex: %Hu\n", iter->datasets_index_storage_size);
+    HDfprintf(stdout, "\t\tIndex: %" PRIuHSIZE "\n", iter->datasets_index_storage_size);
 
     HDfprintf(stdout, "\tDatasets:\n");
-    HDfprintf(stdout, "\t\tHeap: %Hu\n", iter->datasets_heap_storage_size);
+    HDfprintf(stdout, "\t\tHeap: %" PRIuHSIZE "\n", iter->datasets_heap_storage_size);
 
     HDfprintf(stdout, "\tShared Messages:\n");
-    HDfprintf(stdout, "\t\tHeader: %Hu\n", iter->SM_hdr_storage_size);
-    HDfprintf(stdout, "\t\tB-tree/List: %Hu\n", iter->SM_index_storage_size);
-    HDfprintf(stdout, "\t\tHeap: %Hu\n", iter->SM_heap_storage_size);
+    HDfprintf(stdout, "\t\tHeader: %" PRIuHSIZE "\n", iter->SM_hdr_storage_size);
+    HDfprintf(stdout, "\t\tB-tree/List: %" PRIuHSIZE "\n", iter->SM_index_storage_size);
+    HDfprintf(stdout, "\t\tHeap: %" PRIuHSIZE "\n", iter->SM_heap_storage_size);
 
     HDfprintf(stdout, "\tFree-space managers:\n");
-    HDfprintf(stdout, "\t\tHeader: %Hu\n", iter->free_hdr);
-    HDfprintf(stdout, "\t\tAmount of free space: %Hu\n", iter->free_space);
+    HDfprintf(stdout, "\t\tHeader: %" PRIuHSIZE "\n", iter->free_hdr);
+    HDfprintf(stdout, "\t\tAmount of free space: %" PRIuHSIZE "\n", iter->free_space);
 
     return 0;
 } /* print_file_metadata() */
@@ -1330,11 +1330,11 @@ print_group_metadata(const iter_t *iter)
 {
     HDprintf("File space information for groups' metadata (in bytes):\n");
 
-    HDfprintf(stdout, "\tObject headers (total/unused): %Hu/%Hu\n", iter->group_ohdr_info.total_size,
-              iter->group_ohdr_info.free_size);
+    HDfprintf(stdout, "\tObject headers (total/unused): %" PRIuHSIZE "/%" PRIuHSIZE "\n",
+              iter->group_ohdr_info.total_size, iter->group_ohdr_info.free_size);
 
-    HDfprintf(stdout, "\tB-tree/List: %Hu\n", iter->groups_btree_storage_size);
-    HDfprintf(stdout, "\tHeap: %Hu\n", iter->groups_heap_storage_size);
+    HDfprintf(stdout, "\tB-tree/List: %" PRIuHSIZE "\n", iter->groups_btree_storage_size);
+    HDfprintf(stdout, "\tHeap: %" PRIuHSIZE "\n", iter->groups_heap_storage_size);
 
     return 0;
 } /* print_group_metadata() */
@@ -1368,7 +1368,7 @@ print_dataset_info(const iter_t *iter)
             HDprintf("\t\t# of dataset with rank %u: %lu\n", u, iter->dset_rank_count[u]);
 
     HDprintf("1-D Dataset information:\n");
-    HDfprintf(stdout, "\tMax. dimension size of 1-D datasets: %Hu\n", iter->max_dset_dims);
+    HDfprintf(stdout, "\tMax. dimension size of 1-D datasets: %" PRIuHSIZE "\n", iter->max_dset_dims);
     HDprintf("\tSmall 1-D datasets (with dimension sizes 0 to %u):\n", sdsets_threshold - 1);
     total = 0;
     for (u = 0; u < (unsigned)sdsets_threshold; u++) {
@@ -1400,8 +1400,9 @@ print_dataset_info(const iter_t *iter)
     } /* end if */
 
     HDprintf("Dataset storage information:\n");
-    HDfprintf(stdout, "\tTotal raw data size: %Hu\n", iter->dset_storage_size);
-    HDfprintf(stdout, "\tTotal external raw data size: %Hu\n", iter->dset_external_storage_size);
+    HDfprintf(stdout, "\tTotal raw data size: %" PRIuHSIZE "\n", iter->dset_storage_size);
+    HDfprintf(stdout, "\tTotal external raw data size: %" PRIuHSIZE "\n",
+              iter->dset_external_storage_size);
 
     HDprintf("Dataset layout information:\n");
     for (u = 0; u < H5D_NLAYOUTS; u++)
@@ -1445,11 +1446,11 @@ print_dset_metadata(const iter_t *iter)
 {
     HDprintf("File space information for datasets' metadata (in bytes):\n");
 
-    HDfprintf(stdout, "\tObject headers (total/unused): %Hu/%Hu\n", iter->dset_ohdr_info.total_size,
-              iter->dset_ohdr_info.free_size);
+    HDfprintf(stdout, "\tObject headers (total/unused): %" PRIuHSIZE "/%" PRIuHSIZE "\n",
+              iter->dset_ohdr_info.total_size, iter->dset_ohdr_info.free_size);
 
-    HDfprintf(stdout, "\tIndex for Chunked datasets: %Hu\n", iter->datasets_index_storage_size);
-    HDfprintf(stdout, "\tHeap: %Hu\n", iter->datasets_heap_storage_size);
+    HDfprintf(stdout, "\tIndex for Chunked datasets: %" PRIuHSIZE "\n", iter->datasets_index_storage_size);
+    HDfprintf(stdout, "\tHeap: %" PRIuHSIZE "\n", iter->datasets_heap_storage_size);
 
     return 0;
 } /* print_dset_metadata() */
@@ -1563,7 +1564,7 @@ print_freespace_info(const iter_t *iter)
     unsigned u; /* Local index variable */
 
     HDfprintf(stdout, "Free-space persist: %s\n", iter->fs_persist ? "TRUE" : "FALSE");
-    HDfprintf(stdout, "Free-space section threshold: %Hu bytes\n", iter->fs_threshold);
+    HDfprintf(stdout, "Free-space section threshold: %" PRIuHSIZE " bytes\n", iter->fs_threshold);
     HDprintf("Small size free-space sections (< %u bytes):\n", (unsigned)SIZE_SMALL_SECTS);
     total = 0;
     for (u = 0; u < SIZE_SMALL_SECTS; u++) {
@@ -1611,7 +1612,7 @@ print_storage_summary(const iter_t *iter)
     double percent = 0.0f;
 
     HDfprintf(stdout, "File space management strategy: %s\n", FS_STRATEGY_NAME[iter->fs_strategy]);
-    HDfprintf(stdout, "File space page size: %Hu bytes\n", iter->fsp_size);
+    HDfprintf(stdout, "File space page size: %" PRIuHSIZE " bytes\n", iter->fsp_size);
     HDprintf("Summary of file space information:\n");
     total_meta =
         iter->super_size + iter->super_ext_size + iter->ublk_size + iter->group_ohdr_info.total_size +
@@ -1620,27 +1621,28 @@ print_storage_summary(const iter_t *iter)
         iter->datasets_index_storage_size + iter->datasets_heap_storage_size + iter->SM_hdr_storage_size +
         iter->SM_index_storage_size + iter->SM_heap_storage_size + iter->free_hdr;
 
-    HDfprintf(stdout, " File metadata: %Hu bytes\n", total_meta);
-    HDfprintf(stdout, " Raw data: %Hu bytes\n", iter->dset_storage_size);
+    HDfprintf(stdout, " File metadata: %" PRIuHSIZE " bytes\n", total_meta);
+    HDfprintf(stdout, " Raw data: %" PRIuHSIZE " bytes\n", iter->dset_storage_size);
 
     percent = ((double)iter->free_space / (double)iter->filesize) * (double)100.0f;
-    HDfprintf(stdout, " Amount/Percent of tracked free space: %Hu bytes/%3.1f%\n", iter->free_space,
-              percent);
+    HDfprintf(stdout, " Amount/Percent of tracked free space: %" PRIuHSIZE " bytes/%3.1f%%\n",
+              iter->free_space, percent);
 
     if (iter->filesize < (total_meta + iter->dset_storage_size + iter->free_space)) {
         unaccount = (total_meta + iter->dset_storage_size + iter->free_space) - iter->filesize;
-        HDfprintf(stdout, " ??? File has %Hu more bytes accounted for than its size! ???\n", unaccount);
+        HDfprintf(stdout, " ??? File has %" PRIuHSIZE " more bytes accounted for than its size! ???\n",
+                  unaccount);
     }
     else {
         unaccount = iter->filesize - (total_meta + iter->dset_storage_size + iter->free_space);
-        HDfprintf(stdout, " Unaccounted space: %Hu bytes\n", unaccount);
+        HDfprintf(stdout, " Unaccounted space: %" PRIuHSIZE " bytes\n", unaccount);
     }
 
-    HDfprintf(stdout, "Total space: %Hu bytes\n",
+    HDfprintf(stdout, "Total space: %" PRIuHSIZE " bytes\n",
               total_meta + iter->dset_storage_size + iter->free_space + unaccount);
 
     if (iter->nexternal)
-        HDfprintf(stdout, "External raw data: %Hu bytes\n", iter->dset_external_storage_size);
+        HDfprintf(stdout, "External raw data: %" PRIuHSIZE " bytes\n", iter->dset_external_storage_size);
 
     return 0;
 } /* print_storage_summary() */
diff --git a/tools/src/misc/h5clear.c b/tools/src/misc/h5clear.c
index 0ef9483..a931833 100644
--- a/tools/src/misc/h5clear.c
+++ b/tools/src/misc/h5clear.c
@@ -358,7 +358,7 @@ main(int argc, const char *argv[])
             h5tools_setstatus(EXIT_FAILURE);
             goto done;
         }
-        HDfprintf(stdout, "EOA is %a; EOF is %a \n", eoa, st.st_size);
+        HDfprintf(stdout, "EOA is %" PRIuHADDR "; EOF is %" PRIuHADDR " \n", eoa, (haddr_t)st.st_size);
     }
 
     /* --increment option */
diff --git a/tools/src/misc/h5debug.c b/tools/src/misc/h5debug.c
index 268498f..85abb8a 100644
--- a/tools/src/misc/h5debug.c
+++ b/tools/src/misc/h5debug.c
@@ -347,7 +347,7 @@ main(int argc, char *argv[])
     /*
     * Read the signature at the specified file position.
     */
-    HDfprintf(stdout, "Reading signature at address %a (rel)\n", addr);
+    HDfprintf(stdout, "Reading signature at address %" PRIuHADDR " (rel)\n", addr);
     if (H5F_block_read(f, H5FD_MEM_SUPER, addr, sizeof(sig), sig) < 0) {
         HDfprintf(stderr, "cannot read signature\n");
         exit_value = 3;
diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c
index 6424384..65ceb4f 100644
--- a/tools/src/misc/h5repart.c
+++ b/tools/src/misc/h5repart.c
@@ -64,10 +64,10 @@ usage(const char *progname)
                       "(windows or sec2)\n");
     HDfprintf(stderr, " SRC The name of the source file\n");
     HDfprintf(stderr, " DST The name of the destination files\n");
-    HDfprintf(stderr, "Sizes may be suffixed with `g' for GB, `m' for MB or "
-                      "`k' for kB.\n");
+    HDfprintf(stderr, "Sizes may be suffixed with 'g' for GB, 'm' for MB or "
+                      "'k' for kB.\n");
     HDfprintf(stderr, "File family names include an integer printf "
-                      "format such as `%%d'\n");
+                      "format such as '%%d'\n");
     HDexit(EXIT_FAILURE);
 }
diff --git a/tools/test/h5repack/h5repack.sh.in b/tools/test/h5repack/h5repack.sh.in
index 18e6d86..a95a22e 100644
--- a/tools/test/h5repack/h5repack.sh.in
+++ b/tools/test/h5repack/h5repack.sh.in
@@ -689,7 +689,7 @@ DIFFFAIL()
         $RUNSERIAL $H5DIFF_BIN -q "$@"
     )
     RET=$?
-    if [ $RET == 0 ] ; then
+    if [ $RET -eq 0 ] ; then
         echo "*FAILED*"
         nerrors="`expr $nerrors + 1`"
     else
diff --git a/tools/test/h5repack/h5repacktst.c b/tools/test/h5repack/h5repacktst.c
index ea9ded1..85af344 100644
--- a/tools/test/h5repack/h5repacktst.c
+++ b/tools/test/h5repack/h5repacktst.c
@@ -3292,8 +3292,6 @@ make_layout2(hid_t loc_id)
     if (make_dset(loc_id, CHUNKED_S_FIX, s_sid, chunked_dcpl, s_buf[0]) < 0)
         goto error;
 
-    HDfree(s_buf);
-
     ret_value = 0;
 
 error:
diff --git a/tools/test/perform/chunk.c b/tools/test/perform/chunk.c
index 90ae815..9f99ce9 100644
--- a/tools/test/perform/chunk.c
+++ b/tools/test/perform/chunk.c
@@ -12,7 +12,7 @@
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 /*
- * Programmer: Robb Matzke <robb@arborea.spizella.com>
+ * Programmer: Robb Matzke
  *             Thursday, May 14, 1998
 *
 * Purpose: Checks the effect of various I/O request sizes and raw data
diff --git a/tools/test/perform/chunk_cache.c b/tools/test/perform/chunk_cache.c
index 9a6d5fa..3a6a209 100644
--- a/tools/test/perform/chunk_cache.c
+++ b/tools/test/perform/chunk_cache.c
@@ -259,11 +259,11 @@ check_partial_chunks_perf(hid_t file)
     end_t = H5_get_time();
 
     if ((end_t - start_t) > (double)0.0f)
-        printf("1. Partial chunks: total read time is %lf; number of bytes being read from file is %lu\n",
+        printf("1. Partial chunks: total read time is %lf; number of bytes being read from file is %zu\n",
                (end_t - start_t), nbytes_global);
     else
         printf("1. Partial chunks: no total read time because timer is not available; number of bytes being "
-               "read from file is %lu\n",
+               "read from file is %zu\n",
                nbytes_global);
 
     H5Dclose(dataset);
@@ -337,11 +337,11 @@ check_hash_value_perf(hid_t file)
     end_t = H5_get_time();
 
     if ((end_t - start_t) > (double)0.0f)
-        printf("2. Hash value: total read time is %lf; number of bytes being read from file is %lu\n",
+        printf("2. Hash value: total read time is %lf; number of bytes being read from file is %zu\n",
                (end_t - start_t), nbytes_global);
     else
         printf("2. Hash value: no total read time because timer is not available; number of bytes being read "
-               "from file is %lu\n",
+               "from file is %zu\n",
                nbytes_global);
 
     H5Dclose(dataset);
diff --git a/tools/test/perform/perf.c b/tools/test/perform/perf.c
index 6d467e1..22aca04 100644
--- a/tools/test/perform/perf.c
+++ b/tools/test/perform/perf.c
@@ -313,7 +313,7 @@ main(int argc, char **argv)
         VRFY((ret >= 0), "H5Dwrite dataset1 succeeded", !H5FATAL);
 
         if (ret < 0)
-            HDfprintf(stderr, "node %d, read error, loc = %Ld: %s\n", mynod, mynod * opt_block,
+            HDfprintf(stderr, "node %d, read error, loc = %" PRId64 ": %s\n", mynod, mynod * opt_block,
                       strerror(myerrno));
 
         /* if the user wanted to check correctness, compare the write
@@ -427,7 +427,8 @@ parse_args(int argc, char **argv)
                     if (NULL != (p = (char *)HDstrchr(optarg, '/')))
                         opt_threshold = (hsize_t)HDatoi(p + 1);
                 }
-                HDfprintf(stdout, "alignment/threshold=%Hu/%Hu\n", opt_alignment, opt_threshold);
+                HDfprintf(stdout, "alignment/threshold=%" PRIuHSIZE "/%" PRIuHSIZE "\n", opt_alignment,
+                          opt_threshold);
                 break;
 
             case '2': /* use 2-files, i.e., split file driver */
                 opt_split_vfd = 1;
diff --git a/tools/test/perform/pio_standalone.h b/tools/test/perform/pio_standalone.h
index 0e0ac26..d64f421 100644
--- a/tools/test/perform/pio_standalone.h
+++ b/tools/test/perform/pio_standalone.h
@@ -130,12 +130,12 @@
 #else /* H5_HAVE_WIN32_API */
 #define HDfileno(F) fileno(F)
 #endif /* H5_HAVE_WIN32_API */
-#define HDfloor(X) floor(X)
-#define HDfmod(X, Y) fmod(X, Y)
-#define HDfopen(S, M) fopen(S, M)
-#define HDfork() fork()
-#define HDfpathconf(F, N) fpathconf(F, N)
-H5_DLL int HDfprintf(FILE *stream, const char *fmt, ...);
+#define HDfloor(X) floor(X)
+#define HDfmod(X, Y) fmod(X, Y)
+#define HDfopen(S, M) fopen(S, M)
+#define HDfork() fork()
+#define HDfpathconf(F, N) fpathconf(F, N)
+#define HDfprintf fprintf
 #define HDfputc(C, F) fputc(C, F)
 #define HDfputs(S, F) fputs(S, F)
 #define HDfread(M, Z, N, F) fread(M, Z, N, F)
diff --git a/tools/test/perform/sio_engine.c b/tools/test/perform/sio_engine.c
index 79577b3..11001c6 100644
--- a/tools/test/perform/sio_engine.c
+++ b/tools/test/perform/sio_engine.c
@@ -783,7 +783,7 @@ do_read(results *res, file_descr *fd, parameters *parms, void *buffer)
 
     /* Allocate data verification buffer */
     if (NULL == (buffer2 = (char *)malloc(linear_buf_size))) {
-        HDfprintf(stderr, "malloc for data verification buffer size (%Zu) failed\n", linear_buf_size);
+        HDfprintf(stderr, "malloc for data verification buffer size (%zu) failed\n", linear_buf_size);
         GOTOERROR(FAIL);
     } /* end if */
diff --git a/tools/test/perform/sio_perf.c b/tools/test/perform/sio_perf.c
index d2425fd..fb31080 100644
--- a/tools/test/perform/sio_perf.c
+++ b/tools/test/perform/sio_perf.c
@@ -872,9 +872,9 @@ report_parameters(struct options *opts)
     HDfprintf(output, "\n");
 
     if (opts->page_size) {
-        HDfprintf(output, "Page Aggregation Enabled. Page size = %ld\n", opts->page_size);
+        HDfprintf(output, "Page Aggregation Enabled. Page size = %zu\n", opts->page_size);
         if (opts->page_buffer_size)
-            HDfprintf(output, "Page Buffering Enabled. Page Buffer size = %ld\n", opts->page_buffer_size);
+            HDfprintf(output, "Page Buffering Enabled. Page Buffer size = %zu\n", opts->page_buffer_size);
         else
             HDfprintf(output, "Page Buffering Disabled\n");
     }
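The two standalone benchmark headers (pio_standalone.h above, sio_standalone.h below) can make this switch because, with every "%Hu"/"%Zu"/"%a" string gone, HDfprintf no longer needs to be a variadic wrapper that reimplements format parsing; it aliases fprintf directly, and size_t values print with the standard %zu. A minimal sketch of the resulting arrangement:

#include <stdio.h>
#include <stddef.h>

#define HDfprintf fprintf /* the mapping the patch introduces */

int
main(void)
{
    size_t nbytes_global = 1048576; /* stand-in for the chunk-cache test's byte counter */

    /* %zu is the C99 conversion for size_t; %lu was wrong on LLP64 Windows,
     * where unsigned long is 32-bit but size_t is 64-bit. */
    HDfprintf(stdout, "number of bytes being read from file is %zu\n", nbytes_global);
    return 0;
}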
diff --git a/tools/test/perform/sio_standalone.h b/tools/test/perform/sio_standalone.h
index 02ec6c6..4001257 100644
--- a/tools/test/perform/sio_standalone.h
+++ b/tools/test/perform/sio_standalone.h
@@ -145,12 +145,12 @@
 #else /* H5_HAVE_WIN32_API */
 #define HDfileno(F) fileno(F)
 #endif /* H5_HAVE_WIN32_API */
-#define HDfloor(X) floor(X)
-#define HDfmod(X, Y) fmod(X, Y)
-#define HDfopen(S, M) fopen(S, M)
-#define HDfork() fork()
-#define HDfpathconf(F, N) fpathconf(F, N)
-H5_DLL int HDfprintf(FILE *stream, const char *fmt, ...);
+#define HDfloor(X) floor(X)
+#define HDfmod(X, Y) fmod(X, Y)
+#define HDfopen(S, M) fopen(S, M)
+#define HDfork() fork()
+#define HDfpathconf(F, N) fpathconf(F, N)
+#define HDfprintf fprintf
 #define HDfputc(C, F) fputc(C, F)
 #define HDfputs(S, F) fputs(S, F)
 #define HDfread(M, Z, N, F) fread(M, Z, N, F)
diff --git a/tools/testfiles/h5dump-help.txt b/tools/testfiles/h5dump-help.txt
index a122dee..5b11223 100644
--- a/tools/testfiles/h5dump-help.txt
+++ b/tools/testfiles/h5dump-help.txt
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
+++ b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsIncomplete.ddl b/tools/testfiles/pbits/tpbitsIncomplete.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsIncomplete.ddl
+++ b/tools/testfiles/pbits/tpbitsIncomplete.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsLengthPositive.ddl b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsLengthPositive.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset
diff --git a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
index a122dee..5b11223 100644
--- a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
@@ -15,7 +15,7 @@ usage: h5dump [OPTIONS] files
     --s3-cred=<cred>      Supply S3 authentication information to "ros3" vfd.
                           <cred> :: "(<aws-region>,<access-id>,<access-key>)"
                           If absent or <cred> -> "(,,)", no authentication.
-                          Has no effect is filedriver is not `ros3'.
+                          Has no effect if filedriver is not "ros3".
     --hdfs-attrs=<attrs>  Supply configuration information for HDFS file access.
                           For use with "--filedriver=hdfs"
                           <attrs> :: (<namenode name>,<namenode port>,
@@ -73,7 +73,7 @@ usage: h5dump [OPTIONS] files
     -D U, --xml-dtd=U     Use the DTD or schema at U
     -X S, --xml-ns=S      (XML Schema) Use qualified names n the XML
                           ":": no namespace, default: "hdf5:"
-    E.g., to dump a file called `-f', use h5dump -- -f
+    E.g., to dump a file called "-f", use h5dump -- -f
 
 --------------- Subsetting Options ---------------
  Subsetting is available by using the following options with a dataset