path: root/tools
author     Dana Robinson <derobins@hdfgroup.org>   2020-08-15 01:02:55 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>   2020-08-15 01:02:55 (GMT)
commit     686dbefff5afe0a7572dccbb74ab826a24e6fcbf (patch)
tree       142910a42c476648e984c84967a762a750cff57e /tools
parent     62c296f1eb69255c5140a675270b11b5b265a220 (diff)
Normalization of perform directory with develop
Brings over chunk_cache program
Diffstat (limited to 'tools')
-rw-r--r--  tools/test/perform/CMakeLists.txt    |  15
-rw-r--r--  tools/test/perform/Makefile.am       |   4
-rw-r--r--  tools/test/perform/chunk_cache.c     | 394
-rw-r--r--  tools/test/perform/overhead.c        |   2
-rw-r--r--  tools/test/perform/perf_meta.c       |   2
-rw-r--r--  tools/test/perform/sio_engine.c      |   5
-rw-r--r--  tools/test/perform/sio_perf.c        |   8
-rw-r--r--  tools/test/perform/sio_standalone.h  |   2
-rw-r--r--  tools/test/perform/zip_perf.c        |   4
9 files changed, 422 insertions(+), 14 deletions(-)
diff --git a/tools/test/perform/CMakeLists.txt b/tools/test/perform/CMakeLists.txt
index c05698e..3c45e85 100644
--- a/tools/test/perform/CMakeLists.txt
+++ b/tools/test/perform/CMakeLists.txt
@@ -73,6 +73,21 @@ else ()
endif ()
set_target_properties (iopipe PROPERTIES FOLDER perform)
+#-- Adding test for chunk_cache
+set (chunk_cache_SOURCES
+ ${HDF5_TOOLS_TEST_PERFORM_SOURCE_DIR}/chunk_cache.c
+)
+add_executable (chunk_cache ${chunk_cache_SOURCES})
+target_include_directories (chunk_cache PRIVATE "${HDF5_TEST_SRC_DIR};${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (chunk_cache STATIC)
+ target_link_libraries (chunk_cache PRIVATE ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET})
+else ()
+ TARGET_C_PROPERTIES (chunk_cache SHARED)
+ target_link_libraries (chunk_cache PRIVATE ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+endif ()
+set_target_properties (chunk_cache PROPERTIES FOLDER perform)
+
#-- Adding test for overhead
set (overhead_SOURCES
${HDF5_TOOLS_TEST_PERFORM_SOURCE_DIR}/overhead.c
diff --git a/tools/test/perform/Makefile.am b/tools/test/perform/Makefile.am
index 5a89a66..39800d7 100644
--- a/tools/test/perform/Makefile.am
+++ b/tools/test/perform/Makefile.am
@@ -50,12 +50,12 @@ if BUILD_PARALLEL_CONDITIONAL
TEST_PROG_PARA=h5perf perf
endif
# Serial test programs.
-TEST_PROG = iopipe chunk overhead zip_perf perf_meta h5perf_serial $(BUILD_ALL_PROGS)
+TEST_PROG = iopipe chunk chunk_cache overhead zip_perf perf_meta h5perf_serial $(BUILD_ALL_PROGS)
# check_PROGRAMS will be built but not installed. Do not add any executable
# that is in bin_PROGRAMS already. Otherwise, it will be removed twice in
# "make clean" and some systems, e.g., AIX, do not like it.
-check_PROGRAMS= iopipe chunk overhead zip_perf perf_meta $(BUILD_ALL_PROGS) perf
+check_PROGRAMS= iopipe chunk chunk_cache overhead zip_perf perf_meta $(BUILD_ALL_PROGS) perf
h5perf_SOURCES=pio_perf.c pio_engine.c
h5perf_serial_SOURCES=sio_perf.c sio_engine.c
diff --git a/tools/test/perform/chunk_cache.c b/tools/test/perform/chunk_cache.c
new file mode 100644
index 0000000..d7c56af
--- /dev/null
+++ b/tools/test/perform/chunk_cache.c
@@ -0,0 +1,394 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: Check the performance of the chunk cache in two cases (HDFFV-10601):
+ * 1. partial chunks exist along any dimension;
+ * 2. the number of slots in the chunk cache is smaller than the number of
+ * chunks in the fastest-growing dimension.
+ */
+#include "hdf5.h"
+#include "H5private.h"
+#include "h5test.h"
+
+#define FILENAME "chunk_cache_perf.h5"
+
+#define RANK 2
+
+#define DSET1_NAME "partial_chunks"
+#define DSET1_DIM1 9 * 1000
+#define DSET1_DIM2 9
+#define CHUNK1_DIM1 2 * 1000
+#define CHUNK1_DIM2 2
+
+#define DSET2_NAME "hash_value"
+#define DSET2_DIM1 300
+#define DSET2_DIM2 600
+#define CHUNK2_DIM1 100
+#define CHUNK2_DIM2 100
+
+#define RDCC_NSLOTS 5
+#define RDCC_NBYTES 1024 * 1024 * 10
+#define RDCC_W0 0.75F
+
+#define FILTER_COUNTER 306
+static size_t nbytes_global;
+
+typedef struct test_time_t {
+ long tv_sec;
+ long tv_usec;
+} test_time_t;
+
+/* Local function prototypes for the dummy filter */
+static size_t
+counter (unsigned flags, size_t cd_nelmts,
+ const unsigned *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_COUNTER[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ FILTER_COUNTER, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "counter", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ counter, /* The actual filter function */
+}};
+
+/*-------------------------------------------------------------------------
+ * Count the number of bytes but don't do anything else. Keeps
+ * track of how much chunk data is read from the file into memory.
+ */
+static size_t
+counter (unsigned H5_ATTR_UNUSED flags, size_t H5_ATTR_UNUSED cd_nelmts,
+ const unsigned H5_ATTR_UNUSED *cd_values, size_t nbytes,
+ size_t H5_ATTR_UNUSED *buf_size, void H5_ATTR_UNUSED **buf)
+{
+ nbytes_global += nbytes;
+ return nbytes;
+}
+
+/*---------------------------------------------------------------------------*/
+static void
+cleanup (void)
+{
+ if (!getenv ("HDF5_NOCLEANUP")) {
+ remove (FILENAME);
+ }
+}
+
+/*-------------------------------------------------------------------------------
+ * Create a chunked dataset with partial chunks along both dimensions:
+ * dataset dimensions: 9000 x 9
+ * chunk dimensions: 2000 x 2
+ */
+static int create_dset1(hid_t file)
+{
+ hid_t dataspace = H5I_INVALID_HID, dataset = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hsize_t dims[RANK] = {DSET1_DIM1, DSET1_DIM2};
+ hsize_t chunk_dims[RANK] = {CHUNK1_DIM1, CHUNK1_DIM2};
+ int **data; /* data for writing */
+
+ /* Create the data space. */
+ if((dataspace = H5Screate_simple (RANK, dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking */
+ if((dcpl = H5Pcreate (H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if(H5Pset_chunk (dcpl, RANK, chunk_dims) < 0)
+ goto error;
+
+ /* Register and set the dummy filter, which only counts the number of bytes read from the file into memory */
+ if(H5Zregister(H5Z_COUNTER) < 0)
+ goto error;
+
+ if(H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using chunk creation properties. */
+ if((dataset = H5Dcreate2 (file, DSET1_NAME, H5T_NATIVE_INT, dataspace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create & fill array */
+ H5TEST_ALLOCATE_2D_ARRAY(data, int, DSET1_DIM1, DSET1_DIM2);
+ H5TEST_FILL_2D_ARRAY(data, int, DSET1_DIM1, DSET1_DIM2);
+
+
+ /* Write data to dataset */
+ if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, data) < 0)
+ goto error;
+
+ /* Close resources */
+ H5Dclose (dataset);
+ H5Pclose (dcpl);
+ H5Sclose (dataspace);
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose (dataset);
+ H5Pclose (dcpl);
+ H5Sclose (dataspace);
+ } H5E_END_TRY;
+
+ return 1;
+}
+
+/*---------------------------------------------------------------------------
+ * Create a chunked dataset for testing hash values:
+ * dataset dimensions: 300 x 600
+ * chunk dimensions: 100 x 100
+ */
+static int create_dset2(hid_t file)
+{
+ hid_t dataspace = H5I_INVALID_HID, dataset = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hsize_t dims[RANK] = {DSET2_DIM1, DSET2_DIM2};
+ hsize_t chunk_dims[RANK] = {CHUNK2_DIM1, CHUNK2_DIM2};
+ int **data; /* data for writing */
+
+ /* Create the data space. */
+ if((dataspace = H5Screate_simple(RANK, dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if(H5Pset_chunk(dcpl, RANK, chunk_dims) < 0)
+ goto error;
+
+ /* Register and set the dummy filter, which only counts the number of bytes read from the file into memory */
+ if(H5Zregister(H5Z_COUNTER) < 0)
+ goto error;
+ if(H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using chunk creation properties. */
+ if((dataset = H5Dcreate2(file, DSET2_NAME, H5T_NATIVE_INT, dataspace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create & fill array */
+ H5TEST_ALLOCATE_2D_ARRAY(data, int, DSET2_DIM1, DSET2_DIM2);
+ H5TEST_FILL_2D_ARRAY(data, int, DSET2_DIM1, DSET2_DIM2);
+
+ /* Write data to dataset */
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ goto error;
+
+ /* Close resources */
+ H5Dclose(dataset);
+ H5Pclose(dcpl);
+ H5Sclose(dataspace);
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Pclose(dcpl);
+ H5Sclose(dataspace);
+ } H5E_END_TRY;
+
+ return 1;
+}
+
+/*---------------------------------------------------------------------------
+ * Check the performance of the chunk cache when partial chunks exist
+ * along the dataset dimensions.
+ */
+static int check_partial_chunks_perf(hid_t file)
+{
+ hid_t dataset = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ hid_t memspace = H5I_INVALID_HID;
+ hid_t dapl = H5I_INVALID_HID;
+
+ int rdata[DSET1_DIM2]; /* data for reading */
+ int i;
+
+ hsize_t row_rank = 1;
+ hsize_t row_dim[1] = {DSET1_DIM2};
+ hsize_t start[RANK] = {0, 0};
+ hsize_t count[RANK] = {1, DSET1_DIM2};
+ double start_t, end_t;
+
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0) < 0)
+ goto error;
+
+ if((dataset = H5Dopen2 (file, DSET1_NAME, dapl)) < 0) goto error;
+
+ H5_CHECK_OVERFLOW(row_rank, hsize_t, int);
+ if((memspace = H5Screate_simple((int)row_rank, row_dim, NULL)) < 0) goto error;
+ if((filespace = H5Dget_space(dataset)) < 0) goto error;
+
+ nbytes_global = 0;
+
+ start_t = H5_get_time();
+
+ /* Read the data row by row */
+ for(i = 0; i < DSET1_DIM1; i++) {
+ start[0] = (hsize_t)i;
+ if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ start, NULL, count, NULL) < 0)
+ goto error;
+
+ if(H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace,
+ H5P_DEFAULT, rdata) < 0)
+ goto error;
+ }
+
+ end_t = H5_get_time();
+
+ if((end_t - start_t) > 0.0)
+ printf("1. Partial chunks: total read time is %lf; number of bytes read from the file is %lu\n", (end_t - start_t), (unsigned long)nbytes_global);
+ else
+ printf("1. Partial chunks: no total read time because timer is not available; number of bytes read from the file is %lu\n", (unsigned long)nbytes_global);
+
+ H5Dclose (dataset);
+ H5Sclose (filespace);
+ H5Sclose (memspace);
+ H5Pclose (dapl);
+
+ return 0;
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose (dataset);
+ H5Sclose (filespace);
+ H5Sclose (memspace);
+ H5Pclose (dapl);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*---------------------------------------------------------------------------
+ * Check the performance of the chunk cache when the number of cache slots
+ * is smaller than the number of chunks along the fastest-growing
+ * dimension of the dataset.
+ */
+static int check_hash_value_perf(hid_t file)
+{
+ hid_t dataset = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ hid_t memspace = H5I_INVALID_HID;
+ hid_t dapl = H5I_INVALID_HID;
+
+ int rdata[DSET2_DIM1]; /* data for reading */
+ int i;
+
+ hsize_t column_rank = 1;
+ hsize_t column_dim[1] = {DSET2_DIM1};
+ hsize_t start[RANK] = {0, 0};
+ hsize_t count[RANK] = {DSET2_DIM1, 1};
+ double start_t, end_t;
+
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0) < 0)
+ goto error;
+
+ if((dataset = H5Dopen2 (file, DSET2_NAME, dapl)) < 0)
+ goto error;
+
+ H5_CHECK_OVERFLOW(column_rank, hsize_t, int);
+ if((memspace = H5Screate_simple((int)column_rank, column_dim, NULL)) < 0)
+ goto error;
+ if((filespace = H5Dget_space(dataset)) < 0)
+ goto error;
+
+ nbytes_global = 0;
+
+ start_t = H5_get_time();
+
+ /* Read the data column by column */
+ for(i = 0; i < DSET2_DIM2; i++) {
+ start[1] = (hsize_t)i;
+ if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ start, NULL, count, NULL) < 0)
+ goto error;
+
+ if(H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace,
+ H5P_DEFAULT, rdata) < 0)
+ goto error;
+ }
+
+ end_t = H5_get_time();
+
+ if((end_t - start_t) > 0.0)
+ printf("2. Hash value: total read time is %lf; number of bytes read from the file is %lu\n", (end_t - start_t), (unsigned long)nbytes_global);
+ else
+ printf("2. Hash value: no total read time because timer is not available; number of bytes read from the file is %lu\n", (unsigned long)nbytes_global);
+
+ H5Dclose (dataset);
+ H5Sclose (filespace);
+ H5Sclose (memspace);
+ H5Pclose (dapl);
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose (dataset);
+ H5Sclose (filespace);
+ H5Sclose (memspace);
+ H5Pclose (dapl);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*-------------------------------------------------------------------------------------
+ * Purpose: Check the performance of the chunk cache in two cases (HDFFV-10601):
+ * 1. partial chunks exist along any dimension;
+ * 2. the number of slots in the chunk cache is smaller than the number of
+ * chunks in the fastest-growing dimension.
+ *-------------------------------------------------------------------------------------*/
+int
+main (void)
+{
+ hid_t file; /* file handle */
+ int nerrors = 0;
+
+ /* Create a new file. If the file exists, its contents will be overwritten. */
+ if((file = H5Fcreate (FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ nerrors += create_dset1(file);
+ nerrors += create_dset2(file);
+
+ if(H5Fclose (file) < 0)
+ goto error;
+
+ /* Re-open the file for testing performance. */
+ if((file = H5Fopen (FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ goto error;
+
+ nerrors += check_partial_chunks_perf(file);
+ nerrors += check_hash_value_perf(file);
+
+ if(H5Fclose (file) < 0)
+ goto error;
+
+ if (nerrors > 0) goto error;
+ cleanup();
+ return 0;
+
+error:
+ fprintf(stderr, "*** ERRORS DETECTED ***\n");
+ return 1;
+}
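
For readers skimming the diff: the core of the new benchmark above is the per-dataset chunk cache tuning done through a dataset access property list. The following is a sketch distilled from chunk_cache.c, not additional patch content; the helper name is made up, while the values and API calls mirror the code above, and the chunk-grid arithmetic that drives the two test cases is spelled out in the comment.

#include "hdf5.h"

/*
 * Sketch only (hypothetical helper, not part of the patch):
 *  - DSET1 is 9000 x 9 with 2000 x 2 chunks -> a 5 x 5 chunk grid where the
 *    last chunk along each dimension is partial (9000/2000 = 4.5, 9/2 = 4.5).
 *  - DSET2 is 300 x 600 with 100 x 100 chunks -> a 3 x 6 chunk grid, i.e. 6
 *    chunks along the fastest-growing dimension, more than the 5 cache slots
 *    configured below.
 */
static hid_t
open_with_small_cache(hid_t file, const char *dset_name)
{
    hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
    hid_t dset = H5I_INVALID_HID;

    if (dapl < 0)
        return H5I_INVALID_HID;
    /* 5 hash slots, 10 MiB cache; w0 = 0.75 biases eviction toward fully read chunks */
    if (H5Pset_chunk_cache(dapl, 5, 10 * 1024 * 1024, 0.75) >= 0)
        dset = H5Dopen2(file, dset_name, dapl);
    H5Pclose(dapl);    /* the open dataset keeps its own copy of the settings */
    return dset;
}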
diff --git a/tools/test/perform/overhead.c b/tools/test/perform/overhead.c
index 58558a5..bb3aff5 100644
--- a/tools/test/perform/overhead.c
+++ b/tools/test/perform/overhead.c
@@ -12,7 +12,7 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Robb Matzke <matzke@llnl.gov>
+ * Programmer: Robb Matzke
* Monday, September 28, 1998
*
* Purpose: Creates a chunked dataset and measures the storage overhead.
diff --git a/tools/test/perform/perf_meta.c b/tools/test/perform/perf_meta.c
index b56f074..77248cc 100644
--- a/tools/test/perform/perf_meta.c
+++ b/tools/test/perform/perf_meta.c
@@ -12,7 +12,7 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Raymond Lu <slu@ncsa.uiuc.edu>
+ * Programmer: Raymond Lu
* Friday, Oct 3, 2004
*
* Purpose: Tests performance of metadata
diff --git a/tools/test/perform/sio_engine.c b/tools/test/perform/sio_engine.c
index 2562ab8..aa3a316 100644
--- a/tools/test/perform/sio_engine.c
+++ b/tools/test/perform/sio_engine.c
@@ -1268,8 +1268,7 @@ done:
* 'temp' in the code below, but early (4.4.7, at least) gcc only
* allows diagnostic pragmas to be toggled outside of functions.
*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+H5_GCC_DIAG_OFF(format-nonliteral)
static void
do_cleanupfile(iotype iot, char *filename)
{
@@ -1331,5 +1330,5 @@ do_cleanupfile(iotype iot, char *filename)
}
}
}
-#pragma GCC diagnostic pop
+H5_GCC_DIAG_ON(format-nonliteral)
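
The hunks above swap raw #pragma GCC diagnostic lines for the H5_GCC_DIAG_OFF/ON macros, which centralize the suppression and compile to nothing on compilers without these pragmas. As background, wrappers of this kind are usually built on C99 _Pragma; the sketch below shows the general pattern only and is an assumption, not HDF5's literal macro definition (HDF5's macros take the bare warning name, e.g. format-nonliteral, rather than a "-W..." string).

/* Generic _Pragma-based diagnostic wrapper; names are hypothetical. */
#if defined(__GNUC__)
#define MY_DO_PRAGMA(x)   _Pragma(#x)
#define MY_DIAG_OFF(w)    MY_DO_PRAGMA(GCC diagnostic push) \
                          MY_DO_PRAGMA(GCC diagnostic ignored w)
#define MY_DIAG_ON()      MY_DO_PRAGMA(GCC diagnostic pop)
#else
#define MY_DIAG_OFF(w)    /* no-op on compilers without these pragmas */
#define MY_DIAG_ON()
#endif

/* Usage, mirroring the change above:
 *     MY_DIAG_OFF("-Wformat-nonliteral")
 *     ... code that formats with a non-literal format string ...
 *     MY_DIAG_ON()
 */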
diff --git a/tools/test/perform/sio_perf.c b/tools/test/perform/sio_perf.c
index a56558e..d2eb3fc 100644
--- a/tools/test/perform/sio_perf.c
+++ b/tools/test/perform/sio_perf.c
@@ -303,7 +303,7 @@ typedef struct _minmax {
/* local functions */
static hsize_t parse_size_directive(const char *size);
-static struct options *parse_command_line(int argc, char *argv[]);
+static struct options *parse_command_line(int argc, const char *argv[]);
static void run_test_loop(struct options *options);
static int run_test(iotype iot, parameters parms, struct options *opts);
static void output_all_info(minmax *mm, int count, int indent_level);
@@ -324,7 +324,7 @@ static void report_parameters(struct options *opts);
* Modifications:
*/
int
-main(int argc, char **argv)
+main(int argc, const char *argv[])
{
int exit_value = EXIT_SUCCESS;
struct options *opts = NULL;
@@ -944,7 +944,7 @@ report_parameters(struct options *opts)
* Added multidimensional testing (Christian Chilan, April, 2008)
*/
static struct options *
-parse_command_line(int argc, char *argv[])
+parse_command_line(int argc, const char *argv[])
{
int opt;
struct options *cl_opts;
@@ -984,7 +984,7 @@ parse_command_line(int argc, char *argv[])
cl_opts->h5_extendable = FALSE; /* Use extendable dataset */
cl_opts->verify = FALSE; /* Do not verify data correctness by default */
- while ((opt = get_option(argc, (const char **)argv, s_opts, l_opts)) != EOF) {
+ while ((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
switch ((char)opt) {
case 'a':
cl_opts->h5_alignment = parse_size_directive(opt_arg);
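
The prototype changes above (and the matching ones in zip_perf.c below) are const-correctness fixes: get_option() takes const char **, and C has no implicit char ** to const char ** conversion, so the old code needed a cast at every call site. A minimal standalone illustration of that rule follows; the names are hypothetical and this is not code from the patch.

#include <stdio.h>

/* A read-only consumer naturally wants const char *argv[] ... */
static void print_first(int argc, const char *argv[])
{
    if (argc > 0)
        printf("%s\n", argv[0]);
}

/* ... so declaring main's argv as const char *[] lets it be passed straight
 * through; with plain char **argv the call would need (const char **)argv. */
int main(int argc, const char *argv[])
{
    print_first(argc, argv);    /* no cast needed: the types match exactly */
    return 0;
}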
diff --git a/tools/test/perform/sio_standalone.h b/tools/test/perform/sio_standalone.h
index 45f6d25..99e13bc 100644
--- a/tools/test/perform/sio_standalone.h
+++ b/tools/test/perform/sio_standalone.h
@@ -228,7 +228,7 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
#define HDgetpwnam(S) getpwnam(S)
#define HDgetpwuid(U) getpwuid(U)
#define HDgetrusage(X,S) getrusage(X,S)
-#define HDgets(S) gets(S)
+/* Don't define a macro for gets() - it was removed in C11 */
#ifdef H5_HAVE_WIN32_API
H5_DLL int Wgettimeofday(struct timeval *tv, struct timezone *tz);
#define HDgettimeofday(V,Z) Wgettimeofday(V,Z)
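
The removed HDgets macro reflects the fact that gets() was dropped in C11 because it cannot bound its input. For reference, the usual replacement is fgets(); the sketch below is a generic illustration (hypothetical helper, not code from this patch), and it strips the trailing newline that fgets() keeps but gets() used to discard.

#include <stdio.h>
#include <string.h>

/* Generic gets() replacement sketch: fgets() bounds the read; strip the
 * newline so callers see gets()-like data. */
static char *read_line(char *buf, size_t size)
{
    if (fgets(buf, (int)size, stdin) == NULL)
        return NULL;                    /* EOF or read error */
    buf[strcspn(buf, "\n")] = '\0';     /* drop trailing newline, if present */
    return buf;
}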
diff --git a/tools/test/perform/zip_perf.c b/tools/test/perform/zip_perf.c
index e301bb3..8f1f584 100644
--- a/tools/test/perform/zip_perf.c
+++ b/tools/test/perform/zip_perf.c
@@ -552,7 +552,7 @@ do_write_test(unsigned long file_size, unsigned long min_buf_size,
* Modifications:
*/
int
-main(int argc, char **argv)
+main(int argc, const char *argv[])
{
unsigned long min_buf_size = 128 * ONE_KB, max_buf_size = ONE_MB;
unsigned long file_size = 64 * ONE_MB;
@@ -563,7 +563,7 @@ main(int argc, char **argv)
/* Initialize h5tools lib */
h5tools_init();
- while ((opt = get_option(argc, (const char **)argv, s_opts, l_opts)) > 0) {
+ while ((opt = get_option(argc, argv, s_opts, l_opts)) > 0) {
switch ((char)opt) {
case '0': case '1': case '2':
case '3': case '4': case '5':