From 6fa26d7e4939ac1b61b74bed7bca70055715be24 Mon Sep 17 00:00:00 2001 From: Songyu Lu Date: Fri, 2 Nov 2018 14:53:35 -0500 Subject: HDFFV-10601 Issues with chunk cache hash value calculation: 1. H5D__chunk_hash_val: When the number of chunks in the fastest changing dimension is larger than the number of slots in the hash table, H5D__chunk_hash_val abandons the normal hash value calculation algorithm and simply uses the scaled dimension. This will cause chunks in a selection that cuts across chunks in dimensions other than the fastest changing to all have the same hash value, and they will therefore always evict each other from the cache, with an obvious, major performance impact. Eliminated the check for the number of slots in this function and now always use the full algorithm. 2. H5D__chunk_init: When the scaled dimensions (number of chunks in each dimension) are calculated in H5D__chunk_init, a simple divide ("/") operator is used with the dataset size in elements and the chunk size in elements. While this is fine when the dataset size is an exact multiple of the chunk size, in other cases, since "/" rounds down, it results in a scaled dimension one less than it should be (it ignores the partial edge chunk). This has trickle-down effects on hash value calculation that can cause excess hash value collisions and therefore performance issues. Changed the calculation to (((dataset_size - 1) / chunk_size) + 1). Tested the build with Autotools and CMake. --- src/H5Dchunk.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 22dc05a..7eeb866 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -949,7 +949,12 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id) /* Initial scaled dimension sizes */ if(dset->shared->layout.u.chunk.dim[u] == 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) - rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u]; + + /* Make a special case when the dimension is 0 because (0 - 1) is a big number for unsigned integer */ + if(dset->shared->curr_dims[u] == 0) + rdcc->scaled_dims[u] = 0; + else + rdcc->scaled_dims[u] = ((dset->shared->curr_dims[u] - 1) / dset->shared->layout.u.chunk.dim[u]) + 1; if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) ) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2") @@ -2799,6 +2804,7 @@ H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled) hsize_t val; /* Intermediate value */ unsigned ndims = shared->ndims; /* Rank of dataset */ unsigned ret = 0; /* Value to return */ + unsigned u; /* Local index variable */ FUNC_ENTER_STATIC_NOERR @@ -2809,17 +2815,11 @@ H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled) /* If the fastest changing dimension doesn't have enough entropy, use * other dimensions too */ - if(ndims > 1 && shared->cache.chunk.scaled_dims[ndims - 1] <= shared->cache.chunk.nslots) { - unsigned u; /* Local index variable */ - - val = scaled[0]; - for(u = 1; u < ndims; u++) { - val <<= shared->cache.chunk.scaled_encode_bits[u]; - val ^= scaled[u]; - } /* end for */ - } /* end if */ - else - val = scaled[ndims - 1]; + val = scaled[0]; + for(u = 1; u < ndims; u++) { + val <<= shared->cache.chunk.scaled_encode_bits[u]; + val ^= scaled[u]; + } /* end for */ /* Modulo value against the number of array slots */ ret = (unsigned)(val % shared->cache.chunk.nslots); -- cgit v0.12
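To make the first fix concrete, here is a minimal standalone sketch (plain C, not HDF5 source; the slot count, encode width, and harness are invented for illustration, and only the shift/XOR loop mirrors the patch) contrasting the old fallback hash with the full algorithm:

#include <stdio.h>

#define NDIMS  2
#define NSLOTS 5   /* hypothetical number of chunk-cache hash slots */

/* Old fallback: hash on the fastest-changing dimension alone. */
static unsigned hash_old(const unsigned long long *scaled)
{
    return (unsigned)(scaled[NDIMS - 1] % NSLOTS);
}

/* Fixed behavior: always fold every dimension into the value. */
static unsigned hash_new(const unsigned long long *scaled, const unsigned *encode_bits)
{
    unsigned long long val = scaled[0];
    unsigned u;

    for(u = 1; u < NDIMS; u++) {
        val <<= encode_bits[u];
        val ^= scaled[u];
    }
    return (unsigned)(val % NSLOTS);
}

int main(void)
{
    unsigned encode_bits[NDIMS] = {0, 4};   /* made-up encode width for dim 1 */
    unsigned long long scaled[NDIMS] = {0, 0};
    unsigned long long i;

    /* A selection that cuts across chunks in the slow dimension: the
     * fastest-changing index scaled[1] stays fixed, so hash_old() sends
     * every chunk to the same slot and each read evicts the last chunk. */
    for(i = 0; i < 4; i++) {
        scaled[0] = i;
        printf("chunk (%llu,0): old slot %u, new slot %u\n",
               i, hash_old(scaled), hash_new(scaled, encode_bits));
    }
    return 0;
}

The old fallback prints slot 0 for every chunk, so the chunks thrash a single cache slot; the full shift/XOR hash spreads the four chunks over four different slots.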
From 3a13bb72e8d33808eb3bfd68c2a6b4ae5153f3a0 Mon Sep 17 00:00:00 2001 From: Songyu Lu Date: Thu, 8 Nov 2018 15:41:49 -0600 Subject: HDFFV-10601: I changed to a better way to calculate the number of chunks in a dataset. --- src/H5Dchunk.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7eeb866..cb6b925 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -950,11 +950,9 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id) if(dset->shared->layout.u.chunk.dim[u] == 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) - /* Make a special case when the dimension is 0 because (0 - 1) is a big number for unsigned integer */ - if(dset->shared->curr_dims[u] == 0) - rdcc->scaled_dims[u] = 0; - else - rdcc->scaled_dims[u] = ((dset->shared->curr_dims[u] - 1) / dset->shared->layout.u.chunk.dim[u]) + 1; + /* Round up to the next integer # of chunks, to accommodate partial chunks */ + rdcc->scaled_dims[u] = (dset->shared->curr_dims[u] + dset->shared->layout.u.chunk.dim[u] - 1) / + dset->shared->layout.u.chunk.dim[u]; if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) ) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2") -- cgit v0.12 From 2fe10c647c003eaba2f84d52daaae23c3341916a Mon Sep 17 00:00:00 2001 From: Songyu Lu Date: Tue, 13 Nov 2018 11:37:29 -0600 Subject: HDFFV-10601: Adding performance test to verify the improvement. --- tools/test/perform/CMakeLists.txt | 10 ++ tools/test/perform/Makefile.am | 4 +- tools/test/perform/chunk_cache.c | 361 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 373 insertions(+), 2 deletions(-) create mode 100644 tools/test/perform/chunk_cache.c diff --git a/tools/test/perform/CMakeLists.txt b/tools/test/perform/CMakeLists.txt index fa41608..14abdec 100644 --- a/tools/test/perform/CMakeLists.txt +++ b/tools/test/perform/CMakeLists.txt @@ -51,6 +51,16 @@ TARGET_C_PROPERTIES (iopipe STATIC) target_link_libraries (iopipe PRIVATE ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET}) set_target_properties (iopipe PROPERTIES FOLDER perform) +#-- Adding test for chunk_cache +set (chunk_cache_SOURCES + ${HDF5_TOOLS_TEST_PERFORM_SOURCE_DIR}/chunk_cache.c +) +add_executable (chunk_cache ${chunk_cache_SOURCES}) +target_include_directories(chunk_cache PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$:${MPI_C_INCLUDE_DIRS}>") +TARGET_C_PROPERTIES (chunk_cache STATIC) +target_link_libraries (chunk_cache PRIVATE ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET}) +set_target_properties (chunk_cache PROPERTIES FOLDER perform) + #-- Adding test for overhead set (overhead_SOURCES ${HDF5_TOOLS_TEST_PERFORM_SOURCE_DIR}/overhead.c diff --git a/tools/test/perform/Makefile.am b/tools/test/perform/Makefile.am index 5a89a66..39800d7 100644 --- a/tools/test/perform/Makefile.am +++ b/tools/test/perform/Makefile.am @@ -50,12 +50,12 @@ if BUILD_PARALLEL_CONDITIONAL TEST_PROG_PARA=h5perf perf endif # Serial test programs. -TEST_PROG = iopipe chunk overhead zip_perf perf_meta h5perf_serial $(BUILD_ALL_PROGS) +TEST_PROG = iopipe chunk chunk_cache overhead zip_perf perf_meta h5perf_serial $(BUILD_ALL_PROGS) # check_PROGRAMS will be built but not installed. Do not any executable # that is in bin_PROGRAMS already. Otherwise, it will be removed twice in # "make clean" and some systems, e.g., AIX, do not like it.
-check_PROGRAMS= iopipe chunk overhead zip_perf perf_meta $(BUILD_ALL_PROGS) perf +check_PROGRAMS= iopipe chunk chunk_cache overhead zip_perf perf_meta $(BUILD_ALL_PROGS) perf h5perf_SOURCES=pio_perf.c pio_engine.c h5perf_serial_SOURCES=sio_perf.c sio_engine.c diff --git a/tools/test/perform/chunk_cache.c b/tools/test/perform/chunk_cache.c new file mode 100644 index 0000000..9a6315a --- /dev/null +++ b/tools/test/perform/chunk_cache.c @@ -0,0 +1,361 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Purpose: check the performance of chunk cache in these two cases (HDFFV-10601): + * 1. partial chunks exist along any dimension. + * 2. number of slots in chunk cache is smaller than the number of chunks + * in the fastest-growing dimension. + */ +#include <time.h> +#include "hdf5.h" + +#define FILENAME "chunk_cache_perf.h5" + +#define RANK 2 + +#define DSET1_NAME "partial_chunks" +#define DSET1_DIM1 9 * 1000 +#define DSET1_DIM2 9 +#define CHUNK1_DIM1 2 * 1000 +#define CHUNK1_DIM2 2 + +#define DSET2_NAME "hash_value" +#define DSET2_DIM1 300 +#define DSET2_DIM2 600 +#define CHUNK2_DIM1 100 +#define CHUNK2_DIM2 100 + +#define RDCC_NSLOTS 5 +#define RDCC_NBYTES 1024 * 1024 * 10 +#define RDCC_W0 0.75F + +#define FILTER_COUNTER 306 +static size_t nbytes_global; + +typedef struct test_time_t { + long tv_sec; + long tv_usec; +} test_time_t; + +/* Local function prototypes for the dummy filter */ +static size_t +counter (unsigned flags, size_t cd_nelmts, + const unsigned *cd_values, size_t nbytes, + size_t *buf_size, void **buf); + +/* This message derives from H5Z */ +const H5Z_class2_t H5Z_COUNTER[1] = {{ + H5Z_CLASS_T_VERS, /* H5Z_class_t version */ + FILTER_COUNTER, /* Filter id number */ + 1, 1, /* Encoding and decoding enabled */ + "counter", /* Filter name for debugging */ + NULL, /* The "can apply" callback */ + NULL, /* The "set local" callback */ + counter, /* The actual filter function */ +}}; + +/*------------------------------------------------------------------------- + * Count number of bytes but don't do anything else. Keep + * track of the data of chunks being read from file into memory.
+ */ +static size_t +counter (unsigned flags, size_t cd_nelmts, + const unsigned *cd_values, size_t nbytes, + size_t *buf_size, void **buf) +{ + nbytes_global += nbytes; + return nbytes; +} + +/*---------------------------------------------------------------------------*/ +static int +test_time_get_current(test_time_t *tv) +{ + struct timespec tp; + + if (!tv) + return -1; + if (clock_gettime(CLOCK_MONOTONIC, &tp)) + return -1; + + tv->tv_sec = tp.tv_sec; + tv->tv_usec = tp.tv_nsec / 1000; + + return 0; +} + +/*---------------------------------------------------------------------------*/ +static double +test_time_to_double(test_time_t tv) +{ + return (double) tv.tv_sec + (double) (tv.tv_usec) * 0.000001; +} + +/*---------------------------------------------------------------------------*/ +static test_time_t +test_time_add(test_time_t in1, test_time_t in2) +{ + test_time_t out; + + out.tv_sec = in1.tv_sec + in2.tv_sec; + out.tv_usec = in1.tv_usec + in2.tv_usec; + if(out.tv_usec > 1000000) { + out.tv_usec -= 1000000; + out.tv_sec += 1; + } + + return out; +} + +/*---------------------------------------------------------------------------*/ +static test_time_t +test_time_subtract(test_time_t in1, test_time_t in2) +{ + test_time_t out; + + out.tv_sec = in1.tv_sec - in2.tv_sec; + out.tv_usec = in1.tv_usec - in2.tv_usec; + if(out.tv_usec < 0) { + out.tv_usec += 1000000; + out.tv_sec -= 1; + } + + return out; +} + +/*------------------------------------------------------------------------------- + * Create a chunked dataset with partial chunks along either dimensions: + * dataset dimension: 9000 x 9 + * chunk dimension: 2000 x 2 + */ +static void create_dset1(hid_t file) +{ + hid_t dataspace, dataset; + hid_t dcpl; + hsize_t dims[RANK] = {DSET1_DIM1, DSET1_DIM2}; + herr_t status; + hsize_t chunk_dims[RANK] = {CHUNK1_DIM1, CHUNK1_DIM2}; + int data[DSET1_DIM1][DSET1_DIM2]; /* data for writing */ + int i, j; + + /* Create the data space. */ + dataspace = H5Screate_simple (RANK, dims, NULL); + + /* Modify dataset creation properties, i.e. enable chunking */ + dcpl = H5Pcreate (H5P_DATASET_CREATE); + status = H5Pset_chunk (dcpl, RANK, chunk_dims); + + /* Set the dummy filter simply for counting the number of bytes being read into the memory */ + H5Zregister(H5Z_COUNTER); + H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL); + + /* Create a new dataset within the file using chunk creation properties. */ + dataset = H5Dcreate2 (file, DSET1_NAME, H5T_NATIVE_INT, dataspace, + H5P_DEFAULT, dcpl, H5P_DEFAULT); + + for (i = 0; i < DSET1_DIM1; i++) + for (j = 0; j < DSET1_DIM2; j++) + data[i][j] = i+j; + + /* Write data to dataset */ + status = H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data); + + /* Close resources */ + status = H5Dclose (dataset); + status = H5Pclose (dcpl); + status = H5Sclose (dataspace); +} + +/*--------------------------------------------------------------------------- + * Create a chunked dataset for testing hash values: + * dataset dimensions: 300 x 600 + * chunk dimensions: 100 x 100 + */ +static void create_dset2(hid_t file) +{ + hid_t dataspace, dataset; + hid_t dcpl; + hsize_t dims[RANK] = {DSET2_DIM1, DSET2_DIM2}; + herr_t status; + hsize_t chunk_dims[RANK] = {CHUNK2_DIM1, CHUNK2_DIM2}; + int data[DSET2_DIM1][DSET2_DIM2]; /* data for writing */ + int i, j; + + /* Create the data space. */ + dataspace = H5Screate_simple (RANK, dims, NULL); + + /* Modify dataset creation properties, i.e. 
enable chunking */ + dcpl = H5Pcreate (H5P_DATASET_CREATE); + status = H5Pset_chunk (dcpl, RANK, chunk_dims); + + /* Set the dummy filter simply for counting the number of bytes being read into the memory */ + H5Zregister(H5Z_COUNTER); + H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL); + + /* Create a new dataset within the file using chunk creation properties. */ + dataset = H5Dcreate2 (file, DSET2_NAME, H5T_NATIVE_INT, dataspace, + H5P_DEFAULT, dcpl, H5P_DEFAULT); + + for (i = 0; i < DSET2_DIM1; i++) + for (j = 0; j < DSET2_DIM2; j++) + data[i][j] = i+j; + + /* Write data to dataset */ + status = H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data); + + /* Close resources */ + status = H5Dclose (dataset); + status = H5Pclose (dcpl); + status = H5Sclose (dataspace); +} +/*--------------------------------------------------------------------------- + * Check the performance of the chunk cache when partial chunks exist + * along the dataset dimensions. + */ +static void check_partial_chunks_perf(hid_t file) +{ + hid_t dataset; + hid_t filespace; + hid_t memspace; + hid_t dapl; + + herr_t status; + int rdata[DSET1_DIM2]; /* data for reading */ + int i; + + hsize_t row_rank = 1; + hsize_t row_dim[1] = {DSET1_DIM2}; + hsize_t start[RANK] = {0, 0}; + hsize_t count[RANK] = {1, DSET1_DIM2}; + test_time_t t = {0, 0}, t1 = {0, 0}, t2 = {0, 0}; + + dapl = H5Pcreate(H5P_DATASET_ACCESS); + status = H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0); + + dataset = H5Dopen2 (file, DSET1_NAME, dapl); + + memspace = H5Screate_simple(row_rank, row_dim, NULL); + filespace = H5Dget_space(dataset); + + nbytes_global = 0; + + test_time_get_current(&t1); + + /* Read the data row by row */ + for(i = 0; i < DSET1_DIM1; i++) { + start[0] = i; + status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, + start, NULL, count, NULL); + + status = H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, + H5P_DEFAULT, rdata); + } + + test_time_get_current(&t2); + t = test_time_add(t, test_time_subtract(t2, t1)); + + printf("1. Partial chunks: total read time is %lf; number of bytes being read from file is %lu\n", test_time_to_double(t), nbytes_global); + + status = H5Dclose (dataset); + status = H5Sclose (filespace); + status = H5Sclose (memspace); + status = H5Pclose (dapl); +} + +/*--------------------------------------------------------------------------- + * Check the performance of chunk cache when the number of cache slots + * is smaller than the number of chunks along the fastest-growing + * dimension of the dataset. 
+ */ +static void check_hash_value_perf(hid_t file) +{ + hid_t dataset; + hid_t filespace; + hid_t memspace; + hid_t dapl; + + herr_t status; + int rdata[DSET2_DIM1]; /* data for reading */ + int i; + + hsize_t column_rank = 1; + hsize_t column_dim[1] = {DSET2_DIM1}; + hsize_t start[RANK] = {0, 0}; + hsize_t count[RANK] = {DSET2_DIM1, 1}; + test_time_t t = {0, 0}, t1 = {0, 0}, t2 = {0, 0}; + + dapl = H5Pcreate(H5P_DATASET_ACCESS); + status = H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0); + + dataset = H5Dopen2 (file, DSET2_NAME, dapl); + + memspace = H5Screate_simple(column_rank, column_dim, NULL); + filespace = H5Dget_space(dataset); + + nbytes_global = 0; + + test_time_get_current(&t1); + + /* Read the data column by column */ + for(i = 0; i < DSET2_DIM2; i++) { + start[1] = i; + status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, + start, NULL, count, NULL); + + status = H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, + H5P_DEFAULT, rdata); + } + + test_time_get_current(&t2); + t = test_time_add(t, test_time_subtract(t2, t1)); + + printf("2. Hash value: total read time is %lf; number of bytes being read from file is %lu\n", test_time_to_double(t), nbytes_global); + + status = H5Dclose (dataset); + status = H5Sclose (filespace); + status = H5Sclose (memspace); + status = H5Pclose (dapl); +} + +/*------------------------------------------------------------------------------------- + * Purpose: check the performance of chunk cache in these two cases (HDFFV-10601): + * 1. partial chunks exist along any dimension. + * 2. number of slots in chunk cache is smaller than the number of chunks + * in the fastest-growing dimension. + *-------------------------------------------------------------------------------------*/ +int +main (void) +{ + hid_t file; /* handles */ + + /* Create a new file. If file exists its contents will be overwritten. */ + file = H5Fcreate (FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + + create_dset1(file); + create_dset2(file); + + H5Fclose (file); + + /* Re-open the file for testing performance. */ + file = H5Fopen (FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + + check_partial_chunks_perf(file); + check_hash_value_perf(file); + + H5Fclose (file); + + return 0; +} -- cgit v0.12 From cd13d24e5140578a880aebe4e2d8b899179d0870 Mon Sep 17 00:00:00 2001 From: Songyu Lu Date: Wed, 14 Nov 2018 17:30:23 -0600 Subject: HDFFV-10601: I added error checking to the HDF5 functions. --- tools/test/perform/chunk_cache.c | 226 ++++++++++++++++++++++++++++----------- 1 file changed, 161 insertions(+), 65 deletions(-) diff --git a/tools/test/perform/chunk_cache.c b/tools/test/perform/chunk_cache.c index 9a6315a..01571e2 100644 --- a/tools/test/perform/chunk_cache.c +++ b/tools/test/perform/chunk_cache.c @@ -17,6 +17,8 @@ * 2. number of slots in chunk cache is smaller than the number of chunks * in the fastest-growing dimension. 
*/ +#include <stdio.h> +#include <stdlib.h> #include <time.h> #include "hdf5.h" @@ -133,49 +135,87 @@ test_time_subtract(test_time_t in1, test_time_t in2) return out; } +/*------------------------------------------------------------------------- + * Function: cleanup + * + * Purpose: Removes test files + * + * Return: void + * + * Programmer: Robb Matzke + * Thursday, June 4, 1998 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static void +cleanup (void) +{ + if (!getenv ("HDF5_NOCLEANUP")) { + remove (FILENAME); + } +} /*------------------------------------------------------------------------------- * Create a chunked dataset with partial chunks along either dimensions: * dataset dimension: 9000 x 9 * chunk dimension: 2000 x 2 */ -static void create_dset1(hid_t file) +static int create_dset1(hid_t file) { hid_t dataspace, dataset; hid_t dcpl; hsize_t dims[RANK] = {DSET1_DIM1, DSET1_DIM2}; - herr_t status; hsize_t chunk_dims[RANK] = {CHUNK1_DIM1, CHUNK1_DIM2}; int data[DSET1_DIM1][DSET1_DIM2]; /* data for writing */ int i, j; /* Create the data space. */ - dataspace = H5Screate_simple (RANK, dims, NULL); + if((dataspace = H5Screate_simple (RANK, dims, NULL)) < 0) + goto error; /* Modify dataset creation properties, i.e. enable chunking */ - dcpl = H5Pcreate (H5P_DATASET_CREATE); - status = H5Pset_chunk (dcpl, RANK, chunk_dims); + if((dcpl = H5Pcreate (H5P_DATASET_CREATE)) < 0) + goto error; + if(H5Pset_chunk (dcpl, RANK, chunk_dims) < 0) + goto error; /* Set the dummy filter simply for counting the number of bytes being read into the memory */ - H5Zregister(H5Z_COUNTER); - H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL); + if(H5Zregister(H5Z_COUNTER) < 0) + goto error; + + if(H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL) < 0) + goto error; /* Create a new dataset within the file using chunk creation properties. */ - dataset = H5Dcreate2 (file, DSET1_NAME, H5T_NATIVE_INT, dataspace, - H5P_DEFAULT, dcpl, H5P_DEFAULT); + if((dataset = H5Dcreate2 (file, DSET1_NAME, H5T_NATIVE_INT, dataspace, + H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; for (i = 0; i < DSET1_DIM1; i++) for (j = 0; j < DSET1_DIM2; j++) data[i][j] = i+j; /* Write data to dataset */ - status = H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); + if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) < 0) + goto error; /* Close resources */ - status = H5Dclose (dataset); - status = H5Pclose (dcpl); - status = H5Sclose (dataspace); + H5Dclose (dataset); + H5Pclose (dcpl); + H5Sclose (dataspace); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose (dataset); + H5Pclose (dcpl); + H5Sclose (dataspace); + } H5E_END_TRY; + + return 1; } /*--------------------------------------------------------------------------- @@ -183,56 +223,72 @@ static void create_dset1(hid_t file) * dataset dimensions: 300 x 600 * chunk dimensions: 100 x 100 */ -static void create_dset2(hid_t file) +static int create_dset2(hid_t file) { hid_t dataspace, dataset; hid_t dcpl; hsize_t dims[RANK] = {DSET2_DIM1, DSET2_DIM2}; - herr_t status; hsize_t chunk_dims[RANK] = {CHUNK2_DIM1, CHUNK2_DIM2}; int data[DSET2_DIM1][DSET2_DIM2]; /* data for writing */ int i, j; /* Create the data space. */ - dataspace = H5Screate_simple (RANK, dims, NULL); + if((dataspace = H5Screate_simple (RANK, dims, NULL)) < 0) + goto error; /* Modify dataset creation properties, i.e.
enable chunking */ - dcpl = H5Pcreate (H5P_DATASET_CREATE); - status = H5Pset_chunk (dcpl, RANK, chunk_dims); + if((dcpl = H5Pcreate (H5P_DATASET_CREATE)) < 0) + goto error; + if(H5Pset_chunk (dcpl, RANK, chunk_dims) < 0) + goto error; /* Set the dummy filter simply for counting the number of bytes being read into the memory */ - H5Zregister(H5Z_COUNTER); - H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL); + if(H5Zregister(H5Z_COUNTER) < 0) + goto error; + if(H5Pset_filter(dcpl, FILTER_COUNTER, 0, 0, NULL) < 0) + goto error; /* Create a new dataset within the file using chunk creation properties. */ - dataset = H5Dcreate2 (file, DSET2_NAME, H5T_NATIVE_INT, dataspace, - H5P_DEFAULT, dcpl, H5P_DEFAULT); + if((dataset = H5Dcreate2 (file, DSET2_NAME, H5T_NATIVE_INT, dataspace, + H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + goto error; for (i = 0; i < DSET2_DIM1; i++) for (j = 0; j < DSET2_DIM2; j++) data[i][j] = i+j; /* Write data to dataset */ - status = H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, data); + if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, + H5P_DEFAULT, data) < 0) + goto error; /* Close resources */ - status = H5Dclose (dataset); - status = H5Pclose (dcpl); - status = H5Sclose (dataspace); + H5Dclose (dataset); + H5Pclose (dcpl); + H5Sclose (dataspace); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose (dataset); + H5Pclose (dcpl); + H5Sclose (dataspace); + } H5E_END_TRY; + + return 1; } /*--------------------------------------------------------------------------- * Check the performance of the chunk cache when partial chunks exist * along the dataset dimensions. */ -static void check_partial_chunks_perf(hid_t file) +static int check_partial_chunks_perf(hid_t file) { hid_t dataset; hid_t filespace; hid_t memspace; hid_t dapl; - herr_t status; int rdata[DSET1_DIM2]; /* data for reading */ int i; @@ -242,8 +298,10 @@ static void check_partial_chunks_perf(hid_t file) hsize_t count[RANK] = {1, DSET1_DIM2}; test_time_t t = {0, 0}, t1 = {0, 0}, t2 = {0, 0}; - dapl = H5Pcreate(H5P_DATASET_ACCESS); - status = H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0); + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + goto error; + if(H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0) < 0) + goto error; dataset = H5Dopen2 (file, DSET1_NAME, dapl); @@ -257,11 +315,13 @@ static void check_partial_chunks_perf(hid_t file) /* Read the data row by row */ for(i = 0; i < DSET1_DIM1; i++) { start[0] = i; - status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, - start, NULL, count, NULL); + if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET, + start, NULL, count, NULL) < 0) + goto error; - status = H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, - H5P_DEFAULT, rdata); + if(H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, + H5P_DEFAULT, rdata) < 0) + goto error; } test_time_get_current(&t2); @@ -269,10 +329,20 @@ static void check_partial_chunks_perf(hid_t file) printf("1. 
Partial chunks: total read time is %lf; number of bytes being read from file is %lu\n", test_time_to_double(t), nbytes_global); - status = H5Dclose (dataset); - status = H5Sclose (filespace); - status = H5Sclose (memspace); - status = H5Pclose (dapl); + H5Dclose (dataset); + H5Sclose (filespace); + H5Sclose (memspace); + H5Pclose (dapl); + + return 0; +error: + H5E_BEGIN_TRY { + H5Dclose (dataset); + H5Sclose (filespace); + H5Sclose (memspace); + H5Pclose (dapl); + } H5E_END_TRY; + return 1; } /*--------------------------------------------------------------------------- @@ -280,14 +350,13 @@ static void check_partial_chunks_perf(hid_t file) * is smaller than the number of chunks along the fastest-growing * dimension of the dataset. */ -static void check_hash_value_perf(hid_t file) +static int check_hash_value_perf(hid_t file) { hid_t dataset; hid_t filespace; hid_t memspace; hid_t dapl; - herr_t status; int rdata[DSET2_DIM1]; /* data for reading */ int i; @@ -297,13 +366,17 @@ static void check_hash_value_perf(hid_t file) hsize_t count[RANK] = {DSET2_DIM1, 1}; test_time_t t = {0, 0}, t1 = {0, 0}, t2 = {0, 0}; - dapl = H5Pcreate(H5P_DATASET_ACCESS); - status = H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0); - - dataset = H5Dopen2 (file, DSET2_NAME, dapl); + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + goto error; + if(H5Pset_chunk_cache (dapl, RDCC_NSLOTS, RDCC_NBYTES, RDCC_W0) < 0) + goto error; - memspace = H5Screate_simple(column_rank, column_dim, NULL); - filespace = H5Dget_space(dataset); + if((dataset = H5Dopen2 (file, DSET2_NAME, dapl)) < 0) + goto error; + if((memspace = H5Screate_simple(column_rank, column_dim, NULL)) < 0) + goto error; + if((filespace = H5Dget_space(dataset)) < 0) + goto error; nbytes_global = 0; @@ -312,11 +385,13 @@ static void check_hash_value_perf(hid_t file) /* Read the data column by column */ for(i = 0; i < DSET2_DIM2; i++) { start[1] = i; - status = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, - start, NULL, count, NULL); + if(H5Sselect_hyperslab(filespace, H5S_SELECT_SET, + start, NULL, count, NULL) < 0) + goto error; - status = H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, - H5P_DEFAULT, rdata); + if(H5Dread (dataset, H5T_NATIVE_INT, memspace, filespace, + H5P_DEFAULT, rdata) < 0) + goto error; } test_time_get_current(&t2); @@ -324,10 +399,20 @@ static void check_hash_value_perf(hid_t file) printf("2. Hash value: total read time is %lf; number of bytes being read from file is %lu\n", test_time_to_double(t), nbytes_global); - status = H5Dclose (dataset); - status = H5Sclose (filespace); - status = H5Sclose (memspace); - status = H5Pclose (dapl); + H5Dclose (dataset); + H5Sclose (filespace); + H5Sclose (memspace); + H5Pclose (dapl); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose (dataset); + H5Sclose (filespace); + H5Sclose (memspace); + H5Pclose (dapl); + } H5E_END_TRY; + return 1; } /*------------------------------------------------------------------------------------- @@ -340,22 +425,33 @@ int main (void) { hid_t file; /* handles */ - + int nerrors = 0; + /* Create a new file. If file exists its contents will be overwritten. */ - file = H5Fcreate (FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + if((file = H5Fcreate (FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; - create_dset1(file); - create_dset2(file); + nerrors += create_dset1(file); + nerrors += create_dset2(file); - H5Fclose (file); + if(H5Fclose (file) < 0) + goto error; /* Re-open the file for testing performance. 
*/ - file = H5Fopen (FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + if((file = H5Fopen (FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + goto error; - check_partial_chunks_perf(file); - check_hash_value_perf(file); + nerrors += check_partial_chunks_perf(file); + nerrors += check_hash_value_perf(file); - H5Fclose (file); + if(H5Fclose (file) < 0) + goto error; + if (nerrors>0) goto error; + cleanup(); return 0; + +error: + fprintf(stderr, "*** ERRORS DETECTED ***\n"); + return 1; } -- cgit v0.12