author     Ray Lu <songyulu@hdfgroup.org>    2018-11-15 15:43:46 (GMT)
committer  Ray Lu <songyulu@hdfgroup.org>    2018-11-15 15:43:46 (GMT)
commit     e07d097da16a69cdd3d0d305595b241e6cf39f60 (patch)
tree       01f90d465371c02fe22c6b25d19f6e6abe0824be /src/H5Dchunk.c
parent     73f881a8385fffc7b48f3c2ec3ba538425966cbb (diff)
parent     cd13d24e5140578a880aebe4e2d8b899179d0870 (diff)
Merge pull request #1316 in HDFFV/hdf5 from ~SONGYULU/hdf5_ray:bugfix/HDFFV-10601-issues-with-chunk-cache-hash to develop
* commit 'cd13d24e5140578a880aebe4e2d8b899179d0870':
HDFFV-10601: I added error checking to the HDF5 functions.
HDFFV-10601: Adding performance test to verify the improvement.
HDFFV-10601: I changed to a better way to calculate the number of chunks in a dataset.
HDFFV-10601 Issues with chunk cache hash value calculation:
Diffstat (limited to 'src/H5Dchunk.c')
-rw-r--r--  src/H5Dchunk.c  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 22dc05a..cb6b925 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -949,7 +949,10 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id)
         /* Initial scaled dimension sizes */
         if(dset->shared->layout.u.chunk.dim[u] == 0)
             HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u)
-        rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u];
+
+        /* Round up to the next integer # of chunks, to accommodate partial chunks */
+        rdcc->scaled_dims[u] = (dset->shared->curr_dims[u] + dset->shared->layout.u.chunk.dim[u] - 1) /
+            dset->shared->layout.u.chunk.dim[u];
 
         if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) )
             HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
@@ -2799,6 +2802,7 @@ H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled)
     hsize_t val;                        /* Intermediate value */
    unsigned ndims = shared->ndims;     /* Rank of dataset */
     unsigned ret = 0;                   /* Value to return */
+    unsigned u;                         /* Local index variable */
 
     FUNC_ENTER_STATIC_NOERR
 
@@ -2809,17 +2813,11 @@ H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled)
 
     /* If the fastest changing dimension doesn't have enough entropy, use
      * other dimensions too */
-    if(ndims > 1 && shared->cache.chunk.scaled_dims[ndims - 1] <= shared->cache.chunk.nslots) {
-        unsigned u;             /* Local index variable */
-
-        val = scaled[0];
-        for(u = 1; u < ndims; u++) {
-            val <<= shared->cache.chunk.scaled_encode_bits[u];
-            val ^= scaled[u];
-        } /* end for */
-    } /* end if */
-    else
-        val = scaled[ndims - 1];
+    val = scaled[0];
+    for(u = 1; u < ndims; u++) {
+        val <<= shared->cache.chunk.scaled_encode_bits[u];
+        val ^= scaled[u];
+    } /* end for */
 
     /* Modulo value against the number of array slots */
     ret = (unsigned)(val % shared->cache.chunk.nslots);
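For context, here is a minimal, self-contained C sketch of the two calculations the patch changes: the round-up division that counts a trailing partial chunk as a full chunk, and the hash that now always mixes every dimension's scaled offset. All of the names and sizes below (curr_dims, chunk_dims, encode_bits, nslots, the 100x100 dataset with 30x30 chunks) are hypothetical stand-ins for illustration, not the HDF5 API:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical example: a 100x100 dataset stored in 30x30 chunks */
        uint64_t curr_dims[2]  = {100, 100};   /* dataset dimensions      */
        uint64_t chunk_dims[2] = {30, 30};     /* chunk dimensions        */
        uint64_t scaled_dims[2];               /* # of chunks per dim     */
        unsigned encode_bits[2];               /* bits to encode each dim */
        uint64_t nslots = 521;                 /* assumed hash-table size */
        int u;

        for(u = 0; u < 2; u++) {
            /* Round up, so the trailing partial chunk is counted:
             * 100 / 30 truncates to 3, but the dataset has 4 chunks per dim */
            scaled_dims[u] = (curr_dims[u] + chunk_dims[u] - 1) / chunk_dims[u];

            /* Bits needed to encode a scaled offset in this dimension
             * (standing in for H5VM_power2up plus a log2 in the real code) */
            uint64_t p2 = 1;
            encode_bits[u] = 0;
            while(p2 < scaled_dims[u]) {
                p2 <<= 1;
                encode_bits[u]++;
            }
        }

        /* Hash the scaled offset of one chunk, mixing every dimension
         * the way the patched H5D__chunk_hash_val() does */
        uint64_t scaled[2] = {2, 3};           /* chunk at scaled offset (2,3) */
        uint64_t val = scaled[0];
        for(u = 1; u < 2; u++) {
            val <<= encode_bits[u];
            val ^= scaled[u];
        }
        printf("chunks per dim = %llu, hash slot = %llu\n",
               (unsigned long long)scaled_dims[0],
               (unsigned long long)(val % nslots));
        return 0;
    }

Before the fix, truncating division reported 3 chunks per dimension for a dataset like this one despite the fourth, partial chunk, and the hash sometimes used only the fastest-changing dimension; the patch makes both calculations cover every chunk and every dimension.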