author     Larry Knox <lrknox@hdfgroup.org>    2019-01-05 13:50:15 (GMT)
committer  Larry Knox <lrknox@hdfgroup.org>    2019-01-05 13:50:15 (GMT)
commit     3be0fbd817e82b4a60b36b2b9d444a3750f2eb06 (patch)
tree       cc16d19b4f3fd04aa0aa70556fac36b2222daa24 /src
parent     25fe692c5c5c37a904a0d2ba3f447e60396c30da (diff)
parent     cceb9f06b8f213ef1cb360a4ef900536669a8dad (diff)
Merge branch 'develop' of https://bitbucket.hdfgroup.org/scm/~lrknox/hdf5_lrk into develop
Diffstat (limited to 'src')
-rw-r--r--  src/H5Adense.c  35
-rw-r--r--  src/H5Dchunk.c  28
-rw-r--r--  src/H5L.c        2
3 files changed, 60 insertions(+), 5 deletions(-)
diff --git a/src/H5Adense.c b/src/H5Adense.c
index 5bed82d..021fa76 100644
--- a/src/H5Adense.c
+++ b/src/H5Adense.c
@@ -300,18 +300,49 @@ static herr_t
H5A__dense_fnd_cb(const H5A_t *attr, hbool_t *took_ownership, void *_user_attr)
{
H5A_t const **user_attr = (H5A_t const **)_user_attr; /* User data from v2 B-tree attribute lookup */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC_NOERR
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(attr);
HDassert(user_attr);
+ HDassert(took_ownership);
+ /*
+ * If there is an attribute already stored in "user_attr",
+ * we need to free the dynamically allocated space for that
+ * attribute; otherwise an outstanding allocation causes an
+ * infinite loop when closing the library. (HDFFV-10659)
+ *
+ * This callback is used by H5A__dense_remove() to close/free the
+ * attribute stored in "user_attr" (via H5O__msg_free_real()) after
+ * the attribute node is deleted from the name index v2 B-tree.
+ * The issue is:
+ * When deleting the attribute node from the B-tree,
+ * if the attribute is found in the intermediate B-tree nodes,
+ * which may be merged/redistributed, we need to free the dynamically
+ * allocated space for the intermediate decoded attribute.
+ */
+ if(*user_attr != NULL) {
+ H5A_t *old_attr = *(H5A_t **)user_attr; /* cast away const so the stored attribute can be freed */
+ if(old_attr->shared) {
+ /* Free any dynamically allocated items */
+ if(H5A__free(old_attr) < 0)
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info")
+
+ /* Destroy shared attribute struct */
+ old_attr->shared = H5FL_FREE(H5A_shared_t, old_attr->shared);
+ } /* end if */
+
+ old_attr = H5FL_FREE(H5A_t, old_attr);
+ } /* end if */
/* Take over attribute ownership */
*user_attr = attr;
*took_ownership = TRUE;
- FUNC_LEAVE_NOAPI(SUCCEED)
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5A__dense_fnd_cb() */
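The H5Adense.c change above addresses HDFFV-10659, where deleting attributes stored in dense (v2 B-tree) form leaked the intermediate decoded attributes and stalled library shutdown. The following is a minimal, hypothetical reproduction sketch using only the public HDF5 API; the file, group, and attribute names are invented and error checking is omitted for brevity.

    /* Hypothetical reproduction sketch (not part of the commit): force dense
     * attribute storage on a group, then delete the attributes so that
     * H5A__dense_remove() walks and merges name-index B-tree nodes. */
    #include "hdf5.h"
    #include <stdio.h>

    int
    main(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t gcpl = H5Pcreate(H5P_GROUP_CREATE);
        hid_t sid  = H5Screate(H5S_SCALAR);
        hid_t fid, gid;
        char  name[32];
        int   i;

        /* Use the latest file format and push all attributes into dense storage */
        H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
        H5Pset_attr_phase_change(gcpl, 0, 0);

        fid = H5Fcreate("dense_attr_repro.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        gid = H5Gcreate2(fid, "g", H5P_DEFAULT, gcpl, H5P_DEFAULT);

        /* Enough attributes to build a multi-node name-index v2 B-tree */
        for(i = 0; i < 1000; i++) {
            hid_t aid;

            snprintf(name, sizeof(name), "attr%04d", i);
            aid = H5Acreate2(gid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
            H5Awrite(aid, H5T_NATIVE_INT, &i);
            H5Aclose(aid);
        }

        /* Deleting attributes merges/redistributes B-tree nodes; this is the
         * path where H5A__dense_fnd_cb() must free any previously decoded
         * attribute before taking ownership of a new one. */
        for(i = 0; i < 1000; i++) {
            snprintf(name, sizeof(name), "attr%04d", i);
            H5Adelete(gid, name);
        }

        H5Gclose(gid);
        H5Fclose(fid);
        H5Sclose(sid);
        H5Pclose(gcpl);
        H5Pclose(fapl);
        return 0;
    }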
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 91f3b91..b5a5c39 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -1493,6 +1493,9 @@ H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Set chunk location & hyperslab size */
for(u = 0; u < fm->f_ndims; u++) {
+ /* Validate this chunk dimension */
+ if(fm->layout->u.chunk.dim[u] == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u)
HDassert(sel_start[u] == sel_end[u]);
chunk_info->scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u];
coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u];
@@ -1580,6 +1583,9 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Set initial chunk location & hyperslab size */
for(u = 0; u < fm->f_ndims; u++) {
+ /* Validate this chunk dimension */
+ if(fm->layout->u.chunk.dim[u] == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u)
scaled[u] = start_scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u];
coords[u] = start_coords[u] = scaled[u] * fm->layout->u.chunk.dim[u];
end[u] = (coords[u] + fm->chunk_dim[u]) - 1;
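The zero-dimension checks added here, and the matching ones in the hunks that follow, all guard the same arithmetic: a selection offset is divided by a chunk dimension to obtain a scaled chunk index, so a chunk dimension of 0 (as a corrupted file could decode to) would divide by zero. A hypothetical standalone sketch of that guarded computation; the names are invented and are not HDF5 internals.

    #include <stdint.h>

    /* Returns 0 on success, -1 if any chunk dimension is 0, mirroring the
     * new HGOTO_ERROR checks in the hunks above. */
    static int
    scale_selection_start(const uint64_t *sel_start, const uint32_t *chunk_dim,
                          unsigned ndims, uint64_t *scaled, uint64_t *coords)
    {
        unsigned u;

        for(u = 0; u < ndims; u++) {
            if(chunk_dim[u] == 0)
                return -1;                            /* would divide by zero */
            scaled[u] = sel_start[u] / chunk_dim[u];  /* chunk index */
            coords[u] = scaled[u] * chunk_dim[u];     /* chunk origin in dataset space */
        }
        return 0;
    }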
@@ -4043,6 +4049,9 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
* assume here that all elements of space_dim are > 0. This is checked at
* the top of this function. */
for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Validate this chunk dimension */
+ if(chunk_dim[op_dim] == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", op_dim)
min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
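In the allocation hunk above, min_unalloc is a ceiling division giving the first chunk index not fully covered by the old extent, and max_unalloc is the last chunk index inside the new extent. A hypothetical worked example with invented values:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t old_dim = 10, space_dim = 25;
        uint32_t chunk_dim = 4;                   /* guarded to be > 0 above */

        /* First chunk index entirely beyond the old extent: ceil(10/4) = 3 */
        uint64_t min_unalloc = (old_dim + chunk_dim - 1) / chunk_dim;
        /* Last chunk index inside the new extent: (25 - 1) / 4 = 6 */
        uint64_t max_unalloc = (space_dim - 1) / chunk_dim;

        assert(min_unalloc == 3 && max_unalloc == 6);
        return 0;
    }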
@@ -4484,13 +4493,17 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[])
/* Start off with this dimension marked as not needing to be modified */
new_full_dim[op_dim] = FALSE;
+ /* Validate this chunk dimension */
+ if(chunk_dim[op_dim] == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", op_dim)
+
/* Calculate offset of first previously incomplete chunk in this
* dimension */
- old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]);
+ old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]);
/* Calculate the largest offset of chunks that might need to be
* modified in this dimension */
- max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
+ max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1);
/* Check for old_dim aligned with chunk boundary in this dimension, if
@@ -4626,6 +4639,8 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI size")
/* Distribute evenly the number of blocks between processes. */
+ if(mpi_size == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "MPI size of 0 would result in division by zero")
num_blocks = chunk_info->num_io / mpi_size; /* value should be the same on all procs */
/* after evenly distributing the blocks between processes, are
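The mpi_size check protects the block-distribution arithmetic that follows it: each rank handles num_io / mpi_size blocks and the remainder is spread over the first num_io % mpi_size ranks. A hypothetical sketch with plain integers standing in for the MPI values:

    #include <assert.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t num_io   = 10;   /* chunks to be filled collectively */
        int    mpi_size = 4;    /* guarded above: must not be 0 */
        size_t num_blocks, leftover;

        assert(mpi_size > 0);
        num_blocks = num_io / (size_t)mpi_size;  /* 2 blocks for every rank   */
        leftover   = num_io % (size_t)mpi_size;  /* first 2 ranks get 1 extra */

        assert(num_blocks == 2 && leftover == 2);
        return 0;
    }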
@@ -5066,6 +5081,10 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim)
HDmemset(min_mod_chunk_sc, 0, sizeof(min_mod_chunk_sc));
HDmemset(max_mod_chunk_sc, 0, sizeof(max_mod_chunk_sc));
for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
+ /* Validate this chunk dimension */
+ if(chunk_dim[op_dim] == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", op_dim)
+
/* Calculate the largest offset of chunks that might need to be
* modified in this dimension */
max_mod_chunk_sc[op_dim] = (old_dim[op_dim] - 1) / chunk_dim[op_dim];
@@ -5721,9 +5740,12 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
/* (background buffer has already been zeroed out, if not expanding) */
if(udata->cpy_info->expand_ref) {
size_t ref_count;
+ size_t dt_size;
/* Determine # of reference elements to copy */
- ref_count = nbytes / H5T_get_size(udata->dt_src);
+ if((dt_size = H5T_get_size(udata->dt_src)) == 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "size must not be 0")
+ ref_count = nbytes / dt_size;
/* Copy the reference elements */
if(H5O_copy_expand_ref(udata->file_src, buf, udata->idx_info_dst->f, bkg, ref_count, H5T_get_ref_type(udata->dt_src), udata->cpy_info) < 0)
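The copy-callback hunk applies the same guarded-division pattern to the datatype size. At the public level, H5Tget_size() likewise returns 0 on failure, so a hypothetical caller computing an element count would guard it the same way; the helper name and values below are invented.

    #include "hdf5.h"
    #include <stdio.h>

    /* Compute an element count from a byte count and a datatype size,
     * guarding against the 0 that H5Tget_size() returns on error. */
    static int
    count_elements(size_t nbytes, hid_t dtype_id, size_t *count /*out*/)
    {
        size_t dt_size = H5Tget_size(dtype_id);   /* 0 indicates failure */

        if(dt_size == 0)
            return -1;
        *count = nbytes / dt_size;
        return 0;
    }

    int
    main(void)
    {
        size_t n = 0;

        if(count_elements(64, H5T_STD_REF_OBJ, &n) == 0)
            printf("%zu reference elements\n", n);   /* 64 / sizeof(hobj_ref_t) */
        return 0;
    }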
diff --git a/src/H5L.c b/src/H5L.c
index 33e561a..1f45740 100644
--- a/src/H5L.c
+++ b/src/H5L.c
@@ -648,6 +648,8 @@ H5Lcreate_ud(hid_t link_loc_id, const char *link_name, H5L_type_t link_type,
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no link name specified")
if(link_type < H5L_TYPE_UD_MIN || link_type > H5L_TYPE_MAX)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid link class")
+ if(!udata && udata_size)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "udata cannot be NULL if udata_size is non-zero")
/* Check the group access property list */
if(H5P_DEFAULT == lcpl_id)
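The H5L.c hunk rejects the combination of a NULL udata buffer with a non-zero udata_size. A hypothetical caller is sketched below; the file and link names are invented, and H5L_TYPE_EXTERNAL stands in for a registered user-defined link class. After this change the call fails the argument check up front instead of reading through a NULL pointer.

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t  fid = H5Fcreate("udlink_check.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        herr_t status;

        /* NULL udata with udata_size != 0: rejected by the new check */
        status = H5Lcreate_ud(fid, "ud_link", H5L_TYPE_EXTERNAL, NULL, 16,
                              H5P_DEFAULT, H5P_DEFAULT);

        H5Fclose(fid);
        return (status < 0) ? 0 : 1;   /* expect a negative status here */
    }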