author    Quincey Koziol <koziol@hdfgroup.org>    2015-10-12 15:48:31 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>    2015-10-12 15:48:31 (GMT)
commit fa675882186356cdb9ccedd745eb040931369917 (patch)
tree   2c2d0d2c1b1fbfa9904ee1c9f83fe199b80bac06  /src/H5Dint.c
parent e8409e6ff14824143456f51c5d4607775a6e16da (diff)
[svn-r28037] Description:
Bring r27806 from trunk to branch.  Also, some minor code cleanups.

Tested on: MacOSX/64 10.10.5 (amazon) w/serial
(h5committest not required on this branch)
Diffstat (limited to 'src/H5Dint.c')
-rw-r--r--    src/H5Dint.c    32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/src/H5Dint.c b/src/H5Dint.c
index de71be8..d74144b 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -1449,12 +1449,6 @@ done:
static herr_t
H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id)
{
- H5P_genplist_t *dapl; /* data access property list object pointer */
- hsize_t curr_dims[H5S_MAX_RANK]; /* current dimension sizes */
- hsize_t max_dims[H5S_MAX_RANK]; /* current dimension sizes */
- int rank; /* dataspace # of dimensions */
- int i; /* local index variable */
- H5D_append_flush_t info;
herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1463,49 +1457,57 @@ H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id)
HDassert(dset);
HDassert(dset->shared);
+ /* Set default append flush values */
dset->shared->append_flush.ndims = 0;
dset->shared->append_flush.func = NULL;
dset->shared->append_flush.udata = NULL;
HDmemset(dset->shared->append_flush.boundary, 0, sizeof(dset->shared->append_flush.boundary));
if(dapl_id != H5P_DATASET_ACCESS_DEFAULT && dset->shared->layout.type == H5D_CHUNKED) {
+ H5P_genplist_t *dapl; /* data access property list object pointer */
+
/* Get dataset access property list */
if(NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for dapl ID");
/* Check if append flush property exists */
if(H5P_exist_plist(dapl, H5D_ACS_APPEND_FLUSH_NAME) > 0) {
+ H5D_append_flush_t info;
/* Get append flush property */
if(H5P_get(dapl, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get append flush info")
else if(info.ndims > 0) {
+ hsize_t curr_dims[H5S_MAX_RANK]; /* current dimension sizes */
+ hsize_t max_dims[H5S_MAX_RANK]; /* maximum dimension sizes */
+ int rank; /* dataspace # of dimensions */
+ unsigned u; /* local index variable */
/* Get dataset rank */
if((rank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, max_dims)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
- if(info.ndims != rank)
+ if(info.ndims != (unsigned)rank)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension rank does not match dataset rank")
/* Validate boundary sizes */
- for(i = 0; i < info.ndims; i++) {
- if(info.boundary[i] != 0) /* when a non-zero boundary is set */
+ for(u = 0; u < info.ndims; u++) {
+ if(info.boundary[u] != 0) /* when a non-zero boundary is set */
/* the dimension is extendible? */
- if(max_dims[i] != H5S_UNLIMITED && max_dims[i] == curr_dims[i])
+ if(max_dims[u] != H5S_UNLIMITED && max_dims[u] == curr_dims[u])
break;
- }
+ } /* end for */
- if(i != info.ndims) /* at least one boundary dimension is not extendible */
+ if(u != info.ndims) /* at least one boundary dimension is not extendible */
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension is not valid")
dset->shared->append_flush.ndims = info.ndims;
dset->shared->append_flush.func = info.func;
dset->shared->append_flush.udata = info.udata;
HDmemcpy(dset->shared->append_flush.boundary, info.boundary, sizeof(info.boundary));
- }
- }
- }
+ } /* end else-if */
+ } /* end if */
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
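
For context, the append-flush information that H5D__append_flush_setup() validates is registered by the application on a dataset access property list via the public H5Pset_append_flush() API (part of the HDF5 1.10 SWMR feature). Below is a minimal sketch of such a caller, assuming a pre-existing chunked dataset "/extendible_dset" in "example.h5" with an unlimited first dimension; the file, dataset, and callback names are illustrative only and not part of this commit.

    #include "hdf5.h"

    /* Append-flush callback: flush the dataset whenever a boundary is crossed */
    static herr_t
    flush_cb(hid_t dset_id, hsize_t *cur_dims, void *udata)
    {
        (void)cur_dims;
        (void)udata;
        return H5Dflush(dset_id);
    }

    int
    main(void)
    {
        hsize_t boundary[1] = {100};    /* flush after every 100 elements along dim 0 */
        hid_t   dapl = H5Pcreate(H5P_DATASET_ACCESS);
        hid_t   file, dset;

        /* Register the append-flush property; H5D__append_flush_setup() reads it
         * back when the chunked dataset is opened with this access plist.
         * (Error checks omitted for brevity.) */
        H5Pset_append_flush(dapl, 1, boundary, flush_cb, NULL);

        file = H5Fopen("example.h5", H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, H5P_DEFAULT);
        dset = H5Dopen2(file, "/extendible_dset", dapl);

        /* ... extend the dataset and write appended data here ... */

        H5Dclose(dset);
        H5Fclose(file);
        H5Pclose(dapl);
        return 0;
    }

Note that the validation in this routine rejects the property when a non-zero boundary is set on a dimension that cannot grow (current size already at a fixed maximum), so the boundary above must lie on an extendible dimension.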