author      Quincey Koziol <koziol@hdfgroup.org>    2002-08-21 12:36:18 (GMT)
committer   Quincey Koziol <koziol@hdfgroup.org>    2002-08-21 12:36:18 (GMT)
commit      b7b36d7d06c6d7445b302def58835b3814c4f6ca (patch)
tree        01a47868e71373d2a68cf1867c2cb91bd4d804d0    /src/H5D.c
parent      8bae55914620439a0dc3aff60d8f2b4db7611298 (diff)
[svn-r5884] Purpose:
    Bug fix/Code cleanup
Description:
    Clean up memory leak in fill value code.  Also rearrange logic of code a bit.
Platforms tested:
    FreeBSD 4.6 (sleipnir)
Diffstat (limited to 'src/H5D.c')
-rw-r--r--   src/H5D.c   101
1 file changed, 53 insertions(+), 48 deletions(-)
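
For orientation, below is a minimal, self-contained sketch of the pattern the contiguous-layout path moves to in this commit.  It is not HDF5 source: init_fill_buffer and array_fill are hypothetical stand-ins for the fill-buffer priming step and for H5V_array_fill().  The point is that instead of calloc'ing a one-byte default fill buffer that was never freed, the code branches on whether a user-defined fill value exists and simply zeroes the write buffer in the default case.

#include <string.h>

/*
 * Hypothetical stand-in for H5V_array_fill(): replicate one `size`-byte
 * element from `fill` across `count` slots of `buf`.
 */
static void array_fill(void *buf, const void *fill, size_t size, size_t count)
{
    unsigned char *p = (unsigned char *)buf;
    size_t i;

    for (i = 0; i < count; i++, p += size)
        memcpy(p, fill, size);
}

/*
 * Prime a write buffer with the fill value.  Rather than allocating a
 * temporary default fill buffer when none is defined (the allocation the
 * old code leaked), branch on `fill_buf`:
 *   - user-defined fill value -> replicate it across the buffer
 *   - library default (NULL)  -> just zero the whole buffer
 */
static void init_fill_buffer(void *buf, size_t bufsize,
                             const void *fill_buf, size_t fill_size,
                             size_t npoints)
{
    if (fill_buf)
        array_fill(buf, fill_buf, fill_size, npoints);
    else
        memset(buf, 0, bufsize);
}

int main(void)
{
    double fill = -999.0;
    double buf[8];

    /* user-defined fill value: every element becomes -999.0 */
    init_fill_buffer(buf, sizeof(buf), &fill, sizeof(fill), 8);

    /* library-default fill value: no allocation, just zero the buffer */
    init_fill_buffer(buf, sizeof(buf), NULL, sizeof(double), 8);

    return 0;
}

The compact-layout branch in the patch applies the same idea directly to dset->layout.buf, so the one-byte H5MM_calloc() placeholder from the old code (and its leak) disappears entirely.
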
diff --git a/src/H5D.c b/src/H5D.c
index 1e2283b..645e81b 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -3274,51 +3274,28 @@ H5D_init_storage(H5D_t *dset, const H5S_t *space)
if(H5P_get(plist, H5D_CRT_SPACE_TIME_NAME, &space_time) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve space allocation time");
- if(dset->layout.type==H5D_CHUNKED) {
- /*
- * If the dataset is accessed via parallel I/O, allocate file space
- * for all chunks now and initialize each chunk with the fill value.
- */
- if (space_time==H5D_SPACE_ALLOC_EARLY
-#ifdef H5_HAVE_PARALLEL
- || (IS_H5FD_MPIO(dset->ent.file) || IS_H5FD_MPIPOSIX(dset->ent.file))
-#endif /*H5_HAVE_PARALLEL*/
- ) {
- /* We only handle simple data spaces so far */
- int ndims;
- hsize_t dim[H5O_LAYOUT_NDIMS];
-
- if ((ndims=H5S_get_simple_extent_dims(space, dim, NULL))<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info");
- dim[ndims] = dset->layout.dim[ndims];
- ndims++;
-
- if (H5F_istore_allocate(dset->ent.file, H5P_DATASET_XFER_DEFAULT,
- &(dset->layout), dim, plist)<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset");
- } /* end if */
- } else { /* case for compact and contiguous dataset. */
- /*
- * If the fill value is default and dataset is compact, zero set data buf.
- */
- if(!fill.buf && dset->layout.type==H5D_COMPACT) {
- HDmemset(dset->layout.buf, 0, dset->layout.size);
- goto done;
- }
+ /* Get the number of elements in the dataset's dataspace */
+ snpoints = H5S_get_simple_extent_npoints(space);
+ assert(snpoints>=0);
+ H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
- snpoints = H5S_get_simple_extent_npoints(space);
- assert(snpoints>=0);
- H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
+ switch (dset->layout.type) {
+ case H5D_COMPACT:
+ /* If the fill value is defined, initialize the data buffer with it */
+ if(fill.buf)
+ /* Initialize the cached data buffer with the fill value */
+ H5V_array_fill(dset->layout.buf, fill.buf, fill.size, npoints);
+ else /* If the fill value is default, zero set data buf. */
+ HDmemset(dset->layout.buf, 0, dset->layout.size);
+ break;
- /* If fill value is library default, simply define it as one-byte 0-value
- * buffer */
- if(!fill.buf) {
- fill.size=1;
- fill.buf=H5MM_calloc(fill.size);
- }
-
- /* write fill value to the entire extent of the dataset.*/
- if (fill.buf) {
+ case H5D_CONTIGUOUS:
+ /* If fill value is library default, get the dataset's type's size */
+ if(!fill.buf) {
+ fill.size=H5T_get_size(dset->type);
+ assert(fill.size);
+ } /* end if */
+
/*
* Fill the entire current extent with the fill value. We can do
* this quite efficiently by making sure we copy the fill value
@@ -3331,8 +3308,11 @@ H5D_init_storage(H5D_t *dset, const H5S_t *space)
if ((buf=H5FL_BLK_ALLOC(type_conv,bufsize,0))==NULL)
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for fill buffer");
- /* Fill the buffer with the fill value */
- H5V_array_fill(buf, fill.buf, fill.size, ptsperbuf);
+ /* Fill the buffer with the user's fill value */
+ if(fill.buf)
+ H5V_array_fill(buf, fill.buf, fill.size, ptsperbuf);
+ else /* Fill the buffer with the default fill value */
+ HDmemset(buf,0,bufsize);
/* Start at the beginning of the dataset */
addr = 0;
@@ -3341,13 +3321,38 @@ H5D_init_storage(H5D_t *dset, const H5S_t *space)
while (npoints>0) {
size = MIN(ptsperbuf, npoints) * fill.size;
if (H5F_seq_write(dset->ent.file, H5P_DATASET_XFER_DEFAULT,
- &(dset->layout), plist, space, fill.size, size, addr, buf)<0)
+ &(dset->layout), plist, space, fill.size, size, addr, buf)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset");
npoints -= MIN(ptsperbuf, npoints);
addr += size;
} /* end while */
- } /* end if */
- }
+ break;
+
+ case H5D_CHUNKED:
+ /*
+ * If the dataset is accessed via parallel I/O, allocate file space
+ * for all chunks now and initialize each chunk with the fill value.
+ */
+ if (space_time==H5D_SPACE_ALLOC_EARLY
+#ifdef H5_HAVE_PARALLEL
+ || (IS_H5FD_MPIO(dset->ent.file) || IS_H5FD_MPIPOSIX(dset->ent.file))
+#endif /*H5_HAVE_PARALLEL*/
+ ) {
+ /* We only handle simple data spaces so far */
+ int ndims;
+ hsize_t dim[H5O_LAYOUT_NDIMS];
+
+ if ((ndims=H5S_get_simple_extent_dims(space, dim, NULL))<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info");
+ dim[ndims] = dset->layout.dim[ndims];
+ ndims++;
+
+ if (H5F_istore_allocate(dset->ent.file, H5P_DATASET_XFER_DEFAULT,
+ &(dset->layout), dim, plist)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset");
+ } /* end if */
+ break;
+ } /* end switch */
done:
if (buf)