Diffstat (limited to 'src/H5Dcontig.c')
-rw-r--r--   src/H5Dcontig.c   14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index a2b1f59..66427a3 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -132,7 +132,7 @@ H5D_contig_alloc(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ )
     HDassert(layout);
 
     /* Allocate space for the contiguous data */
-    if(HADDR_UNDEF == (layout->store.u.contig.addr = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size)))
+    if(HADDR_UNDEF == (layout->store.u.contig.addr = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->store.u.contig.size)))
         HGOTO_ERROR(H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space")
 
 done:
@@ -180,7 +180,7 @@ H5D_contig_fill(H5D_t *dset, hid_t dxpl_id)
     HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
     HDassert(dset && H5D_CONTIGUOUS == dset->shared->layout.type);
     HDassert(H5F_addr_defined(dset->shared->layout.store.u.contig.addr));
-    HDassert(dset->shared->layout.u.contig.size > 0);
+    HDassert(dset->shared->layout.store.u.contig.size > 0);
     HDassert(dset->shared->space);
     HDassert(dset->shared->type);
@@ -215,7 +215,7 @@ H5D_contig_fill(H5D_t *dset, hid_t dxpl_id)
     /* Initialize storage info for this dataset */
     store.contig.dset_addr = dset->shared->layout.store.u.contig.addr;
-    store.contig.dset_size = dset->shared->layout.u.contig.size;
+    store.contig.dset_size = dset->shared->layout.store.u.contig.size;
 
     /* Get the number of elements in the dataset's dataspace */
     snpoints = H5S_GET_EXTENT_NPOINTS(dset->shared->space);
@@ -328,7 +328,7 @@ H5D_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
     HDassert(layout);
 
     /* Free the file space for the chunk */
-    if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->store.u.contig.addr, layout->u.contig.size) < 0)
+    if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->store.u.contig.addr, layout->store.u.contig.size) < 0)
         HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header")
 
 done:
@@ -423,7 +423,7 @@ H5D_contig_construct(H5F_t *f, H5D_t *dset)
         HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed")
 
     /* Assign the dataset's contiguous storage size */
-    dset->shared->layout.u.contig.size = tmp_size;
+    dset->shared->layout.store.u.contig.size = tmp_size;
 
     /* Get the sieve buffer size for this dataset */
     dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(f);
@@ -482,7 +482,7 @@ H5D_contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t UNUSED *t
     FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_contig_io_init)
 
     io_info->store->contig.dset_addr = io_info->dset->shared->layout.store.u.contig.addr;
-    io_info->store->contig.dset_size = io_info->dset->shared->layout.u.contig.size;
+    io_info->store->contig.dset_size = io_info->dset->shared->layout.store.u.contig.size;
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5D_contig_io_init() */
@@ -1267,7 +1267,7 @@ H5D_contig_copy(H5F_t *f_src, const H5O_layout_t *layout_src, H5F_t *f_dst,
     /* Set up number of bytes to copy, and initial buffer size */
     /* (actually use the destination size, which has been fixed up, if necessary) */
-    total_src_nbytes = layout_dst->u.contig.size;
+    total_src_nbytes = layout_dst->store.u.contig.size;
     H5_CHECK_OVERFLOW(total_src_nbytes, hsize_t, size_t);
     buf_size = MIN(H5D_TEMP_BUF_SIZE, (size_t)total_src_nbytes);
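
The patch is mechanical: in H5D_contig_alloc, H5D_contig_fill, H5D_contig_delete, H5D_contig_construct, H5D_contig_io_init, and H5D_contig_copy, every remaining reference to the contiguous storage size moves from layout->u.contig.size to layout->store.u.contig.size, next to the address that already lived under store. The standalone sketch below uses hypothetical stand-in types (layout_sketch_t, haddr_t_sketch, hsize_t_sketch; not the real H5O_layout_t from the HDF5 headers) purely to illustrate the before/after access paths.

/*
 * Minimal sketch, assuming simplified stand-in types rather than the real
 * HDF5 layout structures, of the member move applied by this patch: the
 * contiguous storage size now sits beside the address under the layout's
 * "store" sub-struct.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t haddr_t_sketch;   /* stand-in for HDF5's file address type */
typedef uint64_t hsize_t_sketch;   /* stand-in for HDF5's size type */

typedef struct {
    /* raw-data storage information for the layout */
    struct {
        union {
            struct {
                haddr_t_sketch addr;  /* file address of the contiguous block */
                hsize_t_sketch size;  /* size of the block (moved here by this patch) */
            } contig;
        } u;
    } store;
} layout_sketch_t;

int main(void)
{
    layout_sketch_t layout;

    /* Post-patch access path: both address and size hang off "store" */
    layout.store.u.contig.addr = 4096;
    layout.store.u.contig.size = 1024;

    /* Pre-patch, the size was reached as layout.u.contig.size; after this
     * patch that path no longer exists, so all callers read the pair from
     * the same place. */
    printf("contig addr=%llu size=%llu\n",
           (unsigned long long)layout.store.u.contig.addr,
           (unsigned long long)layout.store.u.contig.size);

    return 0;
}

Keeping the address and size together under store means the allocation, fill, delete, construct, I/O-init, and copy paths all read the contiguous storage description from one location, which is exactly what the seven one-line changes above accomplish.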