author     Scot Breitenfeld <brtnfld@hdfgroup.org>    2015-04-21 16:57:33 (GMT)
committer  Scot Breitenfeld <brtnfld@hdfgroup.org>    2015-04-21 16:57:33 (GMT)
commit     11b19627b9ac9a97af2970dac92e378baeb9fc1e (patch)
tree       78988ac90fce00ca5270c93d47a7d5235e97efe5 /src
parent     6813b25007280afa9b8bec13c19c134029acd26c (diff)
parent     b32caab787236a72c25dcb1c895c32da8832deab (diff)
[svn-r26867] svn merge -r26837:26866 https://svn.hdfgroup.uiuc.edu/hdf5/trunk
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c         |   2
-rw-r--r--  src/H5ACprivate.h  |   1
-rw-r--r--  src/H5Dchunk.c     |  94
-rw-r--r--  src/H5Dcompact.c   |  15
-rw-r--r--  src/H5Dcontig.c    |  13
-rw-r--r--  src/H5Ddeprec.c    |  27
-rw-r--r--  src/H5Defl.c       |  13
-rw-r--r--  src/H5Dint.c       |  75
-rw-r--r--  src/H5Dio.c        |  11
-rw-r--r--  src/H5Dmpio.c      |   3
-rw-r--r--  src/H5Dpkg.h       |  12
-rw-r--r--  src/H5Oprivate.h   |   4
-rw-r--r--  src/H5S.c          |   9
-rw-r--r--  src/H5T.c          | 146
-rw-r--r--  src/H5VMprivate.h  |  23
15 files changed, 281 insertions(+), 167 deletions(-)
diff --git a/src/H5AC.c b/src/H5AC.c
index 7ed5047..e6bcbb6 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -598,7 +598,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index ccecd83..0a958b0 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -42,6 +42,7 @@
#define H5AC__TRACE_FILE_ENABLED 0
#endif /* H5_METADATA_TRACE_FILE */
+/* Global metadata tag values */
#define H5AC__INVALID_TAG (haddr_t)0
#define H5AC__IGNORE_TAG (haddr_t)1
#define H5AC__SUPERBLOCK_TAG (haddr_t)2
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 336aaf6..6c7c5d5 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -325,9 +325,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
hsize_t chunk_idx; /* Global index of chunk */
H5F_block_t old_chunk; /* Offset/length of old chunk */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
- hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
@@ -338,12 +336,8 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
if(H5D__alloc_storage(dset, dxpl_id, H5D_ALLOC_WRITE, FALSE, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage")
- /* Retrieve the dataset dimensions */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
-
/* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, offset,
+ if(H5VM_chunk_index(dset->shared->ndims, offset,
layout->u.chunk.dim, layout->u.chunk.down_chunks, &chunk_idx) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
@@ -382,7 +376,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
/* Make sure the address of the chunk is returned. */
if(!H5F_addr_defined(udata.chunk_block.offset))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk address isn't defined")
/* Evict the (old) entry from the cache if present, but do not flush
* it to disk */
@@ -475,23 +469,15 @@ done:
herr_t
H5D__chunk_set_info(const H5D_t *dset)
{
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
- int sndims; /* Rank of dataspace */
- unsigned ndims; /* Rank of dataspace */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity checks */
HDassert(dset);
- /* Get the dim info for dataset */
- if((sndims = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
- H5_ASSIGN_OVERFLOW(ndims, sndims, int, unsigned);
-
/* Set the base layout information */
- if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, ndims, curr_dims) < 0)
+ if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, dset->shared->ndims, dset->shared->curr_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info")
/* Call the index's "resize" callback */
@@ -519,10 +505,7 @@ static herr_t
H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
{
const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */
- hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- hsize_t dims[H5O_LAYOUT_NDIMS]; /* Dimension size of data in elements */
uint64_t chunk_size; /* Size of chunk in bytes */
- int ndims; /* Rank of dataspace */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -537,9 +520,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "no chunk information set?")
/* Set up layout information */
- if((ndims = H5S_GET_EXTENT_NDIMS(dset->shared->space)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get rank")
- if(dset->shared->layout.u.chunk.ndims != (unsigned)ndims)
+ if(dset->shared->layout.u.chunk.ndims != dset->shared->ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the dataspace")
/* Increment # of chunk dimensions, to account for datatype size as last element */
@@ -553,10 +534,6 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
/* Set the last dimension of the chunk size to the size of the datatype */
dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(type);
- /* Get local copy of dataset dimensions (for sanity checking) */
- if(H5S_get_simple_extent_dims(dset->shared->space, dims, max_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to query maximum dimensions")
-
/* Sanity check dimensions */
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
/* Don't allow zero-sized chunk dimensions */
@@ -568,7 +545,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
* the maximum dimension size. If any dimension size is zero, there
* will be no such restriction.
*/
- if(dims[u] && max_dims[u] != H5S_UNLIMITED && max_dims[u] < dset->shared->layout.u.chunk.dim[u])
+ if(dset->shared->curr_dims[u] && dset->shared->max_dims[u] != H5S_UNLIMITED && dset->shared->max_dims[u] < dset->shared->layout.u.chunk.dim[u])
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")
} /* end for */
@@ -745,10 +722,8 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
/* Set the number of dimensions for the memory dataspace */
H5_ASSIGN_OVERFLOW(fm->m_ndims, sm_ndims, int, unsigned);
- /* Get dim number and dimensionality for each dataspace */
+ /* Get rank for file dataspace */
fm->f_ndims = f_ndims = dataset->shared->layout.u.chunk.ndims - 1;
- if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
/* Normalize hyperslab selections by adjusting them by the offset */
/* (It might be worthwhile to normalize both the file and memory dataspaces
@@ -2548,7 +2523,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
point_of_no_return = TRUE;
ent->chunk = NULL;
} /* end else */
- H5_ASSIGN_OVERFLOW(nbytes, udata.chunk_block.length, uint32_t, size_t);
+ H5_ASSIGN_OVERFLOW(nbytes, udata.chunk_block.length, hsize_t, size_t);
if(H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), dxpl_cache->err_detect,
dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
@@ -2557,7 +2532,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
if(nbytes > ((size_t)0xffffffff))
HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
#endif /* H5_SIZEOF_SIZE_T > 4 */
- H5_ASSIGN_OVERFLOW(udata.chunk_block.length, nbytes, size_t, uint32_t);
+ H5_ASSIGN_OVERFLOW(udata.chunk_block.length, nbytes, size_t, hsize_t);
/* Indicate that the chunk must be allocated */
must_alloc = TRUE;
@@ -2590,7 +2565,8 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Write the data to the file */
HDassert(H5F_addr_defined(udata.chunk_block.offset));
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, udata.chunk_block.length, dxpl_id, buf) < 0)
+ H5_CHECK_OVERFLOW(udata.chunk_block.length, hsize_t, size_t);
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, (size_t)udata.chunk_block.length, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Insert the chunk record into the index */
@@ -3299,8 +3275,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
H5D_chunk_coll_info_t chunk_info; /* chunk address information for doing I/O */
#endif /* H5_HAVE_PARALLEL */
hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Dataset's dataspace dimensions */
const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
unsigned op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
@@ -3315,9 +3291,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
/* Retrieve the dataset dimensions */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
- space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
/* The last dimension in chunk_offset is always 0 */
chunk_offset[space_ndims] = (hsize_t)0;
@@ -3431,7 +3406,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* certain dimension, max_unalloc is updated in order to avoid allocating
* those chunks again.
*/
- for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
+ for(op_dim = 0; op_dim < space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
int i; /* Local index variable */
@@ -3440,7 +3415,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
continue;
else {
/* Reset the chunk offset indices */
- HDmemset(chunk_offset, 0, ((unsigned)space_ndims * sizeof(chunk_offset[0])));
+ HDmemset(chunk_offset, 0, (space_ndims * sizeof(chunk_offset[0])));
chunk_offset[op_dim] = min_unalloc[op_dim];
carry = FALSE;
@@ -3458,7 +3433,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
hsize_t chunk_idx;
/* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, chunk_offset,
+ if(H5VM_chunk_index(space_ndims, chunk_offset,
layout->u.chunk.dim, layout->u.chunk.down_chunks,
&chunk_idx) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
@@ -3475,7 +3450,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
unsigned u; /* Local index variable */
hbool_t outside_orig = FALSE;
- for(u = 0; u < (unsigned)space_ndims; u++) {
+ for(u = 0; u < space_ndims; u++) {
HDassert(chunk_offset[u] < space_dim[u]);
if(chunk_offset[u] >= old_dim[u])
outside_orig = TRUE;
@@ -4020,14 +3995,13 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Current dataspace dimensions */
unsigned op_dim; /* Current operating dimension */
hbool_t shrunk_dim[H5O_LAYOUT_NDIMS]; /* Dimensions which have shrunk */
H5D_chunk_it_ud1_t udata; /* Chunk index iterator user data */
hbool_t udata_init = FALSE; /* Whether the chunk index iterator user data has been initialized */
H5D_chunk_common_ud_t idx_udata; /* User data for index removal routine */
- H5D_chunk_ud_t chk_udata; /* User data for getting chunk info */
H5S_t *chunk_space = NULL; /* Dataspace for a chunk */
hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
@@ -4049,10 +4023,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Go get the rank & dimensions (including the element size) */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim,
- NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
- space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
/* The last dimension in chunk_offset is always 0 */
chunk_offset[space_ndims] = (hsize_t)0;
@@ -4071,14 +4043,14 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */
/* (also compute the dimensions which have been shrunk) */
elmts_per_chunk = 1;
- for(u = 0; u < (unsigned)space_ndims; u++) {
+ for(u = 0; u < space_ndims; u++) {
elmts_per_chunk *= layout->u.chunk.dim[u];
chunk_dim[u] = layout->u.chunk.dim[u];
shrunk_dim[u] = space_dim[u] < old_dim[u];
} /* end for */
/* Create a dataspace for a chunk & set the extent */
- if(NULL == (chunk_space = H5S_create_simple((unsigned)space_ndims, chunk_dim, NULL)))
+ if(NULL == (chunk_space = H5S_create_simple(space_ndims, chunk_dim, NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
/* Reset hyperslab start array */
@@ -4162,7 +4134,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
if(has_fill)
for(ent = rdcc->head; ent; ent = ent->next)
/* Check for chunk offset outside of new dimensions */
- for(u = 0; u < (unsigned)space_ndims; u++)
+ for(u = 0; u < space_ndims; u++)
if((hsize_t)ent->offset[u] >= space_dim[u]) {
/* Mark the entry as "deleted" */
ent->deleted = TRUE;
@@ -4178,12 +4150,12 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDassert((hsize_t) max_mod_chunk_off[op_dim] >= min_mod_chunk_off[op_dim]);
/* Reset the chunk offset indices */
- HDmemset(chunk_offset, 0, ((unsigned)space_ndims * sizeof(chunk_offset[0])));
+ HDmemset(chunk_offset, 0, (space_ndims * sizeof(chunk_offset[0])));
chunk_offset[op_dim] = min_mod_chunk_off[op_dim];
/* Initialize "dims_outside_fill" array */
ndims_outside_fill = 0;
- for(u = 0; u < (unsigned)space_ndims; u++)
+ for(u = 0; u < space_ndims; u++)
if((hssize_t)chunk_offset[u] > max_fill_chunk_off[u]) {
dims_outside_fill[u] = TRUE;
ndims_outside_fill++;
@@ -4198,7 +4170,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
int i; /* Local index variable */
/* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, chunk_offset,
+ if(H5VM_chunk_index(space_ndims, chunk_offset,
layout->u.chunk.dim, layout->u.chunk.down_chunks,
&(chk_io_info.store->chunk.index)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
@@ -4212,12 +4184,14 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
+ H5D_chunk_ud_t chk_udata; /* User data for getting chunk info */
+
#ifndef NDEBUG
/* Make sure this chunk is really outside the new dimensions */
{
hbool_t outside_dim = FALSE;
- for(u = 0; u < (unsigned)space_ndims; u++)
+ for(u = 0; u < space_ndims; u++)
if(chunk_offset[u] >= space_dim[u]) {
outside_dim = TRUE;
break;
@@ -4503,7 +4477,7 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
/* Get the rank */
- rank = dset->shared->layout.u.chunk.ndims-1;
+ rank = dset->shared->layout.u.chunk.ndims - 1;
HDassert(rank > 0);
/* 1-D dataset's chunks can't have their index change */
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 789beab..1d8b97c 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -174,11 +174,8 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
hssize_t stmp_size; /* Temporary holder for raw data size */
hsize_t tmp_size; /* Temporary holder for raw data size */
hsize_t max_comp_data_size; /* Max. allowed size of compact data */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -187,11 +184,9 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
HDassert(dset);
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
- for(i = 0; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible compact dataset")
+ for(u = 0; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible compact dataset not allowed")
/*
* Compact dataset is stored in dataset object header message of
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index dc09768..e913a3f 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -396,10 +396,7 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset)
size_t dt_size; /* Size of datatype */
hsize_t tmp_size; /* Temporary holder for raw data size */
size_t tmp_sieve_buf_size; /* Temporary holder for sieve buffer size */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -415,11 +412,9 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset)
*/
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
- for(i = 0; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible contiguous non-external dataset")
+ for(u = 0; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible contiguous non-external dataset not allowed")
/* Retrieve the number of elements in the dataspace */
if((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index 0b2fee6..b3dae7b 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -341,9 +341,7 @@ static herr_t
H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
{
htri_t changed; /* Flag to indicate that the dataspace was successfully extended */
- H5S_t *space; /* Dataset's dataspace */
- int rank; /* Dataspace # of dimensions */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
+ hsize_t old_dims[H5S_MAX_RANK]; /* Current (i.e. old, if changed) dimension sizes */
H5O_fill_t *fill; /* Dataset's fill value */
herr_t ret_value = SUCCEED; /* Return value */
@@ -364,20 +362,30 @@ H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
*/
/* Retrieve the current dimensions */
- space = dataset->shared->space;
- if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+ HDcompile_assert(sizeof(old_dims) == sizeof(dataset->shared->curr_dims));
+ HDmemcpy(old_dims, dataset->shared->curr_dims, H5S_MAX_RANK * sizeof(old_dims[0]));
/* Increase the size of the data space */
- if((changed = H5S_extend(space, size)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of data space")
+ if((changed = H5S_extend(dataset->shared->space, size)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of dataspace")
/* Updated the dataset's info if the dataspace was successfully extended */
if(changed) {
+ /* Get the extended dimension sizes */
+ /* (Need to retrieve this here, since the 'size' dimensions could
+ * extend one dimension but be smaller in a different dimension,
+ * and the dataspace's extent is the larger of the current and
+ * 'size' dimension values. - QAK)
+ */
+ if(H5S_get_simple_extent_dims(dataset->shared->space, dataset->shared->curr_dims, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dataset->shared->layout.type) {
+ /* Update general information for chunks */
if(H5D__chunk_set_info(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
+ /* Update the chunk cache indices */
if(H5D__chunk_update_cache(dataset, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
} /* end if */
@@ -385,8 +393,7 @@ H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
/* Allocate space for the new parts of the dataset, if appropriate */
fill = &dataset->shared->dcpl_cache.fill;
if(fill->alloc_time == H5D_ALLOC_TIME_EARLY)
- if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE,
- curr_dims) < 0)
+ if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, old_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
/* Mark the dataspace as dirty, for later writing to the file */
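The H5D__extend() hunk above re-reads the dataspace dimensions into the cache only after H5S_extend() succeeds, because the resulting extent is, per dimension, the larger of the current and requested sizes (a dimension never shrinks here, matching the H5S_extend() hunk further down). A minimal standalone sketch of that rule in plain C; the names are illustrative only, not the HDF5 API:

#include <assert.h>

typedef unsigned long long dim_t;   /* stand-in for hsize_t */

/* Extend-style semantics: each dimension grows to the requested size but
 * never shrinks; returns how many dimensions actually changed. */
static int
extend_dims(dim_t *cur, const dim_t *req, unsigned rank)
{
    int      changed = 0;
    unsigned u;

    for(u = 0; u < rank; u++)
        if(req[u] > cur[u]) {
            cur[u] = req[u];
            changed++;
        }

    return changed;
}

int
main(void)
{
    dim_t cur[2] = {10, 10};
    dim_t req[2] = {20, 5};     /* larger in dim 0, smaller in dim 1 */

    assert(extend_dims(cur, req, 2) == 1);
    assert(cur[0] == 20 && cur[1] == 10);   /* dim 1 keeps its old size */
    return 0;
}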
diff --git a/src/H5Defl.c b/src/H5Defl.c
index 38c8ccd..355492f 100644
--- a/src/H5Defl.c
+++ b/src/H5Defl.c
@@ -126,14 +126,11 @@ static herr_t
H5D__efl_construct(H5F_t *f, H5D_t *dset)
{
size_t dt_size; /* Size of datatype */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
hssize_t stmp_size; /* Temporary holder for raw data size */
hsize_t tmp_size; /* Temporary holder for raw data size */
hsize_t max_points; /* Maximum elements */
hsize_t max_storage; /* Maximum storage size */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -149,11 +146,9 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset)
*/
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
- for(i = 1; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "only the first dimension can be extendible")
+ for(u = 1; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "only the first dimension can be extendible")
/* Retrieve the size of the dataset's datatype */
if(0 == (dt_size = H5T_get_size(dset->shared->type)))
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 8e1fcec..c626475 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -62,6 +62,7 @@ static H5D_shared_t *H5D__new(hid_t dcpl_id, hbool_t creating,
hbool_t vl_type);
static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id,
const H5T_t *type);
+static herr_t H5D__cache_dataspace_info(const H5D_t *dset);
static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space);
static herr_t H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset,
hid_t dapl_id);
@@ -665,6 +666,41 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__cache_dataspace_info
+ *
+ * Purpose: Cache dataspace info for a dataset
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, November 19, 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__cache_dataspace_info(const H5D_t *dset)
+{
+ int sndims; /* Signed number of dimensions of dataspace rank */
+ unsigned u; /* Local index value */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checking */
+ HDassert(dset);
+
+ /* Cache info for dataset's dataspace */
+ if((sndims = H5S_get_simple_extent_dims(dset->shared->space, dset->shared->curr_dims, dset->shared->max_dims)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions")
+ dset->shared->ndims = (unsigned)sndims;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__cache_dataspace_info() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__init_space
*
* Purpose: Copy a dataspace for a dataset's use, performing all the
@@ -698,6 +734,10 @@ H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dataspace")
+ /* Cache the dataset's dataspace info */
+ if(H5D__cache_dataspace_info(dset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
+
/* Set the latest format, if requested */
if(use_latest_format)
if(H5S_set_latest_version(dset->shared->space) < 0)
@@ -1251,6 +1291,10 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id)
if(NULL == (dataset->shared->space = H5S_read(&(dataset->oloc), dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load dataspace info from dataset header")
+ /* Cache the dataset's dataspace info */
+ if(H5D__cache_dataspace_info(dataset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
+
/* Get a datatype ID for the dataset's datatype */
if((dataset->shared->type_id = H5I_register(H5I_DATATYPE, dataset->shared->type, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")
@@ -2161,9 +2205,7 @@ done:
herr_t
H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
{
- H5S_t *space; /* Dataset's dataspace */
- int rank; /* Dataspace # of dimensions */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
+ hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */
htri_t changed; /* Whether the dataspace changed size */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2187,29 +2229,30 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
if(H5D__check_filters(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't apply filters")
- /* Get the data space */
- space = dset->shared->space;
-
- /* Check if we are shrinking or expanding any of the dimensions */
- if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+ /* Keep the current dataspace dimensions for later */
+ HDcompile_assert(sizeof(curr_dims) == sizeof(dset->shared->curr_dims));
+ HDmemcpy(curr_dims, dset->shared->curr_dims, H5S_MAX_RANK * sizeof(curr_dims[0]));
- /* Modify the size of the data space */
- if((changed = H5S_set_extent(space, size)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of data space")
+ /* Modify the size of the dataspace */
+ if((changed = H5S_set_extent(dset->shared->space, size)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of dataspace")
/* Don't bother updating things, unless they've changed */
if(changed) {
- hbool_t shrink = FALSE; /* Flag to indicate a dimension has shrank */
- hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
- unsigned u; /* Local index variable */
+ hbool_t shrink = FALSE; /* Flag to indicate a dimension has shrank */
+ hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
+ unsigned u; /* Local index variable */
/* Determine if we are shrinking and/or expanding any dimensions */
- for(u = 0; u < (unsigned)rank; u++) {
+ for(u = 0; u < dset->shared->ndims; u++) {
+ /* Check for various status changes */
if(size[u] < curr_dims[u])
shrink = TRUE;
if(size[u] > curr_dims[u])
expand = TRUE;
+
+ /* Update the cached copy of the dataset's dimensions */
+ dset->shared->curr_dims[u] = size[u];
} /* end for */
/*-------------------------------------------------------------------------
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 1c77d93..44080dc 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -302,8 +302,6 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
uint32_t direct_filters;
hsize_t *direct_offset;
uint32_t direct_datasize;
- int ndims = 0;
- hsize_t dims[H5O_LAYOUT_NDIMS];
hsize_t internal_offset[H5O_LAYOUT_NDIMS];
unsigned u; /* Local index variable */
@@ -324,12 +322,9 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
/* The library's chunking code requires the offset terminates with a zero. So transfer the
* offset array to an internal offset array */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
-
- for(u = 0; u < (unsigned)ndims; u++) {
+ for(u = 0; u < dset->shared->ndims; u++) {
/* Make sure the offset doesn't exceed the dataset's dimensions */
- if(direct_offset[u] > dims[u])
+ if(direct_offset[u] > dset->shared->curr_dims[u])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
/* Make sure the offset fall right on a chunk's boundary */
@@ -340,7 +335,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
} /* end for */
/* Terminate the offset with a zero */
- internal_offset[ndims] = 0;
+ internal_offset[dset->shared->ndims] = 0;
/* write raw data */
if(H5D__chunk_direct_write(dset, dxpl_id, direct_filters, internal_offset, direct_datasize, buf) < 0)
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index bd1531d..db487cd 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -860,8 +860,7 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
mspace = chunk_info->mspace;
/* Look up address of chunk */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords,
- chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk address")
ctg_store.contig.dset_addr = udata.chunk_block.offset;
} /* end else */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index a3a3985..769fec1 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -83,7 +83,7 @@ typedef struct H5D_type_info_t {
/* Computed/derived values */
size_t src_type_size; /* Size of source type */
- size_t dst_type_size; /* Size of destination type*/
+ size_t dst_type_size; /* Size of destination type */
size_t max_type_size; /* Size of largest source/destination type */
hbool_t is_conv_noop; /* Whether the type conversion is a NOOP */
hbool_t is_xform_noop; /* Whether the data transform is a NOOP */
@@ -325,9 +325,9 @@ typedef struct H5D_chunk_info_t {
uint32_t chunk_points; /* Number of elements selected in chunk */
hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk in file dataset's dataspace */
H5S_t *fspace; /* Dataspace describing chunk & selection in it */
- unsigned fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */
+ hbool_t fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */
H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */
- unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */
+ hbool_t mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */
} H5D_chunk_info_t;
/* Main structure holding the mapping between file chunks and memory */
@@ -337,7 +337,6 @@ typedef struct H5D_chunk_map_t {
const H5S_t *file_space; /* Pointer to the file dataspace */
unsigned f_ndims; /* Number of dimensions for file dataspace */
- hsize_t f_dims[H5O_LAYOUT_NDIMS]; /* File dataspace dimensions */
const H5S_t *mem_space; /* Pointer to the memory dataspace */
H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */
@@ -419,6 +418,11 @@ typedef struct H5D_shared_t {
H5O_layout_t layout; /* Data layout */
hbool_t checked_filters;/* TRUE if dataset passes can_apply check */
+ /* Cached dataspace info */
+ unsigned ndims; /* The dataset's dataspace rank */
+ hsize_t curr_dims[H5S_MAX_RANK]; /* The curr. size of dataset dimensions */
+ hsize_t max_dims[H5S_MAX_RANK]; /* The max. size of dataset dimensions */
+
/* Buffered/cached information for types of raw data storage*/
struct {
H5D_rdcdc_t contig; /* Information about contiguous data */
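The new H5D_shared_t fields above (ndims, curr_dims, max_dims), filled in by H5D__cache_dataspace_info() in the H5Dint.c hunk, let the chunk and I/O paths use the dataset's rank and dimensions directly instead of calling H5S_get_simple_extent_dims() on every operation. A minimal sketch of the same caching pattern, with illustrative stand-in names rather than the real HDF5 types:

#include <assert.h>
#include <string.h>

#define MAX_RANK 32                     /* stand-in for H5S_MAX_RANK */
typedef unsigned long long dim_t;       /* stand-in for hsize_t */

typedef struct {
    unsigned ndims;                     /* cached dataspace rank */
    dim_t    curr_dims[MAX_RANK];       /* cached current dimension sizes */
    dim_t    max_dims[MAX_RANK];        /* cached maximum dimension sizes */
} dset_shared_t;

/* Fill the cache once, at dataset create/open time; fast paths then read
 * shared->ndims and shared->curr_dims directly. */
static void
cache_dataspace_info(dset_shared_t *shared, unsigned rank,
                     const dim_t *cur, const dim_t *max)
{
    assert(rank <= MAX_RANK);
    shared->ndims = rank;
    memcpy(shared->curr_dims, cur, rank * sizeof(dim_t));
    memcpy(shared->max_dims, max, rank * sizeof(dim_t));
}

int
main(void)
{
    dset_shared_t shared;
    dim_t cur[2] = {100, 200};
    dim_t max[2] = {100, 500};

    cache_dataspace_info(&shared, 2, cur, max);
    assert(shared.ndims == 2 && shared.curr_dims[1] == 200);
    return 0;
}

The cached copy has to be kept in sync whenever the extent changes, which is what the H5D__set_extent() and H5D__extend() hunks do after a successful H5S_set_extent()/H5S_extend().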
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index e3a2d33..3707367 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -424,8 +424,8 @@ typedef struct H5O_layout_chunk_t {
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
uint32_t size; /* Size of chunk in bytes */
hsize_t nchunks; /* Number of chunks in dataset */
- hsize_t chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in dataset dimensions */
- hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in each dataset dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
} H5O_layout_chunk_t;
typedef struct H5O_layout_t {
diff --git a/src/H5S.c b/src/H5S.c
index 4a1783e..dd2bb0a 100644
--- a/src/H5S.c
+++ b/src/H5S.c
@@ -2144,14 +2144,13 @@ H5S_extend(H5S_t *space, const hsize_t *size)
HDassert(size);
/* Check through all the dimensions to see if modifying the dataspace is allowed */
- for(u = 0; u < space->extent.rank; u++) {
- if(space->extent.size[u]<size[u]) {
- if(space->extent.max && H5S_UNLIMITED!=space->extent.max[u] &&
- space->extent.max[u]<size[u])
+ for(u = 0; u < space->extent.rank; u++)
+ if(space->extent.size[u] < size[u]) {
+ if(space->extent.max && H5S_UNLIMITED != space->extent.max[u] &&
+ space->extent.max[u] < size[u])
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dimension cannot be increased")
ret_value++;
} /* end if */
- } /* end for */
/* Update */
if(ret_value) {
diff --git a/src/H5T.c b/src/H5T.c
index 9320d28..aaf8bc1 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -1,3 +1,4 @@
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
@@ -67,10 +68,6 @@
*
*/
-/* Define the code template for types which need no extra initialization for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_NONE_CORE { \
-}
-
/* Define the code template for bitfields for the "GUTS" in the H5T_INIT_TYPE macro */
#define H5T_INIT_TYPE_BITFIELD_CORE { \
dt->shared->type = H5T_BITFIELD; \
@@ -1986,7 +1983,7 @@ H5T_detect_class(const H5T_t *dt, H5T_class_t cls, hbool_t from_api)
case H5T_VLEN:
case H5T_ENUM:
HGOTO_DONE(H5T_detect_class(dt->shared->parent, cls, from_api));
-
+ break;
case H5T_NO_CLASS:
case H5T_INTEGER:
case H5T_FLOAT:
@@ -3240,12 +3237,12 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
HDassert(tmp != NULL);
/* Apply the accumulated size change to the offset of the field */
- new_dt->shared->u.compnd.memb[i].offset += accum_change;
+ new_dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
if(old_dt->shared->u.compnd.sorted != H5T_SORT_VALUE) {
for(old_match = -1, j = 0; j < old_dt->shared->u.compnd.nmembs; j++) {
if(!HDstrcmp(new_dt->shared->u.compnd.memb[i].name, old_dt->shared->u.compnd.memb[j].name)) {
- old_match = j;
+ old_match = (int) j;
break;
} /* end if */
} /* end for */
@@ -3255,19 +3252,20 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "fields in datatype corrupted");
} /* end if */
else
- old_match = i;
+ old_match = (int) i;
/* If the field changed size, add that change to the accumulated size change */
if(new_dt->shared->u.compnd.memb[i].type->shared->size != old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
/* Adjust the size of the member */
new_dt->shared->u.compnd.memb[i].size = (old_dt->shared->u.compnd.memb[old_match].size*tmp->shared->size)/old_dt->shared->u.compnd.memb[old_match].type->shared->size;
- accum_change += (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
+ accum_change += (int) (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
+ HDassert(accum_change >= 0);
} /* end if */
} /* end for */
/* Apply the accumulated size change to the size of the compound struct */
- new_dt->shared->size += accum_change;
+ new_dt->shared->size += (size_t) accum_change;
}
break;
@@ -3313,6 +3311,13 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
new_dt->shared->size=new_dt->shared->u.array.nelem*new_dt->shared->parent->shared->size;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -3530,6 +3535,16 @@ H5T__free(H5T_t *dt)
H5MM_xfree(dt->shared->u.opaque.tag);
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_REFERENCE:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -3796,11 +3811,14 @@ H5T_set_size(H5T_t *dt, size_t size)
case H5T_ARRAY:
case H5T_REFERENCE:
HDassert("can't happen" && 0);
+ break;
case H5T_NO_CLASS:
case H5T_NCLASSES:
HDassert("invalid type" && 0);
+ break;
default:
HDassert("not implemented yet" && 0);
+ break;
}
/* Commit (if we didn't convert this type to a VL string) */
@@ -3875,7 +3893,6 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
unsigned *idx1 = NULL, *idx2 = NULL;
size_t base_size;
hbool_t swapped;
- int i, j;
unsigned u;
int tmp;
int ret_value = 0;
@@ -3930,24 +3947,32 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
for(u = 0; u < dt1->shared->u.compnd.nmembs; u++)
idx1[u] = idx2[u] = u;
if(dt1->shared->u.enumer.nmembs > 1) {
- for(i = dt1->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i)
+ int i;
+
+ for(i = (int) dt1->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
for(j = 0, swapped=FALSE; j < i; j++)
if(HDstrcmp(dt1->shared->u.compnd.memb[idx1[j]].name,
dt1->shared->u.compnd.memb[idx1[j + 1]].name) > 0) {
- tmp = idx1[j];
+ unsigned tmp_idx = idx1[j];
idx1[j] = idx1[j + 1];
- idx1[j + 1] = tmp;
+ idx1[j + 1] = tmp_idx;
swapped = TRUE;
}
- for(i = dt2->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i)
+ }
+ for(i = (int) dt2->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
for(j = 0, swapped = FALSE; j<i; j++)
if(HDstrcmp(dt2->shared->u.compnd.memb[idx2[j]].name,
dt2->shared->u.compnd.memb[idx2[j + 1]].name) > 0) {
- tmp = idx2[j];
+ unsigned tmp_idx = idx2[j];
idx2[j] = idx2[j + 1];
- idx2[j + 1] = tmp;
+ idx2[j + 1] = tmp_idx;
swapped = TRUE;
}
+ }
} /* end if */
#ifdef H5T_DEBUG
@@ -4007,28 +4032,39 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed");
for (u=0; u<dt1->shared->u.enumer.nmembs; u++)
idx1[u] = u;
- if(dt1->shared->u.enumer.nmembs > 1)
- for (i=dt1->shared->u.enumer.nmembs-1, swapped=TRUE; swapped && i>=0; --i)
- for (j=0, swapped=FALSE; j<i; j++)
+ if(dt1->shared->u.enumer.nmembs > 1) {
+ int i;
+ for (i = (int) dt1->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
+ for (j = 0, swapped = FALSE; j < i; j++)
if (HDstrcmp(dt1->shared->u.enumer.name[idx1[j]],
dt1->shared->u.enumer.name[idx1[j+1]]) > 0) {
- tmp = idx1[j];
+ unsigned tmp_idx = idx1[j];
idx1[j] = idx1[j+1];
- idx1[j+1] = tmp;
+ idx1[j+1] = tmp_idx;
swapped = TRUE;
}
+ }
+ }
for (u=0; u<dt2->shared->u.enumer.nmembs; u++)
idx2[u] = u;
- if(dt2->shared->u.enumer.nmembs > 1)
- for (i=dt2->shared->u.enumer.nmembs-1, swapped=TRUE; swapped && i>=0; --i)
- for (j=0, swapped=FALSE; j<i; j++)
+ if(dt2->shared->u.enumer.nmembs > 1) {
+ int i;
+
+ for (i = (int) dt2->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
+ for (j = 0, swapped = FALSE; j < i; j++)
if (HDstrcmp(dt2->shared->u.enumer.name[idx2[j]],
dt2->shared->u.enumer.name[idx2[j+1]]) > 0) {
- tmp = idx2[j];
+ unsigned tmp_idx = idx2[j];
idx2[j] = idx2[j+1];
- idx2[j+1] = tmp;
+ idx2[j+1] = tmp_idx;
swapped = TRUE;
}
+ }
+ }
#ifdef H5T_DEBUG
/* I don't quite trust the code above yet :-) --RPM */
@@ -4148,6 +4184,14 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
HGOTO_DONE(1);
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_REFERENCE:
+ case H5T_NCLASSES:
default:
/*
* Atomic datatypes...
@@ -4255,13 +4299,23 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
case H5R_BADTYPE:
case H5R_MAXTYPE:
HDassert("invalid type" && 0);
+ break;
default:
HDassert("not implemented yet" && 0);
+ break;
}
break;
+ case H5T_NO_CLASS:
+ case H5T_OPAQUE:
+ case H5T_COMPOUND:
+ case H5T_ENUM:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
HDassert("not implemented yet" && 0);
+ break;
}
break;
} /* end switch */
@@ -4535,7 +4589,7 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
} /* end if */
if(cmp > 0)
md++;
- HDmemmove(H5T_g.path + md + 1, H5T_g.path + md, (H5T_g.npaths - md) * sizeof(H5T_path_t*));
+ HDmemmove(H5T_g.path + md + 1, H5T_g.path + md, (size_t) (H5T_g.npaths - md) * sizeof(H5T_path_t*));
H5T_g.npaths++;
H5T_g.path[md] = path;
table = path;
@@ -5000,6 +5054,17 @@ H5T_is_sensible(const H5T_t *dt)
ret_value=FALSE;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_REFERENCE:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
/* Assume all other datatype are sensible to store on disk */
ret_value=TRUE;
@@ -5082,7 +5147,7 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
H5T_t *memb_type; /* Member's datatype pointer */
/* Apply the accumulated size change to the offset of the field */
- dt->shared->u.compnd.memb[i].offset += accum_change;
+ dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
/* Set the member type pointer (for convenience) */
memb_type=dt->shared->u.compnd.memb[i].type;
@@ -5105,13 +5170,14 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
dt->shared->u.compnd.memb[i].size = (dt->shared->u.compnd.memb[i].size*memb_type->shared->size)/old_size;
/* Add that change to the accumulated size change */
- accum_change += (memb_type->shared->size - (int)old_size);
+ accum_change += (int) (memb_type->shared->size - old_size);
+ HDassert(accum_change >= 0);
} /* end if */
} /* end if */
} /* end for */
/* Apply the accumulated size change to the datatype */
- dt->shared->size = (size_t)(dt->shared->size + accum_change);
+ dt->shared->size = dt->shared->size + (size_t) accum_change;
break;
case H5T_VLEN: /* Recurse on the VL information if it's VL, compound or array, then free VL sequence */
@@ -5145,6 +5211,15 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
} /* end if */
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_ENUM:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -5235,6 +5310,15 @@ H5T_upgrade_version_cb(H5T_t *dt, void *op_value)
dt->shared->version = dt->shared->parent->shared->version;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_REFERENCE:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index 20821b2..dbe39ca 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -410,6 +410,29 @@ H5VM_log2_of2(uint32_t n)
/*-------------------------------------------------------------------------
+ * Function: H5VM_power2up
+ *
+ * Purpose: Round up a number to the next power of 2
+ *
+ * Return: Return the number which is a power of 2
+ *
+ * Programmer: Vailin Choi; Nov 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5_inline hsize_t UNUSED
+H5VM_power2up(hsize_t n)
+{
+ hsize_t ret_value = 1; /* Return value */
+
+ while(ret_value < n)
+ ret_value <<= 1;
+
+ return(ret_value);
+} /* H5VM_power2up */
+
+
+/*-------------------------------------------------------------------------
* Function: H5VM_limit_enc_size
*
* Purpose: Determine the # of bytes needed to encode values within a
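The H5VM_power2up() helper added in the hunk above rounds a value up to the next power of two by doubling from 1. A standalone restatement with a few checks; this is an illustration, not the HDF5 header itself:

#include <assert.h>
#include <stdint.h>

/* Round n up to the next power of 2 (returns 1 for n == 0 or n == 1,
 * and returns n unchanged when n is already a power of 2). */
static uint64_t
power2up(uint64_t n)
{
    uint64_t ret = 1;

    while(ret < n)
        ret <<= 1;

    return ret;
}

int
main(void)
{
    assert(power2up(1) == 1);       /* already a power of 2: unchanged */
    assert(power2up(5) == 8);       /* rounded up */
    assert(power2up(1024) == 1024);
    return 0;
}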