author     Quincey Koziol <koziol@hdfgroup.org>    2010-07-20 16:51:49 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2010-07-20 16:51:49 (GMT)
commit     de3a870bcd4953a3654b3e9dc92edab86fe858cd (patch)
tree       8bf2fc6e0814f67b1388c23136aaff9e4a7cba96 /src
parent     98754fa9d12090f5e048fdb05cc5e9ce9111676f (diff)
[svn-r19110] Description:
Bring r19049:19109 from trunk to revise_chunks branch.

Tested on:
    FreeBSD/32 6.3 (duty) in debug mode
    FreeBSD/64 6.3 (liberty) w/C++ & FORTRAN, in debug mode
    Linux/32 2.6 (jam) w/PGI compilers, w/default API=1.8.x, w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-amd64 2.6 (amani) w/Intel compilers, w/default API=1.6.x, w/C++ & FORTRAN, in production mode
    Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN, w/szip filter, w/threadsafe, in production mode
    Linux/PPC 2.6 (heiwa) w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN, in production mode
    Linux/64-amd64 2.6 (abe) w/parallel, w/FORTRAN, in debug mode
    Mac OS X/32 10.6.4 (amazon) in debug mode
    Mac OS X/32 10.6.4 (amazon) w/C++ & FORTRAN, w/threadsafe, in production mode
    Mac OS X/32 10.6.4 (amazon) w/parallel, in debug mode
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c                2685
-rw-r--r--  src/H5ACpkg.h               66
-rw-r--r--  src/H5ACprivate.h           18
-rw-r--r--  src/H5ACpublic.h            76
-rw-r--r--  src/H5C.c                  863
-rw-r--r--  src/H5Cpkg.h                41
-rw-r--r--  src/H5Cprivate.h            26
-rw-r--r--  src/H5Dchunk.c               3
-rw-r--r--  src/H5Dio.c                 98
-rw-r--r--  src/H5Dmpio.c              124
-rw-r--r--  src/H5FDmpio.c               2
-rw-r--r--  src/H5FDmpiposix.c           2
-rw-r--r--  src/H5HLcache.c             10
-rw-r--r--  src/H5Sall.c                99
-rw-r--r--  src/H5Shyper.c             828
-rw-r--r--  src/H5Smpio.c              627
-rw-r--r--  src/H5Snone.c              254
-rw-r--r--  src/H5Spkg.h                 6
-rw-r--r--  src/H5Spoint.c             181
-rw-r--r--  src/H5Sprivate.h            16
-rw-r--r--  src/H5Sselect.c            740
-rw-r--r--  src/H5public.h               4
-rw-r--r--  src/Makefile.in              4
-rw-r--r--  src/libhdf5.settings.in      4
24 files changed, 4800 insertions, 1982 deletions
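
The H5AC.c portion of the patch below is dominated by the new "distributed" metadata write strategy for the parallel metadata cache: at a sync point, process 0 builds a candidate list of dirty entries, broadcasts it, and all ranks then flush their share of the candidates rather than process 0 writing everything. As an illustration only (not part of this patch), the sketch below shows how an application might opt into the new strategy through the metadata cache configuration; it assumes the public H5Pget_mdc_config()/H5Pset_mdc_config() property-list routines and the H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED constant that H5ACpublic.h gains on this branch.

    #include "hdf5.h"

    /* Hypothetical usage sketch: request distributed metadata writes on a
     * file access property list before creating/opening a parallel file.
     */
    static herr_t
    use_distributed_md_writes(hid_t fapl_id)
    {
        H5AC_cache_config_t mdc_config;

        /* The version field must be set before querying the current config */
        mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
        if(H5Pget_mdc_config(fapl_id, &mdc_config) < 0)
            return -1;

        /* Request distributed writes at metadata cache sync points, rather
         * than having only process 0 write dirty metadata.
         */
        mdc_config.metadata_write_strategy = H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED;

        return H5Pset_mdc_config(fapl_id, &mdc_config);
    }

The strategy travels in the same H5AC_cache_config_t structure (alongside dirty_bytes_threshold, which triggers the sync point) that H5AC_validate_config() and H5AC_set_cache_auto_resize_config() check in the diff below.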
diff --git a/src/H5AC.c b/src/H5AC.c
index 76e43dd..7d6a49b 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -125,14 +125,28 @@ static herr_t H5AC_check_if_write_permitted(const H5F_t *f,
hid_t dxpl_id,
hbool_t * write_permitted_ptr);
-#ifdef H5_HAVE_PARALLEL
-static herr_t H5AC_broadcast_clean_list(H5AC_t * cache_ptr);
-#endif /* JRM */
-
static herr_t H5AC_ext_config_2_int_config(H5AC_cache_config_t * ext_conf_ptr,
H5C_auto_size_ctl_t * int_conf_ptr);
#ifdef H5_HAVE_PARALLEL
+static herr_t H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr);
+
+static herr_t H5AC_broadcast_clean_list(H5AC_t * cache_ptr);
+
+static herr_t H5AC_construct_candidate_list(H5AC_t * cache_ptr,
+ H5AC_aux_t * aux_ptr,
+ int sync_point_op);
+
+static herr_t H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr,
+ size_t * MPI_Offset_buf_size_ptr,
+ MPI_Offset ** MPI_Offset_buf_ptr_ptr);
+
+static herr_t H5AC_flush_entries(H5F_t *f);
+
static herr_t H5AC_log_deleted_entry(H5AC_t * cache_ptr,
H5AC_info_t * entry_ptr,
haddr_t addr,
@@ -147,33 +161,55 @@ static herr_t H5AC_log_flushed_entry(H5C_t * cache_ptr,
unsigned flags,
int type_id);
-#if 0 /* this is useful debugging code -- JRM */
-static herr_t H5AC_log_flushed_entry_dummy(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id);
-#endif /* JRM */
+static herr_t H5AC_log_moved_entry(const H5F_t * f,
+ haddr_t old_addr,
+ haddr_t new_addr);
static herr_t H5AC_log_inserted_entry(H5F_t * f,
H5AC_t * cache_ptr,
H5AC_info_t * entry_ptr);
+static herr_t H5AC_propagate_and_apply_candidate_list(H5F_t * f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr);
+
static herr_t H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
hid_t dxpl_id,
- H5AC_t * cache_ptr,
- hbool_t do_barrier);
+ H5AC_t * cache_ptr);
+
+static herr_t H5AC_receive_candidate_list(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr);
static herr_t H5AC_receive_and_apply_clean_list(H5F_t * f,
hid_t primary_dxpl_id,
hid_t secondary_dxpl_id,
H5AC_t * cache_ptr);
-static herr_t H5AC_log_moved_entry(const H5F_t * f,
- haddr_t old_addr,
- haddr_t new_addr);
+static herr_t H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
+ int num_candidates,
+ haddr_t * candidates_list_ptr);
+
+herr_t H5AC_rsp__dist_md_write__flush(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr);
+
+herr_t H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr);
+
+herr_t H5AC_rsp__p0_only__flush(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr);
+
+herr_t H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr);
+
+static herr_t H5AC_run_sync_point(H5F_t *f,
+ hid_t dxpl_id,
+ int sync_point_op);
-static herr_t H5AC_flush_entries(H5F_t *f);
#endif /* H5_HAVE_PARALLEL */
@@ -377,26 +413,6 @@ H5AC_term_interface(void)
FUNC_LEAVE_NOAPI(n)
} /* end H5AC_term_interface() */
-
-/*-------------------------------------------------------------------------
- * Function: H5AC_create
- *
- * Purpose: Initialize the cache just after a file is opened. The
- * SIZE_HINT is the number of cache slots desired. If you
- * pass an invalid value then H5AC_NSLOTS is used. You can
- * turn off caching by using 1 for the SIZE_HINT value.
- *
- * Return: Success: Number of slots actually used.
- *
- * Failure: Negative
- *
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jul 9 1997
- *
- *-------------------------------------------------------------------------
- */
-
static const char * H5AC_entry_type_names[H5AC_NTYPES] =
{
"B-tree nodes",
@@ -429,19 +445,34 @@ static const char * H5AC_entry_type_names[H5AC_NTYPES] =
"test entry" /* for testing only -- not used for actual files */
};
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_create
+ *
+ * Purpose: Initialize the cache just after a file is opened. The
+ * SIZE_HINT is the number of cache slots desired. If you
+ * pass an invalid value then H5AC_NSLOTS is used. You can
+ * turn off caching by using 1 for the SIZE_HINT value.
+ *
+ * Return: Success: Number of slots actually used.
+ *
+ * Failure: Negative
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Jul 9 1997
+ *
+ *-------------------------------------------------------------------------
+ */
herr_t
H5AC_create(const H5F_t *f,
H5AC_cache_config_t *config_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- herr_t result;
#ifdef H5_HAVE_PARALLEL
char prefix[H5C__PREFIX_LEN] = "";
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- int mpi_rank = -1;
- int mpi_size = -1;
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5AC_create, FAIL)
@@ -451,97 +482,78 @@ H5AC_create(const H5F_t *f,
HDcompile_assert(NELMTS(H5AC_entry_type_names) == H5AC_NTYPES);
HDcompile_assert(H5C__MAX_NUM_TYPE_IDS == H5AC_NTYPES);
- result = H5AC_validate_config(config_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration");
- }
+ if(H5AC_validate_config(config_ptr) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Bad cache configuration")
#ifdef H5_HAVE_PARALLEL
- if ( IS_H5FD_MPI(f) ) {
+ if(IS_H5FD_MPI(f)) {
+ MPI_Comm mpi_comm;
+ int mpi_rank;
+ int mpi_size;
- if ( (mpi_comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL ) {
-
- HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, \
- "can't get MPI communicator")
- }
-
- if ( (mpi_rank = H5F_mpi_get_rank(f)) < 0 ) {
+ if(MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(f)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI communicator")
+ if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi rank")
- }
-
- if ( (mpi_size = H5F_mpi_get_size(f)) < 0 ) {
+ if((mpi_size = H5F_mpi_get_size(f)) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
- }
/* There is no point in setting up the auxilary structure if size
* is less than or equal to 1, as there will never be any processes
* to broadcast the clean lists to.
*/
- if ( mpi_size > 1 ) {
-
- if ( NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate H5AC auxilary structure.")
-
- } else {
-
- aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
- aux_ptr->mpi_comm = mpi_comm;
- aux_ptr->mpi_rank = mpi_rank;
- aux_ptr->mpi_size = mpi_size;
- aux_ptr->write_permitted = FALSE;
- aux_ptr->dirty_bytes_threshold =
- H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
- aux_ptr->dirty_bytes = 0;
+ if(mpi_size > 1) {
+ if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure.")
+
+ aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
+ aux_ptr->mpi_comm = mpi_comm;
+ aux_ptr->mpi_rank = mpi_rank;
+ aux_ptr->mpi_size = mpi_size;
+ aux_ptr->write_permitted = FALSE;
+ aux_ptr->dirty_bytes_threshold = H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ aux_ptr->dirty_bytes = 0;
+ aux_ptr->metadata_write_strategy = H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->dirty_bytes_propagations = 0;
- aux_ptr->unprotect_dirty_bytes = 0;
- aux_ptr->unprotect_dirty_bytes_updates = 0;
- aux_ptr->insert_dirty_bytes = 0;
- aux_ptr->insert_dirty_bytes_updates = 0;
- aux_ptr->move_dirty_bytes = 0;
- aux_ptr->move_dirty_bytes_updates = 0;
+ aux_ptr->dirty_bytes_propagations = 0;
+ aux_ptr->unprotect_dirty_bytes = 0;
+ aux_ptr->unprotect_dirty_bytes_updates = 0;
+ aux_ptr->insert_dirty_bytes = 0;
+ aux_ptr->insert_dirty_bytes_updates = 0;
+ aux_ptr->move_dirty_bytes = 0;
+ aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
- aux_ptr->d_slist_ptr = NULL;
- aux_ptr->d_slist_len = 0;
- aux_ptr->c_slist_ptr = NULL;
- aux_ptr->c_slist_len = 0;
- aux_ptr->write_done = NULL;
-
- sprintf(prefix, "%d:", mpi_rank);
- }
-
- if ( mpi_rank == 0 ) {
-
- aux_ptr->d_slist_ptr =
- H5SL_create(H5SL_TYPE_HADDR);
-
- if ( aux_ptr->d_slist_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
- "can't create dirtied entry list.")
- }
-
- aux_ptr->c_slist_ptr =
- H5SL_create(H5SL_TYPE_HADDR);
-
- if ( aux_ptr->c_slist_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL,
- "can't create cleaned entry list.")
- }
- }
- }
-
- if ( aux_ptr != NULL ) {
+ aux_ptr->d_slist_ptr = NULL;
+ aux_ptr->d_slist_len = 0;
+ aux_ptr->c_slist_ptr = NULL;
+ aux_ptr->c_slist_len = 0;
+ aux_ptr->candidate_slist_ptr = NULL;
+ aux_ptr->candidate_slist_len = 0;
+ aux_ptr->write_done = NULL;
+ aux_ptr->sync_point_done = NULL;
+
+ sprintf(prefix, "%d:", mpi_rank);
+
+ if(mpi_rank == 0) {
+ if(NULL == (aux_ptr->d_slist_ptr = H5SL_create(H5SL_TYPE_HADDR)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create dirtied entry list.")
+
+ if(NULL == (aux_ptr->c_slist_ptr = H5SL_create(H5SL_TYPE_HADDR)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list.")
+ } /* end if */
- if ( aux_ptr->mpi_rank == 0 ) {
+ /* construct the candidate slist for all processes.
+ * when the distributed strategy is selected as all processes
+ * will use it in the case of a flush.
+ */
+ if(NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list.")
+ } /* end if */
+ if(aux_ptr != NULL) {
+ if(aux_ptr->mpi_rank == 0) {
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE,
(H5AC_NTYPES - 1),
@@ -550,25 +562,17 @@ H5AC_create(const H5F_t *f,
TRUE,
H5AC_log_flushed_entry,
(void *)aux_ptr);
-
} else {
-
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE,
(H5AC_NTYPES - 1),
(const char **)H5AC_entry_type_names,
+ H5AC_check_if_write_permitted,
+ TRUE,
NULL,
- FALSE,
-#if 0 /* this is useful debugging code -- keep it for a while */ /* JRM */
- H5AC_log_flushed_entry_dummy,
-#else /* JRM */
- NULL,
-#endif /* JRM */
(void *)aux_ptr);
}
-
} else {
-
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE,
(H5AC_NTYPES - 1),
@@ -596,61 +600,40 @@ H5AC_create(const H5F_t *f,
}
#endif /* H5_HAVE_PARALLEL */
- if ( NULL == f->shared->cache ) {
-
+ if(NULL == f->shared->cache)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- }
#ifdef H5_HAVE_PARALLEL
- else if ( aux_ptr != NULL ) {
-
- result = H5C_set_prefix(f->shared->cache, prefix);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "H5C_set_prefix() failed")
- }
- }
+ if(aux_ptr != NULL) {
+ if(H5C_set_prefix(f->shared->cache, prefix) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "H5C_set_prefix() failed")
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
- result = H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "auto resize configuration failed")
- }
+ if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "auto resize configuration failed")
done:
-
#ifdef H5_HAVE_PARALLEL
-
/* if there is a failure, try to tidy up the auxilary structure */
-
- if ( ret_value != SUCCEED ) {
-
- if ( aux_ptr != NULL ) {
-
- if ( aux_ptr->d_slist_ptr != NULL ) {
-
+ if(ret_value < 0) {
+ if(aux_ptr != NULL) {
+ if(aux_ptr->d_slist_ptr != NULL)
H5SL_close(aux_ptr->d_slist_ptr);
- }
-
- if ( aux_ptr->c_slist_ptr != NULL ) {
+ if(aux_ptr->c_slist_ptr != NULL)
H5SL_close(aux_ptr->c_slist_ptr);
- }
+
+ if(aux_ptr->candidate_slist_ptr != NULL)
+ H5SL_close(aux_ptr->candidate_slist_ptr);
aux_ptr->magic = 0;
- H5FL_FREE(H5AC_aux_t, aux_ptr);
- aux_ptr = NULL;
- }
- }
+ aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
+ } /* end if */
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_create() */
@@ -694,7 +677,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- aux_ptr = f->shared->cache->aux_ptr;
+ aux_ptr = (struct H5AC_aux_t *)(f->shared->cache->aux_ptr);
if(aux_ptr)
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
@@ -715,6 +698,8 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
H5SL_close(aux_ptr->d_slist_ptr);
if(aux_ptr->c_slist_ptr != NULL)
H5SL_close(aux_ptr->c_slist_ptr);
+ if(aux_ptr->candidate_slist_ptr != NULL)
+ H5SL_close(aux_ptr->candidate_slist_ptr);
aux_ptr->magic = 0;
H5FL_FREE(H5AC_aux_t, aux_ptr);
aux_ptr = NULL;
@@ -903,8 +888,6 @@ H5AC_get_entry_status(const H5F_t *f,
haddr_t addr,
unsigned * status_ptr)
{
- H5C_t *cache_ptr = f->shared->cache;
- herr_t result;
hbool_t in_cache;
hbool_t is_dirty;
hbool_t is_protected;
@@ -917,50 +900,31 @@ H5AC_get_entry_status(const H5F_t *f,
FUNC_ENTER_NOAPI(H5AC_get_entry_status, FAIL)
- if ( ( cache_ptr == NULL ) ||
- ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ||
- ( ! H5F_addr_defined(addr) ) ||
- ( status_ptr == NULL ) ) {
-
+ if((f == NULL) || (!H5F_addr_defined(addr)) || (status_ptr == NULL))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
- }
-
- result = H5C_get_entry_status(f, addr, &entry_size, &in_cache,
- &is_dirty, &is_protected, &is_pinned, &is_flush_dep_parent,
- &is_flush_dep_child);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_entry_status() failed.")
- }
- if ( in_cache ) {
+ if(H5C_get_entry_status(f, addr, &entry_size, &in_cache, &is_dirty,
+ &is_protected, &is_pinned, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
+ if(in_cache) {
status |= H5AC_ES__IN_CACHE;
-
- if ( is_dirty )
+ if(is_dirty)
status |= H5AC_ES__IS_DIRTY;
-
- if ( is_protected )
+ if(is_protected)
status |= H5AC_ES__IS_PROTECTED;
-
- if ( is_pinned )
+ if(is_pinned)
status |= H5AC_ES__IS_PINNED;
-
- if ( is_flush_dep_parent )
+ if(is_flush_dep_parent)
status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
-
- if ( is_flush_dep_child )
+ if(is_flush_dep_child)
status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
- }
+ } /* end if */
*status_ptr = status;
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_get_entry_status() */
@@ -983,9 +947,6 @@ herr_t
H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
void *thing, unsigned int flags)
{
-#ifdef H5_HAVE_PARALLEL
- H5AC_aux_t * aux_ptr = NULL;
-#endif /* H5_HAVE_PARALLEL */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
size_t trace_entry_size = 0;
@@ -1041,26 +1002,20 @@ H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- if(NULL != (aux_ptr = f->shared->cache->aux_ptr)) {
+{
+ H5AC_aux_t *aux_ptr;
+
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
+ /* Log the new entry */
if(H5AC_log_inserted_entry(f, f->shared->cache, (H5AC_info_t *)thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC_log_inserted_entry() failed")
/* Check if we should try to flush */
- if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
- hbool_t evictions_enabled;
-
- /* Query if evictions are allowed */
- if(H5C_get_evictions_enabled((const H5C_t *)f->shared->cache, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
-
- /* Flush if evictions are allowed */
- if(evictions_enabled) {
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
- H5AC_noblock_dxpl_id, f->shared->cache, TRUE) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
- } /* end if */
- } /* end if */
+ if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
+ if(H5AC_run_sync_point(f, H5AC_noblock_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
+}
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1109,7 +1064,7 @@ H5AC_mark_entry_dirty(void *thing)
* occult errors.
*/
if((H5C_get_trace_file_ptr_from_entry(thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
+ (NULL != trace_file_ptr))
sprintf(trace, "%s 0x%lx", FUNC,
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1160,15 +1115,14 @@ done:
herr_t
H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_addr)
{
- herr_t result;
- herr_t ret_value=SUCCEED; /* Return value */
-#ifdef H5_HAVE_PARALLEL
- H5AC_aux_t * aux_ptr = NULL;
-#endif /* H5_HAVE_PARALLEL */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+#ifdef H5_HAVE_PARALLEL
+ H5AC_aux_t * aux_ptr;
+#endif /* H5_HAVE_PARALLEL */
+ herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5AC_move_entry, FAIL)
@@ -1198,52 +1152,31 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- if ( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) {
+ /* Log moving the entry */
+ if(NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr)) {
if(H5AC_log_moved_entry(f, old_addr, new_addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log moved entry")
- }
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
- result = H5C_move_entry(f->shared->cache,
- type,
- old_addr,
- new_addr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
- "H5C_move_entry() failed.")
- }
+ if(H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "H5C_move_entry() failed.")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if(aux_ptr && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
- hbool_t evictions_enabled;
-
- /* Query if evictions are allowed */
- if(H5C_get_evictions_enabled((const H5C_t *)f->shared->cache, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
-
- /* Flush if evictions are allowed */
- if(evictions_enabled) {
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
- H5AC_noblock_dxpl_id, f->shared->cache, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
- } /* end if */
+ if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
+ if(H5AC_run_sync_point(f, H5AC_noblock_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
- }
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_move_entry() */
@@ -1332,7 +1265,6 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
FUNC,
(unsigned long)(((H5C_cache_entry_t *)parent_thing)->addr),
(unsigned long)(((H5C_cache_entry_t *)child_thing)->addr));
- } /* end if */
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5C_create_flush_dependency(parent_thing, child_thing) < 0)
@@ -1382,12 +1314,12 @@ H5AC_protect(H5F_t *f,
{
unsigned protect_flags = H5C__NO_FLAGS_SET;
void * thing = (void *)NULL;
- void * ret_value; /* Return value */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
size_t trace_entry_size = 0;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ void * ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5AC_protect, NULL)
@@ -1416,7 +1348,7 @@ H5AC_protect(H5F_t *f,
( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
( trace_file_ptr != NULL ) ) {
- char * rw_string;
+ const char * rw_string;
if ( rw == H5AC_WRITE ) {
@@ -1628,12 +1560,11 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
#if H5AC__TRACE_FILE_ENABLED
if((H5C_get_trace_file_ptr_from_entry(parent_thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s %lx",
+ (NULL != trace_file_ptr))
+ sprintf(trace, "%s %llx %llx",
FUNC,
- (unsigned long)(((H5C_cache_entry_t *)parent_thing)->addr),
- (unsigned long)(((H5C_cache_entry_t *)child_thing)->addr));
- } /* end if */
+ (unsigned long long)(((H5C_cache_entry_t *)parent_thing)->addr),
+ (unsigned long long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5C_destroy_flush_dependency(parent_thing, child_thing) < 0)
@@ -1641,7 +1572,7 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
done:
#if H5AC__TRACE_FILE_ENABLED
- if( trace_file_ptr != NULL )
+ if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -1691,7 +1622,6 @@ herr_t
H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
void *thing, unsigned flags)
{
- herr_t result;
hbool_t dirtied;
hbool_t deleted;
#ifdef H5_HAVE_PARALLEL
@@ -1741,93 +1671,92 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
/* Check if the size changed out from underneath us, if we're not deleting
* the entry.
*/
- if ( dirtied && !deleted ) {
+ if(dirtied && !deleted) {
size_t curr_size = 0;
- if ( (type->size)(f, thing, &curr_size) < 0 ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
- "Can't get size of thing")
- }
+ if((type->size)(f, thing, &curr_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
if(((H5AC_info_t *)thing)->size != curr_size)
HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "size of entry changed")
- }
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
- if ( ( dirtied ) && ( ((H5AC_info_t *)thing)->is_dirty == FALSE ) &&
- ( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) ) {
+ if((dirtied) && (((H5AC_info_t *)thing)->is_dirty == FALSE) &&
+ (NULL != (aux_ptr = (H5AC_aux_t *)f->shared->cache->aux_ptr))) {
if(H5AC_log_dirtied_entry((H5AC_info_t *)thing, addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- }
-
- if ( ( (flags & H5C__DELETED_FLAG) != 0 ) &&
- ( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) &&
- ( aux_ptr->mpi_rank == 0 ) ) {
-
- result = H5AC_log_deleted_entry(f->shared->cache,
- (H5AC_info_t *)thing,
- addr,
- flags);
-
- if ( result < 0 ) {
+ } /* end if */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "H5AC_log_deleted_entry() failed.")
- }
- }
+ if((deleted) &&
+ (NULL != (aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr))) &&
+ (aux_ptr->mpi_rank == 0)) {
+ if(H5AC_log_deleted_entry(f->shared->cache, (H5AC_info_t *)thing, addr, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC_log_deleted_entry() failed.")
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
- result = H5C_unprotect(f,
- dxpl_id,
- H5AC_noblock_dxpl_id,
- type,
- addr,
- thing,
- flags);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "H5C_unprotect() failed.")
- }
+ if(H5C_unprotect(f, dxpl_id, H5AC_noblock_dxpl_id, type, addr, thing, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed.")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
- if(aux_ptr && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
- hbool_t evictions_enabled;
-
- /* Query if evictions are allowed */
- if(H5C_get_evictions_enabled((const H5C_t *)f->shared->cache, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
-
- /* Flush if evictions are allowed */
- if(evictions_enabled) {
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
- H5AC_noblock_dxpl_id, f->shared->cache, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
- } /* end if */
+ if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
+ if(H5AC_run_sync_point(f, H5AC_noblock_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %x %d\n",
- trace,
- (unsigned)flags,
- (int)ret_value);
- }
+ trace, (unsigned)flags, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_unprotect() */
/*-------------------------------------------------------------------------
+ * Function: HA5C_set_sync_point_done_callback
+ *
+ * Purpose: Set the value of the sync_point_done callback. This
+ * callback is used by the parallel test code to verify
+ * that the expected writes and only the expected writes
+ * take place during a sync point.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/9/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_set_sync_point_done_callback(H5C_t * cache_ptr,
+ void (* sync_point_done)(int num_writes, haddr_t * written_entries_tbl))
+{
+ H5AC_aux_t * aux_ptr;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5AC_set_sync_point_done_callback)
+
+ HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+
+ aux_ptr->sync_point_done = sync_point_done;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5AC_set_sync_point_done_callback() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
* Function: HA5C_set_write_done_callback
*
* Purpose: Set the value of the write_done callback. This callback
@@ -1846,29 +1775,20 @@ herr_t
H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void))
{
- herr_t ret_value = SUCCEED; /* Return value */
- H5AC_aux_t * aux_ptr = NULL;
+ H5AC_aux_t * aux_ptr;
- FUNC_ENTER_NOAPI(H5AC_set_write_done_callback, FAIL)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5AC_set_write_done_callback)
- /* This would normally be an assert, but we need to use an HGOTO_ERROR
- * call to shut up the compiler.
- */
- if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
+ HDassert(cache_ptr && (cache_ptr->magic == H5C__H5C_T_MAGIC));
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
- }
-
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
aux_ptr->write_done = write_done;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_set_write_done_callback() */
#endif /* H5_HAVE_PARALLEL */
@@ -1901,7 +1821,6 @@ H5AC_stats(const H5F_t *f)
done:
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_stats() */
@@ -1959,13 +1878,8 @@ H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
"H5C_get_cache_auto_resize_config() failed.")
}
- result = H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_get_resize_enabled() failed.")
- }
+ if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed.")
if ( internal_config.rpt_fcn == NULL ) {
@@ -2009,11 +1923,16 @@ H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
config_ptr->dirty_bytes_threshold =
((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold;
+ config_ptr->metadata_write_strategy =
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy;
} else {
#endif /* H5_HAVE_PARALLEL */
- config_ptr->dirty_bytes_threshold = H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ config_ptr->dirty_bytes_threshold =
+ H5AC__DEFAULT_DIRTY_BYTES_THRESHOLD;
+ config_ptr->metadata_write_strategy =
+ H5AC__DEFAULT_METADATA_WRITE_STRATEGY;
#ifdef H5_HAVE_PARALLEL
}
@@ -2212,24 +2131,6 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
}
}
- if (
- (
- config_ptr->dirty_bytes_threshold
- <
- H5AC__MIN_DIRTY_BYTES_THRESHOLD
- )
- ||
- (
- config_ptr->dirty_bytes_threshold
- >
- H5AC__MAX_DIRTY_BYTES_THRESHOLD
- )
- ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "config_ptr->dirty_bytes_threshold out of range.")
- }
-
if ( config_ptr->close_trace_file ) {
if ( H5AC_close_trace_file(cache_ptr) < 0 ) {
@@ -2263,6 +2164,9 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
((H5AC_aux_t *)(cache_ptr->aux_ptr))->dirty_bytes_threshold =
config_ptr->dirty_bytes_threshold;
+
+ ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy =
+ config_ptr->metadata_write_strategy;
}
#endif /* H5_HAVE_PARALLEL */
@@ -2278,7 +2182,7 @@ done:
( trace_file_ptr != NULL ) ) {
HDfprintf(trace_file_ptr,
- "%s %d %d %d %d \"%s\" %d %d %d %f %d %d %ld %d %f %f %d %f %f %d %d %d %f %f %d %d %d %d %f %d %d\n",
+ "%s %d %d %d %d \"%s\" %d %d %d %f %d %d %ld %d %f %f %d %f %f %d %d %d %f %f %d %d %d %d %f %d %d %d\n",
"H5AC_set_cache_auto_resize_config",
trace_config.version,
(int)(trace_config.rpt_fcn_enabled),
@@ -2309,6 +2213,7 @@ done:
(int)(trace_config.apply_empty_reserve),
trace_config.empty_reserve,
trace_config.dirty_bytes_threshold,
+ trace_config.metadata_write_strategy,
(int)ret_value);
}
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -2343,45 +2248,28 @@ done:
herr_t
H5AC_validate_config(H5AC_cache_config_t * config_ptr)
{
- herr_t result;
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5AC_validate_config, FAIL)
- if ( config_ptr == NULL ) {
-
+ if(config_ptr == NULL)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
- }
-
- if ( config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION ) {
+ if(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.")
- }
- if ( ( config_ptr->rpt_fcn_enabled != TRUE ) &&
- ( config_ptr->rpt_fcn_enabled != FALSE ) ) {
+ if((config_ptr->rpt_fcn_enabled != TRUE) && (config_ptr->rpt_fcn_enabled != FALSE))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->rpt_fcn_enabled must be either TRUE or FALSE.")
- }
-
- if ( ( config_ptr->open_trace_file != TRUE ) &&
- ( config_ptr->open_trace_file != FALSE ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->open_trace_file must be either TRUE or FALSE.")
- }
+ if((config_ptr->open_trace_file != TRUE) && (config_ptr->open_trace_file != FALSE))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->open_trace_file must be either TRUE or FALSE.")
- if ( ( config_ptr->close_trace_file != TRUE ) &&
- ( config_ptr->close_trace_file != FALSE ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->close_trace_file must be either TRUE or FALSE.")
- }
+ if((config_ptr->close_trace_file != TRUE) && (config_ptr->close_trace_file != FALSE))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->close_trace_file must be either TRUE or FALSE.")
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
- if ( config_ptr->open_trace_file ) {
+ if(config_ptr->open_trace_file) {
size_t name_len;
/* Can't really test the trace_file_name field without trying to
@@ -2390,15 +2278,10 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
*/
name_len = HDstrlen(config_ptr->trace_file_name);
- if ( name_len == 0 ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->trace_file_name is empty.")
-
- } else if ( name_len > H5AC__MAX_TRACE_FILE_NAME_LEN ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "config_ptr->trace_file_name too long.")
+ if(name_len == 0) {
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
+ } else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN) {
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
}
}
@@ -2418,36 +2301,24 @@ H5AC_validate_config(H5AC_cache_config_t * config_ptr)
"Can't disable evictions while auto-resize is enabled.")
}
- if ( config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "dirty_bytes_threshold too small.")
- } else
- if ( config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "dirty_bytes_threshold too big.")
- }
-
- if ( H5AC_ext_config_2_int_config(config_ptr, &internal_config) !=
- SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5AC_ext_config_2_int_config() failed.")
+ if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD) {
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
+ } else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD) {
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
}
- result = H5C_validate_resize_config(&internal_config,
- H5C_RESIZE_CFG__VALIDATE_ALL);
+ if((config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY) &&
+ (config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
- if ( result != SUCCEED ) {
+ if(H5AC_ext_config_2_int_config(config_ptr, &internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_ext_config_2_int_config() failed.")
+ if(H5C_validate_resize_config(&internal_config, H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "error(s) in new config.")
- }
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_validate_config() */
@@ -2620,12 +2491,192 @@ done:
} /* H5AC_open_trace_file() */
+/*-------------------------------------------------------------------------
+ * Function: H5AC_add_candidate()
+ *
+ * Purpose: Add the supplied metadata entry address to the candidate
+ * list. Verify that each entry added does not appear in
+ * the list prior to its insertion.
+ *
+ * This function is intended for use in constructing the list
+ * of entries to be flushed during sync points. It shouldn't
+ * be called anywhere else.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_add_candidate(H5AC_t * cache_ptr,
+ haddr_t addr)
+{
+ H5AC_aux_t * aux_ptr;
+ H5AC_slist_entry_t * slist_entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_add_candidate, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert( aux_ptr->candidate_slist_ptr != NULL );
+
+ /* If the supplied address appears in the candidate list, scream and die. */
+ if(NULL != H5SL_search(aux_ptr->candidate_slist_ptr, (void *)(&addr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry already in candidate slist.")
+
+ /* otherwise, construct an entry for the supplied address, and insert
+ * it into the candidate slist.
+ */
+ if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate candidate slist entry .")
+
+ slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ slist_entry_ptr->addr = addr;
+
+ if(H5SL_insert(aux_ptr->candidate_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
+
+ aux_ptr->candidate_slist_len += 1;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_add_candidate() */
+#endif /* H5_HAVE_PARALLEL */
+
+
/*************************************************************************/
/**************************** Private Functions: *************************/
/*************************************************************************/
/*-------------------------------------------------------------------------
*
+ * Function: H5AC_broadcast_candidate_list()
+ *
+ * Purpose: Broadcast the contents of the process 0 candidate entry
+ * slist. In passing, also remove all entries from said
+ * list. As the application of this will be handled by
+ * the same functions on all processes, construct and
+ * return a copy of the list in the same format as that
+ * received by the other processes. Note that if this
+ * copy is returned in *haddr_buf_ptr_ptr, the caller
+ * must free it.
+ *
+ * This function must only be called by the process with
+ * MPI_rank 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/1/05
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_broadcast_candidate_list(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr)
+{
+ herr_t result;
+ hbool_t success = FALSE;
+ H5AC_aux_t * aux_ptr = NULL;
+ haddr_t * haddr_buf_ptr = NULL;
+ MPI_Offset * MPI_Offset_buf_ptr = NULL;
+ size_t buf_size = 0;
+ int mpi_result;
+ int chk_num_entries = 0;
+ int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_broadcast_candidate_list, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->mpi_rank == 0 );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert( aux_ptr->candidate_slist_ptr != NULL );
+ HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
+ (size_t)(aux_ptr->candidate_slist_len) );
+ HDassert( num_entries_ptr != NULL );
+ HDassert( *num_entries_ptr == 0 );
+ HDassert( haddr_buf_ptr_ptr != NULL );
+ HDassert( *haddr_buf_ptr_ptr == NULL );
+
+ /* First broadcast the number of entries in the list so that the
+ * receivers can set up buffers to receive them. If there aren't
+ * any, we are done.
+ */
+ num_entries = aux_ptr->candidate_slist_len;
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
+
+ if(num_entries > 0) {
+ /* convert the candidate list into the format we
+ * are used to receiving from process 0, and also load it
+ * into a buffer for transmission.
+ */
+ if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries,
+ &haddr_buf_ptr, &buf_size, &MPI_Offset_buf_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+
+ HDassert( chk_num_entries == num_entries );
+ HDassert( haddr_buf_ptr != NULL );
+ HDassert( MPI_Offset_buf_ptr != NULL );
+ HDassert( aux_ptr->candidate_slist_len == 0 );
+
+ /* Now broadcast the list of candidate entries -- if there is one.
+ *
+ * The peculiar structure of the following call to MPI_Bcast is
+ * due to MPI's (?) failure to believe in the MPI_Offset type.
+ * Thus the element type is MPI_BYTE, with size equal to the
+ * buf_size computed above.
+ */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
+ } /* end if */
+
+ success = TRUE;
+
+done:
+ if(MPI_Offset_buf_ptr != NULL)
+ MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
+
+ if(success) {
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller. Do this so that we can use the same code
+ * to apply the candidate list to all the processes.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
+ } else if(haddr_buf_ptr != NULL) {
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_broadcast_candidate_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
* Function: H5AC_broadcast_clean_list()
*
* Purpose: Broadcast the contents of the process 0 cleaned entry
@@ -2650,6 +2701,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
haddr_t addr;
+ haddr_t * addr_buf_ptr = NULL;
H5AC_aux_t * aux_ptr = NULL;
H5SL_node_t * slist_node_ptr = NULL;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
@@ -2657,14 +2709,14 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
size_t buf_size;
int i = 0;
int mpi_result;
- int num_entries;
+ int num_entries = 0;
FUNC_ENTER_NOAPI(H5AC_broadcast_clean_list, FAIL)
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -2702,13 +2754,28 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
"memory allocation failed for clean entry buffer")
}
+ /* if the sync_point_done callback is defined, allocate the
+ * addr buffer as well.
+ */
+ if ( aux_ptr->sync_point_done != NULL ) {
+
+ addr_buf_ptr = H5MM_malloc((size_t)(num_entries * sizeof(haddr_t)));
+
+ if ( addr_buf_ptr == NULL ) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
+ "memory allocation failed for addr buffer")
+ }
+ }
+
+
/* now load the entry base addresses into the buffer, emptying the
* cleaned entry list in passing
*/
while ( NULL != (slist_node_ptr = H5SL_first(aux_ptr->c_slist_ptr) ) )
{
- slist_entry_ptr = H5SL_item(slist_node_ptr);
+ slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
@@ -2716,6 +2783,11 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
addr = slist_entry_ptr->addr;
+ if ( addr_buf_ptr != NULL ) {
+
+ addr_buf_ptr[i] = addr;
+ }
+
if ( H5FD_mpi_haddr_to_MPIOff(addr, &(buf_ptr[i])) < 0 ) {
HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
@@ -2743,19 +2815,12 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
/* and also remove the matching entry from the dirtied list
* if it exists.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
+ if((slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
+ HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
HDassert( slist_entry_ptr->addr == addr );
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
+ if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
slist_entry_ptr->magic = 0;
H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -2764,8 +2829,7 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
aux_ptr->d_slist_len -= 1;
HDassert( aux_ptr->d_slist_len >= 0 );
- }
-
+ } /* end if */
} /* while */
@@ -2786,15 +2850,16 @@ H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
}
}
-done:
-
- if ( buf_ptr != NULL ) {
+ if(aux_ptr->sync_point_done != NULL)
+ (aux_ptr->sync_point_done)(num_entries, addr_buf_ptr);
+done:
+ if(buf_ptr != NULL)
buf_ptr = (MPI_Offset *)H5MM_xfree((void *)buf_ptr);
- }
+ if(addr_buf_ptr != NULL)
+ addr_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)addr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_broadcast_clean_list() */
#endif /* H5_HAVE_PARALLEL */
@@ -2850,7 +2915,9 @@ H5AC_check_if_write_permitted(const H5F_t UNUSED * f,
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- if ( aux_ptr->mpi_rank == 0 ) {
+ if ( ( aux_ptr->mpi_rank == 0 ) ||
+ ( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED ) ) {
write_permitted = aux_ptr->write_permitted;
@@ -2871,6 +2938,222 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_construct_candidate_list()
+ *
+ * Purpose: In the parallel case when the metadata_write_strategy is
+ * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, process 0 uses
+ * this function to construct the list of cache entries to
+ * be flushed. This list is then propagated to the other
+ * caches, and then flushed in a distributed fashion.
+ *
+ * The sync_point_op parameter is used to determine the extent
+ * of the flush.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_construct_candidate_list(H5AC_t * cache_ptr,
+ H5AC_aux_t * aux_ptr,
+ int sync_point_op)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_construct_candidate_list, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) ||
+ ( aux_ptr->mpi_rank == 0 ) );
+ HDassert( aux_ptr->d_slist_ptr != NULL );
+ HDassert( aux_ptr->c_slist_ptr != NULL );
+ HDassert( aux_ptr->c_slist_len == 0 );
+ HDassert( aux_ptr->candidate_slist_ptr != NULL );
+ HDassert( aux_ptr->candidate_slist_len == 0 );
+ HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
+ ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) );
+
+ switch(sync_point_op) {
+ case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
+ if(H5C_construct_candidate_list__min_clean((H5C_t *)cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_construct_candidate_list__min_clean() failed.")
+ break;
+
+ case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
+ if(H5C_construct_candidate_list__clean_cache((H5C_t *)cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_construct_candidate_list__clean_cache() failed.")
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown sync point operation.")
+ break;
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_construct_candidate_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_copy_candidate_list_to_buffer
+ *
+ * Purpose: Allocate buffer(s) and copy the contents of the candidate
+ * entry slist into it (them). In passing, remove all
+ * entries from the candidate slist. Note that the
+ * candidate slist must not be empty.
+ *
+ * If MPI_Offset_buf_ptr_ptr is not NULL, allocate a buffer
+ * of MPI_Offset, copy the contents of the candidate
+ * entry list into it with the appropriate conversions,
+ * and return the base address of the buffer in
+ * *MPI_Offset_buf_ptr. Note that this is the buffer
+ * used by process 0 to transmit the list of entries to
+ * be flushed to all other processes (in this file group).
+ *
+ * Similarly, allocate a buffer of haddr_t, load the contents
+ * of the candidate list into this buffer, and return its
+ * base address in *haddr_buf_ptr_ptr. Note that this
+ * latter buffer is constructed unconditionally.
+ *
+ * In passing, also remove all entries from the candidate
+ * entry slist.
+ *
+ * Return: Return SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer, 4/19/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_copy_candidate_list_to_buffer(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr,
+ size_t * MPI_Offset_buf_size_ptr,
+ MPI_Offset ** MPI_Offset_buf_ptr_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t success = FALSE;
+ haddr_t addr;
+ H5AC_aux_t * aux_ptr = NULL;
+ H5SL_node_t * slist_node_ptr = NULL;
+ H5AC_slist_entry_t * slist_entry_ptr = NULL;
+ MPI_Offset * MPI_Offset_buf_ptr = NULL;
+ haddr_t * haddr_buf_ptr = NULL;
+ size_t buf_size;
+ int i = 0;
+ int num_entries = 0;
+
+ FUNC_ENTER_NOAPI(H5AC_copy_candidate_list_to_buffer, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert( aux_ptr->candidate_slist_ptr != NULL );
+ HDassert( H5SL_count(aux_ptr->candidate_slist_ptr) ==
+ (size_t)(aux_ptr->candidate_slist_len) );
+ HDassert( aux_ptr->candidate_slist_len > 0 );
+ HDassert( num_entries_ptr != NULL );
+ HDassert( *num_entries_ptr == 0 );
+ HDassert( haddr_buf_ptr_ptr != NULL );
+ HDassert( *haddr_buf_ptr_ptr == NULL );
+
+ num_entries = aux_ptr->candidate_slist_len;
+
+ /* allocate a buffer(s) to store the list of candidate entry
+ * base addresses in
+ */
+ if(MPI_Offset_buf_ptr_ptr != NULL) {
+ HDassert( MPI_Offset_buf_size_ptr != NULL );
+
+ /* allocate a buffer of MPI_Offset */
+ buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
+ if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for MPI_Offset buffer")
+ } /* end if */
+
+ /* allocate a buffer of haddr_t */
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
+
+ /* now load the entry base addresses into the buffer, emptying the
+ * candidate entry list in passing
+ */
+ while(NULL != (slist_node_ptr = H5SL_first(aux_ptr->candidate_slist_ptr))) {
+ slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_item(slist_node_ptr);
+
+ HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert( i < num_entries );
+
+ addr = slist_entry_ptr->addr;
+ haddr_buf_ptr[i] = addr;
+ if(MPI_Offset_buf_ptr != NULL) {
+ if(H5FD_mpi_haddr_to_MPIOff(addr, &(MPI_Offset_buf_ptr[i])) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
+ } /* end if */
+
+ i++;
+
+ /* now remove the entry from the cleaned entry list */
+ if(H5SL_remove(aux_ptr->candidate_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from candidate entry slist.")
+
+ slist_entry_ptr->magic = 0;
+ H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
+ slist_entry_ptr = NULL;
+
+ aux_ptr->candidate_slist_len -= 1;
+
+ HDassert( aux_ptr->candidate_slist_len >= 0 );
+ } /* while */
+ HDassert( aux_ptr->candidate_slist_len == 0 );
+
+ success = TRUE;
+
+done:
+ if(success) {
+ /* Pass the number of entries and the buffer pointer
+ * back to the caller.
+ */
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
+
+ if(MPI_Offset_buf_ptr_ptr != NULL) {
+ HDassert( MPI_Offset_buf_ptr != NULL);
+ *MPI_Offset_buf_size_ptr = buf_size;
+ *MPI_Offset_buf_ptr_ptr = MPI_Offset_buf_ptr;
+ } /* end if */
+ } /* end if */
+ else {
+ if(MPI_Offset_buf_ptr != NULL)
+ MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
+ if(haddr_buf_ptr != NULL)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_copy_candidate_list_to_buffer() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
* Function: H5AC_ext_config_2_int_config()
*
* Purpose: Utility function to translate an instance of
@@ -2969,16 +3252,16 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
haddr_t addr,
unsigned int flags)
{
- herr_t ret_value = SUCCEED; /* Return value */
- H5AC_aux_t * aux_ptr = NULL;
+ H5AC_aux_t * aux_ptr;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5AC_log_deleted_entry, FAIL)
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -2988,25 +3271,17 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
HDassert( (flags & H5C__DELETED_FLAG) != 0 );
- if ( aux_ptr->mpi_rank == 0 ) {
-
+ if(aux_ptr->mpi_rank == 0) {
HDassert( aux_ptr->d_slist_ptr != NULL );
HDassert( aux_ptr->c_slist_ptr != NULL );
/* if the entry appears in the dirtied entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
+ if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL) {
+ HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert(slist_entry_ptr->addr == addr);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from dirty entry slist.")
- }
+ if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
slist_entry_ptr->magic = 0;
H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -3015,22 +3290,15 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
aux_ptr->d_slist_len -= 1;
HDassert( aux_ptr->d_slist_len >= 0 );
- }
+ } /* end if */
/* if the entry appears in the cleaned entry slist, remove it. */
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == addr );
-
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
+ if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
+ HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert(slist_entry_ptr->addr == addr);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from cleaned entry slist.")
- }
+ if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from cleaned entry slist.")
slist_entry_ptr->magic = 0;
H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -3039,13 +3307,11 @@ H5AC_log_deleted_entry(H5AC_t * cache_ptr,
aux_ptr->c_slist_len -= 1;
HDassert( aux_ptr->c_slist_len >= 0 );
- }
- }
+ } /* end if */
+ } /* end if */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_log_deleted_entry() */
#endif /* H5_HAVE_PARALLEL */
@@ -3093,7 +3359,7 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -3133,24 +3399,16 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
}
- if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL ) {
-
+ if(H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL) {
/* the entry is dirty. If it exists on the cleaned entries list,
* remove it.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
- (void *)(&addr))) != NULL ) {
-
- HDassert( slist_entry_ptr->magic ==
- H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
- HDassert( slist_entry_ptr->addr == addr );
+ if((slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr))) != NULL) {
+ HDassert(slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert(slist_entry_ptr->addr == addr);
- if ( H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr))
- != slist_entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, \
- "Can't delete entry from clean entry slist.")
- }
+ if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
slist_entry_ptr->magic = 0;
H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -3159,8 +3417,8 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
aux_ptr->c_slist_len -= 1;
HDassert( aux_ptr->c_slist_len >= 0 );
- }
- }
+ } /* end if */
+ } /* end if */
} else {
aux_ptr->dirty_bytes += entry_ptr->size;
@@ -3200,34 +3458,6 @@ done:
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
-#if 0 /* This is useful debugging code. -- JRM */
-static herr_t
-H5AC_log_flushed_entry_dummy(H5C_t * cache_ptr,
- haddr_t addr,
- hbool_t was_dirty,
- unsigned flags,
- int type_id)
-{
- herr_t ret_value = SUCCEED; /* Return value */
- H5AC_aux_t * aux_ptr = NULL;
-
- FUNC_ENTER_NOAPI(H5AC_log_flushed_entry_dummy, FAIL)
-
- aux_ptr = cache_ptr->aux_ptr;
-
- if ( ( was_dirty ) && ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) == 0 ) ) {
-
- HDfprintf(stdout,
- "%d:H5AC_log_flushed_entry(): addr = %d, flags = %x, was_dirty = %d, type_id = %d\n",
- (int)(aux_ptr->mpi_rank), (int)addr, flags, (int)was_dirty, type_id);
- }
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_log_flushed_entry_dummy() */
-#endif /* JRM */
-
static herr_t
H5AC_log_flushed_entry(H5C_t * cache_ptr,
haddr_t addr,
@@ -3246,7 +3476,7 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -3261,7 +3491,8 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
* cleaned list and the dirtied list.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
+ if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
+ H5SL_search(aux_ptr->c_slist_ptr,
(void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
@@ -3283,8 +3514,8 @@ H5AC_log_flushed_entry(H5C_t * cache_ptr,
HDassert( aux_ptr->c_slist_len >= 0 );
}
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
- (void *)(&addr))) != NULL ) {
+ if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
+ H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr))) != NULL ) {
HDassert( slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
HDassert( slist_entry_ptr->addr == addr );
@@ -3364,62 +3595,47 @@ H5AC_log_inserted_entry(H5F_t * f,
H5AC_t * cache_ptr,
H5AC_info_t * entry_ptr)
{
+ H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- H5AC_aux_t * aux_ptr = NULL;
- H5AC_slist_entry_t * slist_entry_ptr = NULL;
FUNC_ENTER_NOAPI(H5AC_log_inserted_entry, FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
HDassert( entry_ptr != NULL );
- if ( aux_ptr->mpi_rank == 0 ) {
-
- HDassert( aux_ptr->d_slist_ptr != NULL );
- HDassert( aux_ptr->c_slist_ptr != NULL );
-
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&entry_ptr->addr)) == NULL ) {
-
- /* insert the address of the entry in the dirty entry list, and
- * add its size to the dirty_bytes count.
- */
- if ( NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)) ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "Can't allocate dirty slist entry .")
- }
-
- slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = entry_ptr->addr;
+ if(aux_ptr->mpi_rank == 0) {
+ H5AC_slist_entry_t * slist_entry_ptr;
- if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
- &(slist_entry_ptr->addr)) < 0 ) {
+ HDassert(aux_ptr->d_slist_ptr != NULL);
+ HDassert(aux_ptr->c_slist_ptr != NULL);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
- "can't insert entry into dirty entry slist.")
- }
+ if(NULL != H5SL_search(aux_ptr->d_slist_ptr, (void *)(&entry_ptr->addr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry already in dirty slist.")
- aux_ptr->d_slist_len += 1;
+ /* insert the address of the entry in the dirty entry list, and
+ * add its size to the dirty_bytes count.
+ */
+ if(NULL == (slist_entry_ptr = H5FL_CALLOC(H5AC_slist_entry_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "Can't allocate dirty slist entry.")
- } else {
+ slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
+ slist_entry_ptr->addr = entry_ptr->addr;
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Inserted entry already in dirty slist.")
- }
+ if(H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr, &(slist_entry_ptr->addr)) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert entry into dirty entry slist.")
- if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&entry_ptr->addr)) != NULL ) {
+ aux_ptr->d_slist_len += 1;
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Inserted entry in clean slist.")
- }
- }
+ if(NULL != H5SL_search(aux_ptr->c_slist_ptr, (void *)(&entry_ptr->addr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Inserted entry in clean slist.")
+ } /* end if */
aux_ptr->dirty_bytes += entry_ptr->size;
@@ -3429,9 +3645,7 @@ H5AC_log_inserted_entry(H5F_t * f,
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_log_inserted_entry() */
#endif /* H5_HAVE_PARALLEL */
@@ -3506,7 +3720,7 @@ H5AC_log_moved_entry(const H5F_t *f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- aux_ptr = cache_ptr->aux_ptr;
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
@@ -3531,8 +3745,8 @@ H5AC_log_moved_entry(const H5F_t *f,
/* if the entry appears in the cleaned entry slist, under its old
* address, remove it.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->c_slist_ptr,
- (void *)(&old_addr))) != NULL ) {
+ if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
+ H5SL_search(aux_ptr->c_slist_ptr, (void *)(&old_addr))) != NULL ) {
HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
@@ -3557,8 +3771,8 @@ H5AC_log_moved_entry(const H5F_t *f,
/* if the entry appears in the dirtied entry slist under its old
* address, remove it, but don't free it. Set addr to new_addr.
*/
- if ( (slist_entry_ptr = H5SL_search(aux_ptr->d_slist_ptr,
- (void *)(&old_addr))) != NULL ) {
+ if ( (slist_entry_ptr = (H5AC_slist_entry_t *)
+ H5SL_search(aux_ptr->d_slist_ptr, (void *)(&old_addr))) != NULL ) {
HDassert( slist_entry_ptr->magic ==
H5AC__H5AC_SLIST_ENTRY_T_MAGIC );
@@ -3634,27 +3848,223 @@ H5AC_log_moved_entry(const H5F_t *f,
}
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_log_moved_entry() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
+ * Function: H5AC_propagate_and_apply_candidate_list
+ *
+ * Purpose: Prior to the addition of support for multiple metadata
+ * write strategies, in PHDF5, only the metadata cache with
+ * mpi rank 0 was allowed to write to file. All other
+ * metadata caches on processes with rank greater than 0
+ * were required to retain dirty entries until they were
+ * notified that the entry was clean.
+ *
+ * This constraint is relaxed with the distributed
+ * metadata write strategy, in which a list of candidate
+ * metadata cache entries is constructed by the process 0
+ * cache and then distributed to the caches of all the other
+ * processes. Once the list is distributed, each process
+ * writes a unique subset of the entries and marks the
+ * remainder clean. The subsets
+ * are chosen so that each entry in the list of candidates
+ * is written by exactly one cache, and all entries are
+ * marked as being clean in all caches.
+ *
+ * While the list of candidate cache entries is prepared
+ * elsewhere, this function is the main routine for distributing
+ * and applying the list. It must be run simultaneously on
+ * all processes that have the relevant file open. To ensure
+ * proper synchronization, there is a barrier at the beginning
+ * of this function.
+ *
+ * At present, this function is called under one of two
+ * circumstances:
+ *
+ * 1) Dirty byte creation exceeds some user specified value.
+ *
+ * While metadata reads may occur independently, all
+ * operations writing metadata must be collective. Thus
+ * all metadata caches see the same sequence of operations,
+ * and therefore the same dirty data creation.
+ *
+ * This fact is used to synchronize the caches for purposes
+ * of propagating the list of candidate entries, by simply
+ * calling this function from all caches whenever some user
+ * specified threshold on dirty data is exceeded. (the
+ * process 0 cache creates the candidate list just before
+ * calling this function).
+ *
+ * 2) Under direct user control -- this operation must be
+ * collective.
+ *
+ * The operations to be managed by this function are as
+ * follows:
+ *
+ * All processes:
+ *
+ * 1) Participate in an opening barrier.
+ *
+ * For the process with mpi rank 0:
+ *
+ * 1) Load the contents of the candidate list
+ * (candidate_slist_ptr) into a buffer, and broadcast that
+ * buffer to all the other caches. Clear the candidate
+ * list in passing.
+ *
+ * If there is a positive number of candidates, proceed with
+ * the following:
+ *
+ * 2) Apply the candidate entry list.
+ *
+ * 3) Participate in a closing barrier.
+ *
+ * 4) Remove from the dirty list (d_slist_ptr) and from the
+ * flushed and still clean entries list (c_slist_ptr),
+ * all addresses that appeared in the candidate list, as
+ * these entries are now clean.
+ *
+ *
+ * For all processes with mpi rank greater than 0:
+ *
+ * 1) Receive the candidate entry list broadcast
+ *
+ * If there is a positive number of candidates, proceed with
+ * the following:
+ *
+ * 2) Apply the candidate entry list.
+ *
+ * 3) Participate in a closing barrier.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_propagate_and_apply_candidate_list(H5F_t * f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr)
+{
+ int mpi_code;
+ int num_candidates = 0;
+ haddr_t * candidates_list_ptr = NULL;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_propagate_and_apply_candidate_list, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+
+ /* to prevent "messages from the future" we must synchronize all
+ * processes before we write any entries.
+ */
+ if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+
+ if(aux_ptr->mpi_rank == 0) {
+ if(H5AC_broadcast_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast candidate slist.")
+
+ HDassert( aux_ptr->candidate_slist_len == 0 );
+ } /* end if */
+ else {
+ if(H5AC_receive_candidate_list(cache_ptr, &num_candidates, &candidates_list_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive candidate broadcast.")
+ } /* end else */
+
+ if(num_candidates > 0) {
+ herr_t result;
+
+ /* all processes apply the candidate list.
+ * H5C_apply_candidate_list() handles the details of
+ * distributing the writes across the processes.
+ */
+
+ aux_ptr->write_permitted = TRUE;
+
+ result = H5C_apply_candidate_list(f,
+ dxpl_id,
+ dxpl_id,
+ cache_ptr,
+ num_candidates,
+ candidates_list_ptr,
+ aux_ptr->mpi_rank,
+ aux_ptr->mpi_size);
+
+ aux_ptr->write_permitted = FALSE;
+
+ if(result < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
+
+ if(aux_ptr->write_done != NULL)
+ (aux_ptr->write_done)();
+
+ /* to prevent "messages from the past" we must synchronize all
+ * processes again before we go on.
+ */
+ if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 2", mpi_code)
+
+ if(aux_ptr->mpi_rank == 0) {
+ if(H5AC_tidy_cache_0_lists(cache_ptr, num_candidates, candidates_list_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
+ } /* end if */
+ } /* end if */
+
+ /* if it is defined, call the sync point done callback. Note
+ * that this callback is defined purely for testing purposes,
+ * and should be undefined under normal operating circumstances.
+ */
+ if(aux_ptr->sync_point_done != NULL)
+ (aux_ptr->sync_point_done)(num_candidates, candidates_list_ptr);
+
+done:
+ if(candidates_list_ptr != NULL)
+ candidates_list_ptr = (haddr_t *)H5MM_xfree((void *)candidates_list_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_propagate_and_apply_candidate_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
* Function: H5AC_propagate_flushed_and_still_clean_entries_list
*
- * Purpose: In PHDF5, only the metadata cache with mpi rank 0 is allowed
- * to write to file. All other metadata caches on processes
- * with rank greater than 0 must retain dirty entries until
- * they are notified that the entry is now clean.
+ * Purpose: In PHDF5, if the process 0 only metadata write strategy
+ * is selected, only the metadata cache with mpi rank 0 is
+ * allowed to write to file. All other metadata caches on
+ * processes with rank greater than 0 must retain dirty
+ * entries until they are notified that the entry is now
+ * clean.
*
- * This function is the main routine for that proceedure.
- * It must be called simultaniously on all processes that
- * have the relevant file open. To this end, there must
- * be a barrier immediately prior to this call.
+ * This function is the main routine for handling this
+ * notification procedure. It must be called
+ * simultaneously on all processes that have the relevant
+ * file open. To this end, it is called only during a
+ * sync point, with a barrier prior to the call.
*
- * Typicaly, this will be done one of two ways:
+ * Note that any metadata entry writes by process 0 will
+ * occur after the barrier and just before this call.
+ *
+ * Typically, calls to this function will be triggered in
+ * one of two ways:
*
* 1) Dirty byte creation exceeds some user specified value.
*
@@ -3677,14 +4087,11 @@ done:
*
* For the process with mpi rank 0:
*
- * 1) Enable writes, flush the cache to its min clean size,
- * and then disable writes again.
- *
- * 2) Load the contents of the flushed and still clean entries
+ * 1) Load the contents of the flushed and still clean entries
* list (c_slist_ptr) into a buffer, and broadcast that
* buffer to all the other caches.
*
- * 3) Clear the flushed and still clean entries list
+ * 2) Clear the flushed and still clean entries list
* (c_slist_ptr).
*
*
@@ -3712,113 +4119,156 @@ done:
herr_t
H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
hid_t dxpl_id,
- H5AC_t * cache_ptr,
- hbool_t do_barrier)
+ H5AC_t * cache_ptr)
{
+ H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
- herr_t result;
- int mpi_code;
- H5AC_aux_t * aux_ptr = NULL;
FUNC_ENTER_NOAPI(H5AC_propagate_flushed_and_still_clean_entries_list, FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
- HDassert( aux_ptr != NULL );
- HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
-
-#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
- "%d:H5AC_propagate...:%d: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
- (int)(aux_ptr->dirty_bytes_propagations),
- (int)(aux_ptr->unprotect_dirty_bytes),
- (int)(aux_ptr->unprotect_dirty_bytes_updates),
- (int)(aux_ptr->insert_dirty_bytes),
- (int)(aux_ptr->insert_dirty_bytes_updates),
- (int)(aux_ptr->move_dirty_bytes),
- (int)(aux_ptr->move_dirty_bytes_updates));
-#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
-
- if ( do_barrier ) {
+ HDassert(aux_ptr != NULL);
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ HDassert(aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
- /* to prevent "messages from the future" we must synchronize all
- * processes before we start the flush. This synchronization may
- * already be done -- hence the do_barrier parameter.
- */
-
- if ( MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)) ) {
+ if(aux_ptr->mpi_rank == 0) {
+ if(H5AC_broadcast_clean_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't broadcast clean slist.")
+ HDassert( aux_ptr->c_slist_len == 0 );
+ } /* end if */
+ else {
+ if(H5AC_receive_and_apply_clean_list(f, dxpl_id, H5AC_noblock_dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive and/or process clean slist broadcast.")
+ } /* end else */
- HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
- }
- }
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_propagate_flushed_and_still_clean_entries_list() */
+#endif /* H5_HAVE_PARALLEL */
- if ( aux_ptr->mpi_rank == 0 ) {
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_receive_and_apply_clean_list()
+ *
+ * Purpose: Receive the list of cleaned entries from process 0,
+ * and mark the specified entries as clean.
+ *
+ * This function must only be called by processes with
+ * MPI rank greater than 0.
+ *
+ * Return SUCCEED on success, and FAIL on failure.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/4/05
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_receive_and_apply_clean_list(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5AC_t * cache_ptr)
+{
+ H5AC_aux_t * aux_ptr;
+ haddr_t * haddr_buf_ptr = NULL;
+ MPI_Offset * MPI_Offset_buf_ptr = NULL;
+ int mpi_result;
+ int num_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- aux_ptr->write_permitted = TRUE;
+ FUNC_ENTER_NOAPI(H5AC_receive_and_apply_clean_list, FAIL)
- result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id);
+ HDassert( f != NULL );
+ HDassert( f->shared->cache == cache_ptr );
- aux_ptr->write_permitted = FALSE;
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( result < 0 ) {
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_flush_to_min_clean() failed.")
- }
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->mpi_rank != 0 );
- if ( aux_ptr->write_done != NULL ) {
+ /* First receive the number of entries in the list so that we
+ * can set up a buffer to receive them. If there aren't
+ * any, we are done.
+ */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
- (aux_ptr->write_done)();
- }
+ if(num_entries > 0) {
+ size_t buf_size;
+ int i;
- if ( H5AC_broadcast_clean_list(cache_ptr) < 0 ) {
+ /* allocate buffers to store the list of entry base addresses in */
+ buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
+ if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Can't broadcast clean slist.")
- }
+ /* Now receive the list of cleaned entries
+ *
+ * The peculiar structure of the following call to MPI_Bcast is
+ * due to MPI's (?) failure to believe in the MPI_Offset type.
+ * Thus the element type is MPI_BYTE, with size equal to the
+ * buf_size computed above.
+ */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
- HDassert( aux_ptr->c_slist_len == 0 );
+ /* translate the MPI_Offsets to haddr_t */
+ i = 0;
+ while(i < num_entries) {
+ haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
- } else {
+ if(haddr_buf_ptr[i] == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
- if ( H5AC_receive_and_apply_clean_list(f, dxpl_id,
- H5AC_noblock_dxpl_id,
- cache_ptr) < 0 ) {
+ i++;
+ } /* end while */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Can't receive and/or process clean slist broadcast.")
- }
- }
+ /* mark the indicated entries as clean */
+ if(H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id,
+ (int32_t)num_entries, &(haddr_buf_ptr[0])) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
+ } /* end if */
- aux_ptr->dirty_bytes = 0;
-#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->dirty_bytes_propagations += 1;
- aux_ptr->unprotect_dirty_bytes = 0;
- aux_ptr->unprotect_dirty_bytes_updates = 0;
- aux_ptr->insert_dirty_bytes = 0;
- aux_ptr->insert_dirty_bytes_updates = 0;
- aux_ptr->move_dirty_bytes = 0;
- aux_ptr->move_dirty_bytes_updates = 0;
-#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
+ /* if it is defined, call the sync point done callback. Note
+ * that this callback is defined purely for testing purposes,
+ * and should be undefined under normal operating circumstances.
+ */
+ if(aux_ptr->sync_point_done != NULL)
+ (aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
done:
+ if(MPI_Offset_buf_ptr != NULL)
+ MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
+ if(haddr_buf_ptr != NULL)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5AC_propagate_flushed_and_still_clean_entries_list() */
+} /* H5AC_receive_and_apply_clean_list() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
*
- * Function: H5AC_receive_and_apply_clean_list()
+ * Function: H5AC_receive_candidate_list()
*
- * Purpose: Receive the list of cleaned entries from process 0,
- * and mark the specified entries as clean.
+ * Purpose: Receive the list of candidate entries from process 0,
+ * and return it in a buffer pointed to by *haddr_buf_ptr_ptr.
+ * Note that the caller must free this buffer if it is
+ * returned.
*
* This function must only be called by the process with
* MPI_rank greater than 0.
@@ -3827,27 +4277,25 @@ done:
*
* Return: Non-negative on success/Negative on failure.
*
- * Programmer: John Mainzer, 7/4/05
+ * Programmer: John Mainzer, 3/17/10
*
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_receive_and_apply_clean_list(H5F_t * f,
- hid_t primary_dxpl_id,
- hid_t secondary_dxpl_id,
- H5AC_t * cache_ptr)
+H5AC_receive_candidate_list(H5AC_t * cache_ptr,
+ int * num_entries_ptr,
+ haddr_t ** haddr_buf_ptr_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
- H5AC_aux_t * aux_ptr = NULL;
+ hbool_t success = FALSE;
+ H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
MPI_Offset * MPI_Offset_buf_ptr = NULL;
- size_t buf_size;
- int i = 0;
int mpi_result;
int num_entries;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5AC_receive_and_apply_clean_list, FAIL)
+ FUNC_ENTER_NOAPI(H5AC_receive_candidate_list, FAIL)
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
@@ -3857,180 +4305,830 @@ H5AC_receive_and_apply_clean_list(H5F_t * f,
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
HDassert( aux_ptr->mpi_rank != 0 );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+
+ HDassert( num_entries_ptr != NULL );
+ HDassert( *num_entries_ptr == 0 );
+
+ HDassert( haddr_buf_ptr_ptr != NULL );
+ HDassert( *haddr_buf_ptr_ptr == NULL );
+
/* First receive the number of entries in the list so that we
* can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm);
-
- if ( mpi_result != MPI_SUCCESS ) {
-
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 1", mpi_result)
- }
- if ( num_entries > 0 )
- {
- /* allocate a buffers to store the list of entry base addresses in */
+ if(num_entries > 0) {
+ size_t buf_size;
+ int i;
+ /* allocate buffers to store the list of entry base addresses in */
buf_size = sizeof(MPI_Offset) * (size_t)num_entries;
- MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size);
+ if(NULL == (MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_malloc(buf_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for receive buffer")
+ if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)num_entries)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for haddr buffer")
- if ( MPI_Offset_buf_ptr == NULL ) {
+ /* Now receive the list of candidate entries
+ *
+ * The peculiar structure of the following call to MPI_Bcast is
+ * due to MPI's (?) failure to believe in the MPI_Offset type.
+ * Thus the element type is MPI_BYTE, with size equal to the
+ * buf_size computed above.
+ */
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for receive buffer")
- }
+ /* translate the MPI_Offsets to haddr_t */
+ i = 0;
+ while(i < num_entries) {
+ haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
- haddr_buf_ptr = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
- (size_t)num_entries);
+ if(haddr_buf_ptr[i] == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert MPI off to haddr")
- if ( haddr_buf_ptr == NULL ) {
+ i++;
+ } /* end while */
+ } /* end if */
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for haddr buffer")
- }
+ success = TRUE;
+done:
+ if(MPI_Offset_buf_ptr != NULL)
+ MPI_Offset_buf_ptr = (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- /* Now receive the list of cleaned entries
- *
- * The peculiar structure of the following call to MPI_Bcast is
- * due to MPI's (?) failure to believe in the MPI_Offset type.
- * Thus the element type is MPI_BYTE, with size equal to the
- * buf_size computed above.
+ if(success) {
+ /* finally, pass the number of entries and the buffer pointer
+ * back to the caller. Do this so that we can use the same code
+ * to apply the candidate list to all the processes.
*/
+ *num_entries_ptr = num_entries;
+ *haddr_buf_ptr_ptr = haddr_buf_ptr;
+ } /* end if */
+ else {
+ if(haddr_buf_ptr != NULL)
+ haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
+ } /* end else */
- mpi_result = MPI_Bcast((void *)MPI_Offset_buf_ptr, (int)buf_size,
- MPI_BYTE, 0, aux_ptr->mpi_comm);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_receive_candidate_list() */
+#endif /* H5_HAVE_PARALLEL */
- if ( mpi_result != MPI_SUCCESS ) {
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_rsp__dist_md_write__flush
+ *
+ * Purpose: Routine for handling the details of running a sync point
+ * that is triggered by a flush -- which in turn must have been
+ * triggered by either a flush API call or a file close --
+ * when the distributed metadata write strategy is selected.
+ *
+ * Upon entry, each process generates its own candidate list,
+ * being a sorted list of all dirty metadata entries currently
+ * in the metadata cache. Note that this list must be identical
+ * across all processes, as all processes see the same stream
+ * of dirty metadata coming in, and use the same lists of
+ * candidate entries at each sync point. (At first glance, this
+ * argument sounds circular, but think of it in the sense of
+ * a recursive proof).
+ *
+ * If this list is empty, we are done, and the function
+ * returns.
+ *
+ * Otherwise, after the sorted list of dirty metadata entries is
+ * constructed, each process uses the same algorithm to assign
+ * each entry on the candidate list to exactly one process for
+ * flushing.
+ *
+ * At this point, all processes participate in a barrier to
+ * avoid messages from the past/future bugs.
+ *
+ * Each process then flushes the entries assigned to it, and
+ * marks all other entries on the candidate list as clean.
+ *
+ * Finally, all processes participate in a second barrier to
+ * avoid messages from the past/future bugs.
+ *
+ * At the end of this process, process 0 and only process 0
+ * must tidy up its lists of dirtied and cleaned entries.
+ * These lists are not used in the distributed metadata write
+ * strategy, but they must be maintained should we shift
+ * to a strategy that uses them.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * April 28, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_rsp__dist_md_write__flush(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr)
+{
+ int mpi_code;
+ int num_entries = 0;
+ haddr_t * haddr_buf_ptr = NULL;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed 2", mpi_result)
- }
+ FUNC_ENTER_NOAPI(H5AC_rsp__dist_md_write__flush, FAIL)
+ HDassert( f != NULL );
+ HDassert( f->shared->cache == cache_ptr );
- /* translate the MPI_Offsets to haddr_t */
- i = 0;
- while ( i < num_entries )
- {
- haddr_buf_ptr[i] = H5FD_mpi_MPIOff_to_haddr(MPI_Offset_buf_ptr[i]);
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( haddr_buf_ptr[i] == HADDR_UNDEF ) {
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, \
- "can't convert MPI off to haddr")
- }
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
- i++;
- }
+ /* first construct the candidate list -- initially, this will be in the
+ * form of a skip list. We will convert it later.
+ */
+ if(H5C_construct_candidate_list__clean_cache(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
+ if(aux_ptr->candidate_slist_len > 0) {
+ herr_t result;
- /* mark the indicated entries as clean */
- if ( H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id,
- (int32_t)num_entries, &(haddr_buf_ptr[0])) < 0 ) {
+ /* convert the candidate list into the format we
+ * are used to receiving from process 0.
+ */
+ if(H5AC_copy_candidate_list_to_buffer(cache_ptr, &num_entries, &haddr_buf_ptr, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Can't mark entries clean.")
+ /* initial sync point barrier */
+ if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
- }
- }
+ /* apply the candidate list */
+ aux_ptr->write_permitted = TRUE;
-done:
+ result = H5C_apply_candidate_list(f,
+ dxpl_id,
+ dxpl_id,
+ cache_ptr,
+ num_entries,
+ haddr_buf_ptr,
+ aux_ptr->mpi_rank,
+ aux_ptr->mpi_size);
- if ( MPI_Offset_buf_ptr != NULL ) {
+ aux_ptr->write_permitted = FALSE;
- MPI_Offset_buf_ptr =
- (MPI_Offset *)H5MM_xfree((void *)MPI_Offset_buf_ptr);
- }
+ if(result < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't apply candidate list.")
- if ( haddr_buf_ptr != NULL ) {
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce posix semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if(aux_ptr->write_done != NULL)
+ (aux_ptr->write_done)();
+ /* final sync point barrier */
+ if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 2", mpi_code)
+
+ /* if this is process zero, tidy up the dirtied,
+ * and flushed and still clean lists.
+ */
+ if(aux_ptr->mpi_rank == 0) {
+ if(H5AC_tidy_cache_0_lists(cache_ptr, num_entries, haddr_buf_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't tidy up process 0 lists.")
+ } /* end if */
+ } /* end if */
+
+ /* if it is defined, call the sync point done callback. Note
+ * that this callback is defined purely for testing purposes,
+ * and should be undefined under normal operating circumstances.
+ */
+ if(aux_ptr->sync_point_done != NULL)
+ (aux_ptr->sync_point_done)(num_entries, haddr_buf_ptr);
+
+done:
+ if(haddr_buf_ptr != NULL)
haddr_buf_ptr = (haddr_t *)H5MM_xfree((void *)haddr_buf_ptr);
- }
FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_rsp__dist_md_write__flush() */
+#endif /* H5_HAVE_PARALLEL */
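The argument above that the per-process candidate lists match relies on each process constructing its list deterministically from the same set of dirty entries. Here is a small sketch of that idea, assuming the dirty addresses are available as a plain array; the helper names are illustrative assumptions, not the skip-list based code in H5C_construct_candidate_list__clean_cache().

#include <stdlib.h>

/* order addresses without risking signed overflow in the comparator */
static int
cmp_addr(const void *a, const void *b)
{
    const unsigned long long x = *(const unsigned long long *)a;
    const unsigned long long y = *(const unsigned long long *)b;

    return (x > y) - (x < y);
}

/* Sorting makes the candidate list independent of the order in which the
 * dirty entries happened to be inserted locally, so processes that have
 * seen the same stream of dirty metadata produce identical lists.
 */
static void
build_candidate_list(unsigned long long *dirty_addrs, size_t n_dirty)
{
    qsort(dirty_addrs, n_dirty, sizeof(unsigned long long), cmp_addr);
}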
-} /* H5AC_receive_and_apply_clean_list() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_rsp__dist_md_write__flush_to_min_clean
+ *
+ * Purpose: Routine for handling the details of running a sync point
+ * triggered by the accumulation of dirty metadata (as
+ * opposed to a flush call to the API) when the distributed
+ * metadata write strategy is selected.
+ *
+ * After invocation and initial sanity checking this function
+ * first checks to see if evictions are enabled -- if they
+ * are not, the function does nothing and returns.
+ *
+ * Otherwise, process zero constructs a list of entries to
+ * be flushed in order to bring the process zero cache back
+ * within its min clean requirement. Note that this list
+ * (the candidate list) may be empty.
+ *
+ * Then, all processes participate in a barrier.
+ *
+ * After the barrier, process 0 broadcasts the number of
+ * entries in the candidate list prepared above, and all
+ * other processes receive this number.
+ *
+ * If this number is zero, we are done, and the function
+ * returns without further action.
+ *
+ * Otherwise, process 0 broadcasts the sorted list of
+ * candidate entries, and all other processes receive it.
+ *
+ * Then, each process uses the same algorithm to assign
+ * each entry on the candidate list to exactly one process
+ * for flushing.
+ *
+ * Each process then flushes the entries assigned to it, and
+ * marks all other entries on the candidate list as clean.
+ *
+ * Finally, all processes participate in a second barrier to
+ * avoid messages from the past/future bugs.
+ *
+ * At the end of this process, process 0 and only process 0
+ * must tidy up its lists of dirtied and cleaned entries.
+ * These lists are not used in the distributed metadata write
+ * strategy, but they must be maintained should we shift
+ * to a strategy that uses them.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * April 28, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_rsp__dist_md_write__flush_to_min_clean(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr)
+{
+ hbool_t evictions_enabled;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_rsp__dist_md_write__flush_to_min_clean, FAIL)
+
+ HDassert( f != NULL );
+ HDassert( f->shared->cache == cache_ptr );
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+
+ /* Query if evictions are allowed */
+ if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
+
+ if(evictions_enabled) {
+ /* construct candidate list -- process 0 only */
+ if(aux_ptr->mpi_rank == 0) {
+ if(H5AC_construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
+ } /* mpi rank == 0 */
+
+ /* propagate and apply candidate list -- all processes */
+ if(H5AC_propagate_and_apply_candidate_list(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate and apply candidate list.")
+ } /* evictions enabled */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_rsp__dist_md_write__flush_to_min_clean() */
+#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
- * Function: H5AC_flush_entries
+ * Function: H5AC_rsp__p0_only__flush
*
- * Purpose: Flush the metadata cache associated with the specified file,
- * only writing from rank 0, but propagating the cleaned entries
- * to all ranks.
+ * Purpose: Routine for handling the details of running a sync point
+ * that is triggered by a flush -- which in turn must have been
+ * triggered by either a flush API call or a file close --
+ * when the process 0 only metadata write strategy is selected.
*
- * Return: Non-negative on success/Negative on failure if there was a
- * request to flush all items and something was protected.
+ * First, all processes participate in a barrier.
*
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * Aug 22 2009
+ * Then process zero flushes all dirty entries, and broadcasts
+ * the number of cleaned entries (if any) to all the other
+ * caches.
+ *
+ * If this number is zero, we are done.
+ *
+ * Otherwise, process 0 broadcasts the list of cleaned
+ * entries, and all other processes which are part of this
+ * file group receive it, and mark the listed entries as
+ * clean in their caches.
+ *
+ * Since all processes have the same set of dirty
+ * entries at the beginning of the sync point, and all
+ * entries that will be written are written before
+ * process zero broadcasts the number of cleaned entries,
+ * there is no need for a closing barrier.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * April 28, 2010
*
*-------------------------------------------------------------------------
*/
+#ifdef H5_HAVE_PARALLEL
herr_t
-H5AC_flush_entries(H5F_t *f)
+H5AC_rsp__p0_only__flush(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ int mpi_code;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5AC_flush_entries)
+ FUNC_ENTER_NOAPI(H5AC_rsp__p0_only__flush, FAIL)
- HDassert(f);
- HDassert(f->shared->cache);
+ HDassert( f != NULL );
+ HDassert( f->shared->cache == cache_ptr );
- /* Check if we have >1 ranks */
- if(f->shared->cache->aux_ptr) {
- H5AC_aux_t * aux_ptr = f->shared->cache->aux_ptr;
- int mpi_code;
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
-#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout,
- "%d::H5AC_flush: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
- (int)(aux_ptr->mpi_rank),
- (int)(aux_ptr->unprotect_dirty_bytes),
- (int)(aux_ptr->unprotect_dirty_bytes_updates),
- (int)(aux_ptr->insert_dirty_bytes),
- (int)(aux_ptr->insert_dirty_bytes_updates),
- (int)(aux_ptr->move_dirty_bytes),
- (int)(aux_ptr->move_dirty_bytes_updates));
-#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
+
+
+ /* to prevent "messages from the future" we must
+ * synchronize all processes before we start the flush.
+ * Hence the following barrier.
+ */
+ if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed 1", mpi_code)
+
+ /* Flush data to disk, from rank 0 process */
+ if(aux_ptr->mpi_rank == 0) {
+ herr_t result;
+
+ aux_ptr->write_permitted = TRUE;
+
+ result = H5C_flush_cache(f, dxpl_id, dxpl_id, H5AC__NO_FLAGS_SET);
+
+ aux_ptr->write_permitted = FALSE;
+
+ if(result < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+
+ if(aux_ptr->write_done != NULL)
+ (aux_ptr->write_done)();
+ } /* end if */
+
+ /* Propagate cleaned entries to other ranks. */
+ if(H5AC_propagate_flushed_and_still_clean_entries_list(f, H5AC_noblock_dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_rsp__p0_only__flush() */
+#endif /* H5_HAVE_PARALLEL */
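A minimal standalone sketch of the process 0 only flush pattern described above: a barrier, rank 0 does all the writing, then the cleaned addresses are broadcast so the other ranks can mark their copies clean. The callbacks (flush_all_dirty, mark_clean) and the use of long long for addresses are assumptions for the example; flush_all_dirty is assumed to heap-allocate the list it returns.

#include <mpi.h>
#include <stdlib.h>

static int
p0_only_flush_sketch(MPI_Comm comm,
                     int (*flush_all_dirty)(long long **cleaned_out, int *n_out),
                     void (*mark_clean)(long long))
{
    long long *cleaned = NULL;
    int n_cleaned = 0;
    int mpi_rank, i;

    MPI_Comm_rank(comm, &mpi_rank);

    /* no process may write before every process has reached the sync point */
    MPI_Barrier(comm);

    /* only rank 0 touches the file */
    if(mpi_rank == 0)
        if(flush_all_dirty(&cleaned, &n_cleaned) < 0)
            n_cleaned = 0;

    /* tell the other ranks how many entries were cleaned ... */
    MPI_Bcast(&n_cleaned, 1, MPI_INT, 0, comm);

    if(n_cleaned > 0) {
        /* ... and which ones, so they can mark them clean locally */
        if(mpi_rank != 0)
            if(NULL == (cleaned = (long long *)malloc((size_t)n_cleaned * sizeof(long long))))
                return -1;

        MPI_Bcast(cleaned, n_cleaned, MPI_LONG_LONG, 0, comm);

        if(mpi_rank != 0)
            for(i = 0; i < n_cleaned; i++)
                mark_clean(cleaned[i]);
    }

    /* all writes happen before the count is broadcast, so no closing
     * barrier is needed -- this mirrors the reasoning in the header above
     */
    free(cleaned);

    return 0;
}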
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_rsp__p0_only__flush_to_min_clean
+ *
+ * Purpose: Routine for handling the details of running a sync point
+ * triggered by the accumulation of dirty metadata (as
+ * opposed to a flush call to the API) when the process 0
+ * only metadata write strategy is selected.
+ *
+ * After invocation and initial sanity checking this function
+ * first checks to see if evictions are enabled -- if they
+ * are not, the function does nothing and returns.
+ *
+ * Otherwise, all processes participate in a barrier.
+ *
+ * After the barrier, if this is process 0, the function
+ * causes the cache to flush sufficient entries to get the
+ * cache back within its minimum clean fraction, and broadcast
+ * the number of entries which have been flushed since
+ * the last sync point, and are still clean.
+ *
+ * If this number is zero, we are done.
+ *
+ * Otherwise, process 0 broadcasts the list of cleaned
+ * entries, and all other processes which are part of this
+ * file group receive it, and mark the listed entries as
+ * clean in their caches.
+ *
+ * Since all processes have the same set of dirty
+ * entries at the beginning of the sync point, and all
+ * entries that will be written are written before
+ * process zero broadcasts the number of cleaned entries,
+ * there is no need for a closing barrier.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * April 28, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_rsp__p0_only__flush_to_min_clean(H5F_t *f,
+ hid_t dxpl_id,
+ H5AC_t * cache_ptr)
+{
+ hbool_t evictions_enabled;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_rsp__p0_only__flush_to_min_clean, FAIL)
+
+ HDassert( f != NULL );
+ HDassert( f->shared->cache == cache_ptr );
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY );
+
+ /* Query if evictions are allowed */
+ if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
+
+ /* Flush if evictions are allowed -- following call
+ * will cause process 0 to flush to min clean size,
+ * and then propagate the newly clean entries to the
+ * other processes.
+ *
+ * Otherwise, do nothing.
+ */
+ if(evictions_enabled) {
+ int mpi_code;
/* to prevent "messages from the future" we must synchronize all
- * processes before we start the flush. Hence the following
- * barrier.
+ * processes before process 0 starts the flush. Hence the
+ * following barrier.
*/
if(MPI_SUCCESS != (mpi_code = MPI_Barrier(aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
- /* Flush data to disk, from rank 0 process */
- if(aux_ptr->mpi_rank == 0 ) {
- herr_t status;
+ if(0 == aux_ptr->mpi_rank) {
+ herr_t result;
- aux_ptr->write_permitted = TRUE;
+ /* here, process 0 flushes as many entries as necessary to
+ * comply with the currently specified min clean size.
+ * Note that it is quite possible that no entries will be
+ * flushed.
+ */
+ aux_ptr->write_permitted = TRUE;
- status = H5C_flush_cache(f,
- H5AC_noblock_dxpl_id,
- H5AC_noblock_dxpl_id,
- H5AC__NO_FLAGS_SET);
+ result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_noblock_dxpl_id);
- aux_ptr->write_permitted = FALSE;
+ aux_ptr->write_permitted = FALSE;
- if(status < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+ if(result < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
+ /* this call exists primarily for the test code -- it is used
+ * to enforce POSIX semantics on the process used to simulate
+ * reads and writes in t_cache.c.
+ */
if(aux_ptr->write_done != NULL)
(aux_ptr->write_done)();
- } /* end if ( aux_ptr->mpi_rank == 0 ) */
+ } /* end if */
- /* Propagate cleaned entries to other ranks */
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
- H5AC_noblock_dxpl_id,
- f->shared->cache,
- FALSE) < 0 )
+ if(H5AC_propagate_flushed_and_still_clean_entries_list(f, dxpl_id, cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
- } /* end if ( aux_ptr != NULL ) */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_rsp__p0_only__flush_to_min_clean() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_run_sync_point
+ *
+ * Purpose: Top level routine for managing a sync point between all
+ * meta data caches in the parallel case. Since all caches
+ * see the same sequence of dirty metadata, we simply count
+ * bytes of dirty metadata, and run a sync point whenever the
+ * number of dirty bytes of metadata seen since the last
+ * sync point exceeds a threshold that is common across all
+ * processes. We also run sync points in response to
+ * HDF5 API calls triggering either a flush or a file close.
+ *
+ * In earlier versions of PHDF5, only the metadata cache with
+ * mpi rank 0 was allowed to write to file. All other
+ * metadata caches on processes with rank greater than 0 were
+ * required to retain dirty entries until they were notified
+ * that the entry was clean.
+ *
+ * This function was created to make it easier for us to
+ * experiment with other options, as it is a single point
+ * for the execution of sync points.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * March 11, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_run_sync_point(H5F_t *f,
+ hid_t dxpl_id,
+ int sync_point_op)
+{
+ H5AC_t * cache_ptr;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_run_sync_point, FAIL)
+
+ HDassert( f != NULL );
+
+ cache_ptr = f->shared->cache;
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+
+ HDassert( ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN ) ||
+ ( sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_CACHE ) );
+
+#if H5AC_DEBUG_DIRTY_BYTES_CREATION
+ HDfprintf(stdout,
+ "%d:H5AC_propagate...:%d: (u/uu/i/iu/r/ru) = %d/%d/%d/%d/%d/%d\n",
+ (int)(aux_ptr->mpi_rank),
+ (int)(aux_ptr->dirty_bytes_propagations),
+ (int)(aux_ptr->unprotect_dirty_bytes),
+ (int)(aux_ptr->unprotect_dirty_bytes_updates),
+ (int)(aux_ptr->insert_dirty_bytes),
+ (int)(aux_ptr->insert_dirty_bytes_updates),
+ (int)(aux_ptr->move_dirty_bytes),
+ (int)(aux_ptr->move_dirty_bytes_updates));
+#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
+
+ switch(aux_ptr->metadata_write_strategy) {
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ switch(sync_point_op) {
+ case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
+ if(H5AC_rsp__p0_only__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush_to_min_clean() failed.")
+ break;
+
+ case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
+ if(H5AC_rsp__p0_only__flush(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__p0_only__flush() failed.")
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown flush op");
+ break;
+ } /* end switch */
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ switch(sync_point_op) {
+ case H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN:
+ if(H5AC_rsp__dist_md_write__flush_to_min_clean(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush_to_min_clean() failed.")
+ break;
+
+ case H5AC_SYNC_POINT_OP__FLUSH_CACHE:
+ if(H5AC_rsp__dist_md_write__flush(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5AC_rsp__dist_md_write__flush() failed.")
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown flush op");
+ break;
+ } /* end switch */
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown metadata write strategy.")
+ break;
+ } /* end switch */
+
+ /* reset the dirty bytes count */
+ aux_ptr->dirty_bytes = 0;
+
+#if H5AC_DEBUG_DIRTY_BYTES_CREATION
+ aux_ptr->dirty_bytes_propagations += 1;
+ aux_ptr->unprotect_dirty_bytes = 0;
+ aux_ptr->unprotect_dirty_bytes_updates = 0;
+ aux_ptr->insert_dirty_bytes = 0;
+ aux_ptr->insert_dirty_bytes_updates = 0;
+ aux_ptr->move_dirty_bytes = 0;
+ aux_ptr->move_dirty_bytes_updates = 0;
+#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_run_sync_point() */
+#endif /* H5_HAVE_PARALLEL */
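The dirty-byte threshold trigger described in the header comment can be illustrated with a short sketch. Since every process sees the same collective sequence of metadata writes, a purely local counter crosses a shared threshold on the same operation everywhere, so the sync point is entered collectively with no extra communication. The names (note_dirty_metadata, run_sync_point) and the threshold value are assumptions for the example only.

#include <stddef.h>

static size_t dirty_bytes = 0;
static const size_t dirty_bytes_threshold = 256 * 1024; /* identical on all ranks */

/* called after every collective operation that dirties metadata */
static int
note_dirty_metadata(size_t nbytes, int (*run_sync_point)(void))
{
    dirty_bytes += nbytes;

    /* all ranks accumulate the same totals, so they all pass the threshold
     * on the same operation and enter the sync point together
     */
    if(dirty_bytes >= dirty_bytes_threshold) {
        if(run_sync_point() < 0)
            return -1;
        dirty_bytes = 0;
    }

    return 0;
}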
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_tidy_cache_0_lists()
+ *
+ * Purpose: In the distributed metadata write strategy, not all dirty
+ * entries are written by process 0 -- thus we must tidy
+ * up the dirtied, and flushed and still clean lists
+ * maintained by process zero after each sync point.
+ *
+ * This procedure exists to tend to this issue.
+ *
+ * At this point, all entries that process 0 cleared should
+ * have been removed from both the dirty and flushed and
+ * still clean lists, and entries that process 0 has flushed
+ * should have been removed from the dirtied list and added
+ * to the flushed and still clean list.
+ *
+ * However, since the distributed metadata write strategy
+ * doesn't make use of these lists, the objective is simply
+ * to maintain these lists in a consistent state that allows
+ * them to be used should the metadata write strategy change
+ * to one that uses these lists.
+ *
+ * Thus for our purposes, all we need to do is remove from
+ * the dirtied and flushed and still clean lists all
+ * references to entries that appear in the candidate list.
+ *
+ * Return: Success: non-negative
+ *
+ * Failure: negative
+ *
+ * Programmer: John Mainzer
+ * 4/20/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+static herr_t
+H5AC_tidy_cache_0_lists(H5AC_t * cache_ptr,
+ int num_candidates,
+ haddr_t * candidates_list_ptr)
+
+{
+ int i;
+ H5AC_aux_t * aux_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5AC_tidy_cache_0_lists, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ HDassert( aux_ptr != NULL );
+ HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
+ HDassert( aux_ptr->metadata_write_strategy ==
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED );
+ HDassert( aux_ptr->mpi_rank == 0 );
+ HDassert( num_candidates > 0 );
+ HDassert( candidates_list_ptr != NULL );
+
+ /* clean up dirtied and flushed and still clean lists by removing
+ * all entries on the candidate list. Cleared entries should
+ * have been removed from both the dirty and cleaned lists at
+ * this point, flushed entries should have been added to the
+ * cleaned list. However, for this metadata write strategy,
+ * we just want to remove all references to the candidate entries.
+ */
+ for(i = 0; i < num_candidates; i++) {
+ H5AC_slist_entry_t * d_slist_entry_ptr;
+ H5AC_slist_entry_t * c_slist_entry_ptr;
+ haddr_t addr;
+
+ addr = candidates_list_ptr[i];
+
+ /* addr must be either on the dirtied list, or on the flushed
+ * and still clean list. Remove it.
+ */
+ d_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->d_slist_ptr, (void *)&addr);
+ if(d_slist_entry_ptr != NULL) {
+ HDassert(d_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert(d_slist_entry_ptr->addr == addr);
+
+ if(H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)) != d_slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from dirty entry slist.")
+
+ d_slist_entry_ptr->magic = 0;
+ H5FL_FREE(H5AC_slist_entry_t, d_slist_entry_ptr);
+
+ aux_ptr->d_slist_len -= 1;
+
+ HDassert(aux_ptr->d_slist_len >= 0);
+ } /* end if */
+
+ c_slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_search(aux_ptr->c_slist_ptr, (void *)&addr);
+ if(c_slist_entry_ptr != NULL) {
+ HDassert(c_slist_entry_ptr->magic == H5AC__H5AC_SLIST_ENTRY_T_MAGIC);
+ HDassert(c_slist_entry_ptr->addr == addr);
+
+ if(H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)) != c_slist_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDELETE, FAIL, "Can't delete entry from clean entry slist.")
+
+ c_slist_entry_ptr->magic = 0;
+ H5FL_FREE(H5AC_slist_entry_t, c_slist_entry_ptr);
+
+ aux_ptr->c_slist_len -= 1;
+
+ HDassert( aux_ptr->c_slist_len >= 0 );
+ } /* end if */
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_tidy_cache_0_lists() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_flush_entries
+ *
+ * Purpose: Flush the metadata cache associated with the specified file,
+ * only writing from rank 0, but propagating the cleaned entries
+ * to all ranks.
+ *
+ * Return: Non-negative on success/Negative on failure if there was a
+ * request to flush all items and something was protected.
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 22 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5AC_flush_entries(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5AC_flush_entries)
+
+ HDassert(f);
+ HDassert(f->shared->cache);
+
+ /* Check if we have >1 ranks */
+ if(f->shared->cache->aux_ptr) {
+ if(H5AC_run_sync_point(f, H5AC_noblock_dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_CACHE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -4057,7 +5155,6 @@ herr_t
H5AC_ignore_tags(H5F_t * f)
{
/* Variable Declarations */
- H5AC_t * cache_ptr = NULL;
herr_t ret_value = SUCCEED;
/* Function Enter Macro */
@@ -4068,18 +5165,12 @@ H5AC_ignore_tags(H5F_t * f)
HDassert(f->shared);
HDassert(f->shared->cache);
- /* Get cache pointer */
- cache_ptr = f->shared->cache;
-
/* Set up a new metadata tag */
- if (H5C_ignore_tags(cache_ptr) < 0)
+ if(H5C_ignore_tags(f->shared->cache) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed.")
done:
-
- /* Function Leave Macro */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_ignore_tags() */
@@ -4103,27 +5194,24 @@ H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag)
herr_t ret_value = SUCCEED;
/* Function Enter Macro */
- FUNC_ENTER_NOAPI_NOINIT(H5AC_tag)
+ FUNC_ENTER_NOAPI(H5AC_tag, FAIL)
/* Check Arguments */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
/* Get the current tag value and return that (if prev_tag is NOT null)*/
- if (prev_tag) {
- if( (H5P_get(dxpl, "H5AC_metadata_tag", prev_tag)) < 0 )
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query dxpl");
- }
+ if(prev_tag) {
+ if((H5P_get(dxpl, "H5AC_metadata_tag", prev_tag)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query dxpl")
+ } /* end if */
/* Set the provided tag value in the dxpl_id. */
if(H5P_set(dxpl, "H5AC_metadata_tag", &metadata_tag) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set property in dxpl")
done:
-
- /* Function Leave Macro */
- FUNC_LEAVE_NOAPI(ret_value);
-
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_tag */
@@ -4144,22 +5232,19 @@ done:
herr_t
H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag)
{
- /* Variable Declarations */
herr_t ret_value = SUCCEED;
/* Function Enter Macro */
- FUNC_ENTER_NOAPI_NOINIT(H5AC_retag_copied_metadata)
+ FUNC_ENTER_NOAPI(H5AC_retag_copied_metadata, FAIL)
/* Assertions */
HDassert(f);
HDassert(f->shared);
/* Call cache-level function to retag entries */
- H5C_retag_copied_metadata(f->shared->cache, metadata_tag);
+ H5C_retag_copied_metadata(f->shared->cache, metadata_tag);
done:
-
- /* Function Leave Macro */
- FUNC_LEAVE_NOAPI(ret_value);
-
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_retag_copied_metadata */
+
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index d5346f5..3060a70 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -46,6 +46,17 @@
#define H5AC_DEBUG_DIRTY_BYTES_CREATION 0
+#ifdef H5_HAVE_PARALLEL
+
+/* The following #defines are used to specify the operation required
+ * at a sync point.
+ */
+
+#define H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN 0
+#define H5AC_SYNC_POINT_OP__FLUSH_CACHE 1
+
+#endif /* H5_HAVE_PARALLEL */
+
/*-------------------------------------------------------------------------
* It is a bit difficult to set ranges of allowable values on the
* dirty_bytes_threshold field of H5AC_aux_t. The following are
@@ -59,6 +70,9 @@
#define H5AC__MAX_DIRTY_BYTES_THRESHOLD (int32_t) \
(H5C__MAX_MAX_CACHE_SIZE / 4)
+#define H5AC__DEFAULT_METADATA_WRITE_STRATEGY \
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED
+
/****************************************************************************
*
* structure H5AC_aux_t
@@ -162,6 +176,12 @@
* broadcast. This field is reset to zero after each such
* broadcast.
*
+ * metadata_write_strategy: Integer code indicating how we will be
+ * writing the metadata. In the first incarnation of
+ * this code, all writes were done from process 0. This
+ * field exists to facilitate experiments with other
+ * strategies.
+ *
* dirty_bytes_propagations: This field only exists when the
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
@@ -211,6 +231,19 @@
* been created via move operations since the last time
* the cleaned list was propagated.
*
+ * Things have changed a bit since the following four fields were defined.
+ * If metadata_write_strategy is H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY,
+ * all comments hold as before -- with the caveat that, pending further
+ * coding, the process 0 metadata cache is forbidden to flush entries outside
+ * of a sync point.
+ *
+ * However, for the other metadata write strategies, these fields are used
+ * only to maintain the correct dirty byte count on process zero -- and in
+ * most if not all cases, this is redundant, as process zero will be barred
+ * from flushing entries outside of a sync point.
+ *
+ * JRM -- 3/16/10
+ *
* d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
* of entries that have been dirtied since the last time they
* were listed in a clean entries broadcast. This list is
@@ -259,6 +292,17 @@
* contain the value 0 on all processes other than process 0.
* It exists primarily for sanity checking.
*
+ * The following two fields are used only when metadata_write_strategy
+ * is H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
+ *
+ * candidate_slist_ptr: Pointer to an instance of H5SL_t used by process 0
+ * to construct a list of entries to be flushed at this sync
+ * point. This list is then broadcast to the other processes,
+ * which then either flush or mark clean all entries on it.
+ *
+ * candidate_slist_len: Integer field containing the number of entries on the
+ * candidate list. It exists primarily for sanity checking.
+ *
* write_done: In the parallel test bed, it is necessary to ensure that
* all writes to the server process from cache 0 complete
* before it enters the barrier call with the other caches.
@@ -271,6 +315,19 @@
* This field must be set to NULL when the callback is not
* needed.
*
+ * Note: This field has been extended for use by all processes
+ * with the addition of support for the distributed
+ * metadata write strategy.
+ * JRM -- 5/9/10
+ *
+ * sync_point_done: In the parallel test bed, it is necessary to verify
+ * that the expected writes, and only the expected writes,
+ * have taken place at the end of each sync point.
+ *
+ * The sync_point_done callback allows t_cache to perform
+ * this verification. The field is set to NULL when the
+ * callback is not needed.
+ *
****************************************************************************/
#ifdef H5_HAVE_PARALLEL
@@ -293,6 +350,8 @@ typedef struct H5AC_aux_t
int32_t dirty_bytes;
+ int32_t metadata_write_strategy;
+
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
int32_t dirty_bytes_propagations;
@@ -316,8 +375,15 @@ typedef struct H5AC_aux_t
int32_t c_slist_len;
+ H5SL_t * candidate_slist_ptr;
+
+ int32_t candidate_slist_len;
+
void (* write_done)(void);
+ void (* sync_point_done)(int num_writes,
+ haddr_t * written_entries_tbl);
+
} H5AC_aux_t; /* struct H5AC_aux_t */
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 221dd41..b55572d 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -223,6 +223,9 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
/* Default cache configuration. */
+#define H5AC__DEFAULT_METADATA_WRITE_STRATEGY \
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED
+
#ifdef H5_HAVE_PARALLEL
#define H5AC__DEFAULT_CACHE_CONFIG \
{ \
@@ -255,7 +258,9 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
/* int epochs_before_eviction = */ 3, \
/* hbool_t apply_empty_reserve = */ TRUE, \
/* double empty_reserve = */ 0.1, \
- /* int dirty_bytes_threshold = */ (256 * 1024) \
+ /* int dirty_bytes_threshold = */ (256 * 1024), \
+ /* int metadata_write_strategy = */ \
+ H5AC__DEFAULT_METADATA_WRITE_STRATEGY \
}
#else /* H5_HAVE_PARALLEL */
#define H5AC__DEFAULT_CACHE_CONFIG \
@@ -289,7 +294,9 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
/* int epochs_before_eviction = */ 3, \
/* hbool_t apply_empty_reserve = */ TRUE, \
/* double empty_reserve = */ 0.1, \
- /* int dirty_bytes_threshold = */ (256 * 1024) \
+ /* int dirty_bytes_threshold = */ (256 * 1024), \
+ /* int metadata_write_strategy = */ \
+ H5AC__DEFAULT_METADATA_WRITE_STRATEGY \
}
#endif /* H5_HAVE_PARALLEL */
@@ -359,6 +366,9 @@ H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id,
const H5AC_class_t *type, haddr_t addr,
unsigned flags);
+H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
+ void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
+
H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
H5_DLL herr_t H5AC_stats(const H5F_t *f);
@@ -393,5 +403,9 @@ H5_DLL herr_t H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag);
H5_DLL herr_t H5AC_ignore_tags(H5F_t * f);
+#ifdef H5_HAVE_PARALLEL
+H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
+#endif /* H5_HAVE_PARALLEL */
+
#endif /* !_H5ACprivate_H */
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 02941b6..639179c 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -354,21 +354,22 @@ extern "C" {
* Parallel Configuration Fields:
*
* In PHDF5, all operations that modify metadata must be executed collectively.
+ *
* We used to think that this was enough to ensure consistency across the
* metadata caches, but since we allow processes to read metadata individually,
* the order of dirty entries in the LRU list can vary across processes,
* which can result in inconsistencies between the caches.
*
- * To prevent this, only the metadata cache on process 0 is allowed to write
- * to file, and then only after synchronizing with the other caches. After
- * it writes entries to file, it sends the base addresses of the now clean
- * entries to the other caches, so they can mark these entries clean as well.
+ * PHDF5 uses several strategies to prevent such inconsistencies in metadata,
+ * all of which use the fact that the same stream of dirty metadata is seen
+ * by all processes for purposes of synchronization. This is done by
+ * having each process count the number of bytes of dirty metadata generated,
+ * and then running a "sync point" whenever this count exceeds a user
+ * specified threshold (see dirty_bytes_threshold below).
*
- * The different caches know when to synchronize caches by counting the
- * number of bytes of dirty metadata created by the collective operations
- * modifying metadata. Whenever this count exceeds a user specified
- * threshold (see below), process 0 flushes down to its minimum clean size,
- * and then sends the list of newly cleaned entries to the other caches.
+ * The current metadata write strategy is indicated by the
+ * metadata_write_strategy field. The possible values of this field, along
+ * with the associated metadata write strategies are discussed below.
*
* dirty_bytes_threshold: Threshold of dirty byte creation used to
* synchronize updates between caches. (See above for outline and
@@ -378,11 +379,67 @@ extern "C" {
* file. This field is ignored unless HDF5 has been compiled for
* parallel.
*
+ * metadata_write_strategy: Integer field containing a code indicating the
+ * desired metadata write strategy. The valid values of this field
+ * are enumerated and discussed below:
+ *
+ *
+ * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ *
+ * When metadata_write_strategy is set to this value, only process
+ * zero is allowed to write dirty metadata to disk. All other
+ * processes must retain dirty metadata until they are informed at
+ * a sync point that the dirty metadata in question has been written
+ * to disk.
+ *
+ * When the sync point is reached (or when there is a user generated
+ * flush), process zero flushes sufficient entries to bring it into
+ *      compliance with its min clean size (or flushes all dirty entries in
+ *      the case of a user generated flush), broadcasts the list of
+ * entries just cleaned to all the other processes, and then exits
+ * the sync point.
+ *
+ * Upon receipt of the broadcast, the other processes mark the indicated
+ * entries as clean, and leave the sync point as well.
+ *
+ *
+ * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ *
+ * In the distributed metadata write strategy, process zero still makes
+ * the decisions as to what entries should be flushed, but the actual
+ * flushes are distributed across the processes in the computation to
+ * the extent possible.
+ *
+ * In this strategy, when a sync point is triggered (either by dirty
+ * metadata creation or manual flush), all processes enter a barrier.
+ *
+ * On the other side of the barrier, process 0 constructs an ordered
+ * list of the entries to be flushed, and then broadcasts this list
+ * to the caches in all the processes.
+ *
+ * All processes then scan the list of entries to be flushed, flushing
+ * some, and marking the rest as clean. The algorithm for this purpose
+ * ensures that each entry in the list is flushed exactly once, and
+ * all are marked clean in each cache.
+ *
+ * Note that in the case of a flush of the cache, no message passing
+ * is necessary, as all processes have the same list of dirty entries,
+ * and all of these entries must be flushed. Thus in this case it is
+ * sufficient for each process to sort its list of dirty entries after
+ * leaving the initial barrier, and use this list as if it had been
+ * received from process zero.
+ *
+ * To avoid possible messages from the past/future, all caches must
+ * wait until all caches are done before leaving the sync point.
+ *
****************************************************************************/
#define H5AC__CURR_CACHE_CONFIG_VERSION 1
#define H5AC__MAX_TRACE_FILE_NAME_LEN 1024
+#define H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY 0
+#define H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED 1
+
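To make the distributed strategy described above concrete, here is a minimal, self-contained sketch of one sync point using raw MPI rather than the library's internals. Every name in it (run_sync_point, flush_entry, mark_clean, addr_t) is a hypothetical stand-in, and the round-robin assignment is a simplification of the adjacent-run assignment used by H5C_apply_candidate_list(); in the library the real entry points are H5AC_run_sync_point() and H5C_apply_candidate_list().

    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>

    typedef unsigned long long addr_t;     /* stand-in for HDF5's haddr_t */

    static void flush_entry(int rank, addr_t a) { printf("rank %d: flush 0x%llx\n", rank, a); }
    static void mark_clean(int rank, addr_t a)  { printf("rank %d: clean 0x%llx\n", rank, a); }

    /* One sync point under the distributed write strategy: rank 0 owns the
     * candidate list, broadcasts it, and every rank flushes its share and
     * marks the rest clean before anyone leaves.
     */
    static void run_sync_point(MPI_Comm comm, int n, addr_t *cand)
    {
        int rank, size, i;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Barrier(comm);                      /* all ranks enter the sync point */
        MPI_Bcast(&n, 1, MPI_INT, 0, comm);     /* length of rank 0's candidate list */
        if(rank != 0)
            cand = (addr_t *)malloc((size_t)n * sizeof(addr_t));
        MPI_Bcast(cand, n, MPI_UNSIGNED_LONG_LONG, 0, comm);

        for(i = 0; i < n; i++) {                /* flush my share, mark the rest clean */
            if(i % size == rank)                /* simple round robin; the patch       */
                flush_entry(rank, cand[i]);     /* assigns adjacent runs instead       */
            else
                mark_clean(rank, cand[i]);
        }

        MPI_Barrier(comm);                      /* no one leaves until all writes are done */
        if(rank != 0)
            free(cand);
    }

    int main(int argc, char **argv)
    {
        addr_t list[] = {0x200, 0x400, 0x1000, 0x1200};   /* made-up addresses */
        int rank;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        run_sync_point(MPI_COMM_WORLD, (rank == 0) ? 4 : 0, (rank == 0) ? list : NULL);
        MPI_Finalize();
        return 0;
    }
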
typedef struct H5AC_cache_config_t
{
/* general configuration fields: */
@@ -440,6 +497,7 @@ typedef struct H5AC_cache_config_t
/* parallel configuration fields: */
int dirty_bytes_threshold;
+ int metadata_write_strategy;
} H5AC_cache_config_t;
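For reference, an application selects between the two strategies through the struct above on a file access property list. The following is a minimal sketch assuming the usual H5Pget_mdc_config()/H5Pset_mdc_config() round trip and a build that carries this patch (the metadata_write_strategy field does not exist in earlier releases); error handling is reduced to a single check and fapl_id is assumed to have been created by the caller.

    #include "hdf5.h"

    /* Sketch: request the distributed metadata write strategy on an existing
     * file access property list (fapl_id created with H5Pcreate(H5P_FILE_ACCESS)).
     */
    static herr_t use_distributed_writes(hid_t fapl_id)
    {
        H5AC_cache_config_t config;

        config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
        if(H5Pget_mdc_config(fapl_id, &config) < 0)     /* start from current settings */
            return -1;

        config.metadata_write_strategy = H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED;

        return H5Pset_mdc_config(fapl_id, &config);
    }
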
diff --git a/src/H5C.c b/src/H5C.c
index bfb7e05..11ad8bb 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -335,6 +335,624 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5C_apply_candidate_list
+ *
+ * Purpose: Apply the supplied candidate list.
+ *
+ * We used to do this by simply having each process write
+ * every mpi_size-th entry in the candidate list, starting
+ * at index mpi_rank, and mark all the others clean.
+ *
+ * However, this can cause unnecessary contention in a file
+ * system by increasing the number of processes writing to
+ * adjacent locations in the HDF5 file.
+ *
+ *              To attempt to minimize this, we now arrange matters such
+ * that each process writes n adjacent entries in the
+ * candidate list, and marks all others clean. We must do
+ * this in such a fashion as to guarantee that each entry
+ * on the candidate list is written by exactly one process,
+ * and marked clean by all others.
+ *
+ * To do this, first construct a table mapping mpi_rank
+ * to the index of the first entry in the candidate list to
+ * be written by the process of that mpi_rank, and then use
+ * the table to control which entries are written and which
+ * are marked as clean as a function of the mpi_rank.
+ *
+ * Note that the table must be identical on all processes, as
+ * all see the same candidate list, mpi_size, and mpi_rank --
+ * the inputs used to construct the table.
+ *
+ * We construct the table as follows. Let:
+ *
+ * n = num_candidates / mpi_size;
+ *
+ * m = num_candidates % mpi_size;
+ *
+ * Now allocate an array of integers of length mpi_size + 1,
+ * and call this array candidate_assignment_table.
+ *
+ * Conceptually, if the number of candidates is a multiple
+ * of the mpi_size, we simply pass through the candidate list
+ *              and assign n entries to each process to flush, storing
+ *              the index of each process's first entry to flush in
+ *              candidate_assignment_table[mpi_rank].
+ *
+ *              In the more common case, in which the number of candidates
+ *              isn't a multiple of the mpi_size, we pretend it is, and
+ * give num_candidates % mpi_size processes one extra entry
+ * each to make things work out.
+ *
+ * Once the table is constructed, we determine the first and
+ * last entry this process is to flush as follows:
+ *
+ * first_entry_to_flush = candidate_assignment_table[mpi_rank]
+ *
+ * last_entry_to_flush =
+ * candidate_assignment_table[mpi_rank + 1] - 1;
+ *
+ * With these values determined, we simply scan through the
+ * candidate list, marking all entries in the range
+ * [first_entry_to_flush, last_entry_to_flush] for flush,
+ * and all others to be cleaned.
+ *
+ * Finally, we scan the LRU from tail to head, flushing
+ * or marking clean the candidate entries as indicated.
+ * If necessary, we scan the pinned list as well.
+ *
+ * Note that this function will fail if any protected or
+ * clean entries appear on the candidate list.
+ *
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ * Modifications:
+ *
+ * Heavily reworked to have each process flush a group of
+ * adjacent entries.
+ * JRM -- 4/15/10
+ *
+ *-------------------------------------------------------------------------
+ */
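The table construction spelled out above condenses to a few lines. The following standalone sketch (not the function below) builds the same candidate_assignment_table for made-up values of num_candidates and mpi_size and prints the [first, last] range each rank would flush.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int num_candidates = 10, mpi_size = 4;     /* made-up example sizes */
        int n = num_candidates / mpi_size;         /* base entries per rank */
        int m = num_candidates % mpi_size;         /* ranks that get one extra entry */
        int *table = (int *)malloc((size_t)(mpi_size + 1) * sizeof(int));
        int i, mpi_rank;

        table[0] = 0;
        table[mpi_size] = num_candidates;
        for(i = 1; i < mpi_size; i++)              /* the first m ranks get n + 1 entries */
            table[i] = table[i - 1] + n + ((i <= m) ? 1 : 0);

        for(mpi_rank = 0; mpi_rank < mpi_size; mpi_rank++)
            printf("rank %d flushes candidates [%d, %d]\n",
                   mpi_rank, table[mpi_rank], table[mpi_rank + 1] - 1);

        /* With 10 candidates over 4 ranks: n = 2, m = 2, table = {0, 3, 6, 8, 10},
         * so ranks 0 and 1 flush three entries each, ranks 2 and 3 two each.
         */
        free(table);
        return 0;
    }
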
+#ifdef H5_HAVE_PARALLEL
+#define H5C_APPLY_CANDIDATE_LIST__DEBUG 0
+herr_t
+H5C_apply_candidate_list(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ int num_candidates,
+ haddr_t * candidates_list_ptr,
+ int mpi_rank,
+ int mpi_size)
+{
+ hbool_t first_flush = FALSE;
+ int i;
+ int m;
+ int n;
+ int first_entry_to_flush;
+ int last_entry_to_flush;
+ int entries_to_clear = 0;
+ int entries_to_flush = 0;
+ int entries_cleared = 0;
+ int entries_flushed = 0;
+ int entries_examined = 0;
+ int initial_list_len;
+ int * candidate_assignment_table = NULL;
+ haddr_t addr;
+ H5C_cache_entry_t * clear_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * flush_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ haddr_t last_addr;
+#endif /* H5C_DO_SANITY_CHECKS */
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ char tbl_buf[1024];
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_apply_candidate_list, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+ HDassert( num_candidates > 0 );
+ HDassert( num_candidates <= cache_ptr->slist_len );
+ HDassert( candidates_list_ptr != NULL );
+ HDassert( 0 <= mpi_rank );
+ HDassert( mpi_rank < mpi_size );
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n",
+ FUNC, mpi_rank);
+ for ( i = 0; i < 1024; i++ ) tbl_buf[i] = '\0';
+ sprintf(&(tbl_buf[0]), "candidate list = ");
+ for ( i = 0; i < num_candidates; i++ )
+ {
+ sprintf(&(tbl_buf[strlen(tbl_buf)]), " 0x%llx",
+ (long long)(*(candidates_list_ptr + i)));
+ }
+ sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n");
+ HDfprintf(stdout, "%s", tbl_buf);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ n = num_candidates / mpi_size;
+ m = num_candidates % mpi_size;
+ HDassert(n >= 0);
+
+ if(NULL == (candidate_assignment_table = (int *)H5MM_malloc(sizeof(int) * (size_t)(mpi_size + 1))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for candidate assignment table")
+
+ candidate_assignment_table[0] = 0;
+ candidate_assignment_table[mpi_size] = num_candidates;
+
+ if(m == 0) { /* mpi_size is an even divisor of num_candidates */
+ HDassert(n > 0);
+ for(i = 1; i < mpi_size; i++)
+ candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n;
+ } /* end if */
+ else {
+ for(i = 1; i <= m; i++)
+ candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n + 1;
+
+ if(num_candidates < mpi_size) {
+ for(i = m + 1; i < mpi_size; i++)
+ candidate_assignment_table[i] = num_candidates;
+ } /* end if */
+ else {
+ for(i = m + 1; i < mpi_size; i++)
+ candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n;
+ } /* end else */
+ } /* end else */
+ HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
+
+#if H5C_DO_SANITY_CHECKS
+ /* verify that the candidate assignment table has the expected form */
+ for ( i = 1; i < mpi_size - 1; i++ )
+ {
+ int a, b;
+
+ a = candidate_assignment_table[i] - candidate_assignment_table[i - 1];
+ b = candidate_assignment_table[i + 1] - candidate_assignment_table[i];
+
+ HDassert( n + 1 >= a );
+ HDassert( a >= b );
+ HDassert( b >= n );
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ first_entry_to_flush = candidate_assignment_table[mpi_rank];
+ last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1;
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ for ( i = 0; i < 1024; i++ )
+ tbl_buf[i] = '\0';
+ sprintf(&(tbl_buf[0]), "candidate assignment table = ");
+ for(i = 0; i <= mpi_size; i++)
+ sprintf(&(tbl_buf[strlen(tbl_buf)]), " %d", candidate_assignment_table[i]);
+ sprintf(&(tbl_buf[strlen(tbl_buf)]), "\n");
+ HDfprintf(stdout, "%s", tbl_buf);
+
+ HDfprintf(stdout, "%s:%d: flush entries [%d, %d].\n",
+ FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush);
+
+ HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ for(i = 0; i < num_candidates; i++) {
+ addr = candidates_list_ptr[i];
+ HDassert( H5F_addr_defined(addr) );
+
+#if H5C_DO_SANITY_CHECKS
+ if ( i > 0 ) {
+ if ( last_addr == addr ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list.\n")
+ } else if ( last_addr > addr ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted.\n")
+ }
+ }
+
+ last_addr = addr;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ if(entry_ptr == NULL) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed candidate entry not in cache?!?!?.")
+ } else if(!entry_ptr->is_dirty) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?.")
+ } else if ( entry_ptr->is_protected ) {
+ /* For now at least, we can't deal with protected entries.
+ * If we encounter one, scream and die. If it becomes an
+ * issue, we should be able to work around this.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?.")
+ } else {
+ /* determine whether the entry is to be cleared or flushed,
+ * and mark it accordingly. We will scan the protected and
+ * pinned list shortly, and clear or flush according to these
+ * markings.
+ */
+ if((i >= first_entry_to_flush) && (i <= last_entry_to_flush)) {
+ entries_to_flush++;
+ entry_ptr->flush_immediately = TRUE;
+ } /* end if */
+ else {
+ entries_to_clear++;
+ entry_ptr->clear_on_unprotect = TRUE;
+ } /* end else */
+ } /* end else */
+ } /* end for */
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %d/%d/%d.\n",
+ FUNC, mpi_rank, (int)num_candidates, (int)entries_to_clear,
+ (int)entries_to_flush);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+
+ /* We have now marked all the entries on the candidate list for
+ * either flush or clear -- now scan the LRU and the pinned list
+ * for these entries and do the deed.
+ *
+     * Note that we are doing things in this roundabout manner so as
+ * to preserve the order of the LRU list to the best of our ability.
+ * If we don't do this, my experiments indicate that we will have a
+     * noticeably poorer hit ratio as a result.
+ */
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout, "%s:%d: scanning LRU list. len = %d.\n", FUNC, mpi_rank,
+ (int)(cache_ptr->LRU_list_len));
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ entries_examined = 0;
+ initial_list_len = cache_ptr->LRU_list_len;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ while((entry_ptr != NULL) && (entries_examined <= initial_list_len) &&
+ ((entries_cleared + entries_flushed) < num_candidates)) {
+ if(entry_ptr->clear_on_unprotect) {
+ entry_ptr->clear_on_unprotect = FALSE;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->prev;
+ entries_cleared++;
+
+#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
+ HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
+ (long long)clear_ptr->addr);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ if(H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ clear_ptr->type,
+ clear_ptr->addr,
+ H5C__FLUSH_CLEAR_ONLY_FLAG,
+ &first_flush,
+ TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ } else if(entry_ptr->flush_immediately) {
+ entry_ptr->flush_immediately = FALSE;
+ flush_ptr = entry_ptr;
+ entry_ptr = entry_ptr->prev;
+ entries_flushed++;
+
+#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
+ HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
+ (long long)flush_ptr->addr);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ if(H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ flush_ptr->type,
+ flush_ptr->addr,
+ H5C__NO_FLAGS_SET,
+ &first_flush,
+ TRUE) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
+ } else {
+ entry_ptr = entry_ptr->prev;
+ }
+
+ entries_examined++;
+ } /* end while */
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %d/%d/%d.\n",
+ FUNC, mpi_rank, entries_examined,
+ entries_cleared, entries_flushed);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ /* It is also possible that some of the cleared entries are on the
+ * pinned list. Must scan that also.
+ */
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout, "%s:%d: scanning pinned entry list. len = %d\n",
+ FUNC, mpi_rank, (int)(cache_ptr->pel_len));
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while((entry_ptr != NULL) &&
+ ((entries_cleared + entries_flushed) < num_candidates)) {
+ if(entry_ptr->clear_on_unprotect) {
+ entry_ptr->clear_on_unprotect = FALSE;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->next;
+ entries_cleared++;
+
+#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
+ HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
+ (long long)clear_ptr->addr);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ if(H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ clear_ptr->type,
+ clear_ptr->addr,
+ H5C__FLUSH_CLEAR_ONLY_FLAG,
+ &first_flush,
+ TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ } else if(entry_ptr->flush_immediately) {
+ entry_ptr->flush_immediately = FALSE;
+ flush_ptr = entry_ptr;
+ entry_ptr = entry_ptr->next;
+ entries_flushed++;
+
+#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
+ HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
+ (long long)flush_ptr->addr);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ if(H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ flush_ptr->type,
+ flush_ptr->addr,
+ H5C__NO_FLAGS_SET,
+ &first_flush,
+ TRUE) < 0)
+                HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
+ } else {
+ entry_ptr = entry_ptr->next;
+ }
+ } /* end while */
+
+#if H5C_APPLY_CANDIDATE_LIST__DEBUG
+ HDfprintf(stdout,
+ "%s:%d: pel entries examined/cleared/flushed = %d/%d/%d.\n",
+ FUNC, mpi_rank, entries_examined,
+ entries_cleared, entries_flushed);
+ HDfprintf(stdout, "%s:%d: done.\n", FUNC, mpi_rank);
+
+    HDfflush(stdout);
+#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
+ if((entries_flushed != entries_to_flush) || (entries_cleared != entries_to_clear))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
+
+done:
+ if(candidate_assignment_table != NULL)
+ candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_apply_candidate_list() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_construct_candidate_list__clean_cache
+ *
+ * Purpose: Construct the list of entries that should be flushed to
+ * clean all entries in the cache.
+ *
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
+{
+ size_t space_needed;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_construct_candidate_list__clean_cache, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ /* As a sanity check, set space needed to the size of the skip list.
+ * This should be the sum total of the sizes of all the dirty entries
+ * in the metadata cache.
+ */
+ space_needed = cache_ptr->slist_size;
+
+ /* Recall that while we shouldn't have any protected entries at this
+     * point, it is possible that some dirty entries may still reside on
+     * the pinned list.
+ */
+ HDassert( cache_ptr->slist_size <=
+ (cache_ptr->dLRU_list_size + cache_ptr->pel_size) );
+ HDassert( cache_ptr->slist_len <=
+ (cache_ptr->dLRU_list_len + cache_ptr->pel_len) );
+
+ if(space_needed > 0) { /* we have work to do */
+ H5C_cache_entry_t *entry_ptr;
+ int nominated_entries_count = 0;
+ size_t nominated_entries_size = 0;
+ haddr_t nominated_addr;
+
+ HDassert( cache_ptr->slist_len > 0 );
+
+ /* Scan the dirty LRU list from tail forward and nominate sufficient
+ * entries to free up the necessary space.
+ */
+ entry_ptr = cache_ptr->dLRU_tail_ptr;
+ while((nominated_entries_size < space_needed) &&
+ (nominated_entries_count < cache_ptr->slist_len) &&
+ (entry_ptr != NULL)) {
+ HDassert( ! (entry_ptr->is_protected) );
+ HDassert( ! (entry_ptr->is_read_only) );
+ HDassert( entry_ptr->ro_ref_count == 0 );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
+
+ nominated_addr = entry_ptr->addr;
+ if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(1).")
+
+ nominated_entries_size += entry_ptr->size;
+ nominated_entries_count++;
+ entry_ptr = entry_ptr->aux_prev;
+ } /* end while */
+ HDassert( entry_ptr == NULL );
+
+        /* It is possible that there are some dirty entries on the
+         * pinned entry list as well -- scan it too if necessary.
+ */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while((nominated_entries_size < space_needed) &&
+ (nominated_entries_count < cache_ptr->slist_len) &&
+ (entry_ptr != NULL)) {
+ if(entry_ptr->is_dirty) {
+ HDassert( ! (entry_ptr->is_protected) );
+ HDassert( ! (entry_ptr->is_read_only) );
+ HDassert( entry_ptr->ro_ref_count == 0 );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
+
+ nominated_addr = entry_ptr->addr;
+ if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(2).")
+
+ nominated_entries_size += entry_ptr->size;
+ nominated_entries_count++;
+ } /* end if */
+
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ HDassert( nominated_entries_count == cache_ptr->slist_len );
+ HDassert( nominated_entries_size == space_needed );
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_construct_candidate_list__clean_cache() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_construct_candidate_list__min_clean
+ *
+ * Purpose: Construct the list of entries that should be flushed to
+ * get the cache back within its min clean constraints.
+ *
+ * This function is used in managing sync points, and
+ * shouldn't be used elsewhere.
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 3/17/10
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+herr_t
+H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
+{
+ size_t space_needed = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_construct_candidate_list__min_clean, FAIL)
+
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ /* compute the number of bytes (if any) that must be flushed to get the
+ * cache back within its min clean constraints.
+ */
+ if(cache_ptr->max_cache_size > cache_ptr->index_size) {
+ if(((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size)
+ space_needed = 0;
+ else
+ space_needed = cache_ptr->min_clean_size -
+ ((cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size);
+ } /* end if */
+ else {
+ if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size)
+ space_needed = 0;
+ else
+ space_needed = cache_ptr->min_clean_size -
+ cache_ptr->cLRU_list_size;
+ } /* end else */
+
+ if(space_needed > 0) { /* we have work to do */
+ H5C_cache_entry_t *entry_ptr;
+ int nominated_entries_count = 0;
+ size_t nominated_entries_size = 0;
+
+ HDassert( cache_ptr->slist_len > 0 );
+
+ /* Scan the dirty LRU list from tail forward and nominate sufficient
+ * entries to free up the necessary space.
+ */
+ entry_ptr = cache_ptr->dLRU_tail_ptr;
+ while((nominated_entries_size < space_needed) &&
+ (nominated_entries_count < cache_ptr->slist_len) &&
+ (entry_ptr != NULL)) {
+ haddr_t nominated_addr;
+
+ HDassert( ! (entry_ptr->is_protected) );
+ HDassert( ! (entry_ptr->is_read_only) );
+ HDassert( entry_ptr->ro_ref_count == 0 );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
+
+ nominated_addr = entry_ptr->addr;
+ if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed.")
+
+ nominated_entries_size += entry_ptr->size;
+ nominated_entries_count++;
+ entry_ptr = entry_ptr->aux_prev;
+ } /* end while */
+ HDassert( nominated_entries_count <= cache_ptr->slist_len );
+ HDassert( nominated_entries_size >= space_needed );
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_construct_candidate_list__min_clean() */
+#endif /* H5_HAVE_PARALLEL */
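The space_needed computation at the top of H5C_construct_candidate_list__min_clean() reduces to asking how far the cache falls short of its min clean size once both unused capacity and already-clean entries are counted. A small standalone sketch with made-up numbers follows; the function and variable names here are illustrative only, not the library's.

    #include <stdio.h>

    /* Bytes that must be flushed to satisfy the min-clean constraint, mirroring
     * the computation above.  Unused space (max_cache_size - index_size) counts
     * toward the clean total along with the clean LRU list.
     */
    static size_t min_clean_shortfall(size_t max_cache_size, size_t index_size,
                                      size_t min_clean_size, size_t cLRU_list_size)
    {
        size_t clean_total = cLRU_list_size;

        if(max_cache_size > index_size)
            clean_total += max_cache_size - index_size;

        return (clean_total >= min_clean_size) ? 0 : (min_clean_size - clean_total);
    }

    int main(void)
    {
        /* Made-up example: a 4 MiB cache with a 2 MiB min-clean requirement,
         * currently holding 3.5 MiB of entries of which 1 MiB is clean.
         * Unused space (0.5 MiB) + clean entries (1 MiB) = 1.5 MiB, so another
         * 0.5 MiB of dirty entries must be nominated for flushing.
         */
        size_t needed = min_clean_shortfall(4 * 1024 * 1024, 3584 * 1024,
                                            2 * 1024 * 1024, 1024 * 1024);

        printf("space_needed = %zu bytes\n", needed);   /* prints 524288 */
        return 0;
    }
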
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_create
*
* Purpose: Allocate, initialize, and return the address of a new
@@ -356,10 +974,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * JRM -- 11/5/08
- * Added initialization for the new clean_index_size and
- * dirty_index_size fields of H5C_t.
- *
*-------------------------------------------------------------------------
*/
H5C_t *
@@ -1502,9 +2116,7 @@ H5C_flush_to_min_clean(H5F_t * f,
#endif /* end modified code -- commented out for now */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_flush_to_min_clean() */
@@ -1903,33 +2515,6 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
* Programmer: John Mainzer
* 6/2/04
*
- * QAK -- 1/31/08
- * Added initialization for the new free_file_space_on_destroy
- * field.
- *
- * JRM -- 11/13/08
- * Moved test to see if we already have an entry with the
- * specified address in the cache. This was necessary as
- * we used to modify some fields in the entry to be inserted
- * priort to this test, which got the cache confused if the
- * insertion failed because the entry was already present.
- *
- * Also revised the function to call H5C_make_space_in_cache()
- * if the min_clean_size is not met at present, not just if
- * there is insufficient space in the cache for the new
- * entry.
- *
- * The purpose of this modification is to avoid "metadata
- * blizzards" in the write only case. In such instances,
- * the cache was allowed to fill with dirty metadata. When
- * we finally needed to evict an entry to make space, we had
- * to flush out a whole cache full of metadata -- which has
- * interesting performance effects. We hope to avoid (or
- * perhaps more accurately hide) this effect by maintaining
- * the min_clean_size, which should force us to start flushing
- * entries long before we actually have to evict something
- * to make space.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2042,6 +2627,7 @@ H5C_insert_entry(H5F_t * f,
#ifdef H5_HAVE_PARALLEL
entry_ptr->clear_on_unprotect = FALSE;
+ entry_ptr->flush_immediately = FALSE;
#endif /* H5_HAVE_PARALLEL */
entry_ptr->flush_in_progress = FALSE;
@@ -3813,29 +4399,17 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_set_prefix(H5C_t * cache_ptr,
- char * prefix)
+H5C_set_prefix(H5C_t * cache_ptr, char * prefix)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5C_set_prefix)
- FUNC_ENTER_NOAPI(H5C_set_prefix, FAIL)
-
- /* This would normally be an assert, but we need to use an HGOTO_ERROR
- * call to shut up the compiler.
- */
- if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
- }
-
- HDassert( prefix );
- HDassert( HDstrlen(prefix) < H5C__PREFIX_LEN ) ;
+ HDassert((cache_ptr) && (cache_ptr->magic == H5C__H5C_T_MAGIC));
+ HDassert(prefix);
+ HDassert(HDstrlen(prefix) < H5C__PREFIX_LEN);
HDstrcpy(&(cache_ptr->prefix[0]), prefix);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5C_set_prefix() */
@@ -7441,17 +8015,6 @@ H5C_flush_single_entry(H5F_t * f,
}
}
}
-#if 0
- /* this should be useful for debugging from time to time.
- * lets leave it in for now. -- JRM 12/15/04
- */
- else {
- HDfprintf(stdout,
- "H5C_flush_single_entry(): non-existant entry. addr = %a\n",
- addr);
- HDfflush(stdout);
- }
-#endif
#endif /* H5C_DO_SANITY_CHECKS */
if ( ( entry_ptr != NULL ) && ( entry_ptr->is_protected ) )
@@ -7572,133 +8135,6 @@ H5C_flush_single_entry(H5F_t * f,
*/
if ( destroy ) { /* AKA eviction */
-#if 0 /* JRM */
- /* This test code may come in handy -- lets keep it for a while.
- *
- * Note that it will cause spurious errors in the serial case
- * unless we are maintaining the clean and dirty LRU lists.
- */
- {
- if ( entry_ptr->is_dirty )
- {
- if ( cache_ptr->dLRU_head_ptr == NULL )
- HDfprintf(stdout,
- "%s: cache_ptr->dLRU_head_ptr == NULL.\n",
- FUNC);
-
- if ( cache_ptr->dLRU_tail_ptr == NULL )
- HDfprintf(stdout,
- "%s: cache_ptr->dLRU_tail_ptr == NULL.\n",
- FUNC);
-
- if ( cache_ptr->dLRU_list_len <= 0 )
- HDfprintf(stdout,
- "%s: cache_ptr->dLRU_list_len <= 0.\n",
- FUNC);
-
- if ( cache_ptr->dLRU_list_size <= 0 )
- HDfprintf(stdout,
- "%s: cache_ptr->dLRU_list_size <= 0.\n",
- FUNC);
-
- if ( cache_ptr->dLRU_list_size < entry_ptr->size )
- HDfprintf(stdout,
- "%s: cache_ptr->dLRU_list_size < entry_ptr->size.\n",
- FUNC);
-
- if ( ( (cache_ptr->dLRU_list_size) == entry_ptr->size ) &&
- ( ! ( (cache_ptr->dLRU_list_len) == 1 ) ) )
- HDfprintf(stdout,
- "%s: dLRU_list_size == size && dLRU_list_len != 1\n",
- FUNC);
-
- if ( ( entry_ptr->aux_prev == NULL ) &&
- ( cache_ptr->dLRU_head_ptr != entry_ptr ) )
- HDfprintf(stdout,
- "%s: entry_ptr->aux_prev == NULL && dLRU_head_ptr != entry_ptr\n",
- FUNC);
-
- if ( ( entry_ptr->aux_next == NULL ) &&
- ( cache_ptr->dLRU_tail_ptr != entry_ptr ) )
- HDfprintf(stdout,
- "%s: entry_ptr->aux_next == NULL && dLRU_tail_ptr != entry_ptr\n",
- FUNC);
-
- if ( ( cache_ptr->dLRU_list_len == 1 ) &&
- ( ! ( ( cache_ptr->dLRU_head_ptr == entry_ptr ) &&
- ( cache_ptr->dLRU_tail_ptr == entry_ptr ) &&
- ( entry_ptr->aux_next == NULL ) &&
- ( entry_ptr->aux_prev == NULL ) &&
- ( cache_ptr->dLRU_list_size == entry_ptr->size )
- )
- )
- )
- {
- HDfprintf(stdout,
- "%s: single entry dlru sanity check fails\n",
- FUNC);
- }
-
- }
- else
- {
- if ( cache_ptr->cLRU_head_ptr == NULL )
- HDfprintf(stdout,
- "%s: cache_ptr->cLRU_head_ptr == NULL.\n",
- FUNC);
-
- if ( cache_ptr->cLRU_tail_ptr == NULL )
- HDfprintf(stdout,
- "%s: cache_ptr->cLRU_tail_ptr == NULL.\n",
- FUNC);
-
- if ( cache_ptr->cLRU_list_len <= 0 )
- HDfprintf(stdout,
- "%s: cache_ptr->cLRU_list_len <= 0.\n",
- FUNC);
-
- if ( cache_ptr->cLRU_list_size <= 0 )
- HDfprintf(stdout,
- "%s: cache_ptr->cLRU_list_size <= 0.\n",
- FUNC);
-
- if ( cache_ptr->cLRU_list_size < entry_ptr->size )
- HDfprintf(stdout,
- "%s: cache_ptr->cLRU_list_size < entry_ptr->size.\n",
- FUNC);
-
- if ( ( (cache_ptr->cLRU_list_size) == entry_ptr->size ) &&
- ( ! ( (cache_ptr->cLRU_list_len) == 1 ) ) )
- HDfprintf(stdout,
- "%s: cLRU_list_size == size && cLRU_list_len != 1\n",
- FUNC);
-
- if ( ( entry_ptr->aux_prev == NULL ) &&
- ( cache_ptr->cLRU_head_ptr != entry_ptr ) )
- HDfprintf(stdout, "%s: entry_ptr->aux_prev == NULL && cLRU_head_ptr != entry_ptr\n", FUNC);
-
- if ( ( entry_ptr->aux_next == NULL ) &&
- ( cache_ptr->cLRU_tail_ptr != entry_ptr ) )
- HDfprintf(stdout, "%s: entry_ptr->aux_next == NULL && cLRU_tail_ptr != entry_ptr\n", FUNC);
-
- if ( ( cache_ptr->cLRU_list_len == 1 ) &&
- ( ! ( ( cache_ptr->cLRU_head_ptr == entry_ptr ) &&
- ( cache_ptr->cLRU_tail_ptr == entry_ptr ) &&
- ( entry_ptr->aux_next == NULL ) &&
- ( entry_ptr->aux_prev == NULL ) &&
- ( cache_ptr->cLRU_list_size == entry_ptr->size )
- )
- )
- )
- {
- HDfprintf(stdout,
- "%s: single entry clru sanity check fails\n",
- FUNC);
- }
- }
- }
-#endif /* JRM */
-
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
} else {
@@ -7864,10 +8300,10 @@ H5C_flush_single_entry(H5F_t * f,
* H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN()).
*/
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), \
- (entry_ptr->size),\
+ (entry_ptr->size), \
(new_size), \
(entry_ptr), \
- (TRUE));
+ (TRUE))
/* The entry can't be protected since we just flushed it.
* Thus we must update the replacement policy data
@@ -7923,9 +8359,7 @@ H5C_flush_single_entry(H5F_t * f,
}
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_flush_single_entry() */
@@ -8023,6 +8457,7 @@ H5C_load_entry(H5F_t * f,
entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
entry->clear_on_unprotect = FALSE;
+ entry->flush_immediately = FALSE;
#endif /* H5_HAVE_PARALLEL */
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
@@ -8862,20 +9297,17 @@ H5C_flush_marked_entries(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
/* Flush all marked entries */
- if (H5C_flush_cache(f,
+ if(H5C_flush_cache(f,
primary_dxpl_id,
secondary_dxpl_id,
H5C__FLUSH_MARKED_ENTRIES_FLAG |
H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
-
} /* end if */
done:
-
- FUNC_LEAVE_NOAPI(ret_value);
-
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_marked_entries */
#if H5C_DO_TAGGING_SANITY_CHECKS
@@ -8891,8 +9323,6 @@ done:
* Programmer: Mike McGreevy
* January 14, 2010
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -8959,10 +9389,7 @@ H5C_verify_tag(int id, haddr_t tag)
}
done:
-
- /* Function Leave Macro */
- FUNC_LEAVE_NOAPI(ret_value);
-
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_verify_tag */
#endif
@@ -8980,43 +9407,35 @@ done:
* Programmer: Mike McGreevy
* March 17, 2010
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-herr_t
+void
H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag)
{
/* Variable Declarations */
- herr_t ret_value = SUCCEED; /* Return Value */
int i = 0; /* Iterator */
- H5C_cache_entry_t *next_entry_ptr = NULL; /* entry pointer */
/* Assertions */
HDassert(cache_ptr);
/* Function Enter Macro */
- FUNC_ENTER_NOAPI(H5C_retag_copied_metadata, FAIL)
+ FUNC_ENTER_NOAPI_NOFUNC(H5C_retag_copied_metadata)
/* Iterate through entries, retagging those with the H5AC__COPIED_TAG tag */
- for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ H5C_cache_entry_t *next_entry_ptr; /* entry pointer */
next_entry_ptr = cache_ptr->index[i];
-
- while ( next_entry_ptr != NULL ) {
- if (cache_ptr->index[i] != NULL) {
- if ((cache_ptr->index[i])->tag == H5AC__COPIED_TAG) {
+ while(next_entry_ptr != NULL) {
+ if(cache_ptr->index[i] != NULL) {
+ if((cache_ptr->index[i])->tag == H5AC__COPIED_TAG)
(cache_ptr->index[i])->tag = metadata_tag;
- } /* end if */
} /* end if */
+
next_entry_ptr = next_entry_ptr->ht_next;
} /* end while */
-
} /* end for */
-done:
-
- /* Function Leave Macro */
- FUNC_LEAVE_NOAPI(ret_value);
-
+ FUNC_LEAVE_NOAPI_VOID
} /* H5C_retag_copied_metadata */
+
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index e1dffa4..b4a8944 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -1875,7 +1875,7 @@ if ( ( (cache_ptr) == NULL ) || \
( ( !( was_clean ) || \
( (cache_ptr)->clean_index_size < (old_size) ) ) && \
( ( (was_clean) ) || \
- ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) \
+ ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
( (entry_ptr) == NULL ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT entry size change SC failed") \
@@ -1893,7 +1893,7 @@ if ( ( (cache_ptr) == NULL ) || \
( ( !((entry_ptr)->is_dirty ) || \
( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
( ( ((entry_ptr)->is_dirty) ) || \
- ( (cache_ptr)->clean_index_size < (new_size) ) ) ) \
+ ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
( ( (cache_ptr)->index_len == 1 ) && \
( (cache_ptr)->index_size != (new_size) ) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
@@ -2098,24 +2098,25 @@ if ( (cache_ptr)->index_size != \
H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
}
-#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
-{ \
- H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
- (cache_ptr)->index_size -= (old_size); \
- (cache_ptr)->index_size += (new_size); \
- if ( was_clean ) { \
- (cache_ptr)->clean_index_size -= (old_size); \
- } else { \
- (cache_ptr)->dirty_index_size -= (old_size); \
- } \
- if ( (entry_ptr)->is_dirty ) { \
- (cache_ptr)->dirty_index_size += (new_size); \
- } else { \
- (cache_ptr)->clean_index_size += (new_size); \
- } \
- H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr) \
+#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+{ \
+ H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+ (cache_ptr)->index_size -= (old_size); \
+ (cache_ptr)->index_size += (new_size); \
+ if ( was_clean ) { \
+ (cache_ptr)->clean_index_size -= (old_size); \
+ } else { \
+ (cache_ptr)->dirty_index_size -= (old_size); \
+ } \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size += (new_size); \
+ } else { \
+ (cache_ptr)->clean_index_size += (new_size); \
+ } \
+ H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr) \
}
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 3f38500..7e14872 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -383,6 +383,14 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* the unprotect, the entry's is_dirty flag is reset by flushing
* it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
*
+ * flush_immediately:  Boolean flag used only in PHDF5 -- and then only
+ * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
+ *
+ *             When a distributed metadata write is triggered at a
+ * sync point, this field is used to mark entries that
+ * must be flushed before leaving the sync point. At all
+ * other times, this field should be set to FALSE.
+ *
* flush_in_progress: Boolean flag that is set to true iff the entry
* is in the process of being flushed. This allows the cache
* to detect when a call is the result of a flush callback.
@@ -581,6 +589,7 @@ typedef struct H5C_cache_entry_t
hbool_t flush_marker;
#ifdef H5_HAVE_PARALLEL
hbool_t clear_on_unprotect;
+ hbool_t flush_immediately;
#endif /* H5_HAVE_PARALLEL */
hbool_t flush_in_progress;
hbool_t destroy_in_progress;
@@ -1034,6 +1043,21 @@ typedef struct H5C_auto_size_ctl_t
#define H5C__FREE_FILE_SPACE_FLAG 0x0800
#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
+#ifdef H5_HAVE_PARALLEL
+H5_DLL herr_t H5C_apply_candidate_list(H5F_t * f,
+ hid_t primary_dxpl_id,
+ hid_t secondary_dxpl_id,
+ H5C_t * cache_ptr,
+ int num_candidates,
+ haddr_t * candidates_list_ptr,
+ int mpi_rank,
+ int mpi_size);
+
+H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr);
+
+H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr);
+#endif /* H5_HAVE_PARALLEL */
+
H5_DLL H5C_t * H5C_create(size_t max_cache_size,
size_t min_clean_size,
int max_type_id,
@@ -1177,7 +1201,7 @@ H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
H5_DLL herr_t H5C_ignore_tags(H5C_t * cache_ptr);
-H5_DLL herr_t H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag);
+H5_DLL void H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag);
#endif /* !_H5Cprivate_H */
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index bd2bd7e..168c011 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -2713,7 +2713,6 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
- hbool_t found = FALSE; /*already in cache? */
haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
size_t chunk_size; /*size of a chunk */
void *chunk = NULL; /*the file chunk */
@@ -2845,7 +2844,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
rdcc->stats.ninits++;
} /* end else */
} /* end else */
- HDassert(found || chunk_size > 0);
+ HDassert(chunk_size > 0);
if(ent) {
/*
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 285451e..b7c2ecb 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -291,6 +291,19 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_type_info_t type_info; /* Datatype info for operation */
hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */
+ H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */
+ /* projection of the supplied mem_space to a new */
+ /* data space with rank equal to that of */
+ /* file_space. */
+ /* */
+ /* This field is only used if */
+ /* H5S_select_shape_same() returns TRUE when */
+                                        /* comparing the mem_space and the file_space,    */
+                                        /* and the two dataspaces have different ranks.   */
+ /* */
+ /* Note that if this variable is used, the */
+ /* projected mem space must be discarded at the */
+ /* end of the function to avoid a memory leak. */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hssize_t snelmts; /*total number of elmts (signed) */
hsize_t nelmts; /*total number of elmts */
@@ -340,6 +353,37 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
if(!(H5S_has_extent(mem_space)))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set")
+ /* H5S_select_shape_same() has been modified to accept topologically identical
+ * selections with different rank as having the same shape (if the most
+ * rapidly changing coordinates match up), but the I/O code still has
+ * difficulties with the notion.
+ *
+ * To solve this, we check to see if H5S_select_shape_same() returns true,
+     * and if the ranks of the mem and file spaces are different.  If they are,
+ * construct a new mem space that is equivalent to the old mem space, and
+ * use that instead.
+ *
+ * Note that in general, this requires us to touch up the memory buffer as
+ * well.
+ */
+ if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
+ H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) {
+ void *adj_buf = NULL; /* Pointer to the location in buf corresponding */
+ /* to the beginning of the projected mem space. */
+
+ /* Attempt to construct projected dataspace for memory dataspace */
+ if(H5S_select_construct_projection(mem_space, &projected_mem_space,
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.dst_type_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
+ HDassert(projected_mem_space);
+ HDassert(adj_buf);
+
+ /* Switch to using projected memory dataspace & adjusted buffer */
+ mem_space = projected_mem_space;
+ buf = adj_buf;
+ } /* end if */
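To see the case this block handles from the application side: with this change, a rank-2 memory selection can be shape-equal to a rank-3 file selection, and H5D_read()/H5D_write() quietly project the memory dataspace (and adjust the buffer pointer) so the ranks match. A hedged sketch of such a call from user code, assuming dset_id refers to an already-open 3-D integer dataset of at least 1 x 16 x 16 elements:

    #include "hdf5.h"

    /* Sketch: write a 2-D memory buffer into one 16 x 16 "plane" of a 3-D
     * dataset.  The file selection has rank 3 and the memory selection rank 2,
     * but H5S_select_shape_same() regards them as the same shape, which is the
     * case the projection code above handles internally.
     */
    static herr_t write_one_plane(hid_t dset_id, const int buf[16][16])
    {
        hsize_t mem_dims[2] = {16, 16};
        hsize_t start[3]    = {0, 0, 0};
        hsize_t count[3]    = {1, 16, 16};            /* one 16 x 16 plane */
        hid_t   mem_space   = H5Screate_simple(2, mem_dims, NULL);
        hid_t   file_space  = H5Dget_space(dset_id);
        herr_t  status;

        H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, NULL);

        /* rank-2 memory selection, rank-3 file selection, same shape */
        status = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space, file_space,
                          H5P_DEFAULT, buf);

        H5Sclose(mem_space);
        H5Sclose(file_space);
        return status;
    }
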
+
+
/* Retrieve dataset properties */
/* <none needed in the general case> */
@@ -417,6 +461,11 @@ done:
if(type_info_init && H5D_typeinfo_term(&type_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info")
+ /* discard projected mem space if it was created */
+ if(NULL != projected_mem_space)
+ if(H5S_close(projected_mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_read() */
@@ -442,6 +491,19 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_type_info_t type_info; /* Datatype info for operation */
hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */
+ H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */
+ /* projection of the supplied mem_space to a new */
+ /* data space with rank equal to that of */
+ /* file_space. */
+ /* */
+ /* This field is only used if */
+ /* H5S_select_shape_same() returns TRUE when */
+                                        /* comparing the mem_space and the file_space, */
+                                        /* and the two dataspaces have different ranks. */
+ /* */
+ /* Note that if this variable is used, the */
+ /* projected mem space must be discarded at the */
+ /* end of the function to avoid a memory leak. */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hssize_t snelmts; /*total number of elmts (signed) */
hsize_t nelmts; /*total number of elmts */
@@ -515,6 +577,37 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
file_space = dataset->shared->space;
if(!mem_space)
mem_space = file_space;
+
+ /* H5S_select_shape_same() has been modified to accept topologically
+ * identical selections with different rank as having the same shape
+ * (if the most rapidly changing coordinates match up), but the I/O
+ * code still has difficulties with the notion.
+ *
+ * To solve this, we check to see if H5S_select_shape_same() returns
+ * true, and if the ranks of the mem and file spaces are different.
+     * If they are, construct a new mem space that is equivalent to the
+ * old mem space, and use that instead.
+ *
+ * Note that in general, this requires us to touch up the memory buffer
+ * as well.
+ */
+ if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
+ H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) {
+ void *adj_buf = NULL; /* Pointer to the location in buf corresponding */
+ /* to the beginning of the projected mem space. */
+
+ /* Attempt to construct projected dataspace for memory dataspace */
+ if(H5S_select_construct_projection(mem_space, &projected_mem_space,
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.src_type_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
+ HDassert(projected_mem_space);
+ HDassert(adj_buf);
+
+ /* Switch to using projected memory dataspace & adjusted buffer */
+ mem_space = projected_mem_space;
+ buf = adj_buf;
+ } /* end if */
+
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
@@ -608,6 +701,11 @@ done:
if(type_info_init && H5D_typeinfo_term(&type_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info")
+ /* discard projected mem space if it was created */
+ if(NULL != projected_mem_space)
+ if(H5S_close(projected_mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_write() */
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index ad9b737..e646a7b 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -115,7 +115,7 @@ static herr_t H5D_inter_collective_io(H5D_io_info_t *io_info,
const H5D_type_info_t *type_info, const H5S_t *file_space,
const H5S_t *mem_space);
static herr_t H5D_final_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, size_t nelmts, MPI_Datatype *mpi_file_type,
+ const H5D_type_info_t *type_info, hsize_t nelmts, MPI_Datatype *mpi_file_type,
MPI_Datatype *mpi_buf_type);
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
static herr_t H5D_sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
@@ -819,10 +819,10 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type
H5D_chunk_map_t *fm, int sum_chunk)
{
H5D_chunk_addr_info_t *chunk_addr_info_array = NULL;
- hbool_t mbt_is_derived = FALSE;
- hbool_t mft_is_derived = FALSE;
     MPI_Datatype chunk_final_mtype;           /* Final memory MPI datatype for all chunks with selection */
+ hbool_t chunk_final_mtype_is_derived = FALSE;
     MPI_Datatype chunk_final_ftype;           /* Final file MPI datatype for all chunks with selection */
+ hbool_t chunk_final_ftype_is_derived = FALSE;
H5D_storage_t ctg_store; /* Storage info for "fake" contiguous dataset */
size_t total_chunks;
haddr_t *total_chunk_addr_array = NULL;
@@ -830,7 +830,10 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type
MPI_Datatype *chunk_ftype = NULL;
MPI_Aint *chunk_disp_array = NULL;
MPI_Aint *chunk_mem_disp_array = NULL;
- int *blocklen = NULL;
+ hbool_t *chunk_mft_is_derived_array = NULL; /* Flags to indicate each chunk's MPI file datatype is derived */
+ hbool_t *chunk_mbt_is_derived_array = NULL; /* Flags to indicate each chunk's MPI memory datatype is derived */
+ int *chunk_mpi_file_counts = NULL; /* Count of MPI file datatype for each chunk */
+ int *chunk_mpi_mem_counts = NULL; /* Count of MPI memory datatype for each chunk */
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED;
@@ -897,7 +900,7 @@ if(H5DEBUG(D))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
} /* end if */
else {
- size_t mpi_buf_count; /* Number of MPI types */
+ hsize_t mpi_buf_count; /* Number of MPI types */
size_t num_chunk; /* Number of chunks for this process */
size_t u; /* Local index variable */
@@ -912,21 +915,25 @@ if(H5DEBUG(D))
/* Set up MPI datatype for chunks selected */
if(num_chunk) {
- hsize_t mpi_mem_extra_offset; /* Extra offset for memory MPI datatype */
- hsize_t mpi_file_extra_offset; /* Extra offset for file MPI datatype */
- size_t mpi_mem_count; /* Memory MPI datatype count */
- size_t mpi_file_count; /* File MPI datatype count */
- hbool_t locl_mbt_is_derived = FALSE, /* Whether the buffer (memory) type is derived and needs to be free'd */
- local_mft_is_derived = FALSE; /* Whether the file type is derived and needs to be free'd */
- int blocklen_value; /* Placeholder for array fill */
-
/* Allocate chunking information */
- chunk_addr_info_array= H5MM_malloc(num_chunk * sizeof(H5D_chunk_addr_info_t));
- chunk_mtype = H5MM_malloc(num_chunk * sizeof(MPI_Datatype));
- chunk_ftype = H5MM_malloc(num_chunk * sizeof(MPI_Datatype));
- chunk_disp_array = H5MM_malloc(num_chunk * sizeof(MPI_Aint));
- chunk_mem_disp_array = H5MM_calloc(num_chunk * sizeof(MPI_Aint));
- blocklen = H5MM_malloc(num_chunk * sizeof(int));
+ if(NULL == (chunk_addr_info_array = (H5D_chunk_addr_info_t *)H5MM_malloc(num_chunk * sizeof(H5D_chunk_addr_info_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk array buffer")
+ if(NULL == (chunk_mtype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory datatype buffer")
+ if(NULL == (chunk_ftype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file datatype buffer")
+ if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc(num_chunk * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
+ if(NULL == (chunk_mem_disp_array = (MPI_Aint *)H5MM_calloc(num_chunk * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory displacement buffer")
+ if(NULL == (chunk_mpi_mem_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory counts buffer")
+ if(NULL == (chunk_mpi_file_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file counts buffer")
+ if(NULL == (chunk_mbt_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory is derived datatype flags buffer")
+ if(NULL == (chunk_mft_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file is derived datatype flags buffer")
#ifdef H5D_DEBUG
if(H5DEBUG(D))
@@ -945,14 +952,12 @@ if(H5DEBUG(D))
for(u = 0; u < num_chunk; u++) {
/* Disk MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace,
- type_info->src_type_size, &chunk_ftype[u], &mpi_file_count,
- &mpi_file_extra_offset, &local_mft_is_derived) < 0)
+ type_info->src_type_size, &chunk_ftype[u], &chunk_mpi_file_counts[u], &(chunk_mft_is_derived_array[u])) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
/* Buffer MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace,
- type_info->dst_type_size, &chunk_mtype[u], &mpi_mem_count,
- &mpi_mem_extra_offset, &locl_mbt_is_derived) < 0)
+ type_info->dst_type_size, &chunk_mtype[u], &chunk_mpi_mem_counts[u], &(chunk_mbt_is_derived_array[u])) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type")
/* Chunk address relative to the first chunk */
@@ -963,39 +968,38 @@ if(H5DEBUG(D))
chunk_disp_array[u] = (MPI_Aint)chunk_addr_info_array[u].chunk_addr;
} /* end for */
- /* Initialize the buffer with the constant value 1 */
- blocklen_value = 1;
- H5V_array_fill(blocklen, &blocklen_value, sizeof(int), num_chunk);
-
/* Create final MPI derived datatype for the file */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, blocklen, chunk_disp_array, chunk_ftype, &chunk_final_ftype)))
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_file_counts, chunk_disp_array, chunk_ftype, &chunk_final_ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ chunk_final_ftype_is_derived = TRUE;
/* Create final MPI derived datatype for memory */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct(num_chunk, blocklen, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype)))
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_mem_counts, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_mtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ chunk_final_mtype_is_derived = TRUE;
/* Free the file & memory MPI datatypes for each chunk */
for(u = 0; u < num_chunk; u++) {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_mtype + u)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if(chunk_mbt_is_derived_array[u])
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_mtype + u)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_ftype + u)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if(chunk_mft_is_derived_array[u])
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_ftype + u)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
} /* end for */
- /* buffer, file derived datatypes should be true */
- mbt_is_derived = TRUE;
- mft_is_derived = TRUE;
- mpi_buf_count = (size_t)1;
+ /* We have a single, complicated MPI datatype for both memory & file */
+ mpi_buf_count = (hsize_t)1;
} /* end if */
else { /* no selection at all for this process */
/* Allocate chunking information */
- total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t) * total_chunks);
+ if(NULL == (total_chunk_addr_array = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * total_chunks)))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate total chunk address array buffer")
/* Retrieve chunk address map */
if(H5D_chunk_addrmap(io_info, total_chunk_addr_array) < 0)
@@ -1012,8 +1016,8 @@ if(H5DEBUG(D))
chunk_final_ftype = MPI_BYTE;
chunk_final_mtype = MPI_BYTE;
- /* buffer, file derived datatypes should be true */
- mpi_buf_count = (size_t)0;
+ /* No chunks selected for this process */
+ mpi_buf_count = (hsize_t)0;
} /* end else */
#ifdef H5D_DEBUG
if(H5DEBUG(D))
@@ -1033,6 +1037,7 @@ done:
if(H5DEBUG(D))
HDfprintf(H5DEBUG(D),"before freeing memory inside H5D_link_collective_io ret_value = %d\n", ret_value);
#endif
+ /* Release resources */
if(total_chunk_addr_array)
H5MM_xfree(total_chunk_addr_array);
if(chunk_addr_info_array)
@@ -1045,13 +1050,19 @@ if(H5DEBUG(D))
H5MM_xfree(chunk_disp_array);
if(chunk_mem_disp_array)
H5MM_xfree(chunk_mem_disp_array);
- if(blocklen)
- H5MM_xfree(blocklen);
+ if(chunk_mpi_mem_counts)
+ H5MM_xfree(chunk_mpi_mem_counts);
+ if(chunk_mpi_file_counts)
+ H5MM_xfree(chunk_mpi_file_counts);
+ if(chunk_mbt_is_derived_array)
+ H5MM_xfree(chunk_mbt_is_derived_array);
+ if(chunk_mft_is_derived_array)
+ H5MM_xfree(chunk_mft_is_derived_array);
/* Free the MPI buf and file types, if they were derived */
- if(mbt_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_mtype)))
+ if(chunk_final_mtype_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_mtype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(mft_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_ftype)))
+ if(chunk_final_ftype_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_ftype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
FUNC_LEAVE_NOAPI(ret_value)
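
The hunk above replaces the constant blocklen array with per-chunk counts and tracks which per-chunk datatypes are derived so only those are freed. A minimal sketch of the MPI_Type_struct() pattern in isolation, assuming two per-chunk file datatypes and their absolute displacements are already built (the helper name is hypothetical):

    #include <mpi.h>

    /* Sketch: combine two per-chunk file datatypes at their file displacements
     * into a single derived datatype, committed before the collective I/O.
     */
    static int
    combine_chunk_ftypes(MPI_Aint disp[2], MPI_Datatype chunk_ftype[2],
                         MPI_Datatype *final_ftype)
    {
        int counts[2] = {1, 1};             /* one instance of each chunk's type */
        int mpi_code;

        if(MPI_SUCCESS != (mpi_code = MPI_Type_struct(2, counts, disp, chunk_ftype, final_ftype)))
            return mpi_code;
        if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(final_ftype)))
            return mpi_code;

        /* The constituent per-chunk datatypes may be freed once the
         * combined type is committed.
         */
        MPI_Type_free(&chunk_ftype[0]);
        MPI_Type_free(&chunk_ftype[1]);

        return MPI_SUCCESS;
    }
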
@@ -1547,32 +1558,29 @@ static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
const H5S_t *file_space, const H5S_t *mem_space)
{
- size_t mpi_buf_count; /* # of MPI types */
+ int mpi_buf_count; /* # of MPI types */
hbool_t mbt_is_derived = FALSE;
hbool_t mft_is_derived = FALSE;
MPI_Datatype mpi_file_type, mpi_buf_type;
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED; /* return value */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_inter_collective_io)
if((file_space != NULL) && (mem_space != NULL)) {
- hsize_t mpi_buf_offset, mpi_file_offset; /* Offset within dataset where selection (ie. MPI type) begins */
- size_t mpi_file_count; /* Number of file "objects" to transfer */
+ int mpi_file_count; /* Number of file "objects" to transfer */
/* Obtain disk and memory MPI derived datatype */
- if(H5S_mpio_space_type(file_space, type_info->src_type_size,
- &mpi_file_type, &mpi_file_count, &mpi_file_offset, &mft_is_derived) < 0)
+ if(H5S_mpio_space_type(file_space, type_info->src_type_size, &mpi_file_type, &mpi_file_count, &mft_is_derived) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
- if(H5S_mpio_space_type(mem_space, type_info->src_type_size,
- &mpi_buf_type, &mpi_buf_count, &mpi_buf_offset, &mbt_is_derived) < 0)
+ if(H5S_mpio_space_type(mem_space, type_info->src_type_size, &mpi_buf_type, &mpi_buf_count, &mbt_is_derived) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")
} /* end if */
else {
/* For non-selection, participate with a none MPI derived datatype, the count is 0. */
mpi_buf_type = MPI_BYTE;
mpi_file_type = MPI_BYTE;
- mpi_buf_count = (size_t)0;
+ mpi_buf_count = 0;
mbt_is_derived = FALSE;
mft_is_derived = FALSE;
} /* end else */
@@ -1583,7 +1591,7 @@ if(H5DEBUG(D))
#endif
/* Perform final collective I/O operation */
- if(H5D_final_collective_io(io_info, type_info, mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0)
+ if(H5D_final_collective_io(io_info, type_info, (hsize_t)mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO")
done:
@@ -1616,7 +1624,7 @@ if(H5DEBUG(D))
*/
static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- size_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type)
+ hsize_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type)
{
hbool_t plist_is_setup = FALSE; /* Whether the dxpl has been customized */
herr_t ret_value = SUCCEED;
@@ -1629,11 +1637,11 @@ H5D_final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info
plist_is_setup = TRUE;
if(io_info->op_type == H5D_IO_OP_WRITE) {
- if((io_info->io_ops.single_write)(io_info, type_info, (hsize_t)mpi_buf_count, NULL, NULL) < 0)
+ if((io_info->io_ops.single_write)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
} /* end if */
else {
- if((io_info->io_ops.single_read)(io_info, type_info, (hsize_t)mpi_buf_count, NULL, NULL) < 0)
+ if((io_info->io_ops.single_read)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
} /* end else */
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index 117dfc7..b382fb4 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -1771,6 +1771,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
} /* end if */
} /* end if */
else {
+#if 0 /* JRM -- 3/23/10 */ /* this is no longer always the case */
/* Only one process can do the actual metadata write */
if(file->mpi_rank != H5_PAR_META_WRITE)
#ifdef LATER
@@ -1778,6 +1779,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
#else /* LATER */
HGOTO_DONE(SUCCEED) /* skip the actual write */
#endif /* LATER */
+#endif /* JRM */
} /* end if */
/* Write the data. */
diff --git a/src/H5FDmpiposix.c b/src/H5FDmpiposix.c
index 14947a8..86f4d95 100644
--- a/src/H5FDmpiposix.c
+++ b/src/H5FDmpiposix.c
@@ -1287,9 +1287,11 @@ H5FD_mpiposix_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
#endif /* JRM */
+#if 0 /* JRM -- 3/23/10 */ /* this is no longer always the case */
/* Only one process will do the actual write if all procs in comm write same metadata */
if (file->mpi_rank != H5_PAR_META_WRITE)
HGOTO_DONE(SUCCEED) /* skip the actual write */
+#endif /* JRM */
} /* end if */
#ifdef REPORT_IO
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index 238dce6..51d60f9 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -330,6 +330,11 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Check if the current buffer from the speculative read already has the heap data */
if(spec_read_size >= (heap->prfx_size + heap->dblk_size)) {
+ /* Set p to the start of the data block. This is necessary
+ * because there may be a gap between the used portion of the
+ * prefix and the data block due to alignment constraints. */
+ p = buf + heap->prfx_size;
+
/* Copy the heap data from the speculative read buffer */
HDmemcpy(heap->dblk_image, p, heap->dblk_size);
} /* end if */
@@ -435,6 +440,11 @@ H5HL_prefix_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
/* Check if the local heap is a single object in cache */
if(heap->single_cache_obj) {
+ /* Set p to the start of the data block. This is necessary because
+ * there may be a gap between the used portion of the prefix and the
+ * data block due to alignment constraints. */
+ p = buf + heap->prfx_size;
+
/* Serialize the free list into the heap data's image */
H5HL_fl_serialize(heap);
diff --git a/src/H5Sall.c b/src/H5Sall.c
index 115d5d35..c98781a 100644
--- a/src/H5Sall.c
+++ b/src/H5Sall.c
@@ -47,6 +47,8 @@ static htri_t H5S_all_is_contiguous(const H5S_t *space);
static htri_t H5S_all_is_single(const H5S_t *space);
static htri_t H5S_all_is_regular(const H5S_t *space);
static herr_t H5S_all_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_all_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_all_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_all_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -76,6 +78,8 @@ const H5S_select_class_t H5S_sel_all[1] = {{
H5S_all_is_single,
H5S_all_is_regular,
H5S_all_adjust_u,
+ H5S_all_project_scalar,
+ H5S_all_project_simple,
H5S_all_iter_init,
}};
@@ -372,18 +376,18 @@ H5S_all_iter_release (H5S_sel_iter_t UNUSED * iter)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t
-H5S_all_release (H5S_t UNUSED * space)
+static herr_t
+H5S_all_release(H5S_t *space)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_all_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_all_release)
/* Check args */
- assert (space);
+ HDassert(space);
/* Reset the number of elements in the selection */
- space->select.num_elem=0;
+ space->select.num_elem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_all_release() */
@@ -406,18 +410,18 @@ H5S_all_release (H5S_t UNUSED * space)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t
+static herr_t
H5S_all_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selection)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_all_copy);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_all_copy)
- assert(src);
- assert(dst);
+ HDassert(src);
+ HDassert(dst);
/* Set number of elements in selection */
- dst->select.num_elem=(hsize_t)H5S_GET_EXTENT_NPOINTS(dst);
+ dst->select.num_elem = (hsize_t)H5S_GET_EXTENT_NPOINTS(dst);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_all_copy() */
@@ -542,20 +546,20 @@ H5S_all_serialize (const H5S_t *space, uint8_t *buf)
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
-H5S_all_deserialize (H5S_t *space, const uint8_t UNUSED *buf)
+H5S_all_deserialize(H5S_t *space, const uint8_t UNUSED *buf)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI(H5S_all_deserialize, FAIL);
+ FUNC_ENTER_NOAPI(H5S_all_deserialize, FAIL)
- assert(space);
+ HDassert(space);
/* Change to "all" selection */
if((ret_value = H5S_select_all(space, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_all_deserialize() */
@@ -764,6 +768,69 @@ H5S_all_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
} /* H5S_all_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_all_project_scalar
+ *
+ * Purpose: Projects a single element 'all' selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_all_project_scalar(const H5S_t UNUSED *space, hsize_t *offset)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_all_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_ALL == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Set offset of selection in projected buffer */
+ *offset = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5S_all_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_all_project_simple
+ *
+ * Purpose: Projects an 'all' selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_all_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_all_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_ALL == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* Select the entire new space */
+ if(H5S_select_all(new_space, TRUE) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to set all selection")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_all_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5S_select_all
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 74402b1..df81275 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -33,12 +33,12 @@
/* Local datatypes */
/* Static function prototypes */
-static herr_t H5S_hyper_free_span_info (H5S_hyper_span_info_t *span_info);
-static herr_t H5S_hyper_free_span (H5S_hyper_span_t *span);
-static H5S_hyper_span_info_t *H5S_hyper_copy_span (H5S_hyper_span_info_t *spans);
-static herr_t H5S_hyper_span_scratch (H5S_hyper_span_info_t *spans, void *scr_value);
-static herr_t H5S_hyper_span_precompute (H5S_hyper_span_info_t *spans, size_t elmt_size);
-static herr_t H5S_generate_hyperslab (H5S_t *space, H5S_seloper_t op,
+static herr_t H5S_hyper_free_span_info(H5S_hyper_span_info_t *span_info);
+static herr_t H5S_hyper_free_span(H5S_hyper_span_t *span);
+static H5S_hyper_span_info_t *H5S_hyper_copy_span(H5S_hyper_span_info_t *spans);
+static void H5S_hyper_span_scratch(H5S_hyper_span_info_t *spans, void *scr_value);
+static herr_t H5S_hyper_span_precompute(H5S_hyper_span_info_t *spans, size_t elmt_size);
+static herr_t H5S_generate_hyperslab(H5S_t *space, H5S_seloper_t op,
const hsize_t start[], const hsize_t stride[], const hsize_t count[], const hsize_t block[]);
static herr_t H5S_hyper_generate_spans(H5S_t *space);
/* Needed for use in hyperslab code (H5Shyper.c) */
@@ -62,6 +62,8 @@ static htri_t H5S_hyper_is_contiguous(const H5S_t *space);
static htri_t H5S_hyper_is_single(const H5S_t *space);
static htri_t H5S_hyper_is_regular(const H5S_t *space);
static herr_t H5S_hyper_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_hyper_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_hyper_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -96,6 +98,8 @@ const H5S_select_class_t H5S_sel_hyper[1] = {{
H5S_hyper_is_single,
H5S_hyper_is_regular,
H5S_hyper_adjust_u,
+ H5S_hyper_project_scalar,
+ H5S_hyper_project_simple,
H5S_hyper_iter_init,
}};
@@ -292,15 +296,15 @@ H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space)
/* Check if the regular selection can be "flattened" */
if(cont_dim>0) {
- unsigned last_dim_flattened=1; /* Flag to indicate that the last dimension was flattened */
- unsigned flat_rank=rank-cont_dim; /* Number of dimensions after flattening */
+ unsigned last_dim_flattened = 1; /* Flag to indicate that the last dimension was flattened */
+ unsigned flat_rank = rank-cont_dim; /* Number of dimensions after flattening */
unsigned curr_dim; /* Current dimension */
/* Set the iterator's rank to the contiguous dimensions */
- iter->u.hyp.iter_rank=flat_rank;
+ iter->u.hyp.iter_rank = flat_rank;
/* "Flatten" dataspace extent and selection information */
- curr_dim=flat_rank-1;
+ curr_dim = flat_rank - 1;
for(i = (int)rank - 1, acc = 1; i >= 0; i--) {
if(tdiminfo[i].block == mem_size[i] && i > 0) {
/* "Flatten" this dimension */
@@ -308,24 +312,25 @@ H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space)
acc *= mem_size[i];
/* Indicate that the dimension was flattened */
- last_dim_flattened=1;
+ last_dim_flattened = 1;
} /* end if */
else {
if(last_dim_flattened) {
/* First dimension after flattened dimensions */
- iter->u.hyp.diminfo[curr_dim].start = tdiminfo[i].start*acc;
+ iter->u.hyp.diminfo[curr_dim].start = tdiminfo[i].start * acc;
+
/* Special case for single block regular selections */
if(tdiminfo[i].count==1)
iter->u.hyp.diminfo[curr_dim].stride = 1;
else
- iter->u.hyp.diminfo[curr_dim].stride = tdiminfo[i].stride*acc;
+ iter->u.hyp.diminfo[curr_dim].stride = tdiminfo[i].stride * acc;
iter->u.hyp.diminfo[curr_dim].count = tdiminfo[i].count;
- iter->u.hyp.diminfo[curr_dim].block = tdiminfo[i].block*acc;
- iter->u.hyp.size[curr_dim] = mem_size[i]*acc;
+ iter->u.hyp.diminfo[curr_dim].block = tdiminfo[i].block * acc;
+ iter->u.hyp.size[curr_dim] = mem_size[i] * acc;
iter->u.hyp.sel_off[curr_dim] = space->select.offset[i] * acc;
                /* Reset the "last dim flattened" flag to avoid flattening any further dimensions */
- last_dim_flattened=0;
+ last_dim_flattened = 0;
/* Reset the "accumulator" for possible further dimension flattening */
acc=1;
@@ -596,12 +601,12 @@ static htri_t
H5S_hyper_iter_has_next_block(const H5S_sel_iter_t *iter)
{
unsigned u; /* Local index variable */
- herr_t ret_value=FALSE; /* Return value */
+ htri_t ret_value = FALSE; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_iter_has_next_block)
/* Check args */
- assert (iter);
+ HDassert(iter);
/* Check for a single "regular" hyperslab */
if(iter->u.hyp.diminfo_valid) {
@@ -609,25 +614,25 @@ H5S_hyper_iter_has_next_block(const H5S_sel_iter_t *iter)
const hsize_t *toff; /* Temporary offset in selection */
/* Check if the offset of the iterator is at the last location in all dimensions */
- tdiminfo=iter->u.hyp.diminfo;
- toff=iter->u.hyp.off;
- for(u=0; u<iter->rank; u++) {
+ tdiminfo = iter->u.hyp.diminfo;
+ toff = iter->u.hyp.off;
+ for(u = 0; u < iter->rank; u++) {
/* If there is only one block, continue */
- if(tdiminfo[u].count==1)
+ if(tdiminfo[u].count == 1)
continue;
- if(toff[u]!=(tdiminfo[u].start+((tdiminfo[u].count-1)*tdiminfo[u].stride)))
+ if(toff[u] != (tdiminfo[u].start + ((tdiminfo[u].count - 1) * tdiminfo[u].stride)))
HGOTO_DONE(TRUE);
} /* end for */
} /* end if */
else {
/* Check for any levels of the tree with more sequences in them */
- for(u=0; u<iter->rank; u++)
- if(iter->u.hyp.span[u]->next!=NULL)
+ for(u = 0; u < iter->rank; u++)
+ if(iter->u.hyp.span[u]->next != NULL)
HGOTO_DONE(TRUE);
} /* end else */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_iter_has_next_block() */
@@ -1089,30 +1094,30 @@ H5S_hyper_iter_release (H5S_sel_iter_t *iter)
REVISION LOG
--------------------------------------------------------------------------*/
static H5S_hyper_span_t *
-H5S_hyper_new_span (hsize_t low, hsize_t high, H5S_hyper_span_info_t *down, H5S_hyper_span_t *next)
+H5S_hyper_new_span(hsize_t low, hsize_t high, H5S_hyper_span_info_t *down, H5S_hyper_span_t *next)
{
H5S_hyper_span_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_new_span);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_new_span)
/* Allocate a new span node */
- if((ret_value = H5FL_MALLOC(H5S_hyper_span_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (ret_value = H5FL_MALLOC(H5S_hyper_span_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span")
/* Copy the span's basic information */
- ret_value->low=low;
- ret_value->high=high;
- ret_value->nelem=(high-low)+1;
- ret_value->pstride=0;
- ret_value->down=down;
- ret_value->next=next;
+ ret_value->low = low;
+ ret_value->high = high;
+ ret_value->nelem = (high - low) + 1;
+ ret_value->pstride = 0;
+ ret_value->down = down;
+ ret_value->next = next;
/* Increment the reference count of the 'down span' if there is one */
- if(ret_value->down!=NULL)
+ if(ret_value->down)
ret_value->down->count++;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_new_span() */
@@ -1195,24 +1200,23 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_span_precompute (H5S_hyper_span_info_t *spans, size_t elmt_size)
+H5S_hyper_span_precompute(H5S_hyper_span_info_t *spans, size_t elmt_size)
{
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_precompute);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_precompute)
- assert(spans);
+ HDassert(spans);
/* Call the helper routine to actually do the work */
- if(H5S_hyper_span_precompute_helper(spans,elmt_size)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't precompute span info");
+ if(H5S_hyper_span_precompute_helper(spans, elmt_size) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't precompute span info")
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(spans,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(spans, NULL);
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_span_precompute() */
@@ -1222,10 +1226,10 @@ done:
PURPOSE
Set the scratch pointers on hyperslab span trees
USAGE
- herr_t H5S_hyper_span_scratch(span_info)
+ void H5S_hyper_span_scratch(span_info)
H5S_hyper_span_info_t *span_info; IN: Span tree to reset
RETURNS
- Non-negative on success, negative on failure
+ <none>
DESCRIPTION
Set the scratch pointers on a hyperslab span tree.
GLOBAL VARIABLES
@@ -1233,37 +1237,33 @@ done:
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-static herr_t
-H5S_hyper_span_scratch (H5S_hyper_span_info_t *spans, void *scr_value)
+static void
+H5S_hyper_span_scratch(H5S_hyper_span_info_t *spans, void *scr_value)
{
- H5S_hyper_span_t *span; /* Hyperslab span */
- herr_t ret_value=SUCCEED; /* Return value */
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_span_scratch)
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_scratch);
-
- assert(spans);
+ HDassert(spans);
/* Check if we've already set this down span tree */
- if(spans->scratch!=scr_value) {
+ if(spans->scratch != scr_value) {
+ H5S_hyper_span_t *span; /* Hyperslab span */
+
/* Set the tree's scratch pointer */
spans->scratch = (H5S_hyper_span_info_t *)scr_value;
/* Set the scratch pointers in all the nodes */
- span=spans->head;
- while(span!=NULL) {
+ span = spans->head;
+ while(span != NULL) {
/* If there are down spans, set their scratch value also */
- if(span->down!=NULL) {
- if(H5S_hyper_span_scratch(span->down,scr_value)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
- } /* end if */
+ if(span->down != NULL)
+ H5S_hyper_span_scratch(span->down, scr_value);
/* Advance to next span */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end if */
-done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI_VOID
} /* H5S_hyper_span_scratch() */
@@ -1293,65 +1293,65 @@ H5S_hyper_copy_span_helper (H5S_hyper_span_info_t *spans)
H5S_hyper_span_info_t *new_down; /* New down span tree */
H5S_hyper_span_info_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span_helper);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span_helper)
- assert(spans);
+ HDassert(spans);
/* Check if the span tree was already copied */
- if(spans->scratch!=NULL && spans->scratch!=(H5S_hyper_span_info_t *)~((size_t)NULL)) {
+ if(spans->scratch != NULL && spans->scratch != (H5S_hyper_span_info_t *)~((size_t)NULL)) {
/* Just return the value of the already copied span tree */
- ret_value=spans->scratch;
+ ret_value = spans->scratch;
/* Increment the reference count of the span tree */
ret_value->count++;
} /* end if */
else {
/* Allocate a new span_info node */
- if((ret_value = H5FL_MALLOC(H5S_hyper_span_info_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (ret_value = H5FL_MALLOC(H5S_hyper_span_info_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span info")
/* Copy the span_info information */
- ret_value->count=1;
- ret_value->scratch=NULL;
- ret_value->head=NULL;
+ ret_value->count = 1;
+ ret_value->scratch = NULL;
+ ret_value->head = NULL;
/* Set the scratch pointer in the node being copied to the newly allocated node */
- spans->scratch=ret_value;
+ spans->scratch = ret_value;
/* Copy over the nodes in the span list */
- span=spans->head;
- prev_span=NULL;
- while(span!=NULL) {
+ span = spans->head;
+ prev_span = NULL;
+ while(span != NULL) {
/* Allocate a new node */
- if((new_span = H5S_hyper_new_span(span->low,span->high,NULL,NULL))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (new_span = H5S_hyper_new_span(span->low, span->high, NULL, NULL)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span")
/* Append to list of spans */
- if(prev_span==NULL)
- ret_value->head=new_span;
+ if(NULL == prev_span)
+ ret_value->head = new_span;
else
- prev_span->next=new_span;
+ prev_span->next = new_span;
/* Copy the pstride */
- new_span->pstride=span->pstride;
+ new_span->pstride = span->pstride;
/* Recurse to copy the 'down' spans, if there are any */
- if(span->down!=NULL) {
- if((new_down = H5S_hyper_copy_span_helper(span->down))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
- new_span->down=new_down;
+ if(span->down != NULL) {
+ if(NULL == (new_down = H5S_hyper_copy_span_helper(span->down)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, NULL, "can't copy hyperslab spans")
+ new_span->down = new_down;
} /* end if */
/* Update the previous (new) span */
- prev_span=new_span;
+ prev_span = new_span;
/* Advance to next span */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end else */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_copy_span_helper() */
@@ -1375,23 +1375,23 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static H5S_hyper_span_info_t *
-H5S_hyper_copy_span (H5S_hyper_span_info_t *spans)
+H5S_hyper_copy_span(H5S_hyper_span_info_t *spans)
{
H5S_hyper_span_info_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span)
- assert(spans);
+ HDassert(spans);
/* Copy the hyperslab span tree */
- ret_value=H5S_hyper_copy_span_helper(spans);
+ if(NULL == (ret_value = H5S_hyper_copy_span_helper(spans)))
+        HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, NULL, "can't copy hyperslab span tree")
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(spans,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, NULL, "can't reset span tree scratch pointers");
+ H5S_hyper_span_scratch(spans, NULL);
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_copy_span() */
@@ -1630,7 +1630,7 @@ H5S_hyper_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
assert(dst);
/* Allocate space for the hyperslab selection information */
- if((dst->select.sel_info.hslab=H5FL_MALLOC(H5S_hyper_sel_t))==NULL)
+ if(NULL == (dst->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info");
/* Set temporary pointers */
@@ -1658,7 +1658,7 @@ H5S_hyper_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
} /* end if */
else
/* Copy the hyperslab span information */
- dst->select.sel_info.hslab->span_lst=H5S_hyper_copy_span(src->select.sel_info.hslab->span_lst);
+ dst->select.sel_info.hslab->span_lst = H5S_hyper_copy_span(src->select.sel_info.hslab->span_lst);
} /* end if */
done:
@@ -2331,44 +2331,44 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
{
H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
hsize_t u; /* Index variable */
- herr_t ret_value=SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_blocklist);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_blocklist)
/* Sanity checks */
- assert(spans);
- assert(rank<H5O_LAYOUT_NDIMS);
- assert(start);
- assert(end);
- assert(startblock);
- assert(numblocks && *numblocks>0);
- assert(buf && *buf);
+ HDassert(spans);
+ HDassert(rank < H5O_LAYOUT_NDIMS);
+ HDassert(start);
+ HDassert(end);
+ HDassert(startblock);
+ HDassert(numblocks && *numblocks > 0);
+ HDassert(buf && *buf);
     /* Walk through the list of spans, recursing or outputting them */
- curr=spans->head;
- while(curr!=NULL && *numblocks>0) {
+ curr = spans->head;
+ while(curr != NULL && *numblocks > 0) {
/* Recurse if this node has down spans */
- if(curr->down!=NULL) {
+ if(curr->down != NULL) {
/* Add the starting and ending points for this span to the list */
- start[rank]=curr->low;
- end[rank]=curr->high;
+ start[rank] = curr->low;
+ end[rank] = curr->high;
/* Recurse down to the next dimension */
- if(H5S_hyper_span_blocklist(curr->down,start,end,rank+1,startblock,numblocks,buf)<0)
+ if(H5S_hyper_span_blocklist(curr->down, start, end, (rank + 1), startblock, numblocks, buf) < 0)
HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "failed to release hyperslab spans");
} /* end if */
else {
/* Skip this block if we haven't skipped all the startblocks yet */
- if(*startblock>0) {
+ if(*startblock > 0) {
/* Decrement the starting block */
(*startblock)--;
- }
+ } /* end if */
/* Process this block */
else {
/* Encode all the previous dimensions starting & ending points */
/* Copy previous starting points */
- for(u=0; u<rank; u++, (*buf)++)
+ for(u = 0; u < rank; u++, (*buf)++)
HDmemcpy(*buf, &start[u], sizeof(hsize_t));
/* Copy starting point for this span */
@@ -2376,7 +2376,7 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
(*buf)++;
/* Copy previous ending points */
- for(u=0; u<rank; u++, (*buf)++)
+ for(u = 0; u < rank; u++, (*buf)++)
HDmemcpy(*buf, &end[u], sizeof(hsize_t));
/* Copy starting point for this span */
@@ -2389,11 +2389,11 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
} /* end else */
/* Advance to next node */
- curr=curr->next;
+ curr = curr->next;
} /* end while */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_span_blocklist() */
@@ -2430,29 +2430,26 @@ done:
static herr_t
H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startblock, hsize_t numblocks, hsize_t *buf)
{
- H5S_hyper_dim_t *diminfo; /* Alias for dataspace's diminfo information */
- hsize_t tmp_count[H5O_LAYOUT_NDIMS]; /* Temporary hyperslab counts */
- hsize_t offset[H5O_LAYOUT_NDIMS]; /* Offset of element in dataspace */
- hsize_t start[H5O_LAYOUT_NDIMS]; /* Location of start of hyperslab */
- hsize_t end[H5O_LAYOUT_NDIMS]; /* Location of end of hyperslab */
- hsize_t temp_off; /* Offset in a given dimension */
- int i; /* Counter */
- int fast_dim; /* Rank of the fastest changing dimension for the dataspace */
- int temp_dim; /* Temporary rank holder */
- int ndims; /* Rank of the dataspace */
- int done; /* Whether we are done with the iteration */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_get_select_hyper_blocklist);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_get_select_hyper_blocklist)
- assert(space);
- assert(buf);
+ HDassert(space);
+ HDassert(buf);
/* Check for a "regular" hyperslab selection */
if(space->select.sel_info.hslab->diminfo_valid) {
+ const H5S_hyper_dim_t *diminfo; /* Alias for dataspace's diminfo information */
+ hsize_t tmp_count[H5O_LAYOUT_NDIMS]; /* Temporary hyperslab counts */
+ hsize_t offset[H5O_LAYOUT_NDIMS]; /* Offset of element in dataspace */
+ unsigned fast_dim; /* Rank of the fastest changing dimension for the dataspace */
+ unsigned ndims; /* Rank of the dataspace */
+ hbool_t done; /* Whether we are done with the iteration */
+ unsigned u; /* Counter */
+
/* Set some convienence values */
- ndims=space->extent.rank;
- fast_dim=ndims-1;
+ ndims = space->extent.rank;
+ fast_dim = ndims - 1;
/* Check which set of dimension information to use */
if(internal)
@@ -2460,39 +2457,41 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
* Use the "optimized dimension information" to pass back information
* on the blocks set, not the "application information".
*/
- diminfo=space->select.sel_info.hslab->opt_diminfo;
+ diminfo = space->select.sel_info.hslab->opt_diminfo;
else
/*
* Use the "application dimension information" to pass back to the user
* the blocks they set, not the optimized, internal information.
*/
- diminfo=space->select.sel_info.hslab->app_diminfo;
+ diminfo = space->select.sel_info.hslab->app_diminfo;
/* Build the tables of count sizes as well as the initial offset */
- for(i=0; i<ndims; i++) {
- tmp_count[i]=diminfo[i].count;
- offset[i]=diminfo[i].start;
+ for(u = 0; u < ndims; u++) {
+ tmp_count[u] = diminfo[u].count;
+ offset[u] = diminfo[u].start;
} /* end for */
/* We're not done with the iteration */
- done=0;
+ done = FALSE;
/* Go iterate over the hyperslabs */
- while(done==0 && numblocks>0) {
+ while(!done && numblocks > 0) {
+ hsize_t temp_off; /* Offset in a given dimension */
+
/* Iterate over the blocks in the fastest dimension */
- while(tmp_count[fast_dim]>0 && numblocks>0) {
+ while(tmp_count[fast_dim] > 0 && numblocks > 0) {
/* Check if we should copy this block information */
- if(startblock==0) {
+ if(startblock == 0) {
/* Copy the starting location */
- HDmemcpy(buf,offset,sizeof(hsize_t)*ndims);
- buf+=ndims;
+ HDmemcpy(buf, offset, sizeof(hsize_t) * ndims);
+ buf += ndims;
/* Compute the ending location */
- HDmemcpy(buf,offset,sizeof(hsize_t)*ndims);
- for(i=0; i<ndims; i++)
- buf[i]+=(diminfo[i].block-1);
- buf+=ndims;
+ HDmemcpy(buf, offset, sizeof(hsize_t) * ndims);
+ for(u = 0; u < ndims; u++)
+ buf[u] += (diminfo[u].block - 1);
+ buf += ndims;
/* Decrement the number of blocks to retrieve */
numblocks--;
@@ -2501,33 +2500,35 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
startblock--;
/* Move the offset to the next sequence to start */
- offset[fast_dim]+=diminfo[fast_dim].stride;
+ offset[fast_dim] += diminfo[fast_dim].stride;
/* Decrement the block count */
tmp_count[fast_dim]--;
} /* end while */
/* Work on other dimensions if necessary */
- if(fast_dim>0 && numblocks>0) {
+ if(fast_dim > 0 && numblocks > 0) {
+ int temp_dim; /* Temporary rank holder */
+
/* Reset the block counts */
- tmp_count[fast_dim]=diminfo[fast_dim].count;
+ tmp_count[fast_dim] = diminfo[fast_dim].count;
/* Bubble up the decrement to the slower changing dimensions */
- temp_dim=fast_dim-1;
- while(temp_dim>=0 && done==0) {
+ temp_dim = (int)(fast_dim - 1);
+ while(temp_dim >= 0 && !done) {
/* Decrement the block count */
tmp_count[temp_dim]--;
/* Check if we have more blocks left */
- if(tmp_count[temp_dim]>0)
+ if(tmp_count[temp_dim] > 0)
break;
/* Check for getting out of iterator */
- if(temp_dim==0)
- done=1;
+ if(temp_dim == 0)
+ done = TRUE;
/* Reset the block count in this dimension */
- tmp_count[temp_dim]=diminfo[temp_dim].count;
+ tmp_count[temp_dim] = diminfo[temp_dim].count;
/* Wrapped a dimension, go up to next dimension */
temp_dim--;
@@ -2535,16 +2536,20 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
} /* end if */
/* Re-compute offset array */
- for(i=0; i<ndims; i++) {
- temp_off=diminfo[i].start+diminfo[i].stride*(diminfo[i].count-tmp_count[i]);
- offset[i]=temp_off;
+ for(u = 0; u < ndims; u++) {
+ temp_off = diminfo[u].start + diminfo[u].stride * (diminfo[u].count - tmp_count[u]);
+ offset[u] = temp_off;
} /* end for */
} /* end while */
} /* end if */
- else
- ret_value=H5S_hyper_span_blocklist(space->select.sel_info.hslab->span_lst,start,end,(hsize_t)0,&startblock,&numblocks,&buf);
+ else {
+ hsize_t start[H5O_LAYOUT_NDIMS]; /* Location of start of hyperslab */
+ hsize_t end[H5O_LAYOUT_NDIMS]; /* Location of end of hyperslab */
- FUNC_LEAVE_NOAPI(ret_value);
+ ret_value = H5S_hyper_span_blocklist(space->select.sel_info.hslab->span_lst, start, end, (hsize_t)0, &startblock, &numblocks, &buf);
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_get_select_hyper_blocklist() */
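
The public counterpart of this routine, H5Sget_select_hyper_blocklist(), returns each block as a start corner followed by the opposite corner (rank coordinates each). A minimal usage sketch, assuming space_id holds a rank-2 hyperslab selection:

    #include "hdf5.h"
    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: print the corner coordinates of every block in a rank-2
     * hyperslab selection held by space_id (assumed).
     */
    static void
    print_blocks(hid_t space_id)
    {
        hssize_t nblocks = H5Sget_select_hyper_nblocks(space_id);
        hsize_t *buf;
        hssize_t u;

        if(nblocks <= 0)
            return;

        /* Each block contributes 2 * rank coordinates (rank == 2 here) */
        if(NULL == (buf = (hsize_t *)malloc((size_t)nblocks * 4 * sizeof(hsize_t))))
            return;
        H5Sget_select_hyper_blocklist(space_id, (hsize_t)0, (hsize_t)nblocks, buf);

        for(u = 0; u < nblocks; u++)
            printf("block %lld: (%llu, %llu) - (%llu, %llu)\n", (long long)u,
                   (unsigned long long)buf[4 * u],     (unsigned long long)buf[4 * u + 1],
                   (unsigned long long)buf[4 * u + 2], (unsigned long long)buf[4 * u + 3]);

        free(buf);
    }
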
@@ -2636,40 +2641,40 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_bounds_helper (const H5S_hyper_span_info_t *spans, const hssize_t *offset, hsize_t rank, hsize_t *start, hsize_t *end)
+H5S_hyper_bounds_helper(const H5S_hyper_span_info_t *spans, const hssize_t *offset, hsize_t rank, hsize_t *start, hsize_t *end)
{
- H5S_hyper_span_t *curr; /* Hyperslab information nodes */
- herr_t ret_value=SUCCEED; /* Return value */
+ H5S_hyper_span_t *curr; /* Hyperslab information nodes */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_bounds_helper)
- assert(spans);
- assert(offset);
- assert(rank<H5O_LAYOUT_NDIMS);
- assert(start);
- assert(end);
+ HDassert(spans);
+ HDassert(offset);
+ HDassert(rank < H5O_LAYOUT_NDIMS);
+ HDassert(start);
+ HDassert(end);
/* Check each point to determine whether selection+offset is within extent */
curr=spans->head;
while(curr!=NULL) {
/* Check for offset moving selection negative */
- if(((hssize_t)curr->low+offset[rank])<0)
+ if(((hssize_t)curr->low + offset[rank]) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "offset moves selection out of bounds")
/* Check if the current span extends the bounding box */
- if((curr->low+offset[rank])<start[rank])
- start[rank]=curr->low+offset[rank];
- if((curr->high+offset[rank])>end[rank])
- end[rank]=curr->high+offset[rank];
+ if((curr->low + offset[rank]) < start[rank])
+ start[rank] = curr->low + offset[rank];
+ if((curr->high + offset[rank]) > end[rank])
+ end[rank] = curr->high + offset[rank];
/* Recurse if this node has down spans */
- if(curr->down!=NULL) {
- if(H5S_hyper_bounds_helper(curr->down,offset,rank+1,start,end)<0)
+ if(curr->down != NULL) {
+ if(H5S_hyper_bounds_helper(curr->down, offset, (rank + 1), start, end) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "failure in lower dimension")
} /* end if */
/* Advance to next node */
- curr=curr->next;
+ curr = curr->next;
} /* end while */
done:
@@ -3609,20 +3614,16 @@ done:
herr_t
H5S_hyper_reset_scratch(H5S_t *space)
{
- herr_t ret_value=SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_reset_scratch);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_reset_scratch)
- assert(space);
+ HDassert(space);
/* Check if there are spans in the span tree */
- if(space->select.sel_info.hslab->span_lst!=NULL)
+ if(space->select.sel_info.hslab->span_lst != NULL)
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset span tree scratch pointers");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
-done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_hyper_reset_scratch() */
@@ -3685,6 +3686,8 @@ H5S_hyper_convert(H5S_t *space)
case H5S_SEL_NONE: /* No elements selected in dataspace */
case H5S_SEL_POINTS: /* Point selection */
+ case H5S_SEL_ERROR: /* Selection error */
+ case H5S_SEL_N: /* Selection count */
default:
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "can't convert to span tree selection");
} /* end switch */
@@ -4033,8 +4036,7 @@ H5S_hyper_adjust_u(H5S_t *space, const hsize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset adjustment");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -4042,6 +4044,357 @@ done:
} /* H5S_hyper_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_scalar
+ *
+ * Purpose: Projects a single element hyperslab selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Check for a "regular" hyperslab selection */
+ if(space->select.sel_info.hslab->diminfo_valid) {
+ const H5S_hyper_dim_t *diminfo = space->select.sel_info.hslab->opt_diminfo; /* Alias for dataspace's diminfo information */
+ unsigned u; /* Counter */
+
+ /* Build the table of the initial offset */
+ for(u = 0; u < space->extent.rank; u++) {
+ block[u] = diminfo[u].start;
+
+ /* Check for more than one hyperslab */
+ if(diminfo[u].count > 1 || diminfo[u].block > 1)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "hyperslab selection of one element has more than one node!")
+ } /* end for */
+ } /* end if */
+ else {
+ const H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ /* Advance down selected spans */
+ curr = space->select.sel_info.hslab->span_lst->head;
+ curr_dim = 0;
+ while(curr) {
+ /* Check for more than one span */
+ if(curr->next || curr->low != curr->high)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "hyperslab selection of one element has more than one node!")
+
+ /* Save the location of the selection in current dimension */
+ block[curr_dim] = curr->low;
+
+ /* Advance down to next dimension */
+ curr = curr->down->head;
+ curr_dim++;
+ } /* end while */
+ } /* end else */
+
+ /* Calculate offset of selection in projected buffer */
+ *offset = H5V_array_offset(space->extent.rank, space->extent.size, block);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_scalar() */
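
The *offset computed above is the linear element position of the single selected element within the base extent. A standalone sketch of the row-major arithmetic assumed here (the helper name is hypothetical; it is meant to mirror what H5V_array_offset() is used for in the code above):

    /* Sketch: linearize coordinate 'block' within extent 'dims' in C
     * (row-major) order to get an element offset.
     */
    static hsize_t
    row_major_offset(unsigned rank, const hsize_t *dims, const hsize_t *block)
    {
        hsize_t off = 0;
        unsigned u;

        for(u = 0; u < rank; u++)
            off = off * dims[u] + block[u];     /* Horner-style accumulation */

        return off;     /* e.g. rank 2, dims {10,10}, block {3,4} -> 34 */
    }
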
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple_lower
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a lower rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple_lower(const H5S_t *base_space, H5S_t *new_space)
+{
+ H5S_hyper_span_info_t *down; /* Pointer to list of spans */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_project_simple_lower)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(new_space->extent.rank < base_space->extent.rank);
+
+ /* Walk down the span tree until we reach the selection to project */
+ down = base_space->select.sel_info.hslab->span_lst;
+ curr_dim = 0;
+ while(down && curr_dim < (base_space->extent.rank - new_space->extent.rank)) {
+ /* Sanity check */
+ HDassert(NULL == down->head->next);
+
+ /* Advance down to next dimension */
+ down = down->head->down;
+ curr_dim++;
+ } /* end while */
+ HDassert(down);
+
+ /* Share the underlying hyperslab span information */
+ new_space->select.sel_info.hslab->span_lst = down;
+ new_space->select.sel_info.hslab->span_lst->count++;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5S_hyper_project_simple_lower() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple_higher
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a higher rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple_higher(const H5S_t *base_space, H5S_t *new_space)
+{
+ H5S_hyper_span_t *prev_span = NULL; /* Pointer to previous list of spans */
+ unsigned curr_dim; /* Current dimension being operated on */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_simple_higher)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* Create nodes until reaching the correct # of dimensions */
+ new_space->select.sel_info.hslab->span_lst = NULL;
+ curr_dim = 0;
+ while(curr_dim < (new_space->extent.rank - base_space->extent.rank)) {
+ H5S_hyper_span_info_t *new_span_info; /* Pointer to list of spans */
+ H5S_hyper_span_t *new_span; /* Temporary hyperslab span */
+
+ /* Allocate a new span_info node */
+ if(NULL == (new_span_info = H5FL_MALLOC(H5S_hyper_span_info_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate hyperslab span info")
+
+ /* Check for linking into higher span */
+ if(prev_span)
+ prev_span->down = new_span_info;
+
+ /* Allocate a new node */
+ if(NULL == (new_span = H5S_hyper_new_span(0, 0, NULL, NULL)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate hyperslab span")
+
+ /* Set the span_info information */
+ new_span_info->count = 1;
+ new_span_info->scratch = NULL;
+ new_span_info->head = new_span;
+
+ /* Attach to new space, if top span info */
+ if(NULL == new_space->select.sel_info.hslab->span_lst)
+ new_space->select.sel_info.hslab->span_lst = new_span_info;
+
+ /* Remember previous span info */
+ prev_span = new_span;
+
+ /* Advance to next dimension */
+ curr_dim++;
+ } /* end while */
+ HDassert(new_space->select.sel_info.hslab->span_lst);
+ HDassert(prev_span);
+
+ /* Share the underlying hyperslab span information */
+ prev_span->down = base_space->select.sel_info.hslab->span_lst;
+ prev_span->down->count++;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_simple_higher() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* We are setting a new selection, remove any current selection in new dataspace */
+ if(H5S_SELECT_RELEASE(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
+
+ /* Allocate space for the hyperslab selection information */
+ if(NULL == (new_space->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info")
+
+ /* Check for a "regular" hyperslab selection */
+ if(base_space->select.sel_info.hslab->diminfo_valid) {
+ unsigned base_space_dim; /* Current dimension in the base dataspace */
+ unsigned new_space_dim; /* Current dimension in the new dataspace */
+
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ const H5S_hyper_dim_t *opt_diminfo = base_space->select.sel_info.hslab->opt_diminfo; /* Alias for dataspace's diminfo information */
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ unsigned u; /* Local index variable */
+
+ /* Compute the offset for the down-projection */
+ HDmemset(block, 0, sizeof(block));
+ for(u = 0; u < (base_space->extent.rank - new_space->extent.rank); u++)
+ block[u] = opt_diminfo[u].start;
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+ /* Set the correct dimensions for the base & new spaces */
+ base_space_dim = base_space->extent.rank - new_space->extent.rank;
+ new_space_dim = 0;
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+ /* Set the diminfo information for the higher dimensions */
+ for(new_space_dim = 0; new_space_dim < (new_space->extent.rank - base_space->extent.rank); new_space_dim++) {
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start = 0;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride = 1;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count = 1;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block = 1;
+
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start = 0;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].stride = 1;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count = 1;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block = 1;
+ } /* end for */
+
+ /* Start at beginning of base space's dimension info */
+ base_space_dim = 0;
+ } /* end else */
+
+ /* Copy the diminfo */
+ while(base_space_dim < base_space->extent.rank) {
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].start;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].stride;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].count;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].block;
+
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].start;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].stride =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].stride;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].count;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].block;
+
+ /* Advance to next dimensions */
+ base_space_dim++;
+ new_space_dim++;
+ } /* end while */
+
+ /* Indicate that the dimension information is valid */
+ new_space->select.sel_info.hslab->diminfo_valid = TRUE;
+
+ /* Indicate that there's no slab information */
+ new_space->select.sel_info.hslab->span_lst = NULL;
+ } /* end if */
+ else {
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ const H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ /* Clear the block buffer */
+ HDmemset(block, 0, sizeof(block));
+
+ /* Advance down selected spans */
+ curr = base_space->select.sel_info.hslab->span_lst->head;
+ curr_dim = 0;
+ while(curr && curr_dim < (base_space->extent.rank - new_space->extent.rank)) {
+ /* Save the location of the selection in current dimension */
+ block[curr_dim] = curr->low;
+
+ /* Advance down to next dimension */
+ curr = curr->down->head;
+ curr_dim++;
+ } /* end while */
+
+ /* Compute the offset for the down-projection */
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+ /* Project the base space's selection down in less dimensions */
+ if(H5S_hyper_project_simple_lower(base_space, new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't project hyperslab selection into less dimensions")
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+            /* Project the base space's selection up into more dimensions */
+            if(H5S_hyper_project_simple_higher(base_space, new_space) < 0)
+                HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't project hyperslab selection into more dimensions")
+ } /* end else */
+
+ /* Indicate that the dimension information is not valid */
+ new_space->select.sel_info.hslab->diminfo_valid = FALSE;
+ } /* end else */
+
+ /* Number of elements selected will be the same */
+ new_space->select.num_elem = base_space->select.num_elem;
+
+ /* Set selection type */
+ new_space->select.type = H5S_sel_hyper;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_simple() */
+
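The down-projection offset computed above is just the row-major linear offset of the selection's start coordinates in the dimensions being dropped. A self-contained sketch of that arithmetic, with a hypothetical array_offset() standing in for H5V_array_offset():

    #include <assert.h>

    /* Row-major linear offset of coordinate 'coord[]' in an array with
     * dimensions 'dims[]' (a stand-in for H5V_array_offset). */
    static unsigned long long
    array_offset(unsigned rank, const unsigned long long *dims,
                 const unsigned long long *coord)
    {
        unsigned long long off = 0;
        unsigned u;

        for(u = 0; u < rank; u++)
            off = off * dims[u] + coord[u];
        return off;
    }

    int main(void)
    {
        /* Project a selection in a 4 x 5 x 6 space down to rank 2: the two
         * trailing dimensions are kept, the leading one is dropped.  If the
         * selection starts at index 2 in the dropped dimension, the projected
         * selection begins 2 * (5 * 6) = 60 elements into the array. */
        unsigned long long dims[3]  = {4, 5, 6};
        unsigned long long start[3] = {2, 0, 0};   /* only dropped dims non-zero */

        assert(array_offset(3, dims, start) == 60);
        return 0;
    }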
+
/*--------------------------------------------------------------------------
NAME
H5S_hyper_adjust_helper_s
@@ -4061,41 +4414,41 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_adjust_helper_s (H5S_hyper_span_info_t *spans, const hssize_t *offset)
+H5S_hyper_adjust_helper_s(H5S_hyper_span_info_t *spans, const hssize_t *offset)
{
H5S_hyper_span_t *span; /* Pointer to current span in span tree */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_adjust_helper_s);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_adjust_helper_s)
/* Sanity check */
- assert(spans);
- assert(offset);
+ HDassert(spans);
+ HDassert(offset);
/* Check if we've already set this down span tree */
- if(spans->scratch!=(H5S_hyper_span_info_t *)~((size_t)NULL)) {
+ if(spans->scratch != (H5S_hyper_span_info_t *)~((size_t)NULL)) {
/* Set the tree's scratch pointer */
- spans->scratch=(H5S_hyper_span_info_t *)~((size_t)NULL);
+ spans->scratch = (H5S_hyper_span_info_t *)~((size_t)NULL);
/* Get the span lists for each span in this tree */
- span=spans->head;
+ span = spans->head;
/* Iterate over the spans in tree */
- while(span!=NULL) {
+ while(span != NULL) {
/* Adjust span offset */
- assert((hssize_t)span->low>=*offset);
- span->low-=*offset;
- span->high-=*offset;
+ HDassert((hssize_t)span->low >= *offset);
+ span->low -= *offset;
+ span->high -= *offset;
/* Recursively adjust spans in next dimension down */
- if(span->down!=NULL)
- H5S_hyper_adjust_helper_s(span->down,offset+1);
+ if(span->down != NULL)
+ H5S_hyper_adjust_helper_s(span->down, offset + 1);
/* Advance to next span in this dimension */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end if */
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_hyper_adjust_helper_s() */
@@ -4142,8 +4495,7 @@ H5S_hyper_adjust_s(H5S_t *space, const hssize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset adjustment");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -4252,8 +4604,7 @@ H5S_hyper_move(H5S_t *space, const hssize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset movement");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -6035,8 +6386,6 @@ done:
* Programmer: Quincey Koziol
* Wednesday, January 10, 2001
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6220,18 +6569,18 @@ H5S_select_hyperslab (H5S_t *space, H5S_seloper_t op,
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation");
} /* end switch */
- if(op==H5S_SELECT_SET) {
+ if(op == H5S_SELECT_SET) {
/* If we are setting a new selection, remove current selection first */
- if(H5S_SELECT_RELEASE(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab");
+ if(H5S_SELECT_RELEASE(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
/* Allocate space for the hyperslab selection information */
- if((space->select.sel_info.hslab=H5FL_MALLOC(H5S_hyper_sel_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info");
+ if(NULL == (space->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info")
/* Save the diminfo */
- space->select.num_elem=1;
- for(u=0; u<space->extent.rank; u++) {
+ space->select.num_elem = 1;
+ for(u = 0; u < space->extent.rank; u++) {
space->select.sel_info.hslab->app_diminfo[u].start = start[u];
space->select.sel_info.hslab->app_diminfo[u].stride = stride[u];
space->select.sel_info.hslab->app_diminfo[u].count = count[u];
@@ -6241,39 +6590,40 @@ H5S_select_hyperslab (H5S_t *space, H5S_seloper_t op,
space->select.sel_info.hslab->opt_diminfo[u].stride = opt_stride[u];
space->select.sel_info.hslab->opt_diminfo[u].count = opt_count[u];
space->select.sel_info.hslab->opt_diminfo[u].block = opt_block[u];
- space->select.num_elem*=(opt_count[u]*opt_block[u]);
+
+ space->select.num_elem *= (opt_count[u] * opt_block[u]);
} /* end for */
/* Indicate that the dimension information is valid */
- space->select.sel_info.hslab->diminfo_valid=TRUE;
+ space->select.sel_info.hslab->diminfo_valid = TRUE;
/* Indicate that there's no slab information */
- space->select.sel_info.hslab->span_lst=NULL;
+ space->select.sel_info.hslab->span_lst = NULL;
} /* end if */
- else if(op>=H5S_SELECT_OR && op<=H5S_SELECT_NOTA) {
+ else if(op >= H5S_SELECT_OR && op <= H5S_SELECT_NOTA) {
/* Sanity check */
- assert(H5S_GET_SELECT_TYPE(space)==H5S_SEL_HYPERSLABS);
+ HDassert(H5S_GET_SELECT_TYPE(space) == H5S_SEL_HYPERSLABS);
/* Check if there's no hyperslab span information currently */
- if(space->select.sel_info.hslab->span_lst==NULL)
- if(H5S_hyper_generate_spans(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_UNINITIALIZED, FAIL, "dataspace does not have span tree");
+ if(NULL == space->select.sel_info.hslab->span_lst)
+ if(H5S_hyper_generate_spans(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_UNINITIALIZED, FAIL, "dataspace does not have span tree")
/* Indicate that the regular dimensions are no longer valid */
- space->select.sel_info.hslab->diminfo_valid=FALSE;
+ space->select.sel_info.hslab->diminfo_valid = FALSE;
/* Add in the new hyperslab information */
- if(H5S_generate_hyperslab (space, op, start, opt_stride, opt_count, opt_block)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't generate hyperslabs");
+ if(H5S_generate_hyperslab(space, op, start, opt_stride, opt_count, opt_block) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't generate hyperslabs")
} /* end if */
else
- HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation");
+ HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation")
/* Set selection type */
- space->select.type=H5S_sel_hyper;
+ space->select.type = H5S_sel_hyper;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_hyperslab() */
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index f535122..e9d0541 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -31,45 +31,25 @@
#include "H5Fprivate.h" /* File access */
#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Oprivate.h" /* Object headers */
#include "H5Pprivate.h" /* Property lists */
#include "H5Spkg.h" /* Dataspaces */
#ifdef H5_HAVE_PARALLEL
-static herr_t
-H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-static herr_t
-H5S_mpio_none_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-static herr_t
-H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-
-static herr_t
-H5S_mpio_span_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
+static herr_t H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_mpio_none_type(MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type);
+static herr_t H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size);
-static herr_t H5S_obtain_datatype(const hsize_t size[],
- H5S_hyper_span_t* span,MPI_Datatype *span_type,
- size_t elmt_size,int dimindex);
+#define H5S_MPIO_INITIAL_ALLOC_COUNT 256
/*-------------------------------------------------------------------------
@@ -82,30 +62,20 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications:
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
hsize_t total_bytes;
- hssize_t snelmts; /*total number of elmts (signed) */
- hsize_t nelmts; /*total number of elmts */
- herr_t ret_value = SUCCEED;
+ hssize_t snelmts; /* Total number of elmts (signed) */
+ hsize_t nelmts; /* Total number of elmts */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_all_type)
@@ -121,8 +91,7 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
/* fill in the return values */
*new_type = MPI_BYTE;
- H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, size_t);
- *extra_offset = 0;
+ H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, int);
*is_derived_type = FALSE;
done:
@@ -140,32 +109,23 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: Quincey Koziol, October 29, 2002
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_mpio_none_type);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_mpio_none_type)
/* fill in the return values */
*new_type = MPI_BYTE;
*count = 0;
- *extra_offset = 0;
*is_derived_type = FALSE;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_mpio_none_type() */
@@ -179,35 +139,15 @@ H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size,
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications: ppw 990401
- * rky, ppw 2000-09-26 Freed old type after creating struct type.
- * rky 2000-10-05 Changed displacements to be MPI_Aint.
- * rky 2000-10-06 Added code for cases of empty hyperslab.
- * akc, rky 2000-11-16 Replaced hard coded dimension size with
- * H5S_MAX_RANK.
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter. Also accomodate selection
- * offset in MPI type built.
- *
- * Albert Cheng, August 4, 2004
- * Reimplemented the algorithm of forming the outer_type by
- * defining it as (start, vector, extent) in one call.
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
H5S_sel_iter_t sel_iter; /* Selection iteration info */
hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
@@ -231,18 +171,16 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type)
/* Check args */
HDassert(space);
HDassert(sizeof(MPI_Aint) >= sizeof(elmt_size));
- if(0 == elmt_size)
- goto empty;
/* Initialize selection iterator */
if(H5S_select_iter_init(&sel_iter, space, elmt_size) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
- sel_iter_init = 1; /* Selection iteration info has been initialized */
+ sel_iter_init = TRUE; /* Selection iteration info has been initialized */
/* Abbreviate args */
diminfo = sel_iter.u.hyp.diminfo;
@@ -251,18 +189,16 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
/* make a local copy of the dimension info so we can operate with them */
/* Check if this is a "flattened" regular hyperslab selection */
- if(sel_iter.u.hyp.iter_rank!=0 && sel_iter.u.hyp.iter_rank<space->extent.rank) {
+ if(sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) {
/* Flattened selection */
rank = sel_iter.u.hyp.iter_rank;
HDassert(rank >= 0 && rank <= H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
- goto empty;
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC);
#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+sel_iter.u.hyp.sel_off[i];
+ for(i = 0; i < rank; ++i) {
+ d[i].start = diminfo[i].start + sel_iter.u.hyp.sel_off[i];
d[i].strid = diminfo[i].stride;
d[i].block = diminfo[i].block;
d[i].count = diminfo[i].count;
@@ -277,26 +213,26 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
HDfprintf(H5DEBUG(S), "\n" );
}
#endif
- if (0==d[i].block)
+ if(0 == d[i].block)
goto empty;
- if (0==d[i].count)
+ if(0 == d[i].count)
goto empty;
- if (0==d[i].xtent)
+ if(0 == d[i].xtent)
goto empty;
- }
+ } /* end for */
} /* end if */
else {
/* Non-flattened selection */
rank = space->extent.rank;
- HDassert(rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
+ HDassert(rank >= 0 && rank <= H5S_MAX_RANK); /* within array bounds */
+ if(0 == rank)
goto empty;
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC);
#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+space->select.offset[i];
+ for(i = 0; i < rank; ++i) {
+ d[i].start = diminfo[i].start + space->select.offset[i];
d[i].strid = diminfo[i].stride;
d[i].block = diminfo[i].block;
d[i].count = diminfo[i].count;
@@ -311,40 +247,37 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
HDfprintf(H5DEBUG(S), "\n" );
}
#endif
- if (0==d[i].block)
+ if(0 == d[i].block)
goto empty;
- if (0==d[i].count)
+ if(0 == d[i].count)
goto empty;
- if (0==d[i].xtent)
+ if(0 == d[i].xtent)
goto empty;
- }
+ } /* end for */
} /* end else */
/**********************************************************************
Compute array "offset[rank]" which gives the offsets for a multi-
dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
**********************************************************************/
- offset[rank-1] = 1;
- max_xtent[rank-1] = d[rank-1].xtent;
-/*#ifdef H5Smpi_DEBUG */ /* leave the old way */
+ offset[rank - 1] = 1;
+ max_xtent[rank - 1] = d[rank - 1].xtent;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S)) {
i=rank-1;
- HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
+ HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
i, offset[i], i, max_xtent[i]);
}
#endif
- for (i=rank-2; i>=0; --i) {
- offset[i] = offset[i+1]*d[i+1].xtent;
- max_xtent[i] = max_xtent[i+1]*d[i].xtent;
+ for(i = rank - 2; i >= 0; --i) {
+ offset[i] = offset[i + 1] * d[i + 1].xtent;
+ max_xtent[i] = max_xtent[i + 1] * d[i].xtent;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
i, offset[i], i, max_xtent[i]);
- }
#endif
-
- }
+ } /* end for */
/* Create a type covering the selected hyperslab.
* Multidimensional dataspaces are stored in row-major order.
@@ -356,59 +289,58 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
*******************************************************/
#ifdef H5S_DEBUG
if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: Making contig type %d MPI_BYTEs\n", FUNC,elmt_size );
+ HDfprintf(H5DEBUG(S), "%s: Making contig type %Zu MPI_BYTEs\n", FUNC, elmt_size);
for (i=rank-1; i>=0; --i)
HDfprintf(H5DEBUG(S), "d[%d].xtent=%Hu \n", i, d[i].xtent);
}
#endif
- if (MPI_SUCCESS != (mpi_code= MPI_Type_contiguous( (int)elmt_size, MPI_BYTE, &inner_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
/*******************************************************
* Construct the type by walking the hyperslab dims
* from the inside out:
*******************************************************/
- for ( i=rank-1; i>=0; --i) {
+ for(i = rank - 1; i >= 0; --i) {
#ifdef H5S_DEBUG
- if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
"start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n",
FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
- }
#endif
#ifdef H5S_DEBUG
if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
+ HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
#endif
/****************************************
* Build vector type of the selection.
****************************************/
- mpi_code =MPI_Type_vector((int)(d[i].count), /* count */
- (int)(d[i].block), /* blocklength */
- (int)(d[i].strid), /* stride */
- inner_type, /* old type */
- &outer_type); /* new type */
-
- MPI_Type_free( &inner_type );
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code);
-
- /****************************************
- * Then build the dimension type as (start, vector type, xtent).
- ****************************************/
- /* calculate start and extent values of this dimension */
+ mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
+ (int)(d[i].block), /* blocklength */
+ (int)(d[i].strid), /* stride */
+ inner_type, /* old type */
+ &outer_type); /* new type */
+
+ MPI_Type_free(&inner_type);
+ if(mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
+
+ /****************************************
+ * Then build the dimension type as (start, vector type, xtent).
+ ****************************************/
+ /* calculate start and extent values of this dimension */
displacement[1] = d[i].start * offset[i] * elmt_size;
displacement[2] = (MPI_Aint)elmt_size * max_xtent[i];
if(MPI_SUCCESS != (mpi_code = MPI_Type_extent(outer_type, &extent_len)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code);
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code)
- /*************************************************
- * Restructure this datatype ("outer_type")
- * so that it still starts at 0, but its extent
- * is the full extent in this dimension.
- *************************************************/
- if (displacement[1] > 0 || (int)extent_len < displacement[2]) {
+ /*************************************************
+ * Restructure this datatype ("outer_type")
+ * so that it still starts at 0, but its extent
+ * is the full extent in this dimension.
+ *************************************************/
+ if(displacement[1] > 0 || (int)extent_len < displacement[2]) {
block_length[0] = 1;
block_length[1] = 1;
@@ -420,42 +352,37 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
old_types[1] = outer_type;
old_types[2] = MPI_UB;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
- HDfprintf(H5DEBUG(S), "%s: i=%d Extending struct type\n"
- "***displacements: %d, %d, %d\n",
- FUNC, i, displacement[0], displacement[1], displacement[2]);
- }
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: i=%d Extending struct type\n"
+ "***displacements: %ld, %ld, %ld\n",
+ FUNC, i, (long)displacement[0], (long)displacement[1], (long)displacement[2]);
#endif
- mpi_code = MPI_Type_struct ( 3, /* count */
- block_length, /* blocklengths */
- displacement, /* displacements */
- old_types, /* old types */
- &inner_type); /* new type */
-
- MPI_Type_free (&outer_type);
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code);
- }
- else {
+ mpi_code = MPI_Type_struct(3, /* count */
+ block_length, /* blocklengths */
+ displacement, /* displacements */
+ old_types, /* old types */
+ &inner_type); /* new type */
+
+ MPI_Type_free(&outer_type);
+ if(mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code)
+ } /* end if */
+ else
inner_type = outer_type;
- }
} /* end for */
/***************************
* End of loop, walking
* thru dimensions.
***************************/
-
/* At this point inner_type is actually the outermost type, even for 0-trip loop */
-
*new_type = inner_type;
- if (MPI_SUCCESS != (mpi_code= MPI_Type_commit( new_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* fill in the remaining return values */
*count = 1; /* only have to move one of these suckers! */
- *extra_offset = 0;
*is_derived_type = TRUE;
HGOTO_DONE(SUCCEED);
@@ -463,24 +390,21 @@ empty:
/* special case: empty hyperslab */
*new_type = MPI_BYTE;
*count = 0;
- *extra_offset = 0;
*is_derived_type = FALSE;
done:
/* Release selection iterator */
- if(sel_iter_init) {
- if (H5S_SELECT_ITER_RELEASE(&sel_iter)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
- } /* end if */
+ if(sel_iter_init)
+ if(H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "Leave %s, count=%ld is_derived_type=%t\n",
FUNC, *count, *is_derived_type );
- }
#endif
- FUNC_LEAVE_NOAPI(ret_value);
-}
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5S_mpio_hyper_type() */
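For a regular hyperslab, the routine above works from the fastest-varying dimension outward: each (count, block, stride) triple becomes an MPI vector type whose extent is then padded to the full row length so it tiles correctly inside the next dimension's type. A minimal one-dimensional sketch of that pattern; it uses MPI_Type_create_resized instead of the MPI_LB/MPI_UB struct used above, and omits the per-dimension start displacement:

    #include <mpi.h>

    /* Build a datatype selecting 'count' blocks of 'block' elements every
     * 'stride' elements, where each element is 'elmt_size' bytes, and resize
     * it so its extent covers a whole row of 'xtent' elements. */
    static int
    make_hyper_row_type(int count, int block, int stride, int xtent,
                        int elmt_size, MPI_Datatype *new_type)
    {
        MPI_Datatype elmt, vec;

        if(MPI_Type_contiguous(elmt_size, MPI_BYTE, &elmt) != MPI_SUCCESS)
            return -1;
        if(MPI_Type_vector(count, block, stride, elmt, &vec) != MPI_SUCCESS)
            return -1;
        MPI_Type_free(&elmt);

        /* Give the type the full extent of the row so it nests correctly as
         * the inner type of the next (slower-varying) dimension. */
        if(MPI_Type_create_resized(vec, 0, (MPI_Aint)xtent * elmt_size,
                                   new_type) != MPI_SUCCESS)
            return -1;
        MPI_Type_free(&vec);

        return MPI_Type_commit(new_type) == MPI_SUCCESS ? 0 : -1;
    }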
/*-------------------------------------------------------------------------
@@ -494,68 +418,57 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: kyang
*
+ *-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_span_hyper_type( const H5S_t *space,
- size_t elmt_size,
- MPI_Datatype *new_type,/* out: */
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- MPI_Datatype span_type;
- H5S_hyper_span_t *ospan;
- H5S_hyper_span_info_t *odown;
- hsize_t *size;
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ MPI_Datatype elmt_type; /* MPI datatype for an element */
+ hbool_t elmt_type_is_derived = FALSE; /* Whether the element type has been created */
+ MPI_Datatype span_type; /* MPI datatype for overall span tree */
+ hsize_t down[H5S_MAX_RANK]; /* 'down' sizes for each dimension */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_span_hyper_type)
/* Check args */
HDassert(space);
-
- if(0 == elmt_size)
- goto empty;
- size = space->extent.size;
- if(0 == size)
- goto empty;
-
- odown = space->select.sel_info.hslab->span_lst;
- if(NULL == odown)
- goto empty;
- ospan = odown->head;
- if(NULL == ospan)
- goto empty;
-
- /* obtain derived data type */
- if(FAIL == H5S_obtain_datatype(space->extent.size, ospan, &span_type, elmt_size, space->extent.rank))
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type")
-
+ HDassert(space->extent.size);
+ HDassert(space->select.sel_info.hslab->span_lst);
+ HDassert(space->select.sel_info.hslab->span_lst->head);
+
+ /* Create the base type for an element */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ elmt_type_is_derived = TRUE;
+
+ /* Compute 'down' sizes for each dimension */
+ if(H5V_array_down(space->extent.rank, space->extent.size, down) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGETSIZE, FAIL, "couldn't compute 'down' dimension sizes")
+
+ /* Obtain derived data type */
+ if(H5S_obtain_datatype(down, space->select.sel_info.hslab->span_lst->head, &elmt_type, &span_type, elmt_size) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
*new_type = span_type;
+
/* fill in the remaining return values */
*count = 1;
- *extra_offset = 0;
*is_derived_type = TRUE;
- HGOTO_DONE(SUCCEED)
-
-empty:
- /* special case: empty hyperslab */
- *new_type = MPI_BYTE;
- *count = 0;
- *extra_offset = 0;
- *is_derived_type = FALSE;
-
done:
+ /* Release resources */
+ if(elmt_type_is_derived)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&elmt_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_mpio_span_hyper_type() */
@@ -564,7 +477,7 @@ done:
 * Function: H5S_obtain_datatype
*
* Purpose: Obtain an MPI derived datatype based on span-tree
- implementation
+ * implementation
*
* Return: non-negative on success, negative on failure.
*
@@ -572,165 +485,169 @@ done:
*
* Programmer: kyang
*
+ *-------------------------------------------------------------------------
*/
static herr_t
-H5S_obtain_datatype(const hsize_t size[],
- H5S_hyper_span_t* span,
- MPI_Datatype *span_type,
- size_t elmt_size,
- int dimindex)
+H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size)
{
- int innercount, outercount;
- MPI_Datatype bas_type;
- MPI_Datatype temp_type;
- MPI_Datatype tempinner_type;
+ size_t alloc_count; /* Number of span tree nodes allocated at this level */
+ size_t outercount; /* Number of span tree nodes at this level */
MPI_Datatype *inner_type = NULL;
+ hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
+ hbool_t span_type_valid = FALSE; /* Whether the span_type MPI datatype is valid */
int *blocklen = NULL;
MPI_Aint *disp = NULL;
- MPI_Aint stride;
- H5S_hyper_span_info_t *down;
- H5S_hyper_span_t *tspan;
-#ifdef H5_HAVE_MPI2
- MPI_Aint sizeaint, sizedtype;
-#endif /* H5_HAVE_MPI2 */
- hsize_t total_lowd, total_lowd1;
- int i;
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ H5S_hyper_span_t *tspan; /* Temporary pointer to span tree node */
+ int mpi_code; /* MPI return status code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_obtain_datatype)
+ /* Sanity check */
HDassert(span);
- inner_type = NULL;
- down = NULL;
- tspan = NULL;
- down = span->down;
- tspan = span;
-
- /* Obtain the number of span tree nodes for this dimension */
- outercount = 0;
- while(tspan) {
- tspan = tspan->next;
- outercount++;
- } /* end while */
- if(outercount == 0)
- HGOTO_DONE(SUCCEED)
-
-/* MPI2 hasn't been widely acccepted, adding H5_HAVE_MPI2 for the future use */
-#ifdef H5_HAVE_MPI2
- MPI_Type_extent(MPI_Aint, &sizeaint);
- MPI_Type_extent(MPI_Datatype, &sizedtype);
-
- blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
- disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeaint);
- inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizedtype);
-#else
- blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
- disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeof(MPI_Aint));
- inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizeof(MPI_Datatype));
-#endif
-
- tspan = span;
- outercount = 0;
+ /* Allocate the initial displacement & block length buffers */
+ alloc_count = H5S_MPIO_INITIAL_ALLOC_COUNT;
+ if(NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if(NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
/* if this is the fastest changing dimension, it is the base case for derived datatype. */
- if(down == NULL) {
-
- HDassert(dimindex <= 1);
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &bas_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&bas_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+ if(NULL == span->down) {
+ tspan = span;
+ outercount = 0;
while(tspan) {
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ } /* end if */
+
+ /* Store displacement & block length */
disp[outercount] = (MPI_Aint)elmt_size * tspan->low;
blocklen[outercount] = tspan->nelem;
+
tspan = tspan->next;
outercount++;
} /* end while */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed(outercount, blocklen, disp, bas_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code)
+ span_type_valid = TRUE;
} /* end if */
- else { /* dimindex is the rank of the dimension */
-
- HDassert(dimindex > 1);
-
- /* Calculate the total bytes of the lower dimensions */
- total_lowd = 1; /* one dimension down */
- total_lowd1 = 1; /* two dimensions down */
-
- for(i = dimindex - 1; i > 0; i--)
- total_lowd = total_lowd * size[i];
+ else {
+ size_t u; /* Local index variable */
- for(i = dimindex - 1; i > 1; i--)
- total_lowd1 = total_lowd1 * size[i];
+ if(NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ tspan = span;
+ outercount = 0;
while(tspan) {
+ MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
+ MPI_Aint stride; /* Distance between inner MPI datatypes */
+
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+ MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ if(NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(inner_type, alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ } /* end if */
/* Displacement should be in byte and should have dimension information */
/* First using MPI Type vector to build derived data type for this span only */
/* Need to calculate the disp in byte for this dimension. */
/* Calculate the total bytes of the lower dimension */
-
- disp[outercount] = tspan->low * total_lowd * elmt_size;
+ disp[outercount] = tspan->low * (*down) * elmt_size;
blocklen[outercount] = 1;
- /* generating inner derived datatype by using MPI_Type_hvector */
- if(FAIL == H5S_obtain_datatype(size, tspan->down->head, &temp_type, elmt_size, dimindex - 1))
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type")
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
- /* building the inner vector datatype */
- stride = total_lowd * elmt_size;
- innercount = tspan->nelem;
+ /* Generate MPI datatype for next dimension down */
+ if(H5S_obtain_datatype(down + 1, tspan->down->head, elmt_type, &down_type, elmt_size) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
- if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector(innercount, 1, stride, temp_type, &tempinner_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code);
+ /* Build the MPI datatype for this node */
+ stride = (*down) * elmt_size;
+ H5_CHECK_OVERFLOW(tspan->nelem, hsize_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector((int)tspan->nelem, 1, stride, down_type, &inner_type[outercount]))) {
+ MPI_Type_free(&down_type);
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code)
+ } /* end if */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&tempinner_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
+ /* Release MPI datatype for next dimension down */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&down_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
-
- inner_type[outercount] = tempinner_type;
- outercount ++;
tspan = tspan->next;
+ outercount++;
} /* end while */
/* building the whole vector datatype */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct(outercount, blocklen, disp, inner_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code);
+ H5_CHECK_OVERFLOW(outercount, size_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)outercount, blocklen, disp, inner_type, span_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
+ span_type_valid = TRUE;
+
+ /* Release inner node types */
+ for(u = 0; u < outercount; u++)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ inner_types_freed = TRUE;
} /* end else */
- if(inner_type != NULL && down != NULL) {
- } /* end if */
-
done:
+ /* General cleanup */
if(inner_type != NULL) {
- if(down != NULL) {
- for(i = 0; i < outercount; i++)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[i])))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
+ if(!inner_types_freed) {
+ size_t u; /* Local index variable */
+
+ for(u = 0; u < outercount; u++)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
} /* end if */
- HDfree(inner_type);
+ H5MM_free(inner_type);
} /* end if */
if(blocklen != NULL)
- HDfree(blocklen);
+ H5MM_free(blocklen);
if(disp != NULL)
- HDfree(disp);
+ H5MM_free(disp);
+
+ /* Error cleanup */
+ if(ret_value < 0) {
+ if(span_type_valid)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(span_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ } /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_obtain_datatype() */
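At the bottom of the span tree, the routine above reduces to gathering one (displacement, block length) pair per span and building an indexed type from them. A sketch of that base case over a hypothetical, simplified span list, written against MPI_Type_create_hindexed (the modern spelling of the MPI_Type_hindexed call used above) and assuming a non-empty list:

    #include <stdlib.h>
    #include <mpi.h>

    struct span {                     /* hypothetical, simplified span node */
        unsigned long low;            /* first element selected */
        unsigned long nelem;          /* number of elements in the span */
        struct span *next;            /* next span in this dimension */
    };

    /* Describe a list of 1-D spans of 'elmt_size'-byte elements as one
     * MPI indexed datatype. */
    static int
    spans_to_type(const struct span *head, size_t elmt_size,
                  MPI_Datatype elmt_type, MPI_Datatype *span_type)
    {
        const struct span *s;
        MPI_Aint *disp;
        int *blocklen;
        int n = 0, ret;

        for(s = head; s; s = s->next)
            n++;                                     /* count the spans */

        disp = malloc((size_t)n * sizeof(MPI_Aint));
        blocklen = malloc((size_t)n * sizeof(int));
        if(!disp || !blocklen) {
            free(disp); free(blocklen);
            return -1;
        }

        n = 0;
        for(s = head; s; s = s->next) {              /* one entry per span */
            disp[n] = (MPI_Aint)(elmt_size * s->low);
            blocklen[n] = (int)s->nelem;
            n++;
        }

        ret = MPI_Type_create_hindexed(n, blocklen, disp, elmt_type, span_type);
        free(disp);
        free(blocklen);
        return ret == MPI_SUCCESS ? 0 : -1;
    }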
-
/*-------------------------------------------------------------------------
* Function: H5S_mpio_space_type
@@ -743,49 +660,38 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications:
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter
- *
*-------------------------------------------------------------------------
*/
herr_t
-H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_space_type);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_space_type)
/* Check args */
HDassert(space);
+ HDassert(elmt_size);
    /* Create MPI type based on the kind of selection */
- switch (H5S_GET_EXTENT_TYPE(space)) {
+ switch(H5S_GET_EXTENT_TYPE(space)) {
case H5S_NULL:
case H5S_SCALAR:
case H5S_SIMPLE:
switch(H5S_GET_SELECT_TYPE(space)) {
case H5S_SEL_NONE:
- if ( H5S_mpio_none_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type ) <0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
+ if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type")
break;
case H5S_SEL_ALL:
- if ( H5S_mpio_all_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type ) <0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
+ if(H5S_mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type")
break;
case H5S_SEL_POINTS:
@@ -794,16 +700,14 @@ H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
break;
case H5S_SEL_HYPERSLABS:
- if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
- if(H5S_mpio_hyper_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type )<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
- }
- else {
- if(H5S_mpio_span_hyper_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type )<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
- }
+ if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
+ if(H5S_mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
+ } /* end if */
+ else {
+ if(H5S_mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type")
+ } /* end else */
break;
default:
@@ -815,11 +719,10 @@ H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
default:
HDassert("unknown data space type" && 0);
break;
- }
+ } /* end switch */
done:
FUNC_LEAVE_NOAPI(ret_value);
-}
-
+} /* end H5S_mpio_space_type() */
#endif /* H5_HAVE_PARALLEL */
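A rough sketch of the caller-side pattern implied by the new signature (the helper and variable names here are illustrative, and the fragment assumes the library's internal headers): build the MPI type for the selection, use it for the transfer, and free it only when it is a derived type.

    #include <mpi.h>

    /* Hypothetical caller-side helper -- not part of the patch. */
    static int
    use_space_type(const H5S_t *file_space, size_t elmt_size)
    {
        MPI_Datatype ftype;
        int count;
        hbool_t is_derived;

        if(H5S_mpio_space_type(file_space, elmt_size, &ftype, &count, &is_derived) < 0)
            return -1;

        /* ... hand 'ftype' and 'count' to the MPI-IO read/write call ... */

        if(is_derived)
            MPI_Type_free(&ftype);

        return 0;
    }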
diff --git a/src/H5Snone.c b/src/H5Snone.c
index c6e8a6a..1948f13 100644
--- a/src/H5Snone.c
+++ b/src/H5Snone.c
@@ -48,6 +48,8 @@ static htri_t H5S_none_is_contiguous(const H5S_t *space);
static htri_t H5S_none_is_single(const H5S_t *space);
static htri_t H5S_none_is_regular(const H5S_t *space);
static herr_t H5S_none_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_none_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_none_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_none_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -77,6 +79,8 @@ const H5S_select_class_t H5S_sel_none[1] = {{
H5S_none_is_single,
H5S_none_is_regular,
H5S_none_adjust_u,
+ H5S_none_project_scalar,
+ H5S_none_project_simple,
H5S_none_iter_init,
}};
@@ -110,18 +114,18 @@ static const H5S_sel_iter_class_t H5S_sel_iter_none[1] = {{
*-------------------------------------------------------------------------
*/
herr_t
-H5S_none_iter_init (H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
+H5S_none_iter_init(H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_none_iter_init);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_none_iter_init)
/* Check args */
- assert (space && H5S_SEL_NONE==H5S_GET_SELECT_TYPE(space));
- assert (iter);
+ HDassert(space && H5S_SEL_NONE==H5S_GET_SELECT_TYPE(space));
+ HDassert(iter);
/* Initialize type of selection iterator */
- iter->type=H5S_sel_iter_none;
+ iter->type = H5S_sel_iter_none;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_init() */
@@ -141,15 +145,15 @@ H5S_none_iter_init (H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_none_iter_coords (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
+H5S_none_iter_coords(const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_coords);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_coords)
/* Check args */
- assert (iter);
- assert (coords);
+ HDassert(iter);
+ HDassert(coords);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_coords() */
@@ -169,16 +173,16 @@ H5S_none_iter_coords (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_none_iter_block (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, hsize_t UNUSED *end)
+H5S_none_iter_block(const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, hsize_t UNUSED *end)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_block)
/* Check args */
- assert (iter);
- assert (start);
- assert (end);
+ HDassert(iter);
+ HDassert(start);
+ HDassert(end);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_block() */
@@ -197,14 +201,14 @@ H5S_none_iter_block (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, h
*-------------------------------------------------------------------------
*/
static hsize_t
-H5S_none_iter_nelmts (const H5S_sel_iter_t UNUSED *iter)
+H5S_none_iter_nelmts(const H5S_sel_iter_t UNUSED *iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_nelmts);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_nelmts)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(0);
+ FUNC_LEAVE_NOAPI(0)
} /* H5S_none_iter_nelmts() */
@@ -228,12 +232,12 @@ H5S_none_iter_nelmts (const H5S_sel_iter_t UNUSED *iter)
static htri_t
H5S_none_iter_has_next_block(const H5S_sel_iter_t UNUSED *iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_has_next_block)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_has_next_block() */
@@ -258,13 +262,13 @@ H5S_none_iter_has_next_block(const H5S_sel_iter_t UNUSED *iter)
static herr_t
H5S_none_iter_next(H5S_sel_iter_t UNUSED *iter, size_t UNUSED nelem)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_next);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_next)
/* Check args */
- assert (iter);
- assert (nelem>0);
+ HDassert(iter);
+ HDassert(nelem>0);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_next() */
@@ -315,14 +319,14 @@ H5S_none_iter_next_block(H5S_sel_iter_t UNUSED *iter)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_iter_release (H5S_sel_iter_t UNUSED * iter)
+H5S_none_iter_release(H5S_sel_iter_t UNUSED * iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_release)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_release() */
@@ -344,14 +348,14 @@ H5S_none_iter_release (H5S_sel_iter_t UNUSED * iter)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_release (H5S_t UNUSED * space)
+H5S_none_release(H5S_t UNUSED * space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_release)
/* Check args */
- assert (space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_release() */
@@ -377,15 +381,15 @@ H5S_none_release (H5S_t UNUSED * space)
static herr_t
H5S_none_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selection)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_copy);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_copy)
- assert(src);
- assert(dst);
+ HDassert(src);
+ HDassert(dst);
/* Set number of elements in selection */
- dst->select.num_elem=0;
+ dst->select.num_elem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_none_copy() */
@@ -410,13 +414,13 @@ H5S_none_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selectio
REVISION LOG
--------------------------------------------------------------------------*/
static htri_t
-H5S_none_is_valid (const H5S_t UNUSED *space)
+H5S_none_is_valid(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_valid);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_valid)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(TRUE);
+ FUNC_LEAVE_NOAPI(TRUE)
} /* end H5S_none_is_valid() */
@@ -440,17 +444,17 @@ H5S_none_is_valid (const H5S_t UNUSED *space)
REVISION LOG
--------------------------------------------------------------------------*/
static hssize_t
-H5S_none_serial_size (const H5S_t UNUSED *space)
+H5S_none_serial_size(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serial_size);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serial_size)
- assert(space);
+ HDassert(space);
     /* Basic number of bytes required to serialize a 'none' selection:
* <type (4 bytes)> + <version (4 bytes)> + <padding (4 bytes)> +
* <length (4 bytes)> = 16 bytes
*/
- FUNC_LEAVE_NOAPI(16);
+ FUNC_LEAVE_NOAPI(16)
} /* end H5S_none_serial_size() */
@@ -474,11 +478,11 @@ H5S_none_serial_size (const H5S_t UNUSED *space)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_serialize (const H5S_t *space, uint8_t *buf)
+H5S_none_serialize(const H5S_t *space, uint8_t *buf)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serialize);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serialize)
- assert(space);
+ HDassert(space);
/* Store the preamble information */
UINT32ENCODE(buf, (uint32_t)H5S_GET_SELECT_TYPE(space)); /* Store the type of selection */
@@ -486,7 +490,7 @@ H5S_none_serialize (const H5S_t *space, uint8_t *buf)
UINT32ENCODE(buf, (uint32_t)0); /* Store the un-used padding */
UINT32ENCODE(buf, (uint32_t)0); /* Store the additional information length */
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_serialize() */
@@ -510,20 +514,20 @@ H5S_none_serialize (const H5S_t *space, uint8_t *buf)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_deserialize (H5S_t *space, const uint8_t UNUSED *buf)
+H5S_none_deserialize(H5S_t *space, const uint8_t UNUSED *buf)
{
- herr_t ret_value; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_none_deserialize);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_none_deserialize)
- assert(space);
+ HDassert(space);
/* Change to "none" selection */
- if((ret_value=H5S_select_none(space))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection");
+ if(H5S_select_none(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_none_deserialize() */
@@ -555,13 +559,13 @@ done:
static herr_t
H5S_none_bounds(const H5S_t UNUSED *space, hsize_t UNUSED *start, hsize_t UNUSED *end)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_bounds);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_bounds)
- assert(space);
- assert(start);
- assert(end);
+ HDassert(space);
+ HDassert(start);
+ HDassert(end);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5Sget_none_bounds() */
@@ -618,11 +622,11 @@ H5S_none_offset(const H5S_t UNUSED *space, hsize_t UNUSED *offset)
static htri_t
H5S_none_is_contiguous(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_contiguous);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_contiguous)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(FALSE);
+ FUNC_LEAVE_NOAPI(FALSE)
} /* H5S_none_is_contiguous() */
@@ -647,11 +651,11 @@ H5S_none_is_contiguous(const H5S_t UNUSED *space)
static htri_t
H5S_none_is_single(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_single);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_single)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(FALSE);
+ FUNC_LEAVE_NOAPI(FALSE)
} /* H5S_none_is_single() */
@@ -677,12 +681,12 @@ H5S_none_is_single(const H5S_t UNUSED *space)
static htri_t
H5S_none_is_regular(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_regular);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_regular)
/* Check args */
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(TRUE);
+ FUNC_LEAVE_NOAPI(TRUE)
} /* H5S_none_is_regular() */
@@ -717,6 +721,65 @@ H5S_none_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
} /* H5S_none_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_none_project_scalar
+ *
+ * Purpose: Projects a 'none' selection into a scalar dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_none_project_scalar(const H5S_t UNUSED *space, hsize_t UNUSED *offset)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_NONE == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* H5S_none_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_none_project_simple
+ *
+ * Purpose: Projects a 'none' selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_none_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_none_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_NONE == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* Set the selection in the new dataspace to 'none' */
+ if(H5S_select_none(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to set none selection")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_none_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5S_select_none
@@ -734,27 +797,28 @@ H5S_none_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t H5S_select_none (H5S_t *space)
+herr_t
+H5S_select_none(H5S_t *space)
{
- herr_t ret_value=SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_none, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_none, FAIL)
/* Check args */
- assert(space);
+ HDassert(space);
/* Remove current selection first */
- if(H5S_SELECT_RELEASE(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab");
+ if(H5S_SELECT_RELEASE(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab")
/* Set number of elements in selection */
- space->select.num_elem=0;
+ space->select.num_elem = 0;
/* Set selection type */
- space->select.type=H5S_sel_none;
+ space->select.type = H5S_sel_none;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_none() */
@@ -833,24 +897,24 @@ H5S_none_get_seq_list(const H5S_t UNUSED *space, unsigned UNUSED flags, H5S_sel_
size_t UNUSED maxseq, size_t UNUSED maxelem, size_t *nseq, size_t *nelem,
hsize_t UNUSED *off, size_t UNUSED *len)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_get_seq_list);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_get_seq_list)
/* Check args */
- assert(space);
- assert(iter);
- assert(maxseq>0);
- assert(maxelem>0);
- assert(nseq);
- assert(nelem);
- assert(off);
- assert(len);
+ HDassert(space);
+ HDassert(iter);
+ HDassert(maxseq > 0);
+ HDassert(maxelem > 0);
+ HDassert(nseq);
+ HDassert(nelem);
+ HDassert(off);
+ HDassert(len);
/* "none" selections don't generate sequences of bytes */
- *nseq=0;
+ *nseq = 0;
/* They don't use any elements, either */
- *nelem=0;
+ *nelem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_none_get_seq_list() */
diff --git a/src/H5Spkg.h b/src/H5Spkg.h
index b7818a2..0a9df69 100644
--- a/src/H5Spkg.h
+++ b/src/H5Spkg.h
@@ -145,6 +145,10 @@ typedef htri_t (*H5S_sel_is_single_func_t)(const H5S_t *space);
typedef htri_t (*H5S_sel_is_regular_func_t)(const H5S_t *space);
/* Method to adjust a selection by an offset */
typedef herr_t (*H5S_sel_adjust_u_func_t)(H5S_t *space, const hsize_t *offset);
+/* Method to construct single element projection onto scalar dataspace */
+typedef herr_t (*H5S_sel_project_scalar)(const H5S_t *space, hsize_t *offset);
+/* Method to construct selection projection onto/into simple dataspace */
+typedef herr_t (*H5S_sel_project_simple)(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
/* Method to initialize iterator for current selection */
typedef herr_t (*H5S_sel_iter_init_func_t)(H5S_sel_iter_t *sel_iter, const H5S_t *space);
@@ -166,6 +170,8 @@ typedef struct {
H5S_sel_is_single_func_t is_single; /* Method to determine if current selection is a single block */
H5S_sel_is_regular_func_t is_regular; /* Method to determine if current selection is "regular" */
H5S_sel_adjust_u_func_t adjust_u; /* Method to adjust a selection by an offset */
+ H5S_sel_project_scalar project_scalar; /* Method to construct scalar dataspace projection */
+ H5S_sel_project_simple project_simple; /* Method to construct simple dataspace projection */
H5S_sel_iter_init_func_t iter_init; /* Method to initialize iterator for current selection */
} H5S_select_class_t;
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 24dfe2a..cb7e98f 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -49,6 +49,8 @@ static htri_t H5S_point_is_contiguous(const H5S_t *space);
static htri_t H5S_point_is_single(const H5S_t *space);
static htri_t H5S_point_is_regular(const H5S_t *space);
static herr_t H5S_point_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_point_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_point_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_point_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -78,6 +80,8 @@ const H5S_select_class_t H5S_sel_point[1] = {{
H5S_point_is_single,
H5S_point_is_regular,
H5S_point_adjust_u,
+ H5S_point_project_scalar,
+ H5S_point_project_simple,
H5S_point_iter_init,
}};
@@ -610,18 +614,18 @@ H5S_point_copy(H5S_t *dst, const H5S_t *src, hbool_t UNUSED share_selection)
/* Allocate room for the head of the point list */
if(NULL == (dst->select.sel_info.pnt_lst = H5FL_MALLOC(H5S_pnt_list_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate point node")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point list node")
curr = src->select.sel_info.pnt_lst->head;
new_tail = NULL;
while(curr) {
/* Create new point */
if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate point node")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
new_node->next = NULL;
- if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(src->extent.rank*sizeof(hsize_t)))) {
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(src->extent.rank * sizeof(hsize_t)))) {
new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate coordinate information")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
} /* end if */
/* Copy over the point's coordinates */
@@ -976,7 +980,7 @@ H5S_get_select_elem_pointlist(H5S_t *space, hsize_t startpoint, hsize_t numpoint
node = node->next;
} /* end while */
- /* Iterate through the node, copying each hyperslab's information */
+ /* Iterate through the node, copying each point's information */
while(node != NULL && numpoints > 0) {
HDmemcpy(buf, node->pnt, sizeof(hsize_t) * rank);
buf += rank;
@@ -1346,6 +1350,173 @@ H5S_point_adjust_u(H5S_t *space, const hsize_t *offset)
} /* H5S_point_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_point_project_scalar
+ *
+ * Purpose: Projects a single element point selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_point_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ const H5S_pnt_node_t *node; /* Point node */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_point_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Get the head of the point list */
+ node = space->select.sel_info.pnt_lst->head;
+
+ /* Check for more than one point selected */
+ if(node->next)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "point selection of one element has more than one node!")
+
+ /* Calculate offset of selection in projected buffer */
+ *offset = H5V_array_offset(space->extent.rank, space->extent.size, node->pnt);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_point_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_point_project_simple
+ *
+ * Purpose: Projects a point selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ const H5S_pnt_node_t *base_node; /* Point node in base space */
+ H5S_pnt_node_t *new_node; /* Point node in new space */
+ H5S_pnt_node_t *prev_node; /* Previous point node in new space */
+ unsigned rank_diff; /* Difference in ranks between spaces */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_point_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* We are setting a new selection, remove any current selection in new dataspace */
+ if(H5S_SELECT_RELEASE(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
+
+ /* Allocate room for the head of the point list */
+ if(NULL == (new_space->select.sel_info.pnt_lst = H5FL_MALLOC(H5S_pnt_list_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point list node")
+
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+
+ /* Compute the difference in ranks */
+ rank_diff = base_space->extent.rank - new_space->extent.rank;
+
+ /* Calculate offset of selection in projected buffer */
+ HDmemset(block, 0, sizeof(block));
+ HDmemcpy(block, base_space->select.sel_info.pnt_lst->head->pnt, sizeof(hsize_t) * rank_diff);
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+ /* Iterate through base space's point nodes, copying the point information */
+ base_node = base_space->select.sel_info.pnt_lst->head;
+ prev_node = NULL;
+ while(base_node) {
+ /* Create new point */
+ if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
+ new_node->next = NULL;
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(new_space->extent.rank * sizeof(hsize_t)))) {
+ new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
+ } /* end if */
+
+ /* Copy over the point's coordinates */
+ HDmemcpy(new_node->pnt, &base_node->pnt[rank_diff], (new_space->extent.rank * sizeof(hsize_t)));
+
+ /* Keep the order the same when copying */
+ if(NULL == prev_node)
+ prev_node = new_space->select.sel_info.pnt_lst->head = new_node;
+ else {
+ prev_node->next = new_node;
+ prev_node = new_node;
+ } /* end else */
+
+ /* Advance to next node */
+ base_node = base_node->next;
+ } /* end while */
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* Compute the difference in ranks */
+ rank_diff = new_space->extent.rank - base_space->extent.rank;
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+ /* Iterate through base space's point nodes, copying the point information */
+ base_node = base_space->select.sel_info.pnt_lst->head;
+ prev_node = NULL;
+ while(base_node) {
+ /* Create new point */
+ if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
+ new_node->next = NULL;
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(new_space->extent.rank * sizeof(hsize_t)))) {
+ new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
+ } /* end if */
+
+ /* Copy over the point's coordinates */
+ HDmemset(new_node->pnt, 0, sizeof(hsize_t) * rank_diff);
+ HDmemcpy(&new_node->pnt[rank_diff], base_node->pnt, (base_space->extent.rank * sizeof(hsize_t)));
+
+ /* Keep the order the same when copying */
+ if(NULL == prev_node)
+ prev_node = new_space->select.sel_info.pnt_lst->head = new_node;
+ else {
+ prev_node->next = new_node;
+ prev_node = new_node;
+ } /* end else */
+
+ /* Advance to next node */
+ base_node = base_node->next;
+ } /* end while */
+ } /* end else */
+
+ /* Number of elements selected will be the same */
+ new_space->select.num_elem = base_space->select.num_elem;
+
+ /* Set selection type */
+ new_space->select.type = H5S_sel_point;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_point_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5Sselect_elements
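
As a rough aside, outside the patch itself: when H5S_point_project_simple() projects a point selection up into a higher rank, each point's coordinates are remapped by zero-filling the prepended slower-changing dimensions and copying the original coordinates into the fastest-changing ones. A minimal standalone sketch of that remapping (plain C, with unsigned long long standing in for hsize_t; the coordinate values are made up for illustration):

    /* Remap one point from a rank-2 base space into a rank-3 projected space
     * (rank_diff = 1), mirroring the memset/memcpy pair in
     * H5S_point_project_simple() above. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long long base_pnt[2] = {2, 3};   /* coordinates in the rank-2 base space */
        unsigned long long new_pnt[3];             /* coordinates in the rank-3 projected space */
        unsigned rank_diff = 3 - 2;                /* new_space_rank - base_space_rank */

        /* Zero-fill the prepended slower-changing dimensions ... */
        memset(new_pnt, 0, sizeof(new_pnt[0]) * rank_diff);
        /* ... and copy the base coordinates into the fastest-changing dimensions */
        memcpy(&new_pnt[rank_diff], base_pnt, sizeof(base_pnt));

        /* Prints: (2, 3) -> (0, 2, 3) */
        printf("(%llu, %llu) -> (%llu, %llu, %llu)\n",
               base_pnt[0], base_pnt[1], new_pnt[0], new_pnt[1], new_pnt[2]);
        return 0;
    }

Projecting down reverses this: each point keeps only its fastest-changing coordinates, and the dropped leading coordinates (taken from the first point) determine the element offset handed back to the caller.
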
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index 2858ddb..d7faa89 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -133,6 +133,8 @@ typedef struct H5S_sel_iter_t {
#define H5S_SELECT_IS_SINGLE(S) ((*(S)->select.type->is_single)(S))
#define H5S_SELECT_IS_REGULAR(S) ((*(S)->select.type->is_regular)(S))
#define H5S_SELECT_ADJUST_U(S,O) ((*(S)->select.type->adjust_u)(S, O))
+#define H5S_SELECT_PROJECT_SCALAR(S,O) ((*(S)->select.type->project_scalar)(S, O))
+#define H5S_SELECT_PROJECT_SIMPLE(S,NS,O) ((*(S)->select.type->project_simple)(S, NS, O))
#define H5S_SELECT_ITER_COORDS(ITER,COORDS) ((*(ITER)->type->iter_coords)(ITER,COORDS))
#define H5S_SELECT_ITER_BLOCK(ITER,START,END) ((*(ITER)->type->iter_block)(ITER,START,END))
#define H5S_SELECT_ITER_NELMTS(ITER) ((*(ITER)->type->iter_nelmts)(ITER))
@@ -157,6 +159,8 @@ typedef struct H5S_sel_iter_t {
#define H5S_SELECT_IS_SINGLE(S) (H5S_select_is_single(S))
#define H5S_SELECT_IS_REGULAR(S) (H5S_select_is_regular(S))
#define H5S_SELECT_ADJUST_U(S,O) (H5S_select_adjust_u(S, O))
+#define H5S_SELECT_PROJECT_SCALAR(S,O) (H5S_select_project_scalar(S, O))
+#define H5S_SELECT_PROJECT_SIMPLE(S,NS,O) (H5S_select_project_simple(S, NS, O))
#define H5S_SELECT_ITER_COORDS(ITER,COORDS) (H5S_select_iter_coords(ITER,COORDS))
#define H5S_SELECT_ITER_BLOCK(ITER,START,END) (H5S_select_iter_block(ITER,START,END))
#define H5S_SELECT_ITER_NELMTS(ITER) (H5S_select_iter_nelmts(ITER))
@@ -215,6 +219,9 @@ H5_DLL herr_t H5S_get_select_offset(const H5S_t *space, hsize_t *offset);
H5_DLL herr_t H5S_select_offset(H5S_t *space, const hssize_t *offset);
H5_DLL herr_t H5S_select_copy(H5S_t *dst, const H5S_t *src, hbool_t share_selection);
H5_DLL htri_t H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2);
+H5_DLL herr_t H5S_select_construct_projection(const H5S_t *base_space,
+ H5S_t **new_space_ptr, unsigned new_space_rank, const void *buf,
+ void const **adj_buf_ptr, hsize_t element_size);
H5_DLL herr_t H5S_select_release(H5S_t *ds);
H5_DLL herr_t H5S_select_get_seq_list(const H5S_t *space, unsigned flags,
H5S_sel_iter_t *iter, size_t maxseq, size_t maxbytes,
@@ -225,6 +232,8 @@ H5_DLL htri_t H5S_select_is_contiguous(const H5S_t *space);
H5_DLL htri_t H5S_select_is_single(const H5S_t *space);
H5_DLL htri_t H5S_select_is_regular(const H5S_t *space);
H5_DLL herr_t H5S_select_adjust_u(H5S_t *space, const hsize_t *offset);
+H5_DLL herr_t H5S_select_project_scalar(const H5S_t *space, hsize_t *offset);
+H5_DLL herr_t H5S_select_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
/* Operations on all selections */
H5_DLL herr_t H5S_select_all(H5S_t *space, hbool_t rel_prev);
@@ -268,18 +277,8 @@ H5_DLL herr_t
H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
/* out: */
MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
+ int *count,
hbool_t *is_derived_type );
-
-H5_DLL herr_t
-H5S_mpio_space_span_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5Sprivate_H */
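
As an aside, not part of the patch: the two project callbacks join the selection class vtable and follow the same "inlining C function pointers" pattern referenced in the routine headers, with the first pair of macros above dispatching straight through the per-class function pointer and the second pair routing through the H5S_select_project_* wrapper functions. A standalone sketch of the pattern, using purely hypothetical names rather than HDF5 identifiers:

    /* Minimal vtable-plus-macro dispatch, shaped like H5S_select_class_t and
     * H5S_SELECT_PROJECT_SCALAR but with illustrative names only. */
    #include <stdio.h>

    typedef struct sel_class_t {
        int (*project_scalar)(const struct sel_class_t *cls, unsigned long long *offset);
    } sel_class_t;

    /* Function-pointer form of the dispatch macro */
    #define SEL_PROJECT_SCALAR(C,O) ((*(C)->project_scalar)(C, O))

    static int demo_project_scalar(const sel_class_t *cls, unsigned long long *offset)
    {
        (void)cls;          /* a real callback would inspect the selection here */
        *offset = 42;       /* pretend this is the selected element's offset */
        return 0;
    }

    int main(void)
    {
        sel_class_t demo_class = { demo_project_scalar };
        unsigned long long offset = 0;

        if(SEL_PROJECT_SCALAR(&demo_class, &offset) < 0)
            return 1;
        printf("offset = %llu\n", offset);
        return 0;
    }
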
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index a419131..17b7fab 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -27,6 +27,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Spkg.h" /* Dataspaces */
#include "H5Vprivate.h" /* Vector and array functions */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -108,7 +109,7 @@ H5S_select_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_copy, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_copy, FAIL)
/* Check args */
assert(dst);
@@ -119,10 +120,10 @@ H5S_select_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
/* Perform correct type of copy based on the type of selection */
if((ret_value=(*src->select.type->copy)(dst,src,share_selection))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "can't copy selection specific information");
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "can't copy selection specific information")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_copy() */
@@ -149,14 +150,14 @@ H5S_select_release(H5S_t *ds)
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_release);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_release)
assert(ds);
/* Call the selection type's release function */
ret_value=(*ds->select.type->release)(ds);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_release() */
@@ -186,14 +187,14 @@ H5S_select_get_seq_list(const H5S_t *space, unsigned flags,
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_get_seq_list);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_get_seq_list)
assert(space);
/* Call the selection type's get_seq_list function */
ret_value=(*space->select.type->get_seq_list)(space,flags,iter,maxseq,maxbytes,nseq,nbytes,off,len);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_get_seq_list() */
@@ -221,14 +222,14 @@ H5S_select_serial_size(const H5S_t *space)
{
hssize_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serial_size);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serial_size)
assert(space);
/* Call the selection type's serial_size function */
ret_value=(*space->select.type->serial_size)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_serial_size() */
@@ -259,7 +260,7 @@ H5S_select_serialize(const H5S_t *space, uint8_t *buf)
{
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serialize);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serialize)
assert(space);
assert(buf);
@@ -267,7 +268,7 @@ H5S_select_serialize(const H5S_t *space, uint8_t *buf)
/* Call the selection type's serialize function */
ret_value=(*space->select.type->serialize)(space,buf);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_serialize() */
@@ -410,13 +411,13 @@ H5S_select_valid(const H5S_t *space)
{
htri_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_valid);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_valid)
assert(space);
ret_value = (*space->select.type->is_valid)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_valid() */
@@ -449,7 +450,7 @@ H5S_select_deserialize (H5S_t *space, const uint8_t *buf)
uint32_t sel_type; /* Pointer to the selection type */
herr_t ret_value=FAIL; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_deserialize, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_deserialize, FAIL)
assert(space);
@@ -476,10 +477,10 @@ H5S_select_deserialize (H5S_t *space, const uint8_t *buf)
break;
}
if(ret_value<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "can't deserialize selection");
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "can't deserialize selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_deserialize() */
@@ -567,7 +568,7 @@ H5S_get_select_bounds(const H5S_t *space, hsize_t *start, hsize_t *end)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_bounds);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_bounds)
/* Check args */
assert(space);
@@ -576,7 +577,7 @@ H5S_get_select_bounds(const H5S_t *space, hsize_t *start, hsize_t *end)
ret_value = (*space->select.type->bounds)(space,start,end);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_get_select_bounds() */
@@ -646,14 +647,14 @@ H5S_select_is_contiguous(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_contiguous);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_contiguous)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_contiguous)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_contiguous() */
@@ -683,14 +684,14 @@ H5S_select_is_single(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_single);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_single)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_single)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_single() */
@@ -720,14 +721,14 @@ H5S_select_is_regular(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_regular);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_regular)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_regular)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_regular() */
@@ -770,6 +771,86 @@ H5S_select_adjust_u(H5S_t *space, const hsize_t *offset)
/*--------------------------------------------------------------------------
NAME
+ H5S_select_project_scalar
+ PURPOSE
+ Project a single element selection for a scalar dataspace
+ USAGE
+ herr_t H5S_select_project_scalar(space, offset)
+ const H5S_t *space; IN: Pointer to dataspace to project
+ hsize_t *offset; IN/OUT: Offset of projected point
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Projects a selection of a single element into a scalar dataspace, computing
+ the offset of the element in the original selection.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ This routine participates in the "Inlining C function pointers"
+ pattern, don't call it directly, use the appropriate macro
+ defined in H5Sprivate.h.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ herr_t ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_project_scalar)
+
+ /* Check args */
+ HDassert(space);
+ HDassert(offset);
+
+ ret_value = (*space->select.type->project_scalar)(space, offset);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_project_scalar() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
+ H5S_select_project_simple
+ PURPOSE
+ Project a selection onto/into a dataspace of different rank
+ USAGE
+ herr_t H5S_select_project_simple(space, new_space, offset)
+ const H5S_t *space; IN: Pointer to dataspace to project
+ H5S_t *new_space; IN/OUT: Pointer to dataspace projected onto
+ hsize_t *offset; IN/OUT: Offset of projected point
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Projects a selection onto/into a simple dataspace, computing
+ the offset of the first element in the original selection.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ This routine participates in the "Inlining C function pointers"
+ pattern, don't call it directly, use the appropriate macro
+ defined in H5Sprivate.h.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_project_simple)
+
+ /* Check args */
+ HDassert(space);
+ HDassert(new_space);
+ HDassert(offset);
+
+ ret_value = (*space->select.type->project_simple)(space, new_space, offset);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_project_simple() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
H5S_select_iter_init
PURPOSE
Initializes iteration information for a selection.
@@ -790,7 +871,7 @@ H5S_select_iter_init(H5S_sel_iter_t *sel_iter, const H5S_t *space, size_t elmt_s
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_init);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_init)
/* Check args */
assert(sel_iter);
@@ -813,7 +894,7 @@ H5S_select_iter_init(H5S_sel_iter_t *sel_iter, const H5S_t *space, size_t elmt_s
/* Call initialization routine for selection type */
ret_value= (*space->select.type->iter_init)(sel_iter, space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_init() */
@@ -844,7 +925,7 @@ H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_coords);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_coords)
/* Check args */
assert(sel_iter);
@@ -853,7 +934,7 @@ H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
/* Call iter_coords routine for selection type */
ret_value = (*sel_iter->type->iter_coords)(sel_iter,coords);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_coords() */
#ifdef LATER
@@ -886,7 +967,7 @@ H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_block)
/* Check args */
assert(iter);
@@ -896,7 +977,7 @@ H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
/* Call iter_block routine for selection type */
ret_value = (*iter->type->iter_block)(iter,start,end);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_block() */
#endif /* LATER */
@@ -926,7 +1007,7 @@ H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
{
hsize_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_nelmts);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_nelmts)
/* Check args */
assert(sel_iter);
@@ -934,7 +1015,7 @@ H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
/* Call iter_nelmts routine for selection type */
ret_value = (*sel_iter->type->iter_nelmts)(sel_iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_nelmts() */
#ifdef LATER
@@ -965,7 +1046,7 @@ H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_has_next_block)
/* Check args */
assert(iter);
@@ -973,7 +1054,7 @@ H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter)
/* Call iter_has_next_block routine for selection type */
ret_value = (*iter->type->iter_has_next_block)(iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_has_next_block() */
#endif /* LATER */
@@ -1005,7 +1086,7 @@ H5S_select_iter_next(H5S_sel_iter_t *iter, size_t nelem)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_next);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_next)
/* Check args */
assert(iter);
@@ -1017,7 +1098,7 @@ H5S_select_iter_next(H5S_sel_iter_t *iter, size_t nelem)
/* Decrement the number of elements left in selection */
iter->elmt_left-=nelem;
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_next() */
#ifdef LATER
@@ -1050,7 +1131,7 @@ H5S_select_iter_next_block(H5S_sel_iter_t *iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_next_block)
/* Check args */
assert(iter);
@@ -1058,7 +1139,7 @@ H5S_select_iter_next_block(H5S_sel_iter_t *iter)
/* Call iter_next_block routine for selection type */
ret_value = (*iter->type->iter_next_block)(iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_next_block() */
#endif /* LATER */
@@ -1088,7 +1169,7 @@ H5S_select_iter_release(H5S_sel_iter_t *sel_iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_release);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_release)
/* Check args */
assert(sel_iter);
@@ -1096,7 +1177,7 @@ H5S_select_iter_release(H5S_sel_iter_t *sel_iter)
/* Call selection type-specific release routine */
ret_value = (*sel_iter->type->iter_release)(sel_iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_release() */
@@ -1154,7 +1235,7 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
herr_t user_ret=0; /* User's return value */
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5S_select_iterate, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_iterate, FAIL)
/* Check args */
HDassert(buf);
@@ -1302,7 +1383,7 @@ H5S_get_select_type(const H5S_t *space)
{
H5S_sel_type ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_type);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_type)
/* Check args */
assert(space);
@@ -1310,7 +1391,7 @@ H5S_get_select_type(const H5S_t *space)
/* Set return value */
ret_value=H5S_GET_SELECT_TYPE(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_get_select_type() */
@@ -1334,16 +1415,17 @@ H5S_get_select_type(const H5S_t *space)
Assumes that there is only a single "block" for hyperslab selections.
EXAMPLES
REVISION LOG
+ Modified function to view selections of identical shape in dataspaces
+ of different rank as being the same under some circumstances.
--------------------------------------------------------------------------*/
htri_t
H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
{
- H5S_sel_iter_t iter1; /* Selection #1 iteration info */
- H5S_sel_iter_t iter2; /* Selection #2 iteration info */
- hbool_t iter1_init = 0; /* Selection #1 iteration info has been initialized */
- hbool_t iter2_init = 0; /* Selection #2 iteration info has been initialized */
- unsigned u; /* Index variable */
- htri_t ret_value = TRUE; /* Return value */
+ H5S_sel_iter_t iter_a; /* Selection a iteration info */
+ H5S_sel_iter_t iter_b; /* Selection b iteration info */
+ hbool_t iter_a_init = 0; /* Selection a iteration info has been initialized */
+ hbool_t iter_b_init = 0; /* Selection b iteration info has been initialized */
+ htri_t ret_value = TRUE; /* Return value */
FUNC_ENTER_NOAPI(H5S_select_shape_same, FAIL)
@@ -1358,139 +1440,527 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
HGOTO_DONE(FALSE)
} /* end if */
else {
- /* Check for different dimensionality */
- if(space1->extent.rank != space2->extent.rank)
- HGOTO_DONE(FALSE)
+ const H5S_t *space_a; /* Dataspace with larger rank */
+ const H5S_t *space_b; /* Dataspace with smaller rank */
+ unsigned space_a_rank; /* Number of dimensions of dataspace A */
+ unsigned space_b_rank; /* Number of dimensions of dataspace B */
+
+ /* need to be able to handle spaces of different rank:
+ *
+ * To simplify logic, let space_a point to the element of the set
+ * {space1, space2} with the largest rank or space1 if the ranks
+ * are identical.
+ *
+ * Similarly, let space_b point to the element of {space1, space2}
+ * with the smallest rank, or space2 if they are identical.
+ *
+ * Let: space_a_rank be the rank of space_a,
+ * space_b_rank be the rank of space_b,
+ * delta_rank = space_a_rank - space_b_rank.
+ *
+ * Set all this up below.
+ */
+ if(space1->extent.rank >= space2->extent.rank) {
+ space_a = space1;
+ space_a_rank = space_a->extent.rank;
+
+ space_b = space2;
+ space_b_rank = space_b->extent.rank;
+ } /* end if */
+ else {
+ space_a = space2;
+ space_a_rank = space_a->extent.rank;
+
+ space_b = space1;
+ space_b_rank = space_b->extent.rank;
+ } /* end else */
+ HDassert(space_a_rank >= space_b_rank);
+ HDassert(space_b_rank > 0);
/* Check for different number of elements selected */
- if(H5S_GET_SELECT_NPOINTS(space1) != H5S_GET_SELECT_NPOINTS(space2))
+ if(H5S_GET_SELECT_NPOINTS(space_a) != H5S_GET_SELECT_NPOINTS(space_b))
HGOTO_DONE(FALSE)
/* Check for "easy" cases before getting into generalized block iteration code */
- if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_ALL && H5S_GET_SELECT_TYPE(space2)==H5S_SEL_ALL) {
- hsize_t dims1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
- hsize_t dims2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
-
- if(H5S_get_simple_extent_dims(space1, dims1, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality");
- if(H5S_get_simple_extent_dims(space2, dims2, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality");
-
- /* Check that the sizes are the same */
- for (u=0; u<space1->extent.rank; u++)
- if(dims1[u]!=dims2[u])
- HGOTO_DONE(FALSE);
+ if((H5S_GET_SELECT_TYPE(space_a) == H5S_SEL_ALL) && (H5S_GET_SELECT_TYPE(space_b) == H5S_SEL_ALL)) {
+ hsize_t dims1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
+ hsize_t dims2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+
+ if(H5S_get_simple_extent_dims(space_a, dims1, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
+ if(H5S_get_simple_extent_dims(space_b, dims2, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
+
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* recall that space_a_rank >= space_b_rank.
+ *
+ * In the following while loop, we test to see if space_a and space_b
+ * have identical size in all dimensions they have in common.
+ */
+ while(space_b_dim >= 0) {
+ if(dims1[space_a_dim] != dims2[space_b_dim])
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* Since we are selecting the entire spaces, we must also verify that space_a
+ * has size 1 in all dimensions that it does not share with space_b.
+ */
+ while(space_a_dim >= 0) {
+ if(dims1[space_a_dim] != 1)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end if */
- else if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_NONE || H5S_GET_SELECT_TYPE(space2)==H5S_SEL_NONE) {
- HGOTO_DONE(TRUE);
+ else if((H5S_GET_SELECT_TYPE(space1) == H5S_SEL_NONE) || (H5S_GET_SELECT_TYPE(space2) == H5S_SEL_NONE)) {
+ HGOTO_DONE(TRUE)
} /* end if */
- else if((H5S_GET_SELECT_TYPE(space1)==H5S_SEL_HYPERSLABS && space1->select.sel_info.hslab->diminfo_valid)
- && (H5S_GET_SELECT_TYPE(space2)==H5S_SEL_HYPERSLABS && space2->select.sel_info.hslab->diminfo_valid)) {
-
- /* Check that the shapes are the same */
- for (u=0; u<space1->extent.rank; u++) {
- if(space1->select.sel_info.hslab->opt_diminfo[u].stride!=space2->select.sel_info.hslab->opt_diminfo[u].stride)
- HGOTO_DONE(FALSE);
- if(space1->select.sel_info.hslab->opt_diminfo[u].count!=space2->select.sel_info.hslab->opt_diminfo[u].count)
- HGOTO_DONE(FALSE);
- if(space1->select.sel_info.hslab->opt_diminfo[u].block!=space2->select.sel_info.hslab->opt_diminfo[u].block)
- HGOTO_DONE(FALSE);
- } /* end for */
+ else if((H5S_GET_SELECT_TYPE(space_a) == H5S_SEL_HYPERSLABS && space_a->select.sel_info.hslab->diminfo_valid)
+ && (H5S_GET_SELECT_TYPE(space_b) == H5S_SEL_HYPERSLABS && space_b->select.sel_info.hslab->diminfo_valid)) {
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* check that the shapes are the same in the common dimensions, and that
+ * block == 1 in all dimensions that appear only in space_a.
+ */
+ while(space_b_dim >= 0) {
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].stride !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].stride)
+ HGOTO_DONE(FALSE)
+
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].count !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].count)
+ HGOTO_DONE(FALSE)
+
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].block)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ while(space_a_dim >= 0) {
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block != 1)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end if */
/* Iterate through all the blocks in the selection */
else {
- hsize_t start1[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace #1 */
- hsize_t start2[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace #2 */
- hsize_t end1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
- hsize_t end2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
- hsize_t off1[H5O_LAYOUT_NDIMS]; /* Offset of selection #1 blocks */
- hsize_t off2[H5O_LAYOUT_NDIMS]; /* Offset of selection #2 blocks */
- htri_t status1,status2; /* Status from next block checks */
- unsigned first_block=1; /* Flag to indicate the first block */
+ hsize_t start_a[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace a */
+ hsize_t start_b[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace b */
+ hsize_t end_a[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace a */
+ hsize_t end_b[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace b */
+ hsize_t off_a[H5O_LAYOUT_NDIMS]; /* Offset of selection a blocks */
+ hsize_t off_b[H5O_LAYOUT_NDIMS]; /* Offset of selection b blocks */
+ hbool_t first_block = TRUE; /* Flag to indicate the first block */
/* Initialize iterator for each dataspace selection
* Use '0' for element size instead of actual element size to indicate
* that the selection iterator shouldn't be "flattened", since we
* aren't actually going to be doing I/O with the iterators.
*/
- if(H5S_select_iter_init(&iter1, space1, (size_t)0) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator");
- iter1_init = 1;
- if(H5S_select_iter_init(&iter2, space2, (size_t)0) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator");
- iter2_init = 1;
+ if(H5S_select_iter_init(&iter_a, space_a, (size_t)0) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator a")
+ iter_a_init = 1;
+ if(H5S_select_iter_init(&iter_b, space_b, (size_t)0) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator b")
+ iter_b_init = 1;
/* Iterate over all the blocks in each selection */
while(1) {
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+ htri_t status_a, status_b; /* Status from next block checks */
+
/* Get the current block for each selection iterator */
- if(H5S_SELECT_ITER_BLOCK(&iter1,start1,end1)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
- if(H5S_SELECT_ITER_BLOCK(&iter2,start2,end2)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
+ if(H5S_SELECT_ITER_BLOCK(&iter_a, start_a, end_a) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block a")
+ if(H5S_SELECT_ITER_BLOCK(&iter_b, start_b, end_b) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block b")
- /* The first block only compares the sizes and sets the relative offsets for later blocks */
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* The first block only compares the sizes and sets the
+ * relative offsets for later blocks
+ */
if(first_block) {
- /* If the block sizes from each selection doesn't match, get out */
- for (u=0; u<space1->extent.rank; u++) {
- if((end1[u]-start1[u])!=(end2[u]-start2[u]))
- HGOTO_DONE(FALSE);
+ /* If the block sizes in the common dimensions from
+ * each selection don't match, get out
+ */
+ while(space_b_dim >= 0) {
+ if((end_a[space_a_dim] - start_a[space_a_dim]) !=
+ (end_b[space_b_dim] - start_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
+
+ /* Set the relative locations of the selections */
+ off_a[space_a_dim] = start_a[space_a_dim];
+ off_b[space_b_dim] = start_b[space_b_dim];
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* similarly, if the block size in any dimension that appears only
+ * in space_a is not equal to 1, get out.
+ */
+ while(space_a_dim >= 0) {
+ if((end_a[space_a_dim] - start_a[space_a_dim]) != 0)
+ HGOTO_DONE(FALSE)
/* Set the relative locations of the selections */
- off1[u]=start1[u];
- off2[u]=start2[u];
- } /* end for */
+ off_a[space_a_dim] = start_a[space_a_dim];
+
+ space_a_dim--;
+ } /* end while */
/* Reset "first block" flag */
- first_block=0;
+ first_block = FALSE;
} /* end if */
+ /* Check over the blocks for each selection */
else {
- /* Check over the blocks for each selection */
- for (u=0; u<space1->extent.rank; u++) {
+ /* for dimensions that space_a and space_b have in common: */
+ while(space_b_dim >= 0) {
/* Check if the blocks are in the same relative location */
- if((start1[u]-off1[u])!=(start2[u]-off2[u]))
- HGOTO_DONE(FALSE);
+ if((start_a[space_a_dim] - off_a[space_a_dim]) !=
+ (start_b[space_b_dim] - off_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
/* If the block sizes from each selection doesn't match, get out */
- if((end1[u]-start1[u])!=(end2[u]-start2[u]))
- HGOTO_DONE(FALSE);
- } /* end for */
+ if((end_a[space_a_dim] - start_a[space_a_dim]) !=
+ (end_b[space_b_dim] - start_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* For dimensions that appear only in space_a: */
+ while(space_a_dim >= 0) {
+ /* If the block size isn't 1, get out */
+ if((end_a[space_a_dim] - start_a[space_a_dim]) != 0)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end else */
/* Check if we are able to advance to the next selection block */
- if((status1=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter1))<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
- if((status2=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter2))<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
+ if((status_a = H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter_a)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block a")
+
+ if((status_b = H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter_b)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block b")
/* Did we run out of blocks at the same time? */
- if(status1==FALSE && status2==FALSE)
+ if((status_a == FALSE) && (status_b == FALSE))
break;
- else if(status1!=status2) {
- HGOTO_DONE(FALSE);
- } /* end if */
+ else if(status_a != status_b)
+ HGOTO_DONE(FALSE)
else {
/* Advance to next block in selection iterators */
- if(H5S_SELECT_ITER_NEXT_BLOCK(&iter1)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block");
- if(H5S_SELECT_ITER_NEXT_BLOCK(&iter2)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block");
+ if(H5S_SELECT_ITER_NEXT_BLOCK(&iter_a) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block a")
+
+ if(H5S_SELECT_ITER_NEXT_BLOCK(&iter_b) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block b")
} /* end else */
} /* end while */
} /* end else */
} /* end else */
done:
- if(iter1_init) {
- if (H5S_SELECT_ITER_RELEASE(&iter1)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
+ if(iter_a_init)
+ if(H5S_SELECT_ITER_RELEASE(&iter_a) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator a")
+ if(iter_b_init)
+ if(H5S_SELECT_ITER_RELEASE(&iter_b) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator b")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_shape_same() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
+ H5S_select_construct_projection
+
+ PURPOSE
+ Given a dataspace a of rank n with some selection, construct a new
+ dataspace b of rank m (m != n), with the selection in a being
+ topologically identical to that in b (as verified by
+ H5S_select_shape_same()).
+
+ This function exists because some I/O code chokes on topologically
+ identical selections with different ranks. At least to begin
+ with, we will deal with the issue by constructing projections
+ of the memory dataspace with ranks equaling those of the file
+ dataspace.
+
+ Note that if m > n, it is possible that the starting point in the
+ buffer associated with the memory dataspace will have to be
+ adjusted to match the projected dataspace. If the buf parameter
+ is not NULL, the function must return an adjusted buffer base
+ address in *adj_buf_ptr.
+
+ USAGE
+ herr_t H5S_select_construct_projection(base_space,
+ new_space_ptr,
+ new_space_rank,
+ buf,
+ adj_buf_ptr,
+ element_size)
+ const H5S_t *base_space; IN: Ptr to Dataspace to project
+ H5S_t ** new_space_ptr; OUT: Ptr to location in which to return
+ the address of the projected space
+ unsigned new_space_rank; IN: Rank of the projected space.
+ const void * buf; IN: Base address of the buffer
+ associated with the base space.
+ May be NULL.
+ void const ** adj_buf_ptr; OUT: If buf != NULL, store the base
+ address of the section of buf
+ that is described by *new_space_ptr
+ in *adj_buf_ptr.
+ hsize_t element_size; IN: Size of each selected element,
+ used to compute *adj_buf_ptr.
+
+ RETURNS
+ Non-negative on success/Negative on failure.
+
+ DESCRIPTION
+ Construct a new dataspace and associated selection which is a
+ projection of the supplied dataspace and associated selection into
+ the specified rank. Return it in *new_space_ptr.
+
+ If buf is supplied, computes the base address of the projected
+ selection in buf, and stores the base address in *adj_buf_ptr.
+
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ The selection in the supplied base_space has thickness 1 in all
+ dimensions greater than new_space_rank. Note that here we count
+ dimensions from the fastest changing coordinate to the slowest
+ changing coordinate.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
+ unsigned new_space_rank, const void *buf, void const **adj_buf_ptr, hsize_t element_size)
+{
+ H5S_t * new_space = NULL; /* New dataspace constructed */
+ hsize_t base_space_dims[H5S_MAX_RANK]; /* Current dimensions of base dataspace */
+ hsize_t base_space_maxdims[H5S_MAX_RANK]; /* Maximum dimensions of base dataspace */
+ int sbase_space_rank; /* Signed # of dimensions of base dataspace */
+ unsigned base_space_rank; /* # of dimensions of base dataspace */
+ hsize_t projected_space_element_offset = 0; /* Offset of selected element in projected buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5S_select_construct_projection, FAIL)
+
+ /* Sanity checks */
+ HDassert(base_space != NULL);
+ HDassert((H5S_GET_EXTENT_TYPE(base_space) == H5S_SCALAR) || (H5S_GET_EXTENT_TYPE(base_space) == H5S_SIMPLE));
+ HDassert(new_space_ptr != NULL);
+ HDassert((new_space_rank != 0) || (H5S_GET_SELECT_NPOINTS(base_space) <= 1));
+ HDassert(new_space_rank <= H5S_MAX_RANK);
+ HDassert((buf == NULL) || (adj_buf_ptr != NULL));
+ HDassert(element_size > 0);
+
+ /* Get the extent info for the base dataspace */
+ if((sbase_space_rank = H5S_get_simple_extent_dims(base_space, base_space_dims, base_space_maxdims)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality of base space")
+ base_space_rank = (unsigned)sbase_space_rank;
+ HDassert(base_space_rank != new_space_rank);
+
+ /* Check if projected space is scalar */
+ if(new_space_rank == 0) {
+ hssize_t npoints; /* Number of points selected */
+
+ /* Retrieve the number of elements selected */
+ if((npoints = (hssize_t)H5S_GET_SELECT_NPOINTS(base_space)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get number of points selected")
+ HDassert(npoints <= 1);
+
+ /* Create new scalar dataspace */
+ if(NULL == (new_space = H5S_create(H5S_SCALAR)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create scalar dataspace")
+
+ /* No need to register the dataspace (i.e. get an ID) as
+ * we will just be discarding it shortly.
+ */
+
+ /* Selection for the new space will be either all or
+ * none, depending on whether the base space has 0 or
+ * 1 elements selected.
+ *
+ * Observe that the base space can't have more than
+ * one selected element, since its selection has the
+ * same shape as the file dataspace, and that data
+ * space is scalar.
+ */
+ if(1 == npoints) {
+ /* Assuming that the selection in the base dataspace is not
+ * empty, we must compute the offset of the selected item in
+ * the buffer associated with the base dataspace.
+ *
+ * Since the new space rank is zero, we know that the
+ * base space must have rank at least 1 -- and
+ * hence it is a simple dataspace. However, the
+ * selection may be either point, hyperslab, or all.
+ *
+ */
+ if(H5S_SELECT_PROJECT_SCALAR(base_space, &projected_space_element_offset) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to project scalar selection")
+ } /* end if */
+ else {
+ HDassert(0 == npoints);
+
+ if(H5S_select_none(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't delete default selection")
+ } /* end else */
+ } /* end if */
+ else { /* projected space must be simple */
+ hsize_t new_space_dims[H5S_MAX_RANK]; /* Current dimensions for new dataspace */
+ hsize_t new_space_maxdims[H5S_MAX_RANK];/* Maximum dimensions for new dataspace */
+ unsigned rank_diff; /* Difference in ranks */
+
+ /* Set up the dimensions of the new, projected dataspace.
+ *
+ * How we do this depends on whether we are projecting up into
+ * increased dimensions, or down into a reduced number of
+ * dimensions.
+ *
+ * If we are projecting up (the first half of the following
+ * if statement), we copy the dimensions of the base data
+ * space into the fastest changing dimensions of the new
+ * projected dataspace, and set the remaining dimensions to
+ * one.
+ *
+ * If we are projecting down (the second half of the following
+ * if statement), we just copy the most quickly changing
+ * dimensions of the base dataspace into the dims for the
+ * projected dataspace.
+ *
+ * This works, because H5S_select_shape_same() will return
+ * true on selections of different rank iff:
+ *
+ * 1) the selection in the lower rank dataspace matches that
+ * in the dimensions with the fastest changing indices in
+ * the larger rank dataspace, and
+ *
+ * 2) the selection has thickness 1 in all ranks that appear
+ * only in the higher rank dataspace (i.e. those with
+ * more slowly changing indices).
+ */
+ if(new_space_rank > base_space_rank) {
+ hsize_t tmp_dim_size = 1; /* Temporary dimension value, for filling arrays */
+
+ /* we must copy the dimensions of the base space into
+ * the fastest changing dimensions of the new space,
+ * and set the remaining dimensions to 1
+ */
+ rank_diff = new_space_rank - base_space_rank;
+ H5V_array_fill(new_space_dims, &tmp_dim_size, sizeof(tmp_dim_size), rank_diff);
+ H5V_array_fill(new_space_maxdims, &tmp_dim_size, sizeof(tmp_dim_size), rank_diff);
+ HDmemcpy(&new_space_dims[rank_diff], base_space_dims, sizeof(new_space_dims[0]) * base_space_rank);
+ HDmemcpy(&new_space_maxdims[rank_diff], base_space_maxdims, sizeof(new_space_maxdims[0]) * base_space_rank);
+ } /* end if */
+ else { /* new_space_rank < base_space_rank */
+ /* we must copy the fastest changing dimensions of the
+ * base space into the dimensions of the new space.
+ */
+ rank_diff = base_space_rank - new_space_rank;
+ HDmemcpy(new_space_dims, &base_space_dims[rank_diff], sizeof(new_space_dims[0]) * new_space_rank);
+ HDmemcpy(new_space_maxdims, &base_space_maxdims[rank_diff], sizeof(new_space_maxdims[0]) * new_space_rank);
+ } /* end else */
+
+ /* now have the new space rank and dimensions set up --
+ * so we can create the new simple dataspace.
+ */
+ if(NULL == (new_space = H5S_create_simple(new_space_rank, new_space_dims, new_space_maxdims)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
+
+ /* No need to register the dataspace (i.e. get an ID) as
+ * we will just be discarding it shortly.
+ */
+
+ /* If we get this far, we have successfully created the projected
+ * dataspace. We must now project the selection in the base
+ * dataspace into the projected dataspace.
+ */
+ if(H5S_SELECT_PROJECT_SIMPLE(base_space, new_space, &projected_space_element_offset) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to project simple selection")
+
+ /* If we get this far, we have created the new dataspace, and projected
+ * the selection in the base dataspace into the new dataspace.
+ *
+ * If the base dataspace is simple, check to see if the
+ * offset_changed flag on the base selection has been set -- if so,
+ * project the offset into the new dataspace and set the
+ * offset_changed flag.
+ */
+ if(H5S_GET_EXTENT_TYPE(base_space) == H5S_SIMPLE && base_space->select.offset_changed) {
+ if(new_space_rank > base_space_rank) {
+ HDmemset(new_space->select.offset, 0, sizeof(new_space->select.offset[0]) * rank_diff);
+ HDmemcpy(&new_space->select.offset[rank_diff], base_space->select.offset, sizeof(new_space->select.offset[0]) * base_space_rank);
+ } /* end if */
+ else
+ HDmemcpy(new_space->select.offset, &base_space->select.offset[rank_diff], sizeof(new_space->select.offset[0]) * new_space_rank);
+
+ /* Propagate the offset changed flag into the new dataspace. */
+ new_space->select.offset_changed = TRUE;
+ } /* end if */
+ } /* end else */
+
+ /* If we have done the projection correctly, the following assertion
+ * should hold.
+ */
+ HDassert(TRUE == H5S_select_shape_same(base_space, new_space));
+
+ /* load the address of the new space into *new_space_ptr */
+ *new_space_ptr = new_space;
+
+ /* now adjust the buffer if required */
+ if(buf != NULL) {
+ if(new_space_rank < base_space_rank) {
+ /* a bit of pointer magic here:
+ *
+ * Since we can't do pointer arithmetic on void pointers, we first
+ * cast buf to a pointer to byte -- i.e. uint8_t.
+ *
+ * We then multiply the projected space element offset we
+ * calculated earlier by the supplied element size, add this
+ * value to the type cast buf pointer, cast the result back
+ * to a pointer to void, and assign the result to *adj_buf_ptr.
+ */
+ *adj_buf_ptr = (const void *)(((const uint8_t *)buf) +
+ ((size_t)(projected_space_element_offset * element_size)));
+ } /* end if */
+ else
+ /* No adjustment necessary */
+ *adj_buf_ptr = buf;
} /* end if */
- if(iter2_init) {
- if (H5S_SELECT_ITER_RELEASE(&iter2)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
+
+done:
+ /* Cleanup on error */
+ if(ret_value < 0) {
+ if(new_space && H5S_close(new_space) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5S_select_shape_same() */
+} /* H5S_select_construct_projection() */
/*--------------------------------------------------------------------------
@@ -1536,7 +2006,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
/* Initialize iterator */
if(H5S_select_iter_init(&iter, space, fill_size) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
iter_init = 1; /* Selection iteration info has been initialized */
/* Get the number of elements in selection */
@@ -1556,7 +2026,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
/* Get the sequences of bytes */
if(H5S_SELECT_GET_SEQ_LIST(space, 0, &iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
- HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+ HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
/* Loop over sequences */
for(curr_seq = 0; curr_seq < nseq; curr_seq++) {
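
As an aside, outside the patch itself: the relaxed comparison in the revised H5S_select_shape_same() means, for example, that a full selection on a 10 x 20 rank-2 dataspace and a full selection on a 1 x 10 x 20 rank-3 dataspace now count as the same shape, because the extra slowest-changing dimension has extent 1. A small sketch using only public dataspace calls (the shape-same routine itself is internal to the library, so the sketch only sets up the pair of dataspaces):

    /* Two dataspaces of different rank whose default "all" selections have
     * the same shape under the revised comparison. */
    #include "hdf5.h"

    int main(void)
    {
        hsize_t dims2[2] = {10, 20};       /* rank-2 memory dataspace */
        hsize_t dims3[3] = {1, 10, 20};    /* rank-3 file dataspace, extent 1 in the extra dimension */
        hid_t   mem_space, file_space;

        if((mem_space = H5Screate_simple(2, dims2, NULL)) < 0)
            return 1;
        if((file_space = H5Screate_simple(3, dims3, NULL)) < 0)
            return 1;

        /* Both spaces carry an "all" selection of 200 elements; the internal
         * H5S_select_shape_same() now reports these as the same shape, which is
         * what lets the I/O paths pair them without the application reshaping
         * one of the dataspaces by hand. */

        H5Sclose(file_space);
        H5Sclose(mem_space);
        return 0;
    }

When the ranks differ, H5S_select_construct_projection() can then rewrite one of the two dataspaces to the other's rank; when it projects the memory space down, it also adjusts the supplied buffer base address by the projected element offset times the element size.
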
diff --git a/src/H5public.h b/src/H5public.h
index e07c4e3..0ff480f 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -71,10 +71,10 @@ extern "C" {
/* Version numbers */
#define H5_VERS_MAJOR 1 /* For major interface/format changes */
#define H5_VERS_MINOR 9 /* For minor interface/format changes */
-#define H5_VERS_RELEASE 73 /* For tweaks, bug-fixes, or development */
+#define H5_VERS_RELEASE 75 /* For tweaks, bug-fixes, or development */
#define H5_VERS_SUBRELEASE "FA_a4" /* For pre-releases like snap0 */
/* Empty string for real releases. */
-#define H5_VERS_INFO "HDF5 library version: 1.9.73-FA_a4" /* Full version string */
+#define H5_VERS_INFO "HDF5 library version: 1.9.75-FA_a4" /* Full version string */
#define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \
H5_VERS_RELEASE)
diff --git a/src/Makefile.in b/src/Makefile.in
index 0e3d148..14a17bf 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -222,6 +222,7 @@ CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
+CXX_VERSION = @CXX_VERSION@
CYGPATH_W = @CYGPATH_W@
DEBUG_PKG = @DEBUG_PKG@
DEFAULT_API_VERSION = @DEFAULT_API_VERSION@
@@ -247,6 +248,7 @@ FC = @FC@
FCFLAGS = @FCFLAGS@
FCFLAGS_f90 = @FCFLAGS_f90@
FCLIBS = @FCLIBS@
+FC_VERSION = @FC_VERSION@
FGREP = @FGREP@
FILTERS = @FILTERS@
FSEARCH_DIRS = @FSEARCH_DIRS@
@@ -445,7 +447,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog
# Add libtool shared library version numbers to the HDF5 library
# See libtool versioning documentation online.
LT_VERS_INTERFACE = 6
-LT_VERS_REVISION = 63
+LT_VERS_REVISION = 65
LT_VERS_AGE = 0
H5detect_CFLAGS = -g $(AM_CFLAGS)
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index ba233e8..2e8bc52 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -37,12 +37,12 @@ Compiling Options:
Languages:
----------
Fortran: @HDF_FORTRAN@
-@BUILD_FORTRAN_CONDITIONAL_TRUE@ Fortran Compiler: @FC@
+@BUILD_FORTRAN_CONDITIONAL_TRUE@ Fortran Compiler: @FC_VERSION@
@BUILD_FORTRAN_CONDITIONAL_TRUE@ Fortran Flags: @FCFLAGS@
@BUILD_FORTRAN_CONDITIONAL_TRUE@ H5 Fortran Flags: @H5_FCFLAGS@
@BUILD_FORTRAN_CONDITIONAL_TRUE@ AM Fortran Flags: @AM_FCFLAGS@
C++: @HDF_CXX@
-@BUILD_CXX_CONDITIONAL_TRUE@ C++ Compiler: @CXX@
+@BUILD_CXX_CONDITIONAL_TRUE@ C++ Compiler: @CXX_VERSION@
@BUILD_CXX_CONDITIONAL_TRUE@ C++ Flags: @CXXFLAGS@
@BUILD_CXX_CONDITIONAL_TRUE@ H5 C++ Flags: @H5_CXXFLAGS@
@BUILD_CXX_CONDITIONAL_TRUE@ AM C++ Flags: @AM_CXXFLAGS@