author    Quincey Koziol <koziol@hdfgroup.org>    2016-02-10 17:55:55 (GMT)
committer Quincey Koziol <koziol@hdfgroup.org>    2016-02-10 17:55:55 (GMT)
commit    c8a6c6030a768952bc4161b2bf30c948b8b58723 (patch)
tree      064e57faf3f85c030d70900c3354fee2fbffd8ac /src
parent    f1283d59b061cde641f2c24d23f01efbca46ad2f (diff)
[svn-r29077] Description:
    Normalize against the trunk, in preparation for final merge.

Tested on:
    MacOSX/64 10.11.3 (amazon) w/serial & parallel
    (h5committest not required on this branch)
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c        |   5
-rw-r--r--  src/H5ACmpio.c    |   2
-rw-r--r--  src/H5ACprivate.h |  15
-rw-r--r--  src/H5C.c         | 527
-rw-r--r--  src/H5Cmpio.c     | 205
-rw-r--r--  src/H5Cpkg.h      |  15
-rw-r--r--  src/H5Cprivate.h  |   3
-rw-r--r--  src/H5Dchunk.c    |  17
-rw-r--r--  src/H5Ddeprec.c   |   3
-rw-r--r--  src/H5Fint.c      |  30
-rw-r--r--  src/H5Fprivate.h  |  13
-rw-r--r--  src/H5Fquery.c    |  28
-rw-r--r--  src/H5Pdxpl.c     |   5
-rw-r--r--  src/H5Pint.c      |  16
-rw-r--r--  src/H5Plapl.c     |   4
-rw-r--r--  src/H5Pprivate.h  |   4
-rw-r--r--  src/H5R.c         |   4
17 files changed, 385 insertions(+), 511 deletions(-)
diff --git a/src/H5AC.c b/src/H5AC.c
index 12417f2..01fdb72 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -88,8 +88,6 @@ hid_t H5AC_dxpl_id;
hid_t H5AC_coll_read_dxpl_id = (-1);
#endif /* H5_HAVE_PARALLEL */
-/* global flag for collective API sanity checks */
-
/* DXPL to be used in operations that will not result in I/O calls */
hid_t H5AC_noio_dxpl_id = (-1);
@@ -216,7 +214,7 @@ H5AC__init_package(void)
/* Get the property list object */
if (NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
- /* set metadata dxpl type */
+ /* Insert the dxpl type property */
dxpl_type = H5FD_METADATA_DXPL;
if(H5P_set(xfer_plist, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set dxpl type property")
@@ -551,7 +549,6 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
-
if(aux_ptr)
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 353805d..db291cf 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -2067,7 +2067,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu
/* clear collective access flag on half of the entries in the
cache and mark them as independent in case they need to be
- evicted later. All ranks are guranteed to mark the same entires
+ evicted later. All ranks are guranteed to mark the same entries
since we don't modify the order of the collectively accessed
entries except through collective access. */
if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 7481f0a..71ddcf3 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -181,13 +181,6 @@ typedef H5C_cache_entry_t H5AC_info_t;
/* Typedef for metadata cache (defined in H5Cpkg.h) */
typedef H5C_t H5AC_t;
-#ifdef H5_HAVE_PARALLEL
-/* Definitions for "collective metadata write" property */
-#define H5AC_COLLECTIVE_META_WRITE_NAME "H5AC_collective_metadata_write"
-#define H5AC_COLLECTIVE_META_WRITE_SIZE sizeof(hbool_t)
-#define H5AC_COLLECTIVE_META_WRITE_DEF 0
-#endif /* H5_HAVE_PARALLEL */
-
#define H5AC_METADATA_TAG_NAME "H5AC_metadata_tag"
#define H5AC_METADATA_TAG_SIZE sizeof(haddr_t)
#define H5AC_METADATA_TAG_DEF H5AC__INVALID_TAG
@@ -196,17 +189,17 @@ typedef H5C_t H5AC_t;
/* Dataset transfer property list for metadata calls */
H5_DLLVAR hid_t H5AC_dxpl_id;
-extern hid_t H5AC_ind_read_dxpl_id;
+H5_DLLVAR hid_t H5AC_ind_read_dxpl_id;
#ifdef H5_HAVE_PARALLEL
-extern hid_t H5AC_coll_read_dxpl_id;
+H5_DLLVAR hid_t H5AC_coll_read_dxpl_id;
#endif /* H5_HAVE_PARALLEL */
/* DXPL to be used in operations that will not result in I/O calls */
-extern hid_t H5AC_noio_dxpl_id;
+H5_DLLVAR hid_t H5AC_noio_dxpl_id;
/* DXPL to be used for raw data I/O operations when one is not
provided by the user (fill values in H5Dcreate) */
-extern hid_t H5AC_rawdata_dxpl_id;
+H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
/* Default cache configuration. */
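
The H5ACprivate.h hunk above swaps bare extern declarations for H5_DLLVAR so the DXPL globals are exported consistently when the library is built as a shared object or DLL. A minimal sketch of how such an export macro is commonly defined; MYLIB_DLLVAR and MYLIB_BUILDING are hypothetical names, not HDF5's actual definitions:

    #if defined(_WIN32)
    #  ifdef MYLIB_BUILDING                       /* compiling the library itself */
    #    define MYLIB_DLLVAR extern __declspec(dllexport)
    #  else                                       /* compiling a consumer of it   */
    #    define MYLIB_DLLVAR extern __declspec(dllimport)
    #  endif
    #else
    #  define MYLIB_DLLVAR extern                 /* ELF/Mach-O: plain extern      */
    #endif

    MYLIB_DLLVAR int mylib_global_id;             /* header declaration; the .c file
                                                     defines it once, without the macro */

Using the macro instead of plain extern keeps the same header correct for both static and dynamic builds of the library.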
diff --git a/src/H5C.c b/src/H5C.c
index 100fbe0..37b0df2 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -93,7 +93,6 @@
#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
-#include "H5SLprivate.h" /* Skip lists */
/****************/
@@ -182,7 +181,7 @@ static herr_t H5C_mark_tagged_entries(H5C_t * cache_ptr,
static herr_t H5C_flush_marked_entries(H5F_t * f,
hid_t dxpl_id);
-static herr_t H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
+static herr_t H5C__generate_image(const H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id, int64_t *entry_size_change_ptr);
#if H5C_DO_TAGGING_SANITY_CHECKS
@@ -225,6 +224,9 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);
+/* Declare extern free list to manage the H5C_collective_write_t struct */
+H5FL_EXTERN(H5C_collective_write_t);
+
/****************************************************************************
@@ -1006,7 +1008,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_flush_single_entry() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
#if H5C_DO_SANITY_CHECKS
if ( entry_was_dirty )
@@ -1913,8 +1915,10 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->aux_next = NULL;
entry_ptr->aux_prev = NULL;
+#ifdef H5_HAVE_PARALLEL
entry_ptr->coll_next = NULL;
entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
@@ -2041,19 +2045,18 @@ H5C_insert_entry(H5F_t * f,
/* Make sure the size of the collective entries in the cache remain in check */
if(H5P_USER_TRUE == f->coll_md_read) {
- if(cache_ptr->max_cache_size*80 < cache_ptr->coll_list_size*100) {
- if(H5C_clear_coll_entries(cache_ptr, 1) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
+ if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
+ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
} /* end if */
} /* end if */
else {
- if(cache_ptr->max_cache_size*40 < cache_ptr->coll_list_size*100) {
- if(H5C_clear_coll_entries(cache_ptr, 1) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
+ if(cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
+ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
} /* end if */
} /* end else */
} /* end if */
- entry_ptr->ind_access_while_coll = FALSE;
#endif
done:
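
The insertion path above keeps the collective-entry list below 80% of the cache size when the user asked for collective metadata reads, and below 40% otherwise, using an integer cross-multiplication rather than floating point. A small self-contained illustration of that test with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
        size_t max_cache_size = 2u * 1024 * 1024;   /* 2 MiB metadata cache            */
        size_t coll_list_size = 1700u * 1024;       /* ~1.66 MiB of collective entries */

        /* Same integer cross-multiplication as the hunk above: true exactly when
         * coll_list_size exceeds 80% of max_cache_size. */
        if(max_cache_size * 80 < coll_list_size * 100)
            printf("over the 80%% limit -> clear half of the collective entries\n");

        return 0;
    }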
@@ -2724,52 +2727,52 @@ H5C_protect(H5F_t * f,
the entry in their cache still have to participate in the
bcast. */
#ifdef H5_HAVE_PARALLEL
- if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access &&
- !(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
- MPI_Comm comm; /* File MPI Communicator */
- int mpi_code; /* MPI error code */
- int buf_size;
+ if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access) {
+ if(!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
+ MPI_Comm comm; /* File MPI Communicator */
+ int mpi_code; /* MPI error code */
+ int buf_size;
- if(MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+ if(MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
- if(entry_ptr->image_ptr == NULL) {
- int mpi_rank;
- size_t image_size;
+ if(entry_ptr->image_ptr == NULL) {
+ int mpi_rank;
+ size_t image_size;
- if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+ if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
- if(entry_ptr->compressed)
- image_size = entry_ptr->compressed_size;
- else
- image_size = entry_ptr->size;
- HDassert(image_size > 0);
+ if(entry_ptr->compressed)
+ image_size = entry_ptr->compressed_size;
+ else
+ image_size = entry_ptr->size;
+ HDassert(image_size > 0);
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(image_size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(image_size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + image_size,
- H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + image_size,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if(0 == mpi_rank)
- if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "Can't get Image")
- } /* end if */
-
- HDassert(entry_ptr->image_ptr);
-
- H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
- if(MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ if(0 == mpi_rank)
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
+ } /* end if */
+ HDassert(entry_ptr->image_ptr);
- entry_ptr->coll_access = TRUE;
+ H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
+ if(MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+ /* Mark the entry as collective and insert into the collective list */
+ entry_ptr->coll_access = TRUE;
+ H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+ } /* end if */
+ else if(entry_ptr->coll_access) {
+ H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+ } /* end else-if */
} /* end if */
- else if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access && entry_ptr->coll_access) {
- H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
- } /* end else-if */
#endif /* H5_HAVE_PARALLEL */
#if H5C_DO_TAGGING_SANITY_CHECKS
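
The restructured H5C_protect() block above is the "rank 0 produces the bytes, everyone receives them" pattern for collectively accessed metadata: every rank allocates image_ptr, only rank 0 serializes the entry into it, and MPI_Bcast hands the same on-disk image to the other ranks so they never read the file themselves. A stripped-down, self-contained sketch of that pattern; fill_buffer() is a hypothetical stand-in for H5C__generate_image():

    #include <mpi.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for H5C__generate_image(): only rank 0 runs it. */
    static void fill_buffer(char *buf, int len)
    {
        for(int i = 0; i < len; i++)
            buf[i] = (char)i;
    }

    int main(int argc, char **argv)
    {
        int rank, len = 4096;
        char *image;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        image = malloc((size_t)len);    /* every rank allocates the buffer ...       */
        if(rank == 0)
            fill_buffer(image, len);    /* ... but only rank 0 generates the content */

        /* all ranks participate in the broadcast, exactly as in the hunk above */
        MPI_Bcast(image, len, MPI_BYTE, 0, MPI_COMM_WORLD);

        free(image);
        MPI_Finalize();
        return 0;
    }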
@@ -3057,16 +3060,16 @@ H5C_protect(H5F_t * f,
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
/* Make sure the size of the collective entries in the cache remain in check */
- if(TRUE == coll_access) {
+ if(coll_access) {
if(H5P_USER_TRUE == f->coll_md_read) {
if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
- if(H5C_clear_coll_entries(cache_ptr, 1) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "H5C_clear_coll_entries() failed.")
+ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
} /* end if */
else {
if(cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100)
- if(H5C_clear_coll_entries(cache_ptr, 1) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "H5C_clear_coll_entries() failed.")
+ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
} /* end else */
} /* end if */
} /* end if */
@@ -7707,7 +7710,11 @@ done:
*/
herr_t
H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
- unsigned flags, int64_t *entry_size_change_ptr, H5SL_t *collective_write_list)
+ unsigned flags, int64_t *entry_size_change_ptr, H5SL_t
+#ifndef H5_HAVE_PARALLEL
+ H5_ATTR_UNUSED
+#endif /* NDEBUG */
+ *collective_write_list)
{
H5C_t * cache_ptr; /* Cache for file */
hbool_t destroy; /* external flag */
@@ -7718,11 +7725,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
hbool_t write_entry; /* internal flag */
hbool_t destroy_entry; /* internal flag */
hbool_t was_dirty;
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
haddr_t entry_addr = HADDR_UNDEF;
- size_t new_len = 0;
- size_t new_compressed_len = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -7807,7 +7810,6 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* serialize the entry if necessary, and then write it to disk. */
if(write_entry) {
- unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
/* The entry is dirty, and we are doing either a flush,
* or a flush destroy. In either case, serialize the
@@ -7847,225 +7849,9 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
} /* end if */
if(!(entry_ptr->image_up_to_date)) {
- /* reset cache_ptr->slist_changed so we can detect slist
- * modifications in the pre_serialize call.
- */
- cache_ptr->slist_changed = FALSE;
-
- /* make note of the entry's current address */
- old_addr = entry_ptr->addr;
-
- /* Call client's pre-serialize callback, if there's one */
- if ( ( entry_ptr->type->pre_serialize != NULL ) &&
- ( (entry_ptr->type->pre_serialize)(f, dxpl_id,
- (void *)entry_ptr,
- entry_ptr->addr,
- entry_ptr->size,
- entry_ptr->compressed_size,
- &new_addr, &new_len,
- &new_compressed_len,
- &serialize_flags) < 0 ) )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
-
- /* set cache_ptr->slist_change_in_pre_serialize if the
- * slist was modified.
- */
- if(cache_ptr->slist_changed)
- cache_ptr->slist_change_in_pre_serialize = TRUE;
-
- /* Check for any flags set in the pre-serialize callback */
- if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
- /* Check for unexpected flags from serialize callback */
- if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
- H5C__SERIALIZE_MOVED_FLAG |
- H5C__SERIALIZE_COMPRESSED_FLAG))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
-#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, resizes and moves in
- * the serialize operation can cause problems.
- * If they occur, scream and die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entryies that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- *
- * From what Quincey tells me, this test is
- * sufficient for now, as any flush routine that
- * does the latter will also do the former.
- *
- * If that ceases to be the case, further
- * tests will be necessary.
- */
- if(cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case.")
-#endif /* H5_HAVE_PARALLEL */
-
- /* Resize the buffer if required */
- if ( ( ( ! entry_ptr->compressed ) &&
- ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) ) ||
- ( ( entry_ptr->compressed ) &&
- ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) ) )
- {
- size_t new_image_size;
-
- if(entry_ptr->compressed)
- new_image_size = new_compressed_len;
- else
- new_image_size = new_len;
- HDassert(new_image_size > 0);
-
- /* Release the current image */
- if(entry_ptr->image_ptr)
- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
-
- /* Allocate a new image buffer */
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
- /* If required, update the entry and the cache data structures
- * for a resize.
- */
- if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
- entry_ptr, new_len)
-
- /* update the hash table for the size change*/
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, \
- entry_ptr->size, \
- new_len, entry_ptr, \
- !(entry_ptr->is_dirty));
-
- /* The entry can't be protected since we are
- * in the process of flushing it. Thus we must
- * update the replacement policy data
- * structures for the size change. The macro
- * deals with the pinned case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
-
- /* as we haven't updated the cache data structures for
- * for the flush or flush destroy yet, the entry should
- * be in the slist. Thus update it for the size change.
- */
- HDassert(entry_ptr->in_slist);
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
- new_len)
-
- /* if defined, update *entry_size_change_ptr for the
- * change in entry size.
- */
- if(entry_size_change_ptr != NULL)
- *entry_size_change_ptr = (int64_t)new_len - (int64_t)(entry_ptr->size);
-
- /* finally, update the entry for its new size */
- entry_ptr->size = new_len;
- } /* end if */
-
- /* If required, udate the entry and the cache data structures
- * for a move
- */
- if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
-#if H5C_DO_SANITY_CHECKS
- int64_t saved_slist_len_increase;
- int64_t saved_slist_size_increase;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
-
- if(entry_ptr->addr == old_addr) {
- /* we must update cache data structures for the
- * change in address.
- */
-
- /* delete the entry from the hash table and the slist */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
-
- /* update the entry for its new address */
- entry_ptr->addr = new_addr;
-
- /* and then reinsert in the index and slist */
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
-
-#if H5C_DO_SANITY_CHECKS
- /* save cache_ptr->slist_len_increase and
- * cache_ptr->slist_size_increase before the
- * reinsertion into the slist, and restore
- * them afterwards to avoid skewing our sanity
- * checking.
- */
- saved_slist_len_increase = cache_ptr->slist_len_increase;
- saved_slist_size_increase = cache_ptr->slist_size_increase;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
-
-#if H5C_DO_SANITY_CHECKS
- cache_ptr->slist_len_increase = saved_slist_len_increase;
- cache_ptr->slist_size_increase = saved_slist_size_increase;
-#endif /* H5C_DO_SANITY_CHECKS */
- }
- else /* move is alread done for us -- just do sanity checks */
- HDassert(entry_ptr->addr == new_addr);
- } /* end if */
-
- if(serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG) {
- /* just save the new compressed entry size in
- * entry_ptr->compressed_size. We don't need to
- * do more, as compressed size is only used for I/O.
- */
- HDassert(entry_ptr->compressed);
- entry_ptr->compressed_size = new_compressed_len;
- }
- } /* end if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) */
-
- /* Serialize object into buffer */
- {
- size_t image_len;
-
- if(entry_ptr->compressed)
- image_len = entry_ptr->compressed_size;
- else
- image_len = entry_ptr->size;
-
- /* reset cache_ptr->slist_changed so we can detect slist
- * modifications in the serialize call.
- */
- cache_ptr->slist_changed = FALSE;
-
-
- if(entry_ptr->type->serialize(f, entry_ptr->image_ptr,
- image_len, (void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
-
- /* set cache_ptr->slist_change_in_serialize if the
- * slist was modified.
- */
- if(cache_ptr->slist_changed)
- cache_ptr->slist_change_in_pre_serialize = TRUE;
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + image_len,
- H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- entry_ptr->image_up_to_date = TRUE;
- }
+ /* Generate the entry's image */
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
} /* end if ( ! (entry_ptr->image_up_to_date) ) */
/* Finally, write the image to disk.
@@ -8091,10 +7877,10 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
#ifdef H5_HAVE_PARALLEL
if(collective_write_list) {
- H5C_collective_write_t *item = NULL;
+ H5C_collective_write_t *item;
- if(NULL == (item = (H5C_collective_write_t *)H5MM_malloc(sizeof(H5C_collective_write_t))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate skip list item")
+ if(NULL == (item = (H5C_collective_write_t *)H5FL_MALLOC(H5C_collective_write_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "unable to allocate skip list item")
item->length = image_size;
item->free_buf = FALSE;
@@ -8103,7 +7889,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
if(H5SL_insert(collective_write_list, item, &item->offset) < 0) {
H5MM_free(item);
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
} /* end if */
} /* end if */
else
@@ -8778,7 +8564,6 @@ H5C_load_entry(H5F_t * f,
entry->clear_on_unprotect = FALSE;
entry->flush_immediately = FALSE;
entry->coll_access = coll_access;
- entry->ind_access_while_coll = FALSE;
#endif /* H5_HAVE_PARALLEL */
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
@@ -8799,8 +8584,10 @@ H5C_load_entry(H5F_t * f,
entry->aux_next = NULL;
entry->aux_prev = NULL;
+#ifdef H5_HAVE_PARALLEL
entry->coll_next = NULL;
entry->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
H5C__RESET_CACHE_ENTRY_STATS(entry);
@@ -9018,13 +8805,6 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->entries_scanned_to_make_space++;
#endif /* H5C_COLLECT_CACHE_STATS */
-#ifdef H5_HAVE_PARALLEL
- if(TRUE == entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif
-
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
} else {
@@ -10207,9 +9987,22 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_get_entry_ring() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_image
+ *
+ * Purpose: Serialize an entry and generate its image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * 2/10/16
+ *
+ *-------------------------------------------------------------------------
+ */
static herr_t
-H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
- hid_t dxpl_id, int64_t *entry_size_change_ptr)
+H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
+ hid_t dxpl_id, int64_t *entry_size_change_ptr)
{
haddr_t new_addr = HADDR_UNDEF;
haddr_t old_addr = HADDR_UNDEF;
@@ -10218,8 +10011,9 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
herr_t ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
+ /* Sanity check */
HDassert(!entry_ptr->image_up_to_date);
/* reset cache_ptr->slist_changed so we can detect slist
@@ -10231,93 +10025,91 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
old_addr = entry_ptr->addr;
/* Call client's pre-serialize callback, if there's one */
- if ( ( entry_ptr->type->pre_serialize != NULL ) &&
- ( (entry_ptr->type->pre_serialize)(f, dxpl_id,
- (void *)entry_ptr,
- entry_ptr->addr,
- entry_ptr->size,
- entry_ptr->compressed_size,
- &new_addr, &new_len,
- &new_compressed_len,
- &serialize_flags) < 0 ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "unable to pre-serialize entry");
- }
+ if(entry_ptr->type->pre_serialize &&
+ (entry_ptr->type->pre_serialize)(f, dxpl_id,
+ (void *)entry_ptr, entry_ptr->addr, entry_ptr->size,
+ entry_ptr->compressed_size, &new_addr, &new_len,
+ &new_compressed_len, &serialize_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
/* set cache_ptr->slist_change_in_pre_serialize if the
* slist was modified.
*/
- if ( cache_ptr->slist_changed )
+ if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE;
/* Check for any flags set in the pre-serialize callback */
- if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) {
+ if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
/* Check for unexpected flags from serialize callback */
- if ( serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
- H5C__SERIALIZE_MOVED_FLAG |
- H5C__SERIALIZE_COMPRESSED_FLAG)) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "unknown serialize flag(s)");
- }
+ if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
+ H5C__SERIALIZE_MOVED_FLAG |
+ H5C__SERIALIZE_COMPRESSED_FLAG))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
- if ( cache_ptr->aux_ptr != NULL )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "resize/move in serialize occured in parallel case.");
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+ * NB: While this test detects entryies that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ *
+ * From what Quincey tells me, this test is
+ * sufficient for now, as any flush routine that
+ * does the latter will also do the former.
+ *
+ * If that ceases to be the case, further
+ * tests will be necessary.
+ */
+ if(cache_ptr->aux_ptr != NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case.")
#endif
/* Resize the buffer if required */
- if ( ( ( ! entry_ptr->compressed ) &&
- ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) ) ||
- ( ( entry_ptr->compressed ) &&
- ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) ) ) {
+ if(((!entry_ptr->compressed) && (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG)) ||
+ ((entry_ptr->compressed) && (serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG))) {
size_t new_image_size;
- if ( entry_ptr->compressed )
+ if(entry_ptr->compressed)
new_image_size = new_compressed_len;
else
new_image_size = new_len;
-
HDassert(new_image_size > 0);
/* Release the current image */
- if ( entry_ptr->image_ptr ) {
+ if(entry_ptr->image_ptr)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
- }
/* Allocate a new image buffer */
- entry_ptr->image_ptr =
- H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE);
-
- if ( NULL == entry_ptr->image_ptr )
- {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
- "memory allocation failed for on disk image buffer");
- }
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
-
- HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size,
- H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
} /* end if */
/* If required, update the entry and the cache data structures
* for a resize.
*/
- if ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) {
-
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
- entry_ptr, new_len);
+ if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
/* update the hash table for the size change*/
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, \
- entry_ptr->size, \
- new_len, entry_ptr, \
- !(entry_ptr->is_dirty));
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
+ new_len, entry_ptr, !(entry_ptr->is_dirty));
/* The entry can't be protected since we are
* in the process of flushing it. Thus we must
@@ -10332,17 +10124,13 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
* be in the slist. Thus update it for the size change.
*/
HDassert(entry_ptr->in_slist);
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
- new_len);
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
/* if defined, update *entry_size_change_ptr for the
* change in entry size.
*/
- if ( entry_size_change_ptr != NULL )
- {
- *entry_size_change_ptr = (int64_t)new_len;
- *entry_size_change_ptr -= (int64_t)(entry_ptr->size);
- }
+ if(entry_size_change_ptr != NULL)
+ *entry_size_change_ptr = (int64_t)new_len - (int64_t)(entry_ptr->size);
/* finally, update the entry for its new size */
entry_ptr->size = new_len;
@@ -10359,7 +10147,7 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr);
- if ( entry_ptr->addr == old_addr ) {
+ if(entry_ptr->addr == old_addr) {
/* we must update cache data structures for the
* change in address.
*/
@@ -10391,27 +10179,26 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
cache_ptr->slist_len_increase = saved_slist_len_increase;
cache_ptr->slist_size_increase = saved_slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
- }
- else {
+ } /* end if */
+ else /* move is already done for us -- just do sanity checks */
HDassert(entry_ptr->addr == new_addr);
- }
} /* end if */
- if ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) {
+ if(serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG) {
/* just save the new compressed entry size in
* entry_ptr->compressed_size. We don't need to
* do more, as compressed size is only used for I/O.
*/
HDassert(entry_ptr->compressed);
entry_ptr->compressed_size = new_compressed_len;
- }
- } /* end if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) */
+ } /* end if */
+ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
/* Serialize object into buffer */
{
size_t image_len;
- if ( entry_ptr->compressed )
+ if(entry_ptr->compressed)
image_len = entry_ptr->compressed_size;
else
image_len = entry_ptr->size;
@@ -10420,33 +10207,25 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
* modifications in the serialize call.
*/
cache_ptr->slist_changed = FALSE;
-
- if ( entry_ptr->type->serialize(f, entry_ptr->image_ptr,
- image_len,
- (void *)entry_ptr) < 0) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "unable to serialize entry");
- }
+ if(entry_ptr->type->serialize(f, entry_ptr->image_ptr, image_len, (void *)entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
/* set cache_ptr->slist_change_in_serialize if the
* slist was modified.
*/
- if ( cache_ptr->slist_changed )
+ if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE;
#if H5C_DO_MEMORY_SANITY_CHECKS
-
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) +
- image_len,
- H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE));
-
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + image_len,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE;
- }
+ } /* end block */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_image */
+
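The new H5C__generate_image() above centralizes the serialize protocol that used to be inlined in H5C__flush_single_entry(): call the client's optional pre_serialize callback, which may report a resize, move, or new compressed size through flags; adjust the image buffer and cache bookkeeping; then call serialize to fill the image. A compact sketch of that two-phase callback shape, using hypothetical type and flag names rather than the real H5C_class_t fields:

    #include <stdlib.h>

    #define SER_RESIZED 0x1u    /* hypothetical flag: entry wants a new image length */

    typedef struct entry_class_t {
        /* may request a new image length by setting SER_RESIZED in *flags */
        int (*pre_serialize)(void *entry, size_t cur_len, size_t *new_len, unsigned *flags);
        /* writes exactly 'len' bytes of the entry's on-disk form into 'image' */
        int (*serialize)(const void *entry, void *image, size_t len);
    } entry_class_t;

    static int generate_image(const entry_class_t *cls, void *entry,
                              void **image, size_t *len)
    {
        unsigned flags = 0;
        size_t new_len = *len;

        /* phase 1: let the client adjust the size before serializing */
        if(cls->pre_serialize && cls->pre_serialize(entry, *len, &new_len, &flags) < 0)
            return -1;
        if(flags & SER_RESIZED) {                 /* grow or shrink the image buffer */
            void *tmp = realloc(*image, new_len);
            if(NULL == tmp)
                return -1;
            *image = tmp;
            *len = new_len;
        }

        /* phase 2: produce the on-disk image itself */
        return cls->serialize(entry, *image, *len);
    }

The real routine also has to fix up the index, slist and replacement-policy structures on a resize or move, which this sketch leaves out.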
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index e17aacd..4e88e44 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -43,13 +43,13 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
-#include "H5SLprivate.h" /* Skip lists */
-#ifdef H5_HAVE_PARALLEL
+#ifdef H5_HAVE_PARALLEL
/****************/
/* Local Macros */
/****************/
@@ -64,6 +64,9 @@
/********************/
/* Local Prototypes */
/********************/
+static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id,
+ H5SL_t *collective_write_list);
+static herr_t H5C__collective_write_free(void *_item, void *key, void *op_data);
/*********************/
@@ -80,6 +83,9 @@
/* Local Variables */
/*******************/
+/* Declare a free list to manage the H5C_collective_write_t struct */
+H5FL_DEFINE(H5C_collective_write_t);
+
/*-------------------------------------------------------------------------
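
The hunks above (H5FL_DEFINE here, H5FL_EXTERN in H5C.c, and H5FL_MALLOC/H5FL_FREE at the call sites) move H5C_collective_write_t allocation from H5MM_malloc/H5MM_free onto the library's free-list machinery, which recycles freed structs instead of returning them to the heap each time. A toy sketch of the underlying idea, not HDF5's actual H5FL implementation:

    #include <stdlib.h>

    /* Toy free list for one fixed-size struct type; illustration only. */
    typedef struct item_t { struct item_t *fl_next; int length; } item_t;

    static item_t *free_head = NULL;      /* singly linked list of recycled items */

    static item_t *item_alloc(void)
    {
        item_t *it = free_head;
        if(it)
            free_head = it->fl_next;      /* reuse a previously freed item */
        else
            it = malloc(sizeof(item_t));  /* fall back to the heap         */
        return it;
    }

    static void item_free(item_t *it)
    {
        it->fl_next = free_head;          /* push back onto the free list  */
        free_head = it;
    }

The payoff is less malloc/free churn on hot metadata paths, at the cost of holding a small pool of recycled nodes.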
@@ -261,7 +267,7 @@ H5C_apply_candidate_list(H5F_t * f,
/* Create skip list of entries for collective write */
if(NULL == (collective_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries")
- }
+ } /* end if */
n = num_candidates / mpi_size;
m = num_candidates % mpi_size;
@@ -376,7 +382,7 @@ H5C_apply_candidate_list(H5F_t * f,
if(TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- }
+ } /* end if */
} /* end else */
} /* end for */
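
The context lines above divide num_candidates across the MPI ranks with n = num_candidates / mpi_size and m = num_candidates % mpi_size. A tiny worked example of that split, assuming the usual "first m ranks take one extra candidate" assignment (the actual assignment loop lives in the surrounding code, which the hunk does not show):

    #include <stdio.h>

    int main(void)
    {
        int num_candidates = 10, mpi_size = 4;
        int n = num_candidates / mpi_size;   /* n = 2: every rank gets at least 2 */
        int m = num_candidates % mpi_size;   /* m = 2: two ranks get one extra    */

        /* With the assumed "first m ranks take the extra one" split this is
         * 3 + 3 + 2 + 2 = 10 candidates in total. */
        printf("n=%d m=%d\n", n, m);
        return 0;
    }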
@@ -453,10 +459,8 @@ H5C_apply_candidate_list(H5F_t * f,
* will not call either the pre_serialize or serialize callbacks.
*/
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
- H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} /* end if */
/* Else, if this process needs to flush this entry. */
@@ -500,9 +504,9 @@ H5C_apply_candidate_list(H5F_t * f,
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, collective_write_list) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.")
+ /* Add this entry to the list of entries to collectively write */
+ if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
if ( ( cache_ptr->entries_removed_counter > 1 ) ||
( cache_ptr->last_entry_removed_ptr == entry_ptr ) )
@@ -655,14 +659,12 @@ H5C_apply_candidate_list(H5F_t * f,
entries_cleared++;
#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
- (long long)clear_ptr->addr);
+ HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
+ (long long)clear_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
- H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} /* end else-if */
/* Else, if this process needs to independently flush this entry. */
@@ -677,9 +679,9 @@ H5C_apply_candidate_list(H5F_t * f,
(long long)flush_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, collective_write_list) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ /* Add this entry to the list of entries to collectively write */
+ if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} /* end else-if */
} /* end if */
@@ -712,16 +714,15 @@ H5C_apply_candidate_list(H5F_t * f,
if (delayed_ptr) {
if (delayed_ptr->clear_on_unprotect) {
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG,
- NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry collectively.")
+ if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
entry_ptr->clear_on_unprotect = FALSE;
entries_cleared++;
} else if (delayed_ptr->flush_immediately) {
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, collective_write_list) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry collectively.")
+ /* Add this entry to the list of entries to collectively write */
+ if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
entry_ptr->flush_immediately = FALSE;
entries_flushed++;
@@ -731,15 +732,15 @@ H5C_apply_candidate_list(H5F_t * f,
entries_flushed_or_cleared_last++;
} /* end if */
+ /* If we've deferred writing to do it collectively, take care of that now */
if(f->coll_md_write) {
HDassert(collective_write_list);
/* Write collective list */
- if(H5C_collective_write(f,
- dxpl_id,
- collective_write_list) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't write metadata collectively")
- }
+ if(H5C__collective_write(f, dxpl_id, collective_write_list) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
+ } /* end if */
+
/* ====================================================================== *
* Finished flushing everything. *
* ====================================================================== */
@@ -750,9 +751,9 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert((entries_flushed_collectively == entries_to_flush_collectively));
if((entries_flushed != entries_to_flush) ||
- (entries_cleared != entries_to_clear) ||
- (entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
- (entries_flushed_collectively != entries_to_flush_collectively))
+ (entries_cleared != entries_to_clear) ||
+ (entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
+ (entries_flushed_collectively != entries_to_flush_collectively))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
done:
@@ -760,7 +761,7 @@ done:
candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
if(collective_write_list)
- if(H5SL_destroy(collective_write_list, H5C_collective_write_free, NULL) < 0)
+ if(H5SL_destroy(collective_write_list, H5C__collective_write_free, NULL) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
FUNC_LEAVE_NOAPI(ret_value)
@@ -1132,7 +1133,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
if(TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- }
+ } /* end if */
entry_ptr->clear_on_unprotect = TRUE;
#if H5C_DO_SANITY_CHECKS
@@ -1194,10 +1195,8 @@ H5C_mark_entries_as_clean(H5F_t * f,
entry_ptr = entry_ptr->prev;
entries_cleared++;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
- H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} else {
entry_ptr = entry_ptr->prev;
@@ -1224,10 +1223,8 @@ H5C_mark_entries_as_clean(H5F_t * f,
entry_ptr = entry_ptr->next;
entries_cleared++;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
- H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
- NULL, NULL) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} else {
entry_ptr = entry_ptr->next;
@@ -1288,78 +1285,95 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial)
+H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- int32_t list_len, coll_entries_cleared = 0;
+ int32_t clear_cnt;
H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * prev_ptr;
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
entry_ptr = cache_ptr->coll_tail_ptr;
- list_len = cache_ptr->coll_list_len;
-
- while(entry_ptr && (coll_entries_cleared < (partial ? list_len/2 : list_len))) {
- prev_ptr = entry_ptr->coll_prev;
+ clear_cnt = (partial ? cache_ptr->coll_list_len / 2 : cache_ptr->coll_list_len);
+ while(entry_ptr && clear_cnt > 0) {
+ H5C_cache_entry_t *prev_ptr = entry_ptr->coll_prev;
+ /* Sanity check */
HDassert(entry_ptr->coll_access);
+ /* Mark entry as independent */
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- coll_entries_cleared ++;
+ /* Decrement entry count */
+ clear_cnt--;
+
+ /* Advance to next entry */
entry_ptr = prev_ptr;
- }
+ } /* end while */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_clear_coll_entries */
-herr_t
-H5C_collective_write(H5F_t *f,
- hid_t dxpl_id,
- H5SL_t *collective_write_list)
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__collective_write
+ *
+ * Purpose: Perform a collective write of a list of metadata entries.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mohamad Chaarawi
+ * February, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
{
H5P_genplist_t *plist = NULL;
- H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE;
- H5FD_mpio_xfer_t orig_xfer_mode;
- H5SL_node_t *node;
- H5C_collective_write_t *item;
+ H5FD_mpio_xfer_t orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
int count;
- void *base_buf;
int *length_array = NULL;
MPI_Aint *buf_array = NULL;
MPI_Aint *offset_array = NULL;
MPI_Datatype btype;
- MPI_Datatype ftype;
hbool_t btype_created = FALSE;
+ MPI_Datatype ftype;
hbool_t ftype_created = FALSE;
int mpi_code;
- int i;
herr_t ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT
-
- count = (int)H5SL_count(collective_write_list);
+ FUNC_ENTER_STATIC
+ /* Get original transfer mode */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
-
if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &orig_xfer_mode) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
+ /* Get number of entries in collective write list */
+ count = (int)H5SL_count(collective_write_list);
+
if(count > 0) {
+ H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE;
+ H5SL_node_t *node;
+ H5C_collective_write_t *item;
+ void *base_buf;
+ int i;
+
if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
/* Allocate arrays */
if(NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective write table length array")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective write table length array")
if(NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective buf table length array")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective buf table length array")
if(NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective offset table length array")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
/* Fill arrays */
node = H5SL_first(collective_write_list);
@@ -1367,6 +1381,7 @@ H5C_collective_write(H5F_t *f,
if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
+ /* Set up initial array position & buffer base address */
length_array[0] = (int)item->length;
base_buf = item->buf;
buf_array[0] = (MPI_Aint)0;
@@ -1378,28 +1393,31 @@ H5C_collective_write(H5F_t *f,
if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
+ /* Set up array position */
length_array[i] = (int)item->length;
buf_array[i] = (MPI_Aint)item->buf - (MPI_Aint)base_buf;
offset_array[i] = (MPI_Aint)item->offset;
+
+ /* Advance to next node & array location */
node = H5SL_next(node);
i++;
} /* end while */
- /* Create memory mpi type */
+ /* Create memory MPI type */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
btype_created = TRUE;
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- /* Create file mpi type */
+ /* Create file MPI type */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
ftype_created = TRUE;
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- /* Pass buf type, file type to the file driver. */
+ /* Pass buf type, file type to the file driver */
if(H5FD_mpi_setup_collective(dxpl_id, &btype, &ftype) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties")
@@ -1417,19 +1435,16 @@ H5C_collective_write(H5F_t *f,
mpi_fh = *(MPI_File*)mpi_fh_p;
/* just to match up with the 1st MPI_File_set_view from H5FD_mpio_write() */
- if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE,
- MPI_BYTE, "native", MPI_INFO_NULL)))
+ if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
/* just to match up with MPI_File_write_at_all from H5FD_mpio_write() */
HDmemset(&mpi_stat, 0, sizeof(MPI_Status));
- if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0,
- MPI_BYTE, &mpi_stat)))
+ if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, MPI_BYTE, &mpi_stat)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code)
/* just to match up with the 2nd MPI_File_set_view (reset) in H5FD_mpio_write() */
- if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE,
- MPI_BYTE, "native", MPI_INFO_NULL)))
+ if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
} /* end else */
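
H5C__collective_write() above converts the per-entry (length, buffer, file offset) triples collected in the skip list into two MPI_Type_create_hindexed datatypes, one describing memory and one describing the file, so all of the dirty metadata goes out in a single collective MPI_File_write_at_all(). A self-contained sketch of that datatype construction with two fake entries; the file name, sizes and offsets are made up:

    #include <mpi.h>

    int main(int argc, char **argv)
    {
        char buf_a[64] = {0}, buf_b[128] = {0};         /* two fake metadata images */
        int      lengths[2]   = { 64, 128 };
        MPI_Aint mem_disp[2], file_disp[2] = { 0, 4096 };  /* fake file offsets     */
        MPI_Datatype btype, ftype;
        MPI_File fh;
        MPI_Status status;

        MPI_Init(&argc, &argv);

        /* memory displacements are relative to the first buffer, as in the hunk */
        MPI_Get_address(buf_a, &mem_disp[0]);
        MPI_Get_address(buf_b, &mem_disp[1]);
        mem_disp[1] -= mem_disp[0];
        mem_disp[0]  = 0;

        MPI_Type_create_hindexed(2, lengths, mem_disp, MPI_BYTE, &btype);
        MPI_Type_commit(&btype);
        MPI_Type_create_hindexed(2, lengths, file_disp, MPI_BYTE, &ftype);
        MPI_Type_commit(&ftype);

        MPI_File_open(MPI_COMM_WORLD, "scratch.bin",
                      MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
        MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
        MPI_File_write_at_all(fh, 0, buf_a, 1, btype, &status);  /* one collective write */
        MPI_File_close(&fh);

        MPI_Type_free(&btype);
        MPI_Type_free(&ftype);
        MPI_Finalize();
        return 0;
    }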
@@ -1453,23 +1468,37 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5C_collective_write() */
+} /* end H5C__collective_write() */
-herr_t
-H5C_collective_write_free(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__collective_write_free
+ *
+ * Purpose: Release node on collective write skiplist
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mohamad Chaarawi
+ * February, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__collective_write_free(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
{
H5C_collective_write_t *item = (H5C_collective_write_t *)_item;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
- HDassert(item);
+ /* Sanity check */
+ HDassert(item);
if(item->free_buf)
item->buf = H5MM_xfree(item->buf);
- /*!FIXME change to use free list for items */
- H5MM_free(item);
+ H5FL_FREE(H5C_collective_write_t, item);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5C_collective_write_free() */
+} /* end H5C__collective_write_free() */
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 2f4d137..08be52b 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -3133,11 +3133,11 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->coll_list_size, \
(fail_val)) \
\
- H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
- (cache_ptr)->coll_tail_ptr, \
- (cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (fail_val)) \
+ H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
+ (cache_ptr)->coll_tail_ptr, \
+ (cache_ptr)->coll_list_len, \
+ (cache_ptr)->coll_list_size, \
+ (fail_val)) \
\
} /* H5C__MOVE_TO_TOP_IN_COLL_LIST */
#endif /* H5_HAVE_PARALLEL */
@@ -4254,6 +4254,7 @@ typedef struct H5C_collective_write_t {
} H5C_collective_write_t;
#endif /* H5_HAVE_PARALLEL */
+
/*****************************/
/* Package Private Variables */
/*****************************/
@@ -4264,10 +4265,6 @@ typedef struct H5C_collective_write_t {
/******************************/
H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
H5C_cache_entry_t *entry_ptr, unsigned flags, int64_t *entry_size_change_ptr, H5SL_t *collective_write_list);
-#ifdef H5_HAVE_PARALLEL
-H5_DLL herr_t H5C_collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list);
-H5_DLL herr_t H5C_collective_write_free(void *_item, void *key, void *op_data);
-#endif /* H5_HAVE_PARALLEL */
#endif /* _H5Cpkg_H */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 5ad026b..5502e9f 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -1609,7 +1609,6 @@ typedef struct H5C_cache_entry_t {
hbool_t clear_on_unprotect;
hbool_t flush_immediately;
hbool_t coll_access;
- hbool_t ind_access_while_coll;
#endif /* H5_HAVE_PARALLEL */
hbool_t flush_in_progress;
hbool_t destroy_in_progress;
@@ -1633,8 +1632,10 @@ typedef struct H5C_cache_entry_t {
struct H5C_cache_entry_t * prev;
struct H5C_cache_entry_t * aux_next;
struct H5C_cache_entry_t * aux_prev;
+#ifdef H5_HAVE_PARALLEL
struct H5C_cache_entry_t * coll_next;
struct H5C_cache_entry_t * coll_prev;
+#endif /* H5_HAVE_PARALLEL */
#if H5C_COLLECT_CACHE_ENTRY_STATS
/* cache entry stats fields */
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 37e48ba..a17e035 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -45,7 +45,7 @@
/****************/
#include "H5Dmodule.h" /* This source code file is part of the H5D module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+
/***********/
/* Headers */
@@ -56,7 +56,7 @@
#endif /* H5_HAVE_PARALLEL */
#include "H5Dpkg.h" /* Dataset functions */
#include "H5Eprivate.h" /* Error handling */
-#include "H5Fpkg.h" /* File functions */
+#include "H5Fprivate.h" /* File functions */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
@@ -2652,7 +2652,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
if(!H5D__chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) {
H5D_chk_idx_info_t idx_info; /* Chunked index info */
#ifdef H5_HAVE_PARALLEL
- H5P_coll_md_read_flag_t temp_flag; /* temp flag to hold the coll metadata read setting */
+ H5P_coll_md_read_flag_t temp_cmr; /* Temp value to hold the coll metadata read setting */
#endif /* H5_HAVE_PARALLEL */
/* Compose chunked index info struct */
@@ -2668,9 +2668,9 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
as it is highly unlikely that users would read the
same chunks from all processes. MSC - might turn on
for root node? */
- temp_flag = idx_info.f->coll_md_read;
- idx_info.f->coll_md_read = H5P_FORCE_FALSE;
- }
+ temp_cmr = H5F_COLL_MD_READ(idx_info.f);
+ H5F_set_coll_md_read(idx_info.f, H5P_FORCE_FALSE);
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
/* Go get the chunk information */
@@ -2678,9 +2678,8 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address")
#ifdef H5_HAVE_PARALLEL
- if(H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) {
- idx_info.f->coll_md_read = temp_flag;
- }
+ if(H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI))
+ H5F_set_coll_md_read(idx_info.f, temp_cmr);
#endif /* H5_HAVE_PARALLEL */
/* Cache the information retrieved */
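
The H5Dchunk.c change above now uses the H5F_COLL_MD_READ() macro and the new H5F_set_coll_md_read() setter instead of reaching into f->coll_md_read directly, but the shape is unchanged: save the current setting, force it off around the chunk-index lookup, restore it afterwards. A generic sketch of that save/force/restore idiom; the accessor and lookup names are hypothetical:

    struct file;                                   /* opaque; illustration only */
    typedef enum { FORCE_FALSE = -1, USER_FALSE = 0, USER_TRUE = 1 } cmr_flag_t;

    cmr_flag_t file_get_cmr(const struct file *f); /* hypothetical accessors */
    void       file_set_cmr(struct file *f, cmr_flag_t v);
    int        do_index_lookup(struct file *f);    /* hypothetical lookup    */

    int lookup_chunk(struct file *f)
    {
        cmr_flag_t saved = file_get_cmr(f);  /* remember the caller's setting    */
        int ret;

        file_set_cmr(f, FORCE_FALSE);        /* force independent metadata reads */
        ret = do_index_lookup(f);
        file_set_cmr(f, saved);              /* always restore afterwards        */
        return ret;
    }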
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index 1753917..82545c1 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -141,8 +141,7 @@ H5Dcreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not dataset create property list ID")
/* Build and open the new dataset */
- if(NULL == (dset = H5D__create_named(&loc, name, type_id, space, H5P_LINK_CREATE_DEFAULT,
- dcpl_id, H5P_DATASET_ACCESS_DEFAULT, H5AC_dxpl_id)))
+ if(NULL == (dset = H5D__create_named(&loc, name, type_id, space, H5P_LINK_CREATE_DEFAULT, dcpl_id, H5P_DATASET_ACCESS_DEFAULT, H5AC_dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset")
/* Register the new dataset to get an ID for it */
diff --git a/src/H5Fint.c b/src/H5Fint.c
index ea6c9c1..063b65f 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -2110,3 +2110,33 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__set_eoa() */
+#ifdef H5_HAVE_PARALLEL
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_set_coll_md_read
+ *
+ * Purpose: Set the coll_md_read field with a new value.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * 2/10/16
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t cmr)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+
+ f->coll_md_read = cmr;
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5F_set_coll_md_read() */
+#endif /* H5_HAVE_PARALLEL */
+
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 7decaed..52649e5 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -27,6 +27,9 @@
#include "H5FDpublic.h" /* File drivers */
/* Private headers needed by this file */
+#ifdef H5_HAVE_PARALLEL
+#include "H5Pprivate.h" /* Property lists */
+#endif /* H5_HAVE_PARALLEL */
#include "H5VMprivate.h" /* Vectors and arrays */
@@ -312,6 +315,9 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (((F)->shared->grp_btree_shared = (RC)) ? SUCCEED : FAIL)
#define H5F_USE_TMP_SPACE(F) ((F)->shared->use_tmp_space)
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->tmp_addr, (ADDR)))
+#ifdef H5_HAVE_PARALLEL
+#define H5F_COLL_MD_READ(F) ((F)->coll_md_read)
+#endif /* H5_HAVE_PARALLEL */
#else /* H5F_MODULE */
#define H5F_INTENT(F) (H5F_get_intent(F))
#define H5F_OPEN_NAME(F) (H5F_get_open_name(F))
@@ -354,6 +360,9 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (H5F_set_grp_btree_shared((F), (RC)))
#define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F))
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR)))
+#ifdef H5_HAVE_PARALLEL
+#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F))
+#endif /* H5_HAVE_PARALLEL */
#endif /* H5F_MODULE */
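
The H5Fprivate.h hunks above follow the library's usual accessor convention: inside the H5F package (H5F_MODULE defined) H5F_COLL_MD_READ(F) expands to a direct struct access, while other packages get the H5F_coll_md_read() function call and never need the struct layout. A compact sketch of that dual-definition pattern with hypothetical names:

    struct foo;                                    /* opaque outside the package     */

    /* In the package-private header; FOO_MODULE is defined only by the package's
     * own source files (all names here are hypothetical). */
    #ifdef FOO_MODULE
    #  define FOO_GET_FLAG(F)   ((F)->flag)        /* fast path: direct field access */
    #else
    #  define FOO_GET_FLAG(F)   (foo_get_flag(F))  /* other packages call the getter */
    #endif

    int foo_get_flag(const struct foo *f);         /* out-of-line getter; its .c body
                                                      is essentially { return f->flag; } */

Callers outside the package pay one function call but stay insulated from the struct layout; the package itself keeps the zero-cost field access.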
@@ -639,6 +648,10 @@ H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f);
H5_DLL herr_t H5F_set_grp_btree_shared(H5F_t *f, struct H5UC_t *rc);
H5_DLL hbool_t H5F_use_tmp_space(const H5F_t *f);
H5_DLL hbool_t H5F_is_tmp_addr(const H5F_t *f, haddr_t addr);
+#ifdef H5_HAVE_PARALLEL
+H5_DLL H5P_coll_md_read_flag_t H5F_coll_md_read(const H5F_t *f);
+H5_DLL void H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t flag);
+#endif /* H5_HAVE_PARALLEL */
/* Functions that retrieve values from VFD layer */
H5_DLL hid_t H5F_get_driver_id(const H5F_t *f);
diff --git a/src/H5Fquery.c b/src/H5Fquery.c
index 75fc216..e9af300 100644
--- a/src/H5Fquery.c
+++ b/src/H5Fquery.c
@@ -1073,3 +1073,31 @@ H5F_use_tmp_space(const H5F_t *f)
FUNC_LEAVE_NOAPI(f->shared->use_tmp_space)
} /* end H5F_use_tmp_space() */
+#ifdef H5_HAVE_PARALLEL
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_coll_md_read
+ *
+ * Purpose: Retrieve the 'collective metadata reads' flag for the file.
+ *
+ * Return: Success: Non-negative, the 'collective metadata reads' flag
+ * Failure: (can't happen)
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Feb 10 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+H5P_coll_md_read_flag_t
+H5F_coll_md_read(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+
+ FUNC_LEAVE_NOAPI(f->coll_md_read)
+} /* end H5F_coll_md_read() */
+#endif /* H5_HAVE_PARALLEL */
+
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index 535e34b..9353094 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -179,7 +179,6 @@
#ifdef H5_DEBUG_BUILD
/* dxpl I/O type - private property */
#define H5FD_DXPL_TYPE_SIZE sizeof(H5FD_dxpl_type_t)
-#define H5FD_DXPL_TYPE_DEF H5FD_NOIO_DXPL
#endif /* H5_DEBUG_BUILD */
#ifdef H5_HAVE_PARALLEL
/* Definition for reading metadata collectively */
@@ -187,7 +186,9 @@
#define H5D_XFER_COLL_MD_READ_DEF H5P_USER_FALSE
#define H5D_XFER_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t
#define H5D_XFER_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t
-#endif H5_HAVE_PARALLEL
+#endif /* H5_HAVE_PARALLEL */
+
+
/******************/
/* Local Typedefs */
/******************/
diff --git a/src/H5Pint.c b/src/H5Pint.c
index 3c145c5..a222082 100644
--- a/src/H5Pint.c
+++ b/src/H5Pint.c
@@ -29,7 +29,9 @@
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
-#include "H5ACprivate.h" /* Metadata cache */
+#ifdef H5_HAVE_PARALLEL
+#include "H5ACprivate.h" /* Metadata cache */
+#endif /* H5_HAVE_PARALLEL */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File access */
#include "H5FLprivate.h" /* Free lists */
@@ -5452,7 +5454,15 @@ H5P_get_class(const H5P_genplist_t *plist)
*/
herr_t
H5P_verify_apl_and_dxpl(hid_t *acspl_id, const H5P_libclass_t *libclass, hid_t *dxpl_id,
- hid_t loc_id, hbool_t is_collective)
+ hid_t
+#ifndef H5_HAVE_PARALLEL
+ H5_ATTR_UNUSED
+#endif /* H5_HAVE_PARALLEL */
+ loc_id, hbool_t
+#ifndef H5_HAVE_PARALLEL
+ H5_ATTR_UNUSED
+#endif /* H5_HAVE_PARALLEL */
+ is_collective)
{
herr_t ret_value = SUCCEED; /* Return value */
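
In the H5Pint.c hunk above, loc_id and is_collective are only consulted in parallel builds, so each one is wrapped in H5_ATTR_UNUSED when H5_HAVE_PARALLEL is not defined, which keeps -Wunused-parameter quiet in serial builds without changing the signature. A small sketch of the idiom; ATTR_UNUSED and HAVE_FEATURE are stand-in names:

    /* Stand-in for H5_ATTR_UNUSED: expands to the GCC/Clang attribute when
     * available, and to nothing otherwise. */
    #if defined(__GNUC__) || defined(__clang__)
    #  define ATTR_UNUSED __attribute__((unused))
    #else
    #  define ATTR_UNUSED
    #endif

    static int
    check_access(int
    #ifndef HAVE_FEATURE
                 ATTR_UNUSED
    #endif
                 flag)
    {
    #ifdef HAVE_FEATURE
        return flag ? 1 : 0;   /* parameter is only used when the feature is built in */
    #else
        return 0;              /* feature disabled: parameter intentionally unused    */
    #endif
    }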
@@ -5495,8 +5505,6 @@ H5P_verify_apl_and_dxpl(hid_t *acspl_id, const H5P_libclass_t *libclass, hid_t *
if(TRUE != H5P_isa_class(*acspl_id, *libclass->class_id))
HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not the required access property list")
- *dxpl_id = H5AC_ind_read_dxpl_id;
-
#ifdef H5_HAVE_PARALLEL
/* Get the plist structure for the access property list */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(*acspl_id)))
diff --git a/src/H5Plapl.c b/src/H5Plapl.c
index cba2f88..f2711e8 100644
--- a/src/H5Plapl.c
+++ b/src/H5Plapl.c
@@ -94,6 +94,7 @@
#define H5L_ACS_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t
#define H5L_ACS_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t
+
/******************/
/* Local Typedefs */
/******************/
@@ -222,8 +223,7 @@ H5P__lacc_reg_prop(H5P_genclass_t *pclass)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
/* Register the metadata collective read flag */
- if(H5P_register_real(pclass, H5_COLL_MD_READ_FLAG_NAME, H5L_ACS_COLL_MD_READ_SIZE,
- &H5L_def_coll_md_read_g,
+ if(H5P_register_real(pclass, H5_COLL_MD_READ_FLAG_NAME, H5L_ACS_COLL_MD_READ_SIZE, &H5L_def_coll_md_read_g,
NULL, NULL, NULL, H5L_ACS_COLL_MD_READ_ENC, H5L_ACS_COLL_MD_READ_DEC,
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
diff --git a/src/H5Pprivate.h b/src/H5Pprivate.h
index be2bceb..29fb919 100644
--- a/src/H5Pprivate.h
+++ b/src/H5Pprivate.h
@@ -41,13 +41,13 @@
#define H5P_CLASS(P) (H5P_get_class(P))
#endif /* H5P_MODULE */
+#define H5_COLL_MD_READ_FLAG_NAME "collective_metadata_read"
+
/****************************/
/* Library Private Typedefs */
/****************************/
-#define H5_COLL_MD_READ_FLAG_NAME "collective_metadata_read"
-
typedef enum H5P_coll_md_read_flag_t {
H5P_FORCE_FALSE = -1,
H5P_USER_FALSE = 0,
diff --git a/src/H5R.c b/src/H5R.c
index f0aafc3..bc49364 100644
--- a/src/H5R.c
+++ b/src/H5R.c
@@ -611,10 +611,10 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *_r
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference type")
if(_ref == NULL)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference pointer")
-
+
/* Verify access property list and get correct dxpl */
if(H5P_verify_apl_and_dxpl(&oapl_id, H5P_CLS_DACC, &dxpl_id, obj_id, FALSE) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set access and transfer property lists")
+ HGOTO_ERROR(H5E_REFERENCE, H5E_CANTSET, FAIL, "can't set access and transfer property lists")
/* Get the file pointer from the entry */
file = loc.oloc->file;