-rw-r--r--  src/H5B.c                    |  722
-rw-r--r--  src/H5Bcache.c               |   68
-rw-r--r--  src/H5Bpkg.h                 |    4
-rw-r--r--  src/H5Bprivate.h             |   26
-rw-r--r--  src/H5Dbtree.c               |  386
-rw-r--r--  src/H5Dchunk.c               |  121
-rw-r--r--  src/H5Dearray.c              |   35
-rw-r--r--  src/H5Dpkg.h                 |   16
-rw-r--r--  src/H5Dproxy.c               |   96
-rw-r--r--  src/H5Gnode.c                |    2
-rw-r--r--  src/H5Gstab.c                |   34
-rw-r--r--  src/H5Gtest.c                |    5
-rw-r--r--  src/H5Ostab.c                |    2
-rw-r--r--  test/Makefile.am             |    3
-rw-r--r--  test/Makefile.in             |   46
-rw-r--r--  test/swmr_addrem_writer.c    |  322
-rw-r--r--  test/swmr_generator.c        |   54
-rw-r--r--  test/swmr_reader.c           |    8
-rw-r--r--  test/swmr_remove_reader.c    |  366
-rw-r--r--  test/swmr_remove_writer.c    |  243
-rw-r--r--  test/swmr_sparse_reader.c    |  319
-rw-r--r--  test/swmr_sparse_writer.c    |  345
-rw-r--r--  test/swmr_writer.c           |   24
-rwxr-xr-x  test/testswmr.sh             |  276
24 files changed, 3208 insertions, 315 deletions
diff --git a/src/H5B.c b/src/H5B.c
index 40b221d..61c5856 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -111,7 +111,6 @@
#include "H5MFprivate.h" /* File memory management */
#include "H5Pprivate.h" /* Property lists */
-
/****************/
/* Local Macros */
/****************/
@@ -154,12 +153,15 @@ static H5B_ins_t H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
hbool_t *rt_key_changed,
H5B_ins_ud_t *split_bt_ud/*out*/);
static herr_t H5B_insert_child(H5B_t *bt, unsigned *bt_flags,
- unsigned idx, haddr_t child,
+ unsigned *idx, haddr_t child,
H5B_ins_t anchor, const void *md_key);
static herr_t H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
unsigned idx, void *udata,
H5B_ins_ud_t *split_bt_ud/*out*/);
static H5B_t * H5B_copy(const H5B_t *old_bt);
+static herr_t H5B_find_node(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
+ haddr_t addr, void *udata, void *parent,
+ H5B_t **node);
/*********************/
@@ -216,7 +218,7 @@ H5FL_SEQ_DEFINE_STATIC(size_t);
*/
herr_t
H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, void *udata,
- haddr_t *addr_p/*out*/)
+ void *parent, haddr_t *addr_p/*out*/)
{
H5B_t *bt = NULL;
H5B_shared_t *shared=NULL; /* Pointer to shared B-tree info */
@@ -249,6 +251,7 @@ H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, void *udata,
if(NULL == (bt->native = H5FL_BLK_MALLOC(native_block, shared->sizeof_keys)) ||
NULL == (bt->child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for B-tree root node")
+ bt->parent = parent;
if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)shared->sizeof_rnode)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "file allocation failed for B-tree root node")
@@ -301,7 +304,8 @@ done:
*-------------------------------------------------------------------------
*/
htri_t
-H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *udata)
+H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent)
{
H5B_t *bt = NULL;
H5RC_t *rc_shared; /* Ref-counted shared info */
@@ -335,6 +339,7 @@ H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *u
*/
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
@@ -358,7 +363,7 @@ H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *u
HDassert(idx < bt->nchildren);
if(bt->level > 0) {
- if((ret_value = H5B_find(f, dxpl_id, type, bt->child[idx], udata)) < 0)
+ if((ret_value = H5B_find(f, dxpl_id, type, bt->child[idx], udata, bt)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in subtree")
} /* end if */
else {
@@ -375,6 +380,109 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5B_find_node
+ *
+ * Purpose: Locate the B-tree node containing the item specified in
+ * UDATA, if present. If found, the B-tree node will be
+ * pinned on return and must be unpinned after the caller is
+ * done using it. If not found, *node will be set to NULL.
+ *
+ * Return: Non-negative on success. Negative on failure.
+ *
+ * Programmer: Neil Fortner
+ * Aug 18 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B_find_node(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent, H5B_t **node)
+{
+ H5B_t *bt = NULL;
+ H5RC_t *rc_shared; /* Ref-counted shared info */
+ H5B_shared_t *shared; /* Pointer to shared B-tree info */
+ H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */
+ unsigned idx = 0, lt = 0, rt; /* Final, left & right key indices */
+ int cmp = 1; /* Key comparison value */
+ htri_t found; /* Whether the correct node has been found */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5B_find_node, FAIL)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(f);
+ HDassert(type);
+ HDassert(type->decode);
+ HDassert(type->cmp3);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Get shared info for B-tree */
+ if(NULL == (rc_shared = (type->get_shared)(f, udata)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object")
+ shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared);
+ HDassert(shared);
+
+ /*
+ * Perform a binary search to locate the child which contains
+ * the thing for which we're searching.
+ */
+ cache_udata.f = f;
+ cache_udata.type = type;
+ cache_udata.parent = parent;
+ cache_udata.rc_shared = rc_shared;
+ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
+
+ rt = bt->nchildren;
+ while(lt < rt && cmp) {
+ idx = (lt + rt) / 2;
+ /* compare */
+ if((cmp = (type->cmp3)(H5B_NKEY(bt, shared, idx), udata, H5B_NKEY(bt, shared, (idx + 1)))) < 0)
+ rt = idx;
+ else
+ lt = idx + 1;
+ } /* end while */
+ /* Check if not found */
+ if(cmp)
+ *node = NULL;
+ else {
+ /*
+ * Follow the link to the subtree, or return the leaf node.
+ */
+ HDassert(idx < bt->nchildren);
+
+ if(bt->level > 0) {
+ if(H5B_find_node(f, dxpl_id, type, bt->child[idx], udata, bt, node) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in subtree")
+ } /* end if */
+ else {
+ /* Check if this is really the correct child */
+ if((found = (type->found)(f, dxpl_id, bt->child[idx], H5B_NKEY(bt, shared, idx), udata)) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in leaf node")
+
+ if(found) {
+ /* Return this leaf node, pinned */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__PIN_ENTRY_FLAG) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release node")
+ *node = bt;
+ bt = NULL;
+ } /* end if */
+ else
+ *node = NULL;
+ } /* end else */
+ } /* end else */
+
+done:
+ if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release node")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B_find_node() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B_split
*
* Purpose: Split a single node into two nodes. The old node will
@@ -405,6 +513,7 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */
unsigned nleft, nright; /* Number of keys in left & right halves */
double split_ratios[3]; /* B-tree split ratios */
+ hbool_t bt_pinned = FALSE;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5B_split)
@@ -480,10 +589,11 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
/*
* Create the new B-tree node.
*/
- if(H5B_create(f, dxpl_id, shared->type, udata, &split_bt_ud->addr/*out*/) < 0)
+ if(H5B_create(f, dxpl_id, shared->type, udata, bt_ud->bt->parent, &split_bt_ud->addr/*out*/) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create B-tree")
cache_udata.f = f;
cache_udata.type = shared->type;
+ cache_udata.parent = bt_ud->bt->parent;
cache_udata.rc_shared = bt_ud->bt->rc_shared;
if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree")
@@ -509,29 +619,117 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
bt_ud->bt->nchildren = nleft;
+ /* Actions to take if swmr writes are on */
+ if(shared->swmr_write) {
+ haddr_t new_bt_addr = HADDR_UNDEF;
+ unsigned i;
+
+ /*
+ * We must clone the old btree so readers with an out-of-date version
+ * of the parent can still see all its children, via the shadowed
+ * non-split bt. Remove it from cache but do not mark it free on disk.
+ */
+ /* Allocate space for the cloned child */
+ H5_CHECK_OVERFLOW(shared->sizeof_rnode,size_t,hsize_t);
+ if(HADDR_UNDEF == (new_bt_addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)shared->sizeof_rnode)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move b-tree")
+
+ /* Pin old entry so it is not flushed when we unprotect */
+ if(H5AC_pin_protected_entry(bt_ud->bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPIN, FAIL, "unable to pin old b-tree node")
+ bt_pinned = TRUE;
+
+ /* Unprotect bt so we can move it. Also, note that it will be marked
+ * dirty so it will be written to the new location. */
+ HDassert(bt_ud->cache_flags & H5AC__DIRTIED_FLAG);
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud->addr, bt_ud->bt, bt_ud->cache_flags) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release old b-tree")
+ bt_ud->cache_flags = H5AC__NO_FLAGS_SET;
+
+ /* Move the location of the old child on the disk */
+ if(H5AC_move_entry(f, H5AC_BT, bt_ud->addr, new_bt_addr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to move B-tree root node")
+ bt_ud->addr = new_bt_addr;
+
+ /* Re-protect bt at new address */
+ if(bt_ud->bt != H5AC_protect(f, dxpl_id, H5AC_BT, new_bt_addr, &cache_udata, H5AC_WRITE))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node")
+
+ /*
+ * Update flush dependencies for children that moved to the new node
+ */
+ if(bt_ud->bt->level > 0) {
+ H5B_t *child;
+
+ for(i=0; i<nright; i++) {
+ /* Protect child b-tree node */
+ cache_udata.parent = split_bt_ud->bt;
+ if(NULL == (child = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->bt->child[i], &cache_udata, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
+
+ /* Update the flush dependency, if necessary */
+ HDassert(child->parent);
+ if(child->parent == bt_ud->bt) {
+ child->parent = split_bt_ud->bt;
+ if(H5AC_destroy_flush_dependency(bt_ud->bt, child) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5AC_create_flush_dependency(child->parent, child) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+ else
+ HDassert(child->parent == split_bt_ud->bt);
+
+ /* Unprotect the child */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, split_bt_ud->bt->child[i], child, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end for */
+ } /* end if */
+ else {
+ /* At leaf node, delegate to client */
+ HDassert(shared->type->update_flush_dep);
+ for(i=0; i<nright; i++) {
+ if((shared->type->update_flush_dep)(H5B_NKEY(split_bt_ud->bt, shared, i), udata, bt_ud->bt, split_bt_ud->bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to update flush dependency")
+ } /* end for */
+ } /* end else */
+
+ /*
+ * Update left sibling to point to new bt. Only necessary when doing
+ * swmr writes as otherwise the address of bt doesn't change.
+ */
+ if(H5F_addr_defined(bt_ud->bt->left)) {
+ H5B_t *tmp_bt;
+
+ if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->left, &cache_udata, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load left sibling")
+
+ tmp_bt->right = bt_ud->addr;
+
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud->bt->left, tmp_bt, H5AC__DIRTIED_FLAG) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+ } /* end if */
+
/*
- * Update sibling pointers.
+ * Update other sibling pointers.
*/
split_bt_ud->bt->left = bt_ud->addr;
split_bt_ud->bt->right = bt_ud->bt->right;
if(H5F_addr_defined(bt_ud->bt->right)) {
- H5B_t *tmp_bt;
- H5B_cache_ud_t cache_udata2; /* User-data for metadata cache callback */
+ H5B_t *tmp_bt;
- cache_udata2.f = f;
- cache_udata2.type = shared->type;
- cache_udata2.rc_shared = bt_ud->bt->rc_shared;
- if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata2, H5AC_WRITE)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load right sibling")
+ if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load right sibling")
- tmp_bt->left = split_bt_ud->addr;
+ tmp_bt->left = split_bt_ud->addr;
if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, tmp_bt, H5AC__DIRTIED_FLAG) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
} /* end if */
bt_ud->bt->right = split_bt_ud->addr;
+ bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
done:
if(ret_value < 0) {
@@ -539,6 +737,13 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
split_bt_ud->bt = NULL;
split_bt_ud->addr = HADDR_UNDEF;
+ split_bt_ud->cache_flags = H5AC__NO_FLAGS_SET;
+ } /* end if */
+
+ if(bt_pinned) {
+ HDassert(shared->swmr_write);
+ if(H5AC_unpin_entry(bt_ud->bt) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin old b-tree node")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
@@ -560,7 +765,7 @@ done:
*/
herr_t
H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
- void *udata)
+ void *udata, void *parent)
{
/*
* These are defined this way to satisfy alignment constraints.
@@ -580,6 +785,8 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
H5B_shared_t *shared; /* Pointer to shared B-tree info */
H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */
H5B_ins_t my_ins = H5B_INS_ERROR;
+ hbool_t bt_protected = FALSE;
+ hbool_t nrbt_pinned = FALSE; /* TRUE if new_root_bt is pinned */
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(H5B_insert, FAIL)
@@ -599,20 +806,27 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
/* Protect the root node */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
bt_ud.addr = addr;
if(NULL == (bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to locate root of B-tree")
+ bt_protected = TRUE;
/* Insert the object */
if((int)(my_ins = H5B_insert_helper(f, dxpl_id, &bt_ud, type, lt_key,
&lt_key_changed, md_key, udata, rt_key, &rt_key_changed,
&split_bt_ud/*out*/)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to insert key")
+
+ /* Check if the root node split */
if(H5B_INS_NOOP == my_ins) {
+ /* The root node did not split - nothing further to do, so just exit */
HDassert(!split_bt_ud.bt);
HGOTO_DONE(SUCCEED)
} /* end if */
+
HDassert(H5B_INS_RIGHT == my_ins);
HDassert(split_bt_ud.bt);
HDassert(H5F_addr_defined(split_bt_ud.addr));
@@ -631,33 +845,33 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
* at the old root's previous address. This prevents the B-tree from
* "moving".
*/
- H5_CHECK_OVERFLOW(shared->sizeof_rnode,size_t,hsize_t);
- if(HADDR_UNDEF == (old_root_addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)shared->sizeof_rnode)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move root")
-
- /*
- * Move the node to the new location
- */
+ /* Note that this is not necessary if swmr writes are on, as H5B_split
+ * already moved the node in this case */
+ if(!shared->swmr_write) {
+ H5_CHECK_OVERFLOW(shared->sizeof_rnode,size_t,hsize_t);
+ if(HADDR_UNDEF == (old_root_addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)shared->sizeof_rnode)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move root")
+
+ /* Unprotect the old root so we can move it. Also force it to be marked
+ * dirty so it is written to the new location. */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud.addr, bt_ud.bt, H5AC__DIRTIED_FLAG) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release old root")
+ bt_protected = FALSE;
+
+ /* Move the location of the old root on the disk */
+ if(H5AC_move_entry(f, H5AC_BT, bt_ud.addr, old_root_addr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to move B-tree root node")
+ bt_ud.addr = old_root_addr;
+
+ /* Update the split b-tree's left pointer to point to the new location */
+ split_bt_ud.bt->left = bt_ud.addr;
+ split_bt_ud.cache_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
/* Make a copy of the old root information */
if(NULL == (new_root_bt = H5B_copy(bt_ud.bt)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to copy old root");
- /* Unprotect the old root so we can move it. Also force it to be marked
- * dirty so it is written to the new location. */
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud.addr, bt_ud.bt, H5AC__DIRTIED_FLAG) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release old root")
- bt_ud.bt = NULL; /* Make certain future references will be caught */
-
- /* Move the location of the old root on the disk */
- if(H5AC_move_entry(f, H5AC_BT, bt_ud.addr, old_root_addr) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to move B-tree root node")
- bt_ud.addr = old_root_addr;
-
- /* Update the split b-tree's left pointer to point to the new location */
- split_bt_ud.bt->left = bt_ud.addr;
- split_bt_ud.cache_flags |= H5AC__DIRTIED_FLAG;
-
/* clear the old root info at the old address (we already copied it) */
new_root_bt->left = HADDR_UNDEF;
new_root_bt->right = HADDR_UNDEF;
@@ -673,23 +887,61 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
HDmemcpy(H5B_NKEY(new_root_bt, shared, 1), md_key, shared->type->sizeof_nkey);
HDmemcpy(H5B_NKEY(new_root_bt, shared, 2), rt_key, shared->type->sizeof_nkey);
- /* Insert the modified copy of the old root into the file again */
- if(H5AC_insert_entry(f, dxpl_id, H5AC_BT, addr, new_root_bt, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to add old B-tree root node to cache")
+ /* Insert the modified copy of the old root into the file again, and pin if
+ * doing swmr writes */
+ if(shared->swmr_write) {
+ if(H5AC_insert_entry(f, dxpl_id, H5AC_BT, addr, new_root_bt, H5AC__PIN_ENTRY_FLAG) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to add new B-tree root node to cache")
+ nrbt_pinned = TRUE;
+
+ /* Set up flush dependencies */
+ HDassert(parent);
+ HDassert(bt_ud.bt->parent == parent);
+ if(H5AC_destroy_flush_dependency(parent, bt_ud.bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5AC_create_flush_dependency(new_root_bt, bt_ud.bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ bt_ud.bt->parent = new_root_bt;
+
+ HDassert(split_bt_ud.bt->parent == parent);
+ if(H5AC_destroy_flush_dependency(parent, split_bt_ud.bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5AC_create_flush_dependency(new_root_bt, split_bt_ud.bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ split_bt_ud.bt->parent = new_root_bt;
+
+ HDassert(new_root_bt->parent == parent);
+ } /* end if */
+ else {
+ if(H5AC_insert_entry(f, dxpl_id, H5AC_BT, addr, new_root_bt, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to add new B-tree root node to cache")
-done:
- if(ret_value < 0)
- if(new_root_bt && H5B_node_dest(new_root_bt) < 0)
- HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, FAIL, "unable to free B-tree root node");
+ /* Set new_root_bt to NULL; it is now owned by the cache (neither pinned
+ * nor protected) and must not be freed here. */
+ new_root_bt = NULL;
+ } /* end else */
- if(bt_ud.bt)
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud.addr, bt_ud.bt, bt_ud.cache_flags) < 0)
+done:
+ if(bt_protected)
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt_ud.addr, bt_ud.bt,
+ bt_ud.cache_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect old root")
if(split_bt_ud.bt)
if(H5AC_unprotect(f, dxpl_id, H5AC_BT, split_bt_ud.addr, split_bt_ud.bt, split_bt_ud.cache_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to unprotect new child")
+ if(nrbt_pinned) {
+ HDassert(shared->swmr_write);
+ if(H5AC_unpin_entry(new_root_bt) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin new root")
+ } /* end if */
+ else if(new_root_bt) {
+ HDassert(ret_value < 0);
+ if(H5B_node_dest(new_root_bt) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free B-tree root node");
+ } /* end if */
+
#ifdef H5B_DEBUG
if(ret_value >= 0)
H5B_assert(f, dxpl_id, addr, type, udata);
@@ -715,7 +967,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
+H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned *idx,
haddr_t child, H5B_ins_t anchor, const void *md_key)
{
H5B_shared_t *shared; /* Pointer to shared B-tree info */
@@ -733,8 +985,8 @@ H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
/* Check for inserting right-most key into node (common when just appending
* records to an unlimited dimension chunked dataset)
*/
- base = H5B_NKEY(bt, shared, (idx + 1));
- if((idx + 1) == bt->nchildren) {
+ base = H5B_NKEY(bt, shared, (*idx + 1));
+ if((*idx + 1) == bt->nchildren) {
/* Make room for the new key */
HDmemcpy(base + shared->type->sizeof_nkey, base,
shared->type->sizeof_nkey); /* No overlap possible - memcpy() OK */
@@ -742,34 +994,34 @@ H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
/* The MD_KEY is the left key of the new node */
if(H5B_INS_RIGHT == anchor)
- idx++; /* Don't have to memmove() child addresses down, just add new child */
+ (*idx)++; /* Don't have to memmove() child addresses down, just add new child */
else
/* Make room for the new child address */
- bt->child[idx + 1] = bt->child[idx];
+ bt->child[*idx + 1] = bt->child[*idx];
} /* end if */
else {
/* Make room for the new key */
HDmemmove(base + shared->type->sizeof_nkey, base,
- (bt->nchildren - idx) * shared->type->sizeof_nkey);
+ (bt->nchildren - *idx) * shared->type->sizeof_nkey);
HDmemcpy(base, md_key, shared->type->sizeof_nkey);
/* The MD_KEY is the left key of the new node */
if(H5B_INS_RIGHT == anchor)
- idx++;
+ (*idx)++;
/* Make room for the new child address */
- HDmemmove(bt->child + idx + 1, bt->child + idx,
- (bt->nchildren - idx) * sizeof(haddr_t));
+ HDmemmove(bt->child + *idx + 1, bt->child + *idx,
+ (bt->nchildren - *idx) * sizeof(haddr_t));
} /* end if */
- bt->child[idx] = child;
+ bt->child[*idx] = child;
bt->nchildren += 1;
/* Mark node as dirty */
*bt_flags |= H5AC__DIRTIED_FLAG;
FUNC_LEAVE_NOAPI(SUCCEED)
-}
+} /* end H5B_insert_child() */
/*-------------------------------------------------------------------------
@@ -873,6 +1125,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
/* Set up user data for cache callbacks */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = bt;
cache_udata.rc_shared = rc_shared;
if(0 == bt->nchildren) {
@@ -1046,18 +1299,30 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
else
HDmemcpy(rt_key, H5B_NKEY(bt, shared, idx + 1), type->sizeof_nkey);
} /* end if */
- if(H5B_INS_CHANGE == my_ins) {
- /*
- * The insertion simply changed the address for the child.
- */
- HDassert(!child_bt_ud.bt);
- HDassert(bt->level == 0);
- bt->child[idx] = new_child_bt_ud.addr;
- bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
- } else if(H5B_INS_LEFT == my_ins || H5B_INS_RIGHT == my_ins) {
+
+ /*
+ * Handle changes/additions to children
+ */
+ HDassert(!(bt->level == 0) != !(child_bt_ud.bt));
+ if(H5B_INS_LEFT == my_ins || H5B_INS_RIGHT == my_ins) {
hbool_t *tmp_bt_flags_ptr = NULL;
H5B_t *tmp_bt;
+ /* Update child pointer to (old) child if swmr writes are on and level >
+ * 0, as it has been moved by H5B_split (one level down) */
+ if(shared->swmr_write && bt->level > 0) {
+ HDassert(child_bt_ud.bt);
+ HDassert(bt_ud->bt->child[idx] != child_bt_ud.addr);
+
+ bt_ud->bt->child[idx] = child_bt_ud.addr;
+ } /* end if */
+#ifndef NDEBUG
+ if(!(shared->swmr_write) && bt->level > 0) {
+ HDassert(child_bt_ud.bt);
+ HDassert(bt_ud->bt->child[idx] == child_bt_ud.addr);
+ } /* end if */
+#endif /* NDEBUG */
+
/*
* If this node is full then split it before inserting the new child.
*/
@@ -1079,9 +1344,34 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
} /* end else */
/* Insert the child */
- if(H5B_insert_child(tmp_bt, tmp_bt_flags_ptr, idx, new_child_bt_ud.addr, my_ins, md_key) < 0)
+ if(H5B_insert_child(tmp_bt, tmp_bt_flags_ptr, &idx, new_child_bt_ud.addr, my_ins, md_key) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, "can't insert child")
- }
+
+ /* Set up flush dependency on child client object, if appropriate */
+ if(shared->swmr_write && bt->level == 0) {
+ HDassert(!child_bt_ud.bt);
+ HDassert(shared->type->create_flush_dep);
+ if((shared->type->create_flush_dep)(H5B_NKEY(tmp_bt, shared, idx), udata, tmp_bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, H5B_INS_ERROR, "unable to create flush dependency")
+ } /* end if */
+ } else {
+ if(H5B_INS_CHANGE == my_ins) {
+ /*
+ * The insertion simply changed the address for the child.
+ */
+ HDassert(!child_bt_ud.bt);
+ HDassert(bt->level == 0);
+ bt->child[idx] = new_child_bt_ud.addr;
+ bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+ /* Set up flush dependency on child client object, if appropriate */
+ if(shared->swmr_write && bt->level == 0) {
+ HDassert(!child_bt_ud.bt);
+ HDassert(shared->type->create_flush_dep);
+ if((shared->type->create_flush_dep)(H5B_NKEY(bt, shared, idx), udata, bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, H5B_INS_ERROR, "unable to create flush dependency")
+ } /* end if */
+ } /* end else */
/*
* If this node split, return the mid key (the one that is shared
@@ -1105,7 +1395,8 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
done:
if(child_bt_ud.bt)
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, child_bt_ud.bt, child_bt_ud.cache_flags) < 0)
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, child_bt_ud.bt,
+ child_bt_ud.cache_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect child")
if(new_child_bt_ud.bt)
@@ -1113,7 +1404,7 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to unprotect new child")
FUNC_LEAVE_NOAPI(ret_value)
-}
+} /* end H5B_insert_helper() */
/*-------------------------------------------------------------------------
@@ -1128,19 +1419,24 @@ done:
* matzke@llnl.gov
* Jun 23 1997
*
+ * Modifications: Neil Fortner
+ * Jun 23 2011
+ * Replaced original function with new algorithm that doesn't
+ * use sibling pointers (for SWMR consistency) or unprotect
+ * nodes during recursion (for performance and safety).
+ *
*-------------------------------------------------------------------------
*/
static herr_t
H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
- H5B_operator_t op, void *udata)
+ H5B_operator_t op, void *udata, void *parent)
{
- H5B_t *bt = NULL; /* Pointer to current B-tree node */
- H5RC_t *rc_shared; /* Ref-counted shared info */
+ H5B_t *bt = NULL; /* Pointer to current B-tree node */
+ H5RC_t *rc_shared; /* Ref-counted shared info */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */
- uint8_t *native = NULL; /* Array of keys in native format */
- haddr_t *child = NULL; /* Array of child pointers */
- herr_t ret_value; /* Return value */
+ unsigned i; /* Index */
+ herr_t ret_value = H5_ITER_CONT; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5B_iterate_helper)
@@ -1155,108 +1451,33 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add
/* Get shared info for B-tree */
if(NULL == (rc_shared = (type->get_shared)(f, udata)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object")
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object")
shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared);
HDassert(shared);
/* Protect the initial/current node */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load B-tree node")
-
- if(bt->level > 0) {
- haddr_t left_child = bt->child[0]; /* Address of left-most child in node */
-
- /* Release current node */
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node")
- bt = NULL;
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load B-tree node")
- /* Keep following the left-most child until we reach a leaf node. */
- if((ret_value = H5B_iterate_helper(f, dxpl_id, type, left_child, op, udata)) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, H5_ITER_ERROR, "unable to list B-tree node")
- } /* end if */
- else {
- unsigned nchildren; /* Number of child pointers */
- haddr_t next_addr; /* Address of next node to the right */
-
- /* Allocate space for a copy of the native records & child pointers */
- if(NULL == (native = H5FL_BLK_MALLOC(native_block, shared->sizeof_keys)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, H5_ITER_ERROR, "memory allocation failed for shared B-tree native records")
- if(NULL == (child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, H5_ITER_ERROR, "memory allocation failed for shared B-tree child addresses")
-
- /* Cache information from this node */
- nchildren = bt->nchildren;
- next_addr = bt->right;
-
- /* Copy the native keys & child pointers into local arrays */
- HDmemcpy(native, bt->native, shared->sizeof_keys);
- HDmemcpy(child, bt->child, (nchildren * sizeof(haddr_t)));
-
- /* Release current node */
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node")
- bt = NULL;
-
- /*
- * We've reached the left-most leaf. Now follow the right-sibling
- * pointer from leaf to leaf until we've processed all leaves.
- */
- ret_value = H5_ITER_CONT;
- while(ret_value == H5_ITER_CONT) {
- haddr_t *curr_child; /* Pointer to node's child addresses */
- uint8_t *curr_native; /* Pointer to node's native keys */
- unsigned u; /* Local index variable */
-
- /*
- * Perform the iteration operator, which might invoke an
- * application callback.
- */
- for(u = 0, curr_child = child, curr_native = native; u < nchildren && ret_value == H5_ITER_CONT; u++, curr_child++, curr_native += type->sizeof_nkey) {
- ret_value = (*op)(f, dxpl_id, curr_native, *curr_child, curr_native + type->sizeof_nkey, udata);
- if(ret_value < 0)
- HERROR(H5E_BTREE, H5E_CANTLIST, "iterator function failed");
- } /* end for */
-
- /* Check for continuing iteration */
- if(ret_value == H5_ITER_CONT) {
- /* Check for another node */
- if(H5F_addr_defined(next_addr)) {
- /* Protect the next node to the right */
- addr = next_addr;
- if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "B-tree node")
-
- /* Cache information from this node */
- nchildren = bt->nchildren;
- next_addr = bt->right;
-
- /* Copy the native keys & child pointers into local arrays */
- HDmemcpy(native, bt->native, shared->sizeof_keys);
- HDmemcpy(child, bt->child, nchildren * sizeof(haddr_t));
-
- /* Unprotect node */
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node")
- bt = NULL;
- } /* end if */
- else
- /* Exit loop */
- break;
- } /* end if */
- } /* end while */
- } /* end else */
+ /* Iterate over children */
+ for(i=0; i<bt->nchildren && ret_value == H5_ITER_CONT; i++) {
+ if(bt->level > 0) {
+ /* Keep following the left-most child until we reach a leaf node. */
+ if((ret_value = H5B_iterate_helper(f, dxpl_id, type, bt->child[i], op, udata, bt)) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, H5_ITER_ERROR, "unable to list B-tree node")
+ } /* end if */
+ else
+ if((ret_value = (*op)(f, dxpl_id, H5B_NKEY(bt, shared, i), bt->child[i], H5B_NKEY(bt, shared, i + 1), udata)) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, H5_ITER_ERROR, "iterator function failed")
+ } /* end for */
done:
if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node")
- if(native)
- native = H5FL_BLK_FREE(native_block, native);
- if(child)
- child = H5FL_SEQ_FREE(haddr_t, child);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B_iterate_helper() */
@@ -1278,7 +1499,7 @@ done:
*/
herr_t
H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
- H5B_operator_t op, void *udata)
+ H5B_operator_t op, void *udata, void *parent)
{
herr_t ret_value; /* Return value */
@@ -1294,7 +1515,7 @@ H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
HDassert(udata);
/* Iterate over the B-tree records */
- if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
+ if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata, parent)) < 0)
HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed");
FUNC_LEAVE_NOAPI(ret_value)
@@ -1329,7 +1550,8 @@ static H5B_ins_t
H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type,
int level, uint8_t *lt_key/*out*/,
hbool_t *lt_key_changed/*out*/, void *udata,
- uint8_t *rt_key/*out*/, hbool_t *rt_key_changed/*out*/)
+ uint8_t *rt_key/*out*/, hbool_t *rt_key_changed/*out*/,
+ void *parent)
{
H5B_t *bt = NULL, *sibling = NULL;
unsigned bt_flags = H5AC__NO_FLAGS_SET;
@@ -1343,7 +1565,6 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
FUNC_ENTER_NOAPI(H5B_remove_helper, H5B_INS_ERROR)
HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(type);
HDassert(type->decode);
HDassert(type->cmp3);
@@ -1364,6 +1585,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
+ cache_udata.parent = parent;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load B-tree node")
@@ -1388,7 +1610,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
if((int)(ret_value = H5B_remove_helper(f, dxpl_id,
bt->child[idx], type, level + 1, H5B_NKEY(bt, shared, idx)/*out*/,
lt_key_changed/*out*/, udata, H5B_NKEY(bt, shared, idx + 1)/*out*/,
- rt_key_changed/*out*/)) < 0)
+ rt_key_changed/*out*/, bt)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, H5B_INS_ERROR, "key not found in subtree")
} else if(type->remove) {
/*
@@ -1504,9 +1726,10 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
bt->nchildren = 0;
/* Delete the node from disk (via the metadata cache) */
- bt_flags |= H5AC__DIRTIED_FLAG;
+ if(!shared->swmr_write)
+ bt_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
H5_CHECK_OVERFLOW(shared->sizeof_rnode, size_t, hsize_t);
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, bt_flags | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) {
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, bt_flags | H5AC__DELETED_FLAG) < 0) {
bt = NULL;
bt_flags = H5AC__NO_FLAGS_SET;
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to free B-tree node")
@@ -1652,7 +1875,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *udata)
+H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent)
{
/* These are defined this way to satisfy alignment constraints */
uint64_t _lt_key[128], _rt_key[128];
@@ -1672,7 +1896,8 @@ H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
/* The actual removal */
if(H5B_remove_helper(f, dxpl_id, addr, type, 0, lt_key, &lt_key_changed,
- udata, rt_key, &rt_key_changed) == H5B_INS_ERROR)
+ udata, rt_key, &rt_key_changed, parent)
+ == H5B_INS_ERROR)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to remove entry from B-tree")
#ifdef H5B_DEBUG
@@ -1697,7 +1922,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *udata)
+H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent)
{
H5B_t *bt = NULL; /* B-tree node being operated on */
H5RC_t *rc_shared; /* Ref-counted shared info */
@@ -1722,6 +1948,7 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
/* Lock this B-tree node into memory for now */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
@@ -1730,7 +1957,7 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
if(bt->level > 0) {
/* Iterate over all children in node, deleting them */
for(u = 0; u < bt->nchildren; u++)
- if(H5B_delete(f, dxpl_id, type, bt->child[u], udata) < 0)
+ if(H5B_delete(f, dxpl_id, type, bt->child[u], udata, bt) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to delete B-tree node")
} /* end if */
@@ -1751,7 +1978,7 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
} /* end else */
done:
- if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0)
+ if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__DELETED_FLAG | (shared->swmr_write ? 0 : H5AC__FREE_FILE_SPACE_FLAG)) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node in cache")
FUNC_LEAVE_NOAPI(ret_value)
@@ -1816,6 +2043,11 @@ HDmemset(shared->page, 0, shared->sizeof_rnode);
for(u = 0; u < (shared->two_k + 1); u++)
shared->nkey[u] = u * type->sizeof_nkey;
+ /* Determine if we are doing SWMR writes. Only enable for chunks for now.
+ */
+ shared->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0
+ && type->id == H5B_CHUNK_ID;
+
/* Set return value */
ret_value = shared;
@@ -1948,7 +2180,7 @@ done:
*/
static herr_t
H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
- const H5B_info_ud_t *info_udata)
+ const H5B_info_ud_t *info_udata, void *parent)
{
H5B_t *bt = NULL; /* Pointer to current B-tree node */
H5RC_t *rc_shared; /* Ref-counted shared info */
@@ -1984,6 +2216,7 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
/* Protect the initial/current node */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
@@ -2028,7 +2261,7 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
/* Check for another "row" of B-tree nodes to iterate over */
if(level > 0) {
/* Keep following the left-most child until we reach a leaf node. */
- if(H5B_get_info_helper(f, dxpl_id, type, left_child, info_udata) < 0)
+ if(H5B_get_info_helper(f, dxpl_id, type, left_child, info_udata, bt) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node")
} /* end if */
@@ -2054,7 +2287,7 @@ done:
*/
herr_t
H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
- H5B_info_t *bt_info, H5B_operator_t op, void *udata)
+ H5B_info_t *bt_info, H5B_operator_t op, void *udata, void *parent)
{
H5B_info_ud_t info_udata; /* User-data for B-tree size iteration */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2078,13 +2311,13 @@ H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
info_udata.udata = udata;
/* Iterate over the B-tree nodes */
- if(H5B_get_info_helper(f, dxpl_id, type, addr, &info_udata) < 0)
+ if(H5B_get_info_helper(f, dxpl_id, type, addr, &info_udata, parent) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_BADITER, FAIL, "B-tree iteration failed")
/* Iterate over the B-tree records, making any "leaf" callbacks */
/* (Only if operator defined) */
if(op)
- if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
+ if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata, parent)) < 0)
HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed");
done:
@@ -2105,7 +2338,8 @@ done:
*-------------------------------------------------------------------------
*/
htri_t
-H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
+H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *parent)
{
H5B_t *bt = NULL; /* The B-tree */
H5RC_t *rc_shared; /* Ref-counted shared info */
@@ -2135,6 +2369,7 @@ H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
*/
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node")
@@ -2179,3 +2414,122 @@ H5B_node_dest(H5B_t *bt)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B_node_dest() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5B_support
+ *
+ * Purpose: Add a flush dependency between the b-tree child object (as
+ * the flush dependency child) and the b-tree node containing
+ * the address of that child. If the child has not yet been
+ * inserted, does nothing.
+ *
+ * Return: TRUE if flush dependency created
+ * FALSE if child is not in b-tree
+ * Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Aug 18 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5B_support(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent, void *child)
+{
+ H5B_t *node = NULL; /* Node containing direct link to child */
+ herr_t ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5B_support, FAIL)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(f);
+ HDassert(type);
+ HDassert(type->decode);
+ HDassert(type->cmp3);
+ HDassert(type->found);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(udata);
+ HDassert(child);
+
+ /* Lookup the node which points to the requested child */
+ if(H5B_find_node(f, dxpl_id, type, addr, udata, parent, &node) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in B-tree")
+
+ /* Set the return value, and add the flush dependency if the node was found
+ */
+ if(node) {
+ if(H5AC_create_flush_dependency(node, child) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ ret_value = TRUE;
+ } /* end if */
+ else
+ ret_value = FALSE;
+
+done:
+ if(node && H5AC_unpin_entry(node) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin node")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B_support() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B_unsupport
+ *
+ * Purpose: Destroy a flush dependency between the b-tree child object
+ * and the b-tree node containing the address of that child.
+ * This dependency *must* have been created earlier either by
+ * a call to H5B_support() or by the client (presumably via
+ * the create_flush_dep() callback on insertion).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Aug 18 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5B_unsupport(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *udata, void *parent, void *child)
+{
+ H5B_t *node = NULL; /* Node containing direct link to child */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5B_unsupport, FAIL)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(f);
+ HDassert(type);
+ HDassert(type->decode);
+ HDassert(type->cmp3);
+ HDassert(type->found);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(udata);
+ HDassert(child);
+
+ /* Lookup the node which points to the requested child */
+ if(H5B_find_node(f, dxpl_id, type, addr, udata, parent, &node) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "can't lookup key in B-tree")
+
+ /* It is an error if the node does not exist yet - the client should only
+ * call this function if support() succeeded or if the client's
+ * create_flush_dep() callback has been called. */
+ if(!node)
+ HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "node not found in B-tree")
+
+ /* Destroy the flush dependency */
+ if(H5AC_destroy_flush_dependency(node, child) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+done:
+ if(node && H5AC_unpin_entry(node) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin node")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B_unsupport() */
+
diff --git a/src/H5Bcache.c b/src/H5Bcache.c
index 7c006bc..64c8b56 100644
--- a/src/H5Bcache.c
+++ b/src/H5Bcache.c
@@ -60,6 +60,7 @@ static herr_t H5B_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
static herr_t H5B_dest(H5F_t *f, H5B_t *bt);
static herr_t H5B_clear(H5F_t *f, H5B_t *b, hbool_t destroy);
static herr_t H5B_compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr);
+static herr_t H5B_notify(H5AC_notify_action_t action, H5B_t *bt);
/*********************/
@@ -73,7 +74,7 @@ const H5AC_class_t H5AC_BT[1] = {{
(H5AC_flush_func_t)H5B_flush,
(H5AC_dest_func_t)H5B_dest,
(H5AC_clear_func_t)H5B_clear,
- (H5AC_notify_func_t)NULL,
+ (H5AC_notify_func_t)H5B_notify,
(H5AC_size_func_t)H5B_compute_size,
}};
@@ -176,6 +177,13 @@ H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key")
} /* end if */
+ /* Set up flush dependency. The dependency will actually be created in the
+ * "notify" callback. */
+ if(shared->swmr_write) {
+ HDassert(udata->parent);
+ bt->parent = udata->parent;
+ } /* end if */
+
/* Set return value */
ret_value = bt;
@@ -370,6 +378,64 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5B_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * nfortne2@hdfgroup.org
+ * Aug 17 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B_notify(H5AC_notify_action_t action, H5B_t *bt)
+{
+ H5B_shared_t *shared; /* Pointer to shared B-tree info */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT(H5B_notify)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(bt);
+ HDassert(bt->rc_shared);
+ shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(shared->swmr_write) {
+ HDassert(bt->parent);
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ /* Create flush dependency on parent */
+ if(H5AC_create_flush_dependency(bt->parent, bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5AC_destroy_flush_dependency(bt->parent, bt) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B_compute_size
*
* Purpose: Compute the size in bytes of the specified instance of
diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h
index 46e2b28..8478ae3 100644
--- a/src/H5Bpkg.h
+++ b/src/H5Bpkg.h
@@ -59,12 +59,16 @@ typedef struct H5B_t {
haddr_t right; /*address of right sibling */
uint8_t *native; /*array of keys in native format */
haddr_t *child; /*2k child pointers */
+
+ /* Not stored on disk */
+ void *parent; /* Flush dependency parent */
} H5B_t;
/* Callback info for loading a B-tree node into the cache */
typedef struct H5B_cache_ud_t {
H5F_t *f; /* File that B-tree node is within */
const struct H5B_class_t *type; /* Type of tree */
+ void *parent; /* Flush dependency parent */
H5RC_t *rc_shared; /* Ref-counted shared info */
} H5B_cache_ud_t;
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 83a357b..a7b303d 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -100,6 +100,7 @@ typedef struct H5B_shared_t {
size_t sizeof_len; /* Size of file lengths (in bytes) */
uint8_t *page; /* Disk page */
size_t *nkey; /* Offsets of each native key in native key buffer */
+ hbool_t swmr_write; /* Whether we are doing SWMR writes */
} H5B_shared_t;
/*
@@ -138,6 +139,10 @@ typedef struct H5B_class_t {
herr_t (*decode)(const H5B_shared_t*, const uint8_t*, void*);
herr_t (*encode)(const H5B_shared_t*, uint8_t*, const void*);
herr_t (*debug_key)(FILE*, int, int, const void*, const void*);
+
+ /* flush dependency functions */
+ herr_t (*create_flush_dep)(void*, void *, void *);
+ herr_t (*update_flush_dep)(void*, void *, void *, void*);
} H5B_class_t;
/* Information about B-tree */
@@ -157,25 +162,30 @@ typedef struct H5B_info_t {
/* Library-private Function Prototypes */
/***************************************/
H5_DLL herr_t H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- void *udata, haddr_t *addr_p/*out*/);
+ void *udata, void *parent, haddr_t *addr_p/*out*/);
H5_DLL herr_t H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, void *udata);
+ haddr_t addr, void *udata, void *parent);
H5_DLL herr_t H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, void *udata);
+ haddr_t addr, void *udata, void *parent);
H5_DLL herr_t H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, H5B_operator_t op, void *udata);
+ haddr_t addr, H5B_operator_t op, void *udata, void *parent);
H5_DLL herr_t H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, H5B_info_t *bt_info, H5B_operator_t op, void *udata);
+ haddr_t addr, H5B_info_t *bt_info, H5B_operator_t op, void *udata,
+ void *parent);
H5_DLL herr_t H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, void *udata);
+ haddr_t addr, void *udata, void *parent);
H5_DLL herr_t H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr, void *udata);
+ haddr_t addr, void *udata, void *parent);
H5_DLL H5B_shared_t *H5B_shared_new(const H5F_t *f, const H5B_class_t *type,
size_t sizeof_rkey);
H5_DLL herr_t H5B_shared_free(void *_shared);
+H5_DLL htri_t H5B_support(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
+ haddr_t addr, void *udata, void *parent, void *child);
+H5_DLL herr_t H5B_unsupport(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
+ haddr_t addr, void *udata, void *parent, void *child);
H5_DLL herr_t H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
int indent, int fwidth, const H5B_class_t *type, void *udata);
H5_DLL htri_t H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr);
+ haddr_t addr, void *parent);
#endif /* _H5Bprivate_H */
diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c
index e61811d..f511d90 100644
--- a/src/H5Dbtree.c
+++ b/src/H5Dbtree.c
@@ -124,6 +124,10 @@ static herr_t H5D_btree_encode_key(const H5B_shared_t *shared, uint8_t *raw,
const void *_key);
static herr_t H5D_btree_debug_key(FILE *stream, int indent, int fwidth,
const void *key, const void *udata);
+static herr_t H5D_btree_create_flush_dep(void *_key, void *_udata,
+ void *parent);
+static herr_t H5D_btree_update_flush_dep(void *_key, void *_udata,
+ void *old_parent, void *new_parent);
/* Chunked layout indexing callbacks */
static herr_t H5D_btree_idx_init(const H5D_chk_idx_info_t *idx_info,
@@ -146,6 +150,10 @@ static herr_t H5D_btree_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
static herr_t H5D_btree_idx_size(const H5D_chk_idx_info_t *idx_info,
hsize_t *size);
static herr_t H5D_btree_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D_btree_idx_support(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, H5AC_info_t *child_entry);
+static herr_t H5D_btree_idx_unsupport(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata, H5AC_info_t *child_entry);
static herr_t H5D_btree_idx_dump(const H5O_storage_chunk_t *storage,
FILE *stream);
static herr_t H5D_btree_idx_dest(const H5D_chk_idx_info_t *idx_info);
@@ -157,7 +165,7 @@ static herr_t H5D_btree_idx_dest(const H5D_chk_idx_info_t *idx_info);
/* v1 B-tree indexed chunk I/O ops */
const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{
- FALSE, /* v1 B-tree indices don't support SWMR access */
+ TRUE, /* v1 B-tree indices do support SWMR access */
H5D_btree_idx_init,
H5D_btree_idx_create,
H5D_btree_idx_is_space_alloc,
@@ -171,8 +179,8 @@ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{
H5D_btree_idx_copy_shutdown,
H5D_btree_idx_size,
H5D_btree_idx_reset,
- NULL,
- NULL,
+ H5D_btree_idx_support,
+ H5D_btree_idx_unsupport,
H5D_btree_idx_dump,
H5D_btree_idx_dest
}};
@@ -199,6 +207,8 @@ H5B_class_t H5B_BTREE[1] = {{
H5D_btree_decode_key, /*decode */
H5D_btree_encode_key, /*encode */
H5D_btree_debug_key, /*debug */
+ H5D_btree_create_flush_dep, /*create_flush_dep */
+ H5D_btree_update_flush_dep, /*update_flush_dep */
}};
@@ -561,13 +571,20 @@ H5D_btree_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
* QAK - 11/19/2002
*/
#ifdef OLD_WAY
+ /* Note that this does not take SWMR writes into account! Fix this
+ * if we ever want to go back to this code. -NAF 8/2/11 */
if(HADDR_UNDEF == (*new_node_p = H5MF_realloc(f, H5FD_MEM_DRAW, addr,
(hsize_t)lt_key->nbytes, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk storage")
#else /* OLD_WAY */
- H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
- if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
- HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
+ /* Only free the old location if not doing SWMR writes - otherwise
+ * we must keep the old chunk around in case a reader has an
+ * outdated version of the b-tree node */
+ if(!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)) {
+ H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
+ if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
+ HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
+ } /* end if */
H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk")
@@ -644,9 +661,11 @@ H5D_btree_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out *
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_remove)
/* Remove raw data chunk from file */
- H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
- if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
- HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
+ if(!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)) {
+ H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
+ if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
+ HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
+ } /* end if */
/* Mark keys as unchanged */
*lt_key_changed = FALSE;
@@ -801,7 +820,7 @@ H5D_btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store, unsigned ndi
/* Set up the "local" information for this dataset's chunks */
/* <none> */
-
+    HDassert(!store->u.btree.shared);
/* Make shared B-tree info reference counted */
if(NULL == (store->u.btree.shared = H5RC_create(shared, H5B_shared_free)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
@@ -812,6 +831,91 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D_btree_create_flush_dep
+ *
+ * Purpose: Creates a flush dependency between the specified chunk
+ * (child) and parent.
+ *
+ * Return: Success: 0
+ * Failure: FAIL
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, September 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_btree_create_flush_dep(void *_key, void *_udata, void *parent)
+{
+ H5D_btree_key_t *key = (H5D_btree_key_t *)_key;
+ H5D_chunk_common_ud_t *udata = (H5D_chunk_common_ud_t *) _udata;
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_btree_create_flush_dep)
+
+ HDassert(key);
+ HDassert(udata);
+ HDassert(udata->layout->ndims > 0 && udata->layout->ndims <= H5O_LAYOUT_NDIMS);
+ HDassert(parent);
+
+ /* If there is no rdcc, then there are no cached chunks to create
+ * dependencies on. This should only happen when copying */
+ if(udata->rdcc)
+ /* Delegate to chunk routine */
+ if(H5D_chunk_create_flush_dep(udata->rdcc, udata->layout, key->offset,
+ parent) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_btree_create_flush_dep() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_btree_update_flush_dep
+ *
+ * Purpose: Updates the flush dependency of the specified chunk from
+ * old_parent to new_parent, but only if the current parent
+ * is cached. If the chunk is not cached, does nothing.
+ *
+ * Return: Success: 0
+ * Failure: FAIL
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, August 31, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_btree_update_flush_dep(void *_key, void *_udata, void *old_parent,
+ void *new_parent)
+{
+ H5D_btree_key_t *key = (H5D_btree_key_t *)_key;
+ H5D_chunk_common_ud_t *udata = (H5D_chunk_common_ud_t *) _udata;
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_btree_update_flush_dep)
+
+ HDassert(key);
+ HDassert(udata);
+ HDassert(udata->layout->ndims > 0 && udata->layout->ndims <= H5O_LAYOUT_NDIMS);
+ HDassert(old_parent);
+ HDassert(new_parent);
+
+ /* If there is no rdcc, then there are no cached chunks to update
+ * dependencies. This should only happen when copying */
+ if(udata->rdcc)
+ /* Delegate to chunk routine */
+ if(H5D_chunk_update_flush_dep(udata->rdcc, udata->layout, key->offset,
+ old_parent, new_parent) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to update flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_btree_update_flush_dep() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D_btree_idx_init
*
* Purpose: Initialize the indexing information for a dataset.
@@ -871,7 +975,9 @@ done:
static herr_t
H5D_btree_idx_create(const H5D_chk_idx_info_t *idx_info)
{
- H5D_chunk_common_ud_t udata; /* User data for B-tree callback */
+ H5D_chunk_common_ud_t udata; /* User data for B-tree callback */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_create)
@@ -888,11 +994,26 @@ H5D_btree_idx_create(const H5D_chk_idx_info_t *idx_info)
udata.layout = idx_info->layout;
udata.storage = idx_info->storage;
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Create the v1 B-tree for the chunk index */
- if(H5B_create(idx_info->f, idx_info->dxpl_id, H5B_BTREE, &udata, &(idx_info->storage->idx_addr)/*out*/) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create B-tree")
+ if(H5B_create(idx_info->f, idx_info->dxpl_id, H5B_BTREE, &udata, oh, &(idx_info->storage->idx_addr)/*out*/) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create B-tree")
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_btree_idx_create() */
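
The SWMR check, object-header pin, and unpin in the done block above are repeated essentially verbatim in the insert, get_addr, iterate, remove, delete, and size callbacks that follow. A sketch of how that sequence could be factored into a local helper (the helper name is hypothetical and not part of this patch):

    /* Hypothetical helper: when the file is open for SWMR writes, pin the
     * dataset's object header so it can serve as a flush dependency parent.
     * On success *oh is the pinned header, or NULL when no pin was needed. */
    static herr_t
    H5D_btree_idx_pin_ohdr(const H5D_chk_idx_info_t *idx_info, H5O_t **oh)
    {
        H5O_loc_t oloc;                 /* Temporary object header location */
        herr_t ret_value = SUCCEED;     /* Return value */

        FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_pin_ohdr)

        *oh = NULL;
        if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
            /* Set up object header location for dataset */
            H5O_loc_reset(&oloc);
            oloc.file = idx_info->f;
            oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;

            /* Pin the dataset's object header */
            if(NULL == (*oh = H5O_pin(&oloc, idx_info->dxpl_id)))
                HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
        } /* end if */

    done:
        FUNC_LEAVE_NOAPI(ret_value)
    } /* end H5D_btree_idx_pin_ohdr() */

Each callback would then call this helper once on entry and H5O_unpin(oh) in its done block, exactly as the open-coded versions below do.
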
@@ -942,7 +1063,9 @@ H5D_btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
static herr_t
H5D_btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_insert)
@@ -954,14 +1077,30 @@ H5D_btree_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
HDassert(udata);
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/*
* Create the chunk if it doesn't exist, or reallocate the chunk if
* its size changed.
*/
- if(H5B_insert(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata) < 0)
+ if(H5B_insert(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata, oh) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk")
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_btree_idx_insert() */
@@ -983,6 +1122,8 @@ done:
static herr_t
H5D_btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_get_addr)
@@ -996,11 +1137,27 @@ H5D_btree_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata
HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
HDassert(udata);
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Go get the chunk information from the B-tree */
- if(H5B_find(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata) < 0)
+ if(H5B_find(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata, oh) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_btree_idx_get_addr() */
@@ -1071,9 +1228,11 @@ H5D_btree_idx_iterate(const H5D_chk_idx_info_t *idx_info,
H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
{
H5D_btree_it_ud_t udata; /* User data for B-tree iterator callback */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
int ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR(H5D_btree_idx_iterate)
+ FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_iterate)
HDassert(idx_info);
HDassert(idx_info->f);
@@ -1084,6 +1243,19 @@ H5D_btree_idx_iterate(const H5D_chk_idx_info_t *idx_info,
HDassert(chunk_cb);
HDassert(chunk_udata);
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Initialize userdata */
HDmemset(&udata, 0, sizeof udata);
udata.common.layout = idx_info->layout;
@@ -1092,9 +1264,13 @@ H5D_btree_idx_iterate(const H5D_chk_idx_info_t *idx_info,
udata.udata = chunk_udata;
/* Iterate over existing chunks */
- if((ret_value = H5B_iterate(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, H5D_btree_idx_iterate_cb, &udata)) < 0)
+ if((ret_value = H5B_iterate(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, H5D_btree_idx_iterate_cb, &udata, oh)) < 0)
HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over chunk B-tree");
+done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_btree_idx_iterate() */
@@ -1114,6 +1290,8 @@ H5D_btree_idx_iterate(const H5D_chk_idx_info_t *idx_info,
static herr_t
H5D_btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata)
{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_remove)
@@ -1126,13 +1304,29 @@ H5D_btree_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *
HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
HDassert(udata);
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Remove the chunk from the v1 B-tree index and release the space for the
* chunk (in the B-tree callback).
*/
- if(H5B_remove(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata) < 0)
+ if(H5B_remove(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, udata, oh) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to remove chunk entry")
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_btree_idx_remove() */
@@ -1154,7 +1348,9 @@ done:
static herr_t
H5D_btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_delete)
@@ -1170,6 +1366,19 @@ H5D_btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
H5O_storage_chunk_t tmp_storage; /* Local copy of storage info */
H5D_chunk_common_ud_t udata; /* User data for B-tree operations */
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Set up temporary chunked storage info */
tmp_storage = *idx_info->storage;
@@ -1183,7 +1392,7 @@ H5D_btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
udata.storage = &tmp_storage;
/* Delete entire B-tree */
- if(H5B_delete(idx_info->f, idx_info->dxpl_id, H5B_BTREE, tmp_storage.idx_addr, &udata) < 0)
+ if(H5B_delete(idx_info->f, idx_info->dxpl_id, H5B_BTREE, tmp_storage.idx_addr, &udata, oh) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk B-tree")
/* Release the shared B-tree page */
@@ -1194,6 +1403,9 @@ H5D_btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
} /* end if */
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_btree_idx_delete() */
@@ -1300,6 +1512,8 @@ H5D_btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
H5D_chunk_common_ud_t udata; /* User-data for loading B-tree nodes */
H5B_info_t bt_info; /* B-tree info */
hbool_t shared_init = FALSE; /* Whether shared B-tree info is initialized */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5D_btree_idx_size, FAIL)
@@ -1312,6 +1526,19 @@ H5D_btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
HDassert(idx_info->storage);
HDassert(index_size);
+ /* Check for SWMR writes to the file. If so we must pin the dataset object
+ * header so it can be set as a flush dependency parent. */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+ } /* end if */
+
/* Initialize the shared info for the B-tree traversal */
if(H5D_btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout->ndims) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
@@ -1323,13 +1550,16 @@ H5D_btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
udata.storage = idx_info->storage;
/* Get metadata information for B-tree */
- if(H5B_get_info(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, &bt_info, NULL, &udata) < 0)
+ if(H5B_get_info(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr, &bt_info, NULL, &udata, oh) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to iterate over chunk B-tree")
/* Set the size of the B-tree */
*index_size = bt_info.size;
done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
if(shared_init) {
if(NULL == idx_info->storage->u.btree.shared)
HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil")
@@ -1370,6 +1600,118 @@ H5D_btree_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
/*-------------------------------------------------------------------------
+ * Function: H5D_btree_idx_support
+ *
+ * Purpose: Create a dependency between a chunk [proxy] and the index
+ * metadata that contains the record for the chunk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Friday, Jun 24, 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D_btree_idx_support(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, H5AC_info_t *child_entry)
+{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
+    htri_t ret_value;                   /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_support)
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+ HDassert(child_entry);
+ HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+
+ /* Add the flush dependency on the chunk */
+ if((ret_value = H5B_support(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr,
+ udata, oh, child_entry)) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on B-tree metadata")
+
+done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_btree_idx_support() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_btree_idx_unsupport
+ *
+ * Purpose: Destroy a dependency between a chunk [proxy] and the index
+ * metadata that contains the record for the chunk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, Jul 6, 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_btree_idx_unsupport(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata, H5AC_info_t *child_entry)
+{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_t *oh = NULL; /* Dataset's object header */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_btree_idx_unsupport)
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+ HDassert(child_entry);
+ HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr;
+
+ /* Pin the dataset's object header */
+ if(NULL == (oh = H5O_pin(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+
+    /* Remove the flush dependency on the chunk */
+    if((ret_value = H5B_unsupport(idx_info->f, idx_info->dxpl_id, H5B_BTREE, idx_info->storage->idx_addr,
+            udata, oh, child_entry)) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency on B-tree metadata")
+
+done:
+ if(oh && H5O_unpin(oh) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_btree_idx_unsupport() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D_btree_idx_dump
*
* Purpose: Dump indexing information to a stream.
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index abebe2a..48a7c20 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -3137,7 +3137,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
* flush dependencies are maintained in the proper way for SWMR
* access to work.
*/
- if(H5D_chunk_proxy_create(io_info->dset, io_info->dxpl_id, (H5D_chunk_common_ud_t *)udata, ent) < 0)
+ if(H5D_chunk_proxy_create(io_info->dset, io_info->dxpl_id, udata, ent) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert proxy for chunk in metadata cache")
} /* end if */
} /* end if */
@@ -5871,3 +5871,122 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_chunk_is_partial_edge_chunk() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_create_flush_dep
+ *
+ * Purpose: Creates a flush dependency between the specified chunk
+ * (child) and parent.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, September 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_create_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *parent)
+{
+ hsize_t chunk_idx; /* Chunk index */
+ H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
+ hbool_t found = FALSE; /* In cache? */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_create_flush_dep)
+
+ /* Check args */
+ HDassert(rdcc);
+ HDassert(layout);
+ HDassert(offset);
+ HDassert(parent);
+
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(layout->ndims - 1, offset, layout->dim,
+ layout->down_chunks, &chunk_idx) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ /* Check for chunk in cache */
+ if(rdcc->nslots > 0) {
+ ent = rdcc->slot[H5F_addr_hash(chunk_idx, rdcc->nslots)];
+
+ if(ent)
+ for(u = 0, found = TRUE; u < layout->ndims - 1; u++)
+ if(offset[u] != ent->offset[u]) {
+ found = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Create the dependency on the chunk proxy */
+ if(found)
+ if(H5D_chunk_proxy_create_flush_dep(ent, parent) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_chunk_create_flush_dep() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_update_flush_dep
+ *
+ * Purpose: Updates the flush dependency of the specified chunk from
+ * old_parent to new_parent, but only if the current parent
+ * is cached. If the chunk is not cached, does nothing.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 7 Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_update_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *old_parent,
+ void *new_parent)
+{
+ hsize_t chunk_idx; /* Chunk index */
+ H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
+ hbool_t found = FALSE; /* In cache? */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_update_flush_dep)
+
+ /* Check args */
+ HDassert(rdcc);
+ HDassert(layout);
+ HDassert(offset);
+ HDassert(old_parent);
+ HDassert(new_parent);
+
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index(layout->ndims - 1, offset, layout->dim,
+ layout->down_chunks, &chunk_idx) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+
+ /* Check for chunk in cache */
+ if(rdcc->nslots > 0) {
+ ent = rdcc->slot[H5F_addr_hash(chunk_idx, rdcc->nslots)];
+
+ if(ent)
+ for(u = 0, found = TRUE; u < layout->ndims - 1; u++)
+ if(offset[u] != ent->offset[u]) {
+ found = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Update the dependencies on the chunk proxy */
+ if(found)
+ if(H5D_chunk_proxy_update_flush_dep(ent, old_parent, new_parent) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to update flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_chunk_update_flush_dep() */
+
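H5D_chunk_create_flush_dep() and H5D_chunk_update_flush_dep() above share the same chunk-index/hash-slot lookup to decide whether the chunk is currently held in the chunk cache. A sketch of that common test as a hypothetical predicate (not part of this patch), shown only to make the shared logic explicit:

    /* Hypothetical predicate: return the cache entry holding the chunk at
     * `offset`, or NULL if that chunk is not currently in the chunk cache. */
    static H5D_rdcc_ent_t *
    H5D_chunk_cache_lookup(const H5D_rdcc_t *rdcc,
        const H5O_layout_chunk_t *layout, const hsize_t offset[])
    {
        hsize_t chunk_idx;              /* Chunk index */
        H5D_rdcc_ent_t *ent = NULL;     /* Cache entry in the hash slot */
        unsigned u;                     /* Local index variable */

        /* Calculate the index of this chunk */
        if(H5V_chunk_index(layout->ndims - 1, offset, layout->dim,
                layout->down_chunks, &chunk_idx) < 0)
            return NULL;

        /* Hash to a slot, then verify the entry's offset matches exactly,
         * since unrelated chunks can hash to the same slot */
        if(rdcc->nslots > 0) {
            ent = rdcc->slot[H5F_addr_hash(chunk_idx, rdcc->nslots)];
            if(ent)
                for(u = 0; u < layout->ndims - 1; u++)
                    if(offset[u] != ent->offset[u])
                        return NULL;
        } /* end if */

        return ent;
    } /* end H5D_chunk_cache_lookup() */
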
diff --git a/src/H5Dearray.c b/src/H5Dearray.c
index bcdedc5..ba965a0 100644
--- a/src/H5Dearray.c
+++ b/src/H5Dearray.c
@@ -1151,9 +1151,14 @@ H5D_earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
/* Check for chunk being same size */
if(udata->nbytes != elmt.nbytes) {
/* Release previous chunk */
- H5_CHECK_OVERFLOW(elmt.nbytes, uint32_t, hsize_t);
- if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ /* Only free the old location if not doing SWMR writes - otherwise
+ * we must keep the old chunk around in case a reader has an
+         * outdated version of the extensible array metadata */
+ if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+ H5_CHECK_OVERFLOW(elmt.nbytes, uint32_t, hsize_t);
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ } /* end if */
elmt.addr = HADDR_UNDEF;
alloc_chunk = TRUE;
} /* end if */
@@ -1526,11 +1531,13 @@ H5D_earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t
if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
- /* Remove raw data chunk from file */
+ /* Remove raw data chunk from file if not doing SWMR writes */
HDassert(H5F_addr_defined(elmt.addr));
- H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t);
- if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+ H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t);
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ } /* end if */
/* Reset the info about the chunk for the index */
elmt.addr = HADDR_UNDEF;
@@ -1546,11 +1553,13 @@ H5D_earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t
if(H5EA_get(ea, idx_info->dxpl_id, idx, &addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
- /* Remove raw data chunk from file */
+ /* Remove raw data chunk from file if not doing SWMR writes */
HDassert(H5F_addr_defined(addr));
- H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t);
- if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+ H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t);
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ } /* end if */
/* Reset the address of the chunk for the index */
addr = HADDR_UNDEF;
@@ -1868,13 +1877,13 @@ H5D_earray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static htri_t
H5D_earray_idx_support(const H5D_chk_idx_info_t *idx_info,
H5D_chunk_common_ud_t *udata, H5AC_info_t *child_entry)
{
H5EA_t *ea; /* Pointer to extensible array structure */
hsize_t idx; /* Array index of chunk */
- herr_t ret_value = SUCCEED; /* Return value */
+    htri_t ret_value = TRUE;            /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_earray_idx_support)
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 02eb950..50a10ef 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -322,7 +322,7 @@ typedef herr_t (*H5D_chunk_copy_shutdown_func_t)(H5O_storage_chunk_t *storage_sr
typedef herr_t (*H5D_chunk_size_func_t)(const H5D_chk_idx_info_t *idx_info,
hsize_t *idx_size);
typedef herr_t (*H5D_chunk_reset_func_t)(H5O_storage_chunk_t *storage, hbool_t reset_addr);
-typedef herr_t (*H5D_chunk_support_func_t)(const H5D_chk_idx_info_t *idx_info,
+typedef htri_t (*H5D_chunk_support_func_t)(const H5D_chk_idx_info_t *idx_info,
H5D_chunk_common_ud_t *udata, H5AC_info_t *child_entry);
typedef herr_t (*H5D_chunk_unsupport_func_t)(const H5D_chk_idx_info_t *idx_info,
H5D_chunk_common_ud_t *udata, H5AC_info_t *child_entry);
@@ -553,6 +553,7 @@ typedef struct H5D_chunk_proxy_t {
/* first field in structure */
H5D_t *dset; /* Pointer to dataset that chunk proxies are related to */
H5D_rdcc_ent_t *ent; /* Pointer to chunk cache entry this proxy is standing in for */
+ hbool_t supported; /* Whether the proxy is a flush dependency of the index */
} H5D_chunk_proxy_t;
@@ -694,6 +695,11 @@ H5_DLL herr_t H5D_chunk_bh_info(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout,
const H5O_pline_t *pline, hsize_t *btree_size);
H5_DLL herr_t H5D_chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream);
H5_DLL herr_t H5D_chunk_dest(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
+H5_DLL herr_t H5D_chunk_create_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *parent);
+H5_DLL herr_t H5D_chunk_update_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *old_parent,
+ void *new_parent);
#ifdef H5D_CHUNK_DEBUG
H5_DLL herr_t H5D_chunk_stats(const H5D_t *dset, hbool_t headers);
#endif /* H5D_CHUNK_DEBUG */
@@ -724,10 +730,14 @@ H5_DLL herr_t H5D_fill_term(H5D_fill_buf_info_t *fb_info);
/* Functions that operate on chunk proxy objects */
H5_DLL herr_t H5D_chunk_proxy_create(H5D_t *dset, hid_t dxpl_id,
- H5D_chunk_common_ud_t *udata, H5D_rdcc_ent_t *ent);
+ H5D_chunk_ud_t *udata, H5D_rdcc_ent_t *ent);
H5_DLL herr_t H5D_chunk_proxy_remove(const H5D_t *dset, hid_t dxpl_id,
H5D_rdcc_ent_t *ent);
-H5_DLL herr_t H5D_chunk_proxy_mark(const H5D_rdcc_ent_t *ent, hbool_t dirty);
+H5_DLL herr_t H5D_chunk_proxy_mark(H5D_rdcc_ent_t *ent, hbool_t dirty);
+H5_DLL herr_t H5D_chunk_proxy_create_flush_dep(H5D_rdcc_ent_t *ent,
+ void *parent);
+H5_DLL herr_t H5D_chunk_proxy_update_flush_dep(H5D_rdcc_ent_t *ent,
+ void *old_parent, void *new_parent);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5Dproxy.c b/src/H5Dproxy.c
index fb52517..7be2121 100644
--- a/src/H5Dproxy.c
+++ b/src/H5Dproxy.c
@@ -319,17 +319,19 @@ H5D_cache_proxy_size(const H5F_t UNUSED *f, const H5D_chunk_proxy_t UNUSED *prox
*-------------------------------------------------------------------------
*/
herr_t
-H5D_chunk_proxy_create(H5D_t *dset, hid_t dxpl_id, H5D_chunk_common_ud_t *udata,
+H5D_chunk_proxy_create(H5D_t *dset, hid_t dxpl_id, H5D_chunk_ud_t *udata,
H5D_rdcc_ent_t *ent)
{
H5D_chunk_proxy_t *proxy = NULL; /* Chunk proxy */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ htri_t supported; /* Return value from "support" callback */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_proxy_create)
HDassert(dset);
HDassert(ent);
+ HDassert(dset->shared->layout.storage.u.chunk.ops->support);
/* Get a temp. address for chunk proxy */
if(HADDR_UNDEF == (ent->proxy_addr = H5MF_alloc_tmp(dset->oloc.file, (hsize_t)1)))
@@ -363,8 +365,9 @@ HDfprintf(stderr, "%s: ent->proxy_addr = %a\n", FUNC, ent->proxy_addr);
/* Create a flush dependency between the proxy (as the child) and the
* metadata object in the index (as the parent).
*/
- if((dset->shared->layout.storage.u.chunk.ops->support)(&idx_info, udata, (H5AC_info_t *)proxy) < 0)
+ if((supported = (dset->shared->layout.storage.u.chunk.ops->support)(&idx_info, udata, (H5AC_info_t *)proxy)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency for chunk proxy")
+ proxy->supported = (hbool_t)supported;
done:
if(ret_value < 0) {
@@ -402,6 +405,7 @@ H5D_chunk_proxy_remove(const H5D_t *dset, hid_t dxpl_id, H5D_rdcc_ent_t *ent)
HDassert(dset);
HDassert(ent);
+ HDassert(dset->shared->layout.storage.u.chunk.ops->unsupport);
#ifdef QAK
HDfprintf(stderr, "%s: ent->proxy_addr = %a\n", FUNC, ent->proxy_addr);
#endif /* QAK */
@@ -425,8 +429,11 @@ HDfprintf(stderr, "%s: ent->proxy_addr = %a\n", FUNC, ent->proxy_addr);
/* Remove flush dependency between the proxy (as the child) and the
* metadata object in the index (as the parent).
*/
- if((dset->shared->layout.storage.u.chunk.ops->unsupport)(&idx_info, &udata, (H5AC_info_t *)proxy) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency for chunk proxy")
+ if(proxy->supported) {
+ if((dset->shared->layout.storage.u.chunk.ops->unsupport)(&idx_info, &udata, (H5AC_info_t *)proxy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to remove flush dependency for chunk proxy")
+ proxy->supported = FALSE;
+ } /* end if */
/* Unpin & delete chunk proxy from metadata cache, taking ownership of it */
if(H5AC_unprotect(dset->oloc.file, dxpl_id, H5AC_CHUNK_PROXY, ent->proxy_addr, proxy, (H5AC__UNPIN_ENTRY_FLAG | H5AC__DELETED_FLAG | H5AC__TAKE_OWNERSHIP_FLAG)) < 0)
@@ -456,7 +463,7 @@ done:
* be invoked collectively when operating in parallel I/O mode
* and it's possible that this routine can be invoked during
* indepedent raw data I/O.
- *
+ *
* So, the chunk proxy's dirty state in the metadata cache may
* be out of sync with the chunk itself, but only in the direction
* of being dirty when the chunk itself is clean. We'll call
@@ -473,7 +480,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_chunk_proxy_mark(const H5D_rdcc_ent_t *ent, hbool_t dirty)
+H5D_chunk_proxy_mark(H5D_rdcc_ent_t *ent, hbool_t dirty)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -522,3 +529,80 @@ H5D_chunk_proxy_destroy(H5D_chunk_proxy_t *proxy)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5D_chunk_proxy_destroy() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_proxy_create_flush_dep
+ *
+ * Purpose: Creates a flush dependency between the specified chunk
+ * (child) and parent, if not already present.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 21 Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_proxy_create_flush_dep(H5D_rdcc_ent_t *ent, void *parent)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_proxy_create_flush_dep)
+
+ HDassert(ent);
+ HDassert(parent);
+
+ /* If the proxy already has a parent, do nothing. */
+ if(!(ent->proxy->supported)) {
+ /* Create the flush dependency */
+ if(H5AC_create_flush_dependency(parent, ent->proxy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ ent->proxy->supported = TRUE;
+    } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_chunk_proxy_create_flush_dep() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_proxy_update_flush_dep
+ *
+ * Purpose: Updates the flush dependency of the specified chunk from
+ * old_parent to new_parent, if the dependency exists.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 7 Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_chunk_proxy_update_flush_dep(H5D_rdcc_ent_t *ent, void *old_parent,
+ void *new_parent)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_proxy_update_flush_dep)
+
+ HDassert(ent);
+ HDassert(old_parent);
+ HDassert(new_parent);
+
+ /* It is guaranteed that the proxy has a parent, because the dependency
+ * should always be present if the parent object exists in the index, and
+ * this should only be called when updating the parent object */
+ HDassert(ent->proxy->supported);
+
+ /* Update the flush dependencies */
+ if(H5AC_destroy_flush_dependency(old_parent, ent->proxy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5AC_create_flush_dependency(new_parent, ent->proxy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_chunk_proxy_update_flush_dep() */
+
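Taken together, the routines in this file give a cached chunk's proxy a small lifecycle under SWMR writes. A condensed sketch from the chunk cache's point of view; error handling and the surrounding cache bookkeeping in H5Dchunk.c are elided, and `dset`, `dxpl_id`, `udata`, and `ent` come from that calling code:

    /* Condensed sketch of the chunk proxy lifecycle under SWMR writes */
    if(H5F_INTENT(dset->oloc.file) & H5F_ACC_SWMR_WRITE) {
        /* Chunk enters the cache: create a proxy and make it a flush
         * dependency child of the index metadata holding its record */
        H5D_chunk_proxy_create(dset, dxpl_id, udata, ent);

        /* Chunk data is modified or cleaned: keep the proxy's dirty state
         * roughly in sync (it may only err on the dirty side) */
        H5D_chunk_proxy_mark(ent, TRUE);

        /* Chunk is evicted from the cache: tear down the dependency
         * (if one was created) and destroy the proxy */
        H5D_chunk_proxy_remove(dset, dxpl_id, ent);
    } /* end if */
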
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index 5f32eb5..9da5a6e 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -96,6 +96,8 @@ H5B_class_t H5B_SNODE[1] = {{
H5G_node_decode_key, /*decode */
H5G_node_encode_key, /*encode */
H5G_node_debug_key, /*debug */
+ NULL, /*create_flush_dep */
+ NULL, /*update_flush_dep */
}};
/* Declare a free list to manage the H5G_node_t struct */
diff --git a/src/H5Gstab.c b/src/H5Gstab.c
index afa137c..37c54e4 100644
--- a/src/H5Gstab.c
+++ b/src/H5Gstab.c
@@ -113,7 +113,7 @@ H5G_stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint, hid_t d
HDassert(size_hint > 0);
/* Create the B-tree */
- if(H5B_create(f, dxpl_id, H5B_SNODE, NULL, &(stab->btree_addr)/*out*/) < 0)
+ if(H5B_create(f, dxpl_id, H5B_SNODE, NULL, NULL, &(stab->btree_addr)/*out*/) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create B-tree")
/* Create symbol table private heap */
@@ -248,7 +248,7 @@ H5G_stab_insert_real(H5F_t *f, H5O_stab_t *stab, const char *name,
udata.crt_info = crt_info;
/* Insert into symbol table */
- if(H5B_insert(f, dxpl_id, H5B_SNODE, stab->btree_addr, &udata) < 0)
+ if(H5B_insert(f, dxpl_id, H5B_SNODE, stab->btree_addr, &udata, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert entry")
done:
@@ -343,7 +343,7 @@ H5G_stab_remove(H5O_loc_t *loc, hid_t dxpl_id, H5RS_str_t *grp_full_path_r,
udata.grp_full_path_r = grp_full_path_r;
/* Remove from symbol table */
- if(H5B_remove(loc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &udata) < 0)
+ if(H5B_remove(loc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &udata, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to remove entry")
done:
@@ -401,7 +401,7 @@ H5G_stab_remove_by_idx(H5O_loc_t *grp_oloc, hid_t dxpl_id, H5RS_str_t *grp_full_
udata.grp_full_path_r = grp_full_path_r;
/* Remove link from symbol table */
- if(H5B_remove(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &udata) < 0)
+ if(H5B_remove(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &udata, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to remove entry")
done:
@@ -454,7 +454,7 @@ H5G_stab_delete(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab)
udata.common.heap = heap;
/* Delete entire B-tree */
- if(H5B_delete(f, dxpl_id, H5B_SNODE, stab->btree_addr, &udata) < 0)
+ if(H5B_delete(f, dxpl_id, H5B_SNODE, stab->btree_addr, &udata, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete symbol table B-tree")
/* Release resources */
@@ -523,7 +523,7 @@ H5G_stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order,
udata.op_data = op_data;
/* Iterate over the group members */
- if((ret_value = H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_iterate, &udata)) < 0)
+ if((ret_value = H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_iterate, &udata, NULL)) < 0)
HERROR(H5E_SYM, H5E_CANTNEXT, "iteration operator failed");
/* Check for too high of a starting index (ex post facto :-) */
@@ -540,7 +540,7 @@ H5G_stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order,
udata.ltable = &ltable;
/* Iterate over the group members */
- if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_build_table, &udata) < 0)
+ if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_build_table, &udata, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to build link table")
/* Check for skipping out of bounds */
@@ -599,7 +599,7 @@ H5G_stab_count(H5O_loc_t *oloc, hsize_t *num_objs, hid_t dxpl_id)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Iterate over the group members */
- if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, num_objs) < 0)
+ if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, num_objs, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed")
done:
@@ -638,7 +638,7 @@ H5G_stab_bh_size(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab,
snode_size = 0;
/* Get the B-tree & symbol table node size info */
- if(H5B_get_info(f, dxpl_id, H5B_SNODE, stab->btree_addr, &bt_info, H5G_node_iterate_size, &snode_size) < 0)
+ if(H5B_get_info(f, dxpl_id, H5B_SNODE, stab->btree_addr, &bt_info, H5G_node_iterate_size, &snode_size, NULL) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "iteration operator failed")
/* Add symbol table & B-tree node sizes to index info */
@@ -732,7 +732,7 @@ H5G_stab_get_name_by_idx(H5O_loc_t *oloc, H5_iter_order_t order, hsize_t n,
hsize_t nlinks = 0; /* Number of links in group */
/* Iterate over the symbol table nodes, to count the links */
- if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks) < 0)
+ if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed")
/* Map decreasing iteration order index to increasing iteration order index */
@@ -748,7 +748,7 @@ H5G_stab_get_name_by_idx(H5O_loc_t *oloc, H5_iter_order_t order, hsize_t n,
udata_valid = TRUE;
/* Iterate over the group members */
- if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0)
+ if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata, NULL) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed")
/* If we don't know the name now, we almost certainly went out of bounds */
@@ -861,7 +861,7 @@ H5G_stab_lookup(H5O_loc_t *grp_oloc, const char *name, H5O_link_t *lnk,
bt_udata.op_data = &udata;
/* Search the B-tree */
- if((ret_value = H5B_find(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &bt_udata)) < 0)
+ if((ret_value = H5B_find(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, &bt_udata, NULL)) < 0)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "not found")
done:
@@ -955,7 +955,7 @@ H5G_stab_lookup_by_idx(H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_t n,
hsize_t nlinks = 0; /* Number of links in group */
/* Iterate over the symbol table nodes, to count the links */
- if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks) < 0)
+ if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed")
/* Map decreasing iteration order index to increasing iteration order index */
@@ -971,7 +971,7 @@ H5G_stab_lookup_by_idx(H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_t n,
udata.found = FALSE;
/* Iterate over the group members */
- if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0)
+ if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata, NULL) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed")
/* If we didn't find the link, we almost certainly went out of bounds */
@@ -1019,10 +1019,10 @@ H5G_stab_valid(H5O_loc_t *grp_oloc, hid_t dxpl_id, H5O_stab_t *alt_stab)
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "unable to read symbol table message");
/* Check if the symbol table message's b-tree address is valid */
- if(H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr) < 0) {
+ if(H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, NULL) < 0) {
/* Address is invalid, try the b-tree address in the alternate symbol
* table message */
- if(!alt_stab || H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, alt_stab->btree_addr) < 0)
+ if(!alt_stab || H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, alt_stab->btree_addr, NULL) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to locate b-tree")
else {
/* The alternate symbol table's b-tree address is valid. Adjust the
@@ -1163,7 +1163,7 @@ H5G_stab_get_type_by_idx(H5O_loc_t *oloc, hsize_t idx, hid_t dxpl_id)
udata.type = H5G_UNKNOWN;
/* Iterate over the group members */
- if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0)
+ if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata, NULL) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "iteration operator failed")
/* If we don't know the type now, we almost certainly went out of bounds */
diff --git a/src/H5Gtest.c b/src/H5Gtest.c
index ec55e47..b6e3e141 100644
--- a/src/H5Gtest.c
+++ b/src/H5Gtest.c
@@ -626,7 +626,7 @@ H5G_verify_cached_stab_test(H5O_loc_t *grp_oloc, H5G_entry_t *ent)
HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "cached stab info does not match object header")
/* Verify that the btree address is valid */
- if(H5B_valid(grp_oloc->file, H5AC_ind_dxpl_id, H5B_SNODE, stab.btree_addr) < 0)
+ if(H5B_valid(grp_oloc->file, H5AC_ind_dxpl_id, H5B_SNODE, stab.btree_addr, NULL) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "b-tree address is invalid")
/* Verify that the heap address is valid */
@@ -794,7 +794,8 @@ H5G_verify_cached_stabs_test(hid_t gid)
/* Iterate over the b-tree, checking validity of cached information */
if((ret_value = H5B_iterate(grp->oloc.file, H5AC_ind_dxpl_id, H5B_SNODE,
- stab.btree_addr, H5G_verify_cached_stabs_test_cb, &udata)) < 0)
+ stab.btree_addr, H5G_verify_cached_stabs_test_cb, &udata, NULL))
+ < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTNEXT, FAIL, "iteration operator failed");
/* Reset metadata tagging */
diff --git a/src/H5Ostab.c b/src/H5Ostab.c
index e29a856..5d1909b 100644
--- a/src/H5Ostab.c
+++ b/src/H5Ostab.c
@@ -399,7 +399,7 @@ H5O_stab_post_copy_file(const H5O_loc_t *src_oloc, const void *mesg_src, H5O_loc
udata.cpy_info = cpy_info;
/* Iterate over objects in group, copying them */
- if((H5B_iterate(src_oloc->file, dxpl_id, H5B_SNODE, stab_src->btree_addr, H5G_node_copy, &udata)) < 0)
+ if((H5B_iterate(src_oloc->file, dxpl_id, H5B_SNODE, stab_src->btree_addr, H5G_node_copy, &udata, NULL)) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed")
done:
diff --git a/test/Makefile.am b/test/Makefile.am
index 70f49f1..7c8eeed 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -45,7 +45,8 @@ TEST_PROG= testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
getname vfd ntypes dangle dtransform reserved cross_read \
freespace mf farray earray btree2 fheap
-bin_PROGRAMS=swmr_generator swmr_reader swmr_writer
+bin_PROGRAMS=swmr_generator swmr_reader swmr_writer swmr_remove_reader \
+ swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer
# List programs to be built when testing here. error_test and err_compat are
# built at the same time as the other tests, but executed by testerror.sh.
diff --git a/test/Makefile.in b/test/Makefile.in
index 268d334..23c6ca8 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -59,7 +59,7 @@ DIST_COMMON = $(srcdir)/H5srcdir_str.h.in $(srcdir)/Makefile.am \
$(top_srcdir)/config/commence.am \
$(top_srcdir)/config/conclude.am COPYING
bin_PROGRAMS = swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) \
- swmr_writer$(EXEEXT)
+ swmr_writer$(EXEEXT) swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT)
check_PROGRAMS = $(am__EXEEXT_1) error_test$(EXEEXT) \
err_compat$(EXEEXT) tcheck_version$(EXEEXT) testmeta$(EXEEXT) \
links_env$(EXEEXT) flushrefresh$(EXEEXT)
@@ -380,6 +380,26 @@ swmr_writer_SOURCES = swmr_writer.c
swmr_writer_OBJECTS = swmr_writer.$(OBJEXT)
swmr_writer_LDADD = $(LDADD)
swmr_writer_DEPENDENCIES = libh5test.la $(LIBHDF5)
+swmr_remove_reader_SOURCES = swmr_remove_reader.c
+swmr_remove_reader_OBJECTS = swmr_remove_reader.$(OBJEXT)
+swmr_remove_reader_LDADD = $(LDADD)
+swmr_remove_reader_DEPENDENCIES = libh5test.la $(LIBHDF5)
+swmr_remove_writer_SOURCES = swmr_remove_writer.c
+swmr_remove_writer_OBJECTS = swmr_remove_writer.$(OBJEXT)
+swmr_remove_writer_LDADD = $(LDADD)
+swmr_remove_writer_DEPENDENCIES = libh5test.la $(LIBHDF5)
+swmr_addrem_writer_SOURCES = swmr_addrem_writer.c
+swmr_addrem_writer_OBJECTS = swmr_addrem_writer.$(OBJEXT)
+swmr_addrem_writer_LDADD = $(LDADD)
+swmr_addrem_writer_DEPENDENCIES = libh5test.la $(LIBHDF5)
+swmr_sparse_reader_SOURCES = swmr_sparse_reader.c
+swmr_sparse_reader_OBJECTS = swmr_sparse_reader.$(OBJEXT)
+swmr_sparse_reader_LDADD = $(LDADD)
+swmr_sparse_reader_DEPENDENCIES = libh5test.la $(LIBHDF5)
+swmr_sparse_writer_SOURCES = swmr_sparse_writer.c
+swmr_sparse_writer_OBJECTS = swmr_sparse_writer.$(OBJEXT)
+swmr_sparse_writer_LDADD = $(LDADD)
+swmr_sparse_writer_DEPENDENCIES = libh5test.la $(LIBHDF5)
tcheck_version_SOURCES = tcheck_version.c
tcheck_version_OBJECTS = tcheck_version.$(OBJEXT)
tcheck_version_LDADD = $(LDADD)
@@ -453,7 +473,7 @@ SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c bittests.c \
gen_udlinks.c getname.c gheap.c hyperslab.c istore.c lheap.c \
links.c links_env.c mf.c mount.c mtime.c ntypes.c objcopy.c \
ohdr.c pool.c reserved.c set_extent.c space_overflow.c stab.c \
- swmr_generator.c swmr_reader.c swmr_writer.c tcheck_version.c \
+ swmr_generator.c swmr_reader.c swmr_writer.c swmr_remove_reader.c swmr_remove_writer.c swmr_addrem_writer.c swmr_sparse_reader.c swmr_sparse_writer.c tcheck_version.c \
$(testhdf5_SOURCES) testmeta.c $(ttsafe_SOURCES) unlink.c \
vfd.c
DIST_SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c \
@@ -470,7 +490,7 @@ DIST_SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c \
istore.c lheap.c links.c links_env.c mf.c mount.c mtime.c \
ntypes.c objcopy.c ohdr.c pool.c reserved.c set_extent.c \
space_overflow.c stab.c swmr_generator.c swmr_reader.c \
- swmr_writer.c tcheck_version.c $(testhdf5_SOURCES) testmeta.c \
+ swmr_writer.c swmr_remove_reader.c swmr_remove_writer.c swmr_addrem_writer.c swmr_sparse_reader.c swmr_sparse_writer.c tcheck_version.c $(testhdf5_SOURCES) testmeta.c \
$(ttsafe_SOURCES) unlink.c vfd.c
ETAGS = etags
CTAGS = ctags
@@ -1188,6 +1208,21 @@ swmr_reader$(EXEEXT): $(swmr_reader_OBJECTS) $(swmr_reader_DEPENDENCIES)
swmr_writer$(EXEEXT): $(swmr_writer_OBJECTS) $(swmr_writer_DEPENDENCIES)
@rm -f swmr_writer$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(swmr_writer_OBJECTS) $(swmr_writer_LDADD) $(LIBS)
+swmr_remove_reader$(EXEEXT): $(swmr_remove_reader_OBJECTS) $(swmr_remove_reader_DEPENDENCIES)
+ @rm -f swmr_remove_reader$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(swmr_remove_reader_OBJECTS) $(swmr_remove_reader_LDADD) $(LIBS)
+swmr_remove_writer$(EXEEXT): $(swmr_remove_writer_OBJECTS) $(swmr_remove_writer_DEPENDENCIES)
+ @rm -f swmr_remove_writer$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(swmr_remove_writer_OBJECTS) $(swmr_remove_writer_LDADD) $(LIBS)
+swmr_addrem_writer$(EXEEXT): $(swmr_addrem_writer_OBJECTS) $(swmr_addrem_writer_DEPENDENCIES)
+ @rm -f swmr_addrem_writer$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(swmr_addrem_writer_OBJECTS) $(swmr_addrem_writer_LDADD) $(LIBS)
+swmr_sparse_reader$(EXEEXT): $(swmr_sparse_reader_OBJECTS) $(swmr_sparse_reader_DEPENDENCIES)
+ @rm -f swmr_sparse_reader$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(swmr_sparse_reader_OBJECTS) $(swmr_sparse_reader_LDADD) $(LIBS)
+swmr_sparse_writer$(EXEEXT): $(swmr_sparse_writer_OBJECTS) $(swmr_sparse_writer_DEPENDENCIES)
+ @rm -f swmr_sparse_writer$(EXEEXT)
+ $(AM_V_CCLD)$(LINK) $(swmr_sparse_writer_OBJECTS) $(swmr_sparse_writer_LDADD) $(LIBS)
tcheck_version$(EXEEXT): $(tcheck_version_OBJECTS) $(tcheck_version_DEPENDENCIES)
@rm -f tcheck_version$(EXEEXT)
$(AM_V_CCLD)$(LINK) $(tcheck_version_OBJECTS) $(tcheck_version_LDADD) $(LIBS)
@@ -1284,6 +1319,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_generator.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_reader.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_writer.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_remove_reader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_remove_writer.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_addrem_writer.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_sparse_reader.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swmr_sparse_writer.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tarray.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tattr.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tcheck_version.Po@am__quote@
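The writer below relies on helpers declared in test/swmr_common.h, which this page does not show. A sketch of the pieces it assumes, reconstructed from how they are used in the code that follows (only rec_id is actually referenced; any other symbol_t fields are hypothetical):

    /* Sketch of the swmr_common.h declarations assumed by swmr_addrem_writer.c */
    typedef struct {
        uint64_t rec_id;                /* Record ID (equal to its position) */
        /* ... payload fields defined in swmr_common.h ... */
    } symbol_t;

    typedef struct {
        char    *name;                  /* Dataset name */
        hid_t    dsid;                  /* Dataset ID, valid while open */
        hsize_t  nrecords;              /* Current number of records */
    } symbol_info_t;

    extern symbol_info_t *symbol_info[NLEVELS]; /* Datasets, grouped by level */
    extern unsigned symbol_count[NLEVELS];      /* # of datasets per level */

    hid_t create_symbol_datatype(void);         /* Compound type for symbol_t */
    symbol_info_t *choose_dataset(void);        /* Pick a dataset at random,
                                                 * following the level distribution */
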
diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c
new file mode 100644
index 0000000..ddfcd5c
--- /dev/null
+++ b/test/swmr_addrem_writer.c
@@ -0,0 +1,322 @@
+#include "swmr_common.h"
+
+#define MAX_SIZE_CHANGE 10
+
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim; /* Dataspace dimension */
+    unsigned u, v;              /* Local index variables */
+
+ /* Create file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return(-1);
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+{
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+printf("mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+printf("mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+/* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+}
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return(-1);
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return(-1);
+            if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+                return(-1);
+            if(1 != H5Sget_simple_extent_ndims(sid))
+                return(-1);
+            if(H5Sget_simple_extent_dims(sid, &dim, NULL) < 0)
+                return(-1);
+            symbol_info[u][v].nrecords = (hsize_t)dim;
+            if(H5Sclose(sid) < 0)
+                return(-1);
+ } /* end for */
+
+ return(fid);
+}
+
+static int
+addrem_records(hid_t fid, unsigned verbose, unsigned long nops, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start, count; /* Hyperslab selection values */
+ symbol_t buf[MAX_SIZE_CHANGE]; /* Write buffer */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+ unsigned long op_to_flush; /* # of operations before flush */
+ unsigned long u, v; /* Local index variables */
+
+ /* Reset the buffer */
+ memset(&buf, 0, sizeof(buf));
+
+ /* Create a dataspace for the record to add */
+ count = 1;
+ if((mem_sid = H5Screate_simple(1, &count, NULL)) < 0)
+ return(-1);
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return(-1);
+
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
+ /* Add and remove records to random datasets, according to frequency
+ * distribution */
+ op_to_flush = flush_count;
+ for(u=0; u<nops; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Decide whether to shrink or expand, and by how much */
+ count = (hsize_t)random() % (MAX_SIZE_CHANGE * 2) + 1;
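+ /* count is in [1, 2 * MAX_SIZE_CHANGE]: values above MAX_SIZE_CHANGE add records, the rest remove them */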
+
+ if(count > MAX_SIZE_CHANGE) {
+ /* Add records */
+ count -= MAX_SIZE_CHANGE;
+
+ /* Set the buffer's IDs (equal to its position) */
+ for(v=0; v<count; v++)
+ buf[v].rec_id = (uint64_t)symbol->nrecords + (uint64_t)v;
+
+ /* Set the memory space to the correct size */
+ if(H5Sset_extent_simple(mem_sid, 1, &count, NULL) < 0)
+ return(-1);
+
+ /* Get the coordinates to write */
+ start = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+ return(-1);*/
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords += count;
+ if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
+ return(-1);
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return(-1);
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, &start, NULL, &count, NULL) < 0)
+ return(-1);
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &buf) < 0)
+ return(-1);
+
+ /* Uncork the metadata cache */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);*/
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return(-1);
+ } /* end if */
+ else {
+ /* Shrink the dataset's dataspace */
+ if(count > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= count;
+ if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
+ return(-1);
+ } /* end else */
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ op_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == op_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return(-1);
+
+ /* Reset flush counter */
+ op_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return(-1);
+
+ return(0);
+}
+
+static void
+usage(void)
+{
+ printf("Usage error!\n");
+ printf("Usage: swmr_addrem_writer [-q] [-f <# of operations between flushing file contents>] <# of shrinks>\n");
+ printf("<# of operations between flushing file contents> should be 0 (for no flushing) or between 1 and (<# of shrinks> - 1)\n");
+ printf("Defaults to verbose (no '-q' given) and flushing every 1000 operations('-f 1000')\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ time_t curr_time; /* Current time, for seeding random number generator */
+ long nops = 0; /* # of times to grow or shrink the dataset */
+ long flush_count = 1000; /* # of operations between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of operations between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of operations to perform */
+ nops = atol(argv[u]);
+ if(nops <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nops <= 0)
+ usage();
+ if(flush_count >= nops)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ printf("Parameters:\n");
+ printf("\t# of operations between flushes = %ld\n", flush_count);
+ printf("\t# of operations = %ld\n", nops);
+ } /* end if */
+
+ /* Create randomized set of numbers */
+ curr_time = time(NULL);
+ srandom((unsigned)curr_time);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ printf("Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Adding and removing records\n");
+
+ /* Grow and shrink datasets */
+ if(addrem_records(fid, verbose, (unsigned long)nops, (unsigned long)flush_count) < 0) {
+ printf("Error adding and removing records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ printf("Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ printf("Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return(0);
+}
+
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
index 3b4d7ef..a19f70f 100644
--- a/test/swmr_generator.c
+++ b/test/swmr_generator.c
@@ -3,7 +3,8 @@
#define CHUNK_SIZE 50
static int
-gen_skeleton(const char *filename, unsigned verbose, int comp_level)
+gen_skeleton(const char *filename, unsigned verbose, int comp_level,
+ const char *index_type)
{
hid_t fid; /* File ID for new HDF5 file */
hid_t fcpl; /* File creation property list */
@@ -11,16 +12,24 @@ gen_skeleton(const char *filename, unsigned verbose, int comp_level)
hid_t dcpl; /* Dataset creation property list */
hid_t tid; /* Datatype for dataset elements */
hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
hsize_t dims = 0; /* Dataset starting dimensions */
hsize_t max_dims = H5S_UNLIMITED; /* Dataset maximum dimensions */
hsize_t chunk_dims = CHUNK_SIZE; /* Chunk dimensions */
+#ifdef FILLVAL_WORKS
+ symbol_t fillval; /* Dataset fill value */
+#endif /* FILLVAL_WORKS */
+ unsigned seed; /* Random seed to write to root group attribute */
unsigned u, v; /* Local index variable */
/* Create file access property list */
if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
return(-1);
- if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
- return(-1);
+
+ /* Select the correct index type: anything other than the v1 B-tree ("b1")
+ * requires the latest version of the file format */
+ if(strcmp(index_type, "b1"))
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return(-1);
#ifdef QAK
/* Increase the initial size of the metadata cache */
@@ -70,6 +79,17 @@ printf("mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length
if(H5Pclose(fapl) < 0)
return(-1);
+ /* Create attribute with (shared) random number seed - for sparse test */
+ seed = (unsigned)time(NULL);
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return(-1);
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return(-1);
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return(-1);
+ if(H5Sclose(sid) < 0)
+ return(-1);
+
/* Create datatype for creating datasets */
if((tid = create_symbol_datatype()) < 0)
return(-1);
@@ -87,6 +107,15 @@ printf("mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length
if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
return(-1);
} /* end if */
+#ifdef FILLVAL_WORKS
+ /* Currently fill values do not work because they can bump the dataspace
+ * message to the second object header chunk. We should enable the fillval
+ * here when this is fixed. -NAF 8/11/11 */
+ memset(&fillval, 0, sizeof(fillval));
+ fillval.rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Pset_fill_value(dcpl, tid, &fillval) < 0)
+ return(-1);
+#endif /* FILLVAL_WORKS */
/* Emit informational message */
if(verbose)
@@ -127,9 +156,11 @@ static void
usage(void)
{
printf("Usage error!\n");
- printf("Usage: swmr_generator [-q] [-c <deflate compression level>]\n");
+ printf("Usage: swmr_generator [-q] [-c <deflate compression level>] [-i <index type>]\n");
printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
- printf("Defaults to verbose (no '-q' given) and no compression ('-c -1')\n");
+ printf("<index type> should be b1, b2, fa, or ea (fa and b2 not yet implemented)\n");
+ printf("Defaults to verbose (no '-q' given), no compression ('-c -1') and v1 b-tree\n");
+ printf(" (-i b1)");
exit(1);
} /* end usage() */
@@ -137,6 +168,7 @@ int main(int argc, const char *argv[])
{
int comp_level = (-1); /* Compression level (-1 is no compression) */
unsigned verbose = 1; /* Whether to emit some informational messages */
+ const char *index_type = "b1"; /* Chunk index type */
unsigned u; /* Local index variables */
/* Parse command line options */
@@ -153,6 +185,15 @@ int main(int argc, const char *argv[])
u += 2;
break;
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(strcmp(index_type, "b1")
+ && strcmp(index_type, "ea"))
+ usage();
+ u += 2;
+ break;
+
/* Be quiet */
case 'q':
verbose = 0;
@@ -171,6 +212,7 @@ int main(int argc, const char *argv[])
if(verbose) {
printf("Parameters:\n");
printf("\tcompression level = %d\n", comp_level);
+ printf("\tindex_type = %s\n", index_type);
} /* end if */
/* Emit informational message */
@@ -178,7 +220,7 @@ int main(int argc, const char *argv[])
printf("Generating skeleton file: %s\n", FILENAME);
/* Generate file skeleton */
- if(gen_skeleton(FILENAME, verbose, comp_level) < 0) {
+ if(gen_skeleton(FILENAME, verbose, comp_level, index_type) < 0) {
printf("Error generating skeleton file!\n");
exit(1);
} /* end if */
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
index 394a46e..48cf4bd 100644
--- a/test/swmr_reader.c
+++ b/test/swmr_reader.c
@@ -81,7 +81,7 @@ read_records(const char *filename, unsigned verbose, unsigned long nseconds,
/* Allocate space for 'common' datasets, if any */
if(ncommon > 0) {
/* Allocate array to hold pointers to symbols for common datasets */
- if(NULL == (sym_com = malloc(sizeof(symbol_info_t *) * ncommon)))
+ if(NULL == (sym_com = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * ncommon)))
return(-1);
/* Open the common datasets */
@@ -90,7 +90,7 @@ read_records(const char *filename, unsigned verbose, unsigned long nseconds,
/* Determine the offset of the symbol, within level 0 symbols */
/* (level 0 symbols are the most common symbols) */
- offset = random() % symbol_count[0];
+ offset = (unsigned)(random() % symbol_count[0]);
sym_com[v] = &symbol_info[0][offset];
/* Emit informational message */
@@ -102,7 +102,7 @@ read_records(const char *filename, unsigned verbose, unsigned long nseconds,
/* Allocate space for 'random' datasets, if any */
if(nrandom > 0) {
/* Allocate array to hold pointers to symbols for random datasets */
- if(NULL == (sym_rand = malloc(sizeof(symbol_info_t *) * nrandom)))
+ if(NULL == (sym_rand = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * nrandom)))
return(-1);
/* Determine the random datasets */
@@ -133,7 +133,7 @@ read_records(const char *filename, unsigned verbose, unsigned long nseconds,
curr_time = start_time;
/* Loop over reading records until [at least] the correct # of seconds have passed */
- while(curr_time < (time_t)(start_time + nseconds)) {
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
hid_t fid; /* File ID */
/* Emit informational message */
diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c
new file mode 100644
index 0000000..61d5aab
--- /dev/null
+++ b/test/swmr_remove_reader.c
@@ -0,0 +1,366 @@
+#include "swmr_common.h"
+#include <unistd.h>
+
+static hid_t symbol_tid = (-1);
+
+static int
+check_dataset(hid_t fid, unsigned verbose, const char *sym_name, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start, count = 1; /* Hyperslab selection values */
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return(-1);
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return(-1);
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose a random record in the dataset, choosing the last record half
+ * the time */
+ start = (hsize_t)(random() % (snpoints * 2));
+ if(start > (hsize_t)(snpoints - 1))
+ start = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, &start, NULL, &count, NULL) < 0)
+ return(-1);
+
+ /* Read record from dataset */
+#ifdef FILLVAL_WORKS
+ /* When shrinking the dataset, we cannot guarantee that the buffer will
+ * even be touched, unless there is a fill value. Since fill values do
+ * not work with SWMR currently (see note in swmr_generator.c), we
+ * simply initialize rec_id to 0. */
+ record->rec_id = (uint64_t)ULLONG_MAX - 1;
+#else /* FILLVAL_WORKS */
+ record->rec_id = (uint64_t)0;
+#endif /* FILLVAL_WORKS */
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return(-1);
+
+ /* Verify record value - note that it may be the fill value, because the
+ * chunk may be deleted before the object header has the updated
+ * dimensions */
+ if(record->rec_id != start && record->rec_id != (uint64_t)0) {
+ printf("Incorrect record value!\n");
+ printf("Symbol = '%s', # of records = %lld, record->rec_id = %llx\n", sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ return(-1);
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return(-1);
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return(-1);
+
+ return(0);
+} /* end check_dataset() */
+
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL, **sym_rand = NULL; /* Pointers to arrays of common & random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned v; /* Local index variable */
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * ncommon)))
+ return(-1);
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)(random() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * nrandom)))
+ return(-1);
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return(-1);
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Reading records\n");
+
+ /* Get the starting time */
+ start_time = time(NULL);
+ curr_time = start_time;
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+ hid_t fid; /* File ID */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
+ return(-1);
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ printf("Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, sym_com[v]->name, &record, mem_sid) < 0)
+ return(-1);
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ printf("Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, sym_rand[v]->name, &record, mem_sid) < 0)
+ return(-1);
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return(-1);
+
+ /* Sleep for the appropriate # of seconds */
+ sleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = time(NULL);
+ } /* end while */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ free(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ free(sym_com);
+ } /* end if */
+
+ return(0);
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("Usage error!\n");
+ printf("Usage: swmr_reader [-q] [-s <# of seconds to sleep between polling>] [-h <# of common symbols to poll>] [-l <# of random symbols to poll>] [-r <random # seed>] <# of seconds to test>\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'), 5 common symbols to poll ('-h 5') and 10 random symbols to poll ('-l 10')\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ int random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = atoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = atoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ random_seed = atoi(argv[u + 1]);
+ if(random_seed < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = atoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of seconds to test */
+ nseconds = atol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ printf("Parameters:\n");
+ printf("\t# of seconds between polling = %d\n", poll_time);
+ printf("\t# of common symbols to poll = %d\n", ncommon);
+ printf("\t# of random symbols to poll = %d\n", nrandom);
+ printf("\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* Create randomized set of numbers */
+ random_seed += (int)time(NULL);
+ srandom((unsigned)random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ printf("Error generating symbol names!\n");
+ exit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return(-1);
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ printf("Error reading records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ printf("Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ printf("Error closing symbol datatype!\n");
+ exit(1);
+ } /* end if */
+
+ return(0);
+}
+
diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c
new file mode 100644
index 0000000..576fae8
--- /dev/null
+++ b/test/swmr_remove_writer.c
@@ -0,0 +1,243 @@
+#include "swmr_common.h"
+
+#define MAX_REMOVE_SIZE 10
+
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim; /* Dataspace dimension */
+ unsigned u, v; /* Local index variables */
+
+ /* Create file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return(-1);
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+{
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+printf("mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+printf("mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+/* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+}
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return(-1);
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return(-1);
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(1 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, &dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = (hsize_t)dim;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ } /* end for */
+
+ return(fid);
+}
+
+static int
+remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks, unsigned long flush_count)
+{
+ unsigned long shrink_to_flush; /* # of removals before flush */
+ unsigned long u, v; /* Local index variables */
+
+ /* Remove records from random datasets, according to frequency distribution */
+ shrink_to_flush = flush_count;
+ for(u = 0; u < nshrinks; u++) {
+ symbol_info_t *symbol; /* Symbol to remove record from */
+ hsize_t remove_size; /* Size to reduce dataset dimension by */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Shrink the dataset's dataspace */
+ remove_size = (hsize_t)random() % MAX_REMOVE_SIZE + 1;
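+ /* remove_size is in [1, MAX_REMOVE_SIZE]; the dataset extent is clamped at zero below */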
+ if(remove_size > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= remove_size;
+ if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
+ return(-1);
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ shrink_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == shrink_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return(-1);
+
+ /* Reset flush counter */
+ shrink_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return(-1);
+
+ return(0);
+}
+
+static void
+usage(void)
+{
+ printf("Usage error!\n");
+ printf("Usage: swmr_remove_writer [-q] [-f <# of shrinks between flushing file contents>] <# of shrinks>\n");
+ printf("<# of shrinks between flushing file contents> should be 0 (for no flushing) or between 1 and (<# of shrinks> - 1)\n");
+ printf("Defaults to verbose (no '-q' given) and flushing every 1000 shrinks('-f 1000')\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ time_t curr_time; /* Current time, for seeding random number generator */
+ long nshrinks = 0; /* # of times to shrink the dataset */
+ long flush_count = 1000; /* # of shrinks between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of shrinks between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of shrinks to perform */
+ nshrinks = atol(argv[u]);
+ if(nshrinks <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nshrinks <= 0)
+ usage();
+ if(flush_count >= nshrinks)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ printf("Parameters:\n");
+ printf("\t# of shrinks between flushes = %ld\n", flush_count);
+ printf("\t# of shrinks = %ld\n", nshrinks);
+ } /* end if */
+
+ /* Create randomized set of numbers */
+ curr_time = time(NULL);
+ srandom((unsigned)curr_time);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ printf("Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Removing records\n");
+
+ /* Remove records from datasets */
+ if(remove_records(fid, verbose, (unsigned long)nshrinks, (unsigned long)flush_count) < 0) {
+ printf("Error removing records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ printf("Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ printf("Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return(0);
+}
+
diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c
new file mode 100644
index 0000000..53bd877
--- /dev/null
+++ b/test/swmr_sparse_reader.c
@@ -0,0 +1,319 @@
+#include "swmr_common.h"
+#include <unistd.h>
+
+#define TIMEOUT 300
+
+static hid_t symbol_tid = (-1);
+
+static int
+check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hsize_t start, count = 1; /* Hyperslab selection values */
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, symbol->name, H5P_DEFAULT)) < 0)
+ return(-1);
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return(-1);
+
+ /* Choose the random record in the dataset (will be the same as chosen by
+ * the writer) */
+ start = (hsize_t)random() % symbol->nrecords;
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, &start, NULL, &count, NULL) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Symbol = '%s', location = %lld\n", symbol->name, (long long)start);
+
+ /* Read record from dataset */
+#ifdef OHDR_DEPS_WORK
+ /* Even with the sequence number attribute and all the flush dependencies,
+ * it is still currently possible for the attribute to be updated before the
+ * index and/or raw data, because the attribute may reside in an object
+ * header chunk after the first. Until this is fixed, just allow the read
+ * value to be 0. */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+#else /* OHDR_DEPS_WORK */
+ record->rec_id = (uint64_t)0;
+#endif /* OHDR_DEPS_WORK */
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return(-1);
+
+ /* Verify record value */
+ if(record->rec_id != start
+#ifndef OHDR_DEPS_WORK
+ && record->rec_id != (uint64_t)0
+#endif
+ ) {
+ printf("Incorrect record value!\n");
+ printf("Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start, (unsigned long long)record->rec_id);
+ return(-1);
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return(-1);
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return(-1);
+
+ return(0);
+} /* end check_dataset() */
+
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count)
+{
+ hid_t fid; /* File ID */
+ hid_t aid; /* Attribute ID */
+ time_t start_time; /* Starting time */
+ hid_t mem_sid; /* Memory dataspace ID */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned seed; /* Seed for random number generator */
+ unsigned iter_to_reopen = reopen_count; /* # of iterations until reopen */
+ unsigned long u; /* Local index variable */
+ hid_t fapl; /* File access property list */
+
+ /* Use "semi" file close degree, so H5Fclose will fail if any object IDs are accidentally left open */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI);
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return(-1);
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return(-1);
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return(-1);
+ if(H5Aclose(aid) < 0)
+ return(-1);
+ srandom(seed);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Reading records\n");
+
+ /* Get the starting time */
+ start_time = time(NULL);
+
+ /* Read records */
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol = NULL; /* Symbol (dataset) */
+ int can_read; /* Boolean: whether we can read the dataset */
+ htri_t attr_exists; /* Whether the sequence number attribute exists */
+ unsigned long file_u; /* Attribute sequence number (writer's "u") */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Fill in "nrecords" field. Note that this depends on the writer
+ * using the same algorithm and "nrecords" */
+ symbol->nrecords = nrecords / 5;
+
+ /* Wait until we can read the dataset */
+ can_read = 0;
+ do {
+ /* Check if sequence attribute exists */
+ if((attr_exists = H5Aexists_by_name(fid, symbol->name, "seq", H5P_DEFAULT)) < 0)
+ return(-1);
+
+ if(attr_exists) {
+ /* Read sequence number attribute */
+ if((aid = H5Aopen_by_name(fid, symbol->name, "seq", H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return(-1);
+ if(H5Aread(aid, H5T_NATIVE_ULONG, &file_u) < 0)
+ return(-1);
+ if(H5Aclose(aid) < 0)
+ return(-1);
+
+ /* Check if sequence number is at least u - if so, this should
+ * guarantee that this record has been written */
+ if(file_u >= u)
+ break;
+ } /* end if */
+
+ /* Check for timeout */
+ if(time(NULL) >= (time_t)(start_time + (time_t)TIMEOUT)) {
+ printf("Reader timed out\n");
+ return(-1);
+ } /* end if */
+
+ /* Pause */
+ sleep(poll_time);
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return(-1);
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return(-1);
+ iter_to_reopen = reopen_count;
+ } while(1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Checking dataset %lu\n", u);
+
+ /* Check dataset */
+ if(check_dataset(fid, verbose, symbol, &record, mem_sid) < 0)
+ return(-1);
+
+ /* Check for reopen */
+ iter_to_reopen--;
+ if(iter_to_reopen == 0) {
+ /* Emit informational message */
+ if(verbose)
+ printf("Reopening file: %s\n", filename);
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return(-1);
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return(-1);
+ iter_to_reopen = reopen_count;
+ } /* end if */
+ } /* end for */
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ return(-1);
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return(-1);
+
+ return(0);
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("Usage error!\n");
+ printf("Usage: swmr_sparse_reader [-q] [-s <# of seconds to wait for writer>] [-r <# of reads between reopens>] <# of records>\n");
+ printf("Defaults to verbose (no '-q' given), 1 second wait ('-s 1') and 1 read between reopens ('-r 1')\n");
+ printf("Note that the # of records *must* be the same as that supplied to swmr_sparse_writer\n");
+ exit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ long nrecords = 0; /* # of records to read */
+ int poll_time = 1; /* # of seconds to sleep when waiting for writer */
+ int reopen_count = 1; /* # of reads between reopens */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of reads between reopens */
+ case 'r':
+ reopen_count = atoi(argv[u + 1]);
+ if(reopen_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = atoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to read */
+ nrecords = atol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ printf("Parameters:\n");
+ printf("\t# of seconds between polling = %d\n", poll_time);
+ printf("\t# of reads between reopens = %d\n", reopen_count);
+ printf("\t# of records to read = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ printf("Error generating symbol names!\n");
+ exit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return(-1);
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long) nrecords, (unsigned)poll_time, (unsigned)reopen_count) < 0) {
+ printf("Error reading records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ printf("Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ printf("Error closing symbol datatype!\n");
+ exit(1);
+ } /* end if */
+
+ return(0);
+}
+
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
new file mode 100644
index 0000000..97e2674
--- /dev/null
+++ b/test/swmr_sparse_writer.c
@@ -0,0 +1,345 @@
+#include "swmr_common.h"
+
+#define BUSY_WAIT 100000
+
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t aid; /* Attribute ID */
+ unsigned seed; /* Seed for random number generator */
+ unsigned u, v; /* Local index variables */
+
+ /* Create file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return(-1);
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+{
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+printf("mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+printf("mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+/* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+}
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return(-1);
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening datasets\n");
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return(-1);
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return(-1);
+ if(H5Aclose(aid) < 0)
+ return(-1);
+ srandom(seed);
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return(-1);
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return(fid);
+}
+
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start, count = 1; /* Hyperslab selection values */
+ symbol_t record; /* The record to add to the dataset */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ volatile int dummy; /* Dummy variable for busy sleep */
+ unsigned long u, v; /* Local index variables */
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return(-1);
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return(-1);
+
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+ hid_t aid; /* Attribute ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+ return(-1);*/
+
+ /* If this is the first time the dataset has been opened, extend it and
+ * add the sequence attribute */
+ if(symbol->nrecords == 0) {
+ symbol->nrecords = nrecords / 5;
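+ /* Note: this initial extent must match the "nrecords / 5" computation in swmr_sparse_reader.c */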
+
+ if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
+ return(-1);
+
+ if((file_sid = H5Screate(H5S_SCALAR)) < 0)
+ return(-1);
+ if((aid = H5Acreate2(symbol->dsid, "seq", H5T_NATIVE_ULONG, file_sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return(-1);
+ if(H5Sclose(file_sid) < 0)
+ return(-1);
+ } /* end if */
+ else if((aid = H5Aopen(symbol->dsid, "seq", H5P_DEFAULT)) < 0)
+ return(-1);
+
+ /* Get the coordinate to write */
+ start = (hsize_t)random() % symbol->nrecords;
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = start;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
+ return(-1);
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return(-1);
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, &start, NULL, &count, NULL) < 0)
+ return(-1);
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return(-1);
+
+ /* Write the sequence number attribute. Since we synchronize the random
+ * number seed, the readers will always generate the same sequence of
+ * randomly chosen datasets and offsets. Therefore, and because of the
+ * flush dependencies on the object header, the reader will be
+ * guaranteed to see the written data if the sequence attribute is >=u.
+ */
+ if(H5Awrite(aid, H5T_NATIVE_ULONG, &u) < 0)
+ return(-1);
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0)
+ return(-1);
+
+ /* Uncork the metadata cache */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);*/
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return(-1);
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return(-1);
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+
+ /* Busy wait, to let readers catch up */
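+ /* ('dummy' is volatile and checked below, so the compiler cannot optimize the loop away) */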
+ dummy = 0;
+ for(v=0; v<BUSY_WAIT; v++)
+ dummy++;
+ if((unsigned long)dummy != v)
+ return(-1);
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return(-1);
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return(-1);
+
+ return(0);
+}
+
+static void
+usage(void)
+{
+ printf("Usage error!\n");
+ printf("Usage: swmr_sparse_writer [-q] [-f <# of records to write between flushing file contents>] <# of records>\n");
+ printf("<# of records to write between flushing file contents> should be 0 (for no flushing) or between 1 and (<# of records> - 1)\n");
+ printf("Defaults to verbose (no '-q' given) and flushing every 1000 records('-f 1000')\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = atol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ printf("Parameters:\n");
+ printf("\t# of records between flushes = %ld\n", flush_count);
+ printf("\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return(-1);
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ printf("Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ printf("Error appending records to datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ printf("Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ printf("Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ printf("Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return(0);
+}
+
diff --git a/test/swmr_writer.c b/test/swmr_writer.c
index 0949872..c584f60 100644
--- a/test/swmr_writer.c
+++ b/test/swmr_writer.c
@@ -61,6 +61,8 @@ add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long f
hid_t mem_sid; /* Memory dataspace ID */
hsize_t start, count = 1; /* Hyperslab selection values */
symbol_t record; /* The record to add to the dataset */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
unsigned long rec_to_flush; /* # of records left to write before flush */
unsigned long u, v; /* Local index variables */
@@ -76,6 +78,17 @@ add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long f
if((tid = create_symbol_datatype()) < 0)
return(-1);
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
/* Add records to random datasets, according to frequency distribution */
rec_to_flush = flush_count;
for(u = 0; u < nrecords; u++) {
@@ -86,11 +99,16 @@ add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long f
symbol = choose_dataset();
/* Set the record's ID (equal to its position) */
- record.rec_id = symbol->nrecords;;
+ record.rec_id = symbol->nrecords;
/* Get the coordinate to write */
start = symbol->nrecords;
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+ return(-1);*/
+
/* Extend the dataset's dataspace to hold the new record */
symbol->nrecords++;
if(H5Dset_extent(symbol->dsid, &symbol->nrecords) < 0)
@@ -108,6 +126,10 @@ add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long f
if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
return(-1);
+ /* Uncork the metadata cache */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+ return(-1);*/
+
/* Close the dataset's dataspace */
if(H5Sclose(file_sid) < 0)
return(-1);
diff --git a/test/testswmr.sh b/test/testswmr.sh
index c0d2e08..090a131 100755
--- a/test/testswmr.sh
+++ b/test/testswmr.sh
@@ -24,8 +24,13 @@
###############################################################################
Nreaders=5 # number of readers to launch
+Nrdrs_spa=3 # number of sparse readers to launch
Nrecords=200000 # number of records to write
-Nsecs=5 # number of seconds per read interval
+Nrecs_rem=40000 # number of times to shrink
+Nrecs_spa=20000 # number of records to write in the sparse test
+Nsecs_add=5 # number of seconds per read interval (writer/add test)
+Nsecs_rem=3 # number of seconds per read interval (remove test)
+Nsecs_addrem=8 # number of seconds per read interval (add/remove test)
nerrors=0
###############################################################################
@@ -63,49 +68,236 @@ while [ $# -gt 0 ]; do
esac
done
-# Launch the Generator
-echo launch the swmr_generator
-./swmr_generator
-if test $? -ne 0; then
- echo generator had error
- nerrors=`expr $nerrors + 1`
-fi
+# Loop over index types
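+# ("b1" = version 1 B-tree chunk index, "ea" = extensible array chunk index)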
+for index_type in "-i b1" "-i ea"
+do
+ # Try with and without compression
+ for compress in "" "-c 1"
+ do
+ ###############################################################################
+ ## Writer test - test expanding the dataset
+ ###############################################################################
-# Launch the Writer
-echo launch the swmr_writer
-./swmr_writer $Nrecords &
-pid_writer=$!
-$DPRINT pid_writer=$pid_writer
-
-# Launch the Readers
-n=0
-echo launch $Nreaders swmr_readers
-while [ $n -lt $Nreaders ]; do
- ./swmr_reader -r $n $Nsecs &
- pid_readers="$pid_readers $!"
- n=`expr $n + 1`
-done
-$DPRINT pid_readers=$pid_readers
-$IFDEBUG ps
-
-# Collect exit code of the readers first because they usually finish
-# before the writer.
-for xpid in $pid_readers; do
- $DPRINT checked reader $xpid
- wait $xpid
- if test $? -ne 0; then
- echo reader had error
- nerrors=`expr $nerrors + 1`
- fi
-done
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
-# Collect exit code of the writer
-$DPRINT checked writer $pid_writer
-wait $pid_writer
-if test $? -ne 0; then
- echo writer had error
- nerrors=`expr $nerrors + 1`
-fi
+ # Launch the Writer
+ echo launch the swmr_writer
+ ./swmr_writer $Nrecords &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Launch the Readers
+ n=0
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ while [ $n -lt $Nreaders ]; do
+ ./swmr_reader -r $n $Nsecs_add &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ ###############################################################################
+ ## Remove test - test shrinking the dataset
+ ###############################################################################
+
+ # Launch the Remove Writer
+ echo launch the swmr_remove_writer
+ ./swmr_remove_writer $Nrecs_rem &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Launch the Remove Readers
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ ./swmr_remove_reader -r $n $Nsecs_rem &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ ###############################################################################
+ ## Add/remove test - randomly grow or shrink the dataset
+ ###############################################################################
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Writer (not in parallel - just to rebuild the datasets)
+ echo launch the swmr_writer
+ ./swmr_writer $Nrecords
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Add/Remove Writer
+ echo launch the swmr_addrem_writer
+ ./swmr_addrem_writer $Nrecords &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Launch the Add/Remove Readers
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ ./swmr_remove_reader -r $n $Nsecs_addrem &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ ###############################################################################
+ ## Sparse writer test - test writing to random locations in the dataset
+ ###############################################################################
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Sparse writer
+ echo launch the swmr_sparse_writer
+ nice -n 20 ./swmr_sparse_writer -q $Nrecs_spa &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Launch the Sparse readers
+ n=0
+ pid_readers=""
+ echo launch $Nrdrs_spa swmr_sparse_readers
+ while [ $n -lt $Nrdrs_spa ]; do
+ ./swmr_sparse_reader -q $Nrecs_spa &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Collect exit code of the readers
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+ done
+done
###############################################################################
## Report and exit