Diffstat (limited to 'src'): 60 files changed, 994 insertions, 575 deletions
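
Editor's note: the hunks below change the metadata cache protect/'load' interface from two loosely typed user-data parameters (udata1, udata2) to a single udata pointer, with each cache client bundling what its callback needs into its own struct (for example H5B_cache_ud_t with f, type and rc_shared, or H5B2_hdr_cache_ud_t with f and ctx_udata). The following is a minimal, self-contained sketch of that calling convention under those assumptions; it is a simplified model and not HDF5 code, and the names file_t, class_t, rc_t, cache_ud_t and protect are hypothetical stand-ins for the real H5F_t/H5B_class_t/H5RC_t/H5B_cache_ud_t/H5AC_protect identifiers.

    /*
     * Simplified model (not HDF5 code) of the single-udata calling convention
     * introduced by this patch: the client packs everything its cache callback
     * needs into one struct and passes a single opaque pointer, instead of
     * threading two fixed udata1/udata2 slots through the cache.
     */
    #include <stdio.h>

    /* Hypothetical stand-ins for H5F_t, H5B_class_t and H5RC_t */
    typedef struct file_t  { const char *name; } file_t;
    typedef struct class_t { const char *id;   } class_t;
    typedef struct rc_t    { int refcount;     } rc_t;

    /* Analogue of H5B_cache_ud_t: fields mirror those set up in the hunks below */
    typedef struct cache_ud_t {
        file_t  *f;         /* file the node lives in         */
        class_t *type;      /* B-tree class operating on it   */
        rc_t    *rc_shared; /* ref-counted shared B-tree info */
    } cache_ud_t;

    /* Analogue of the new single-udata protect/'load' entry point */
    static void protect(file_t *f, void *udata)
    {
        cache_ud_t *ud = (cache_ud_t *)udata;   /* client-specific unpacking */

        printf("protect %s: class=%s refs=%d\n",
               f->name, ud->type->id, ud->rc_shared->refcount);
    }

    int main(void)
    {
        file_t  f   = { "test.h5" };
        class_t cls = { "H5B_SNODE" };
        rc_t    rc  = { 1 };
        cache_ud_t cache_udata;      /* mirrors 'cache_udata.f = f; ...' in the patch */

        cache_udata.f = &f;
        cache_udata.type = &cls;
        cache_udata.rc_shared = &rc;

        protect(&f, &cache_udata);   /* one opaque pointer instead of udata1/udata2 */
        return 0;
    }
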
@@ -1624,8 +1624,7 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, - const void *udata1, - void *udata2, + void *udata, H5AC_protect_t rw) { /* char * fcn_name = "H5AC_protect"; */ @@ -1697,8 +1696,7 @@ H5AC_protect(H5F_t *f, H5AC_noblock_dxpl_id, type, addr, - udata1, - udata2, + udata, protect_flags); if ( thing == NULL ) { diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 43a512d..6d79608 100644 --- a/src/H5ACprivate.h +++ b/src/H5ACprivate.h @@ -308,7 +308,7 @@ H5_DLL herr_t H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing, unsigned int flags); H5_DLL herr_t H5AC_pin_protected_entry(void *thing); H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, - haddr_t addr, const void *udata1, void *udata2, + haddr_t addr, void *udata, H5AC_protect_t rw); H5_DLL herr_t H5AC_resize_pinned_entry(void *thing, size_t new_size); H5_DLL herr_t H5AC_unpin_entry(void *thing); @@ -258,6 +258,7 @@ done: (void)H5MF_xfree(f, H5FD_MEM_BTREE, dxpl_id, *addr_p, (hsize_t)shared->sizeof_rnode); } /* end if */ if(bt) + /* Destroy B-tree node */ if(H5B_node_dest(bt) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node") } /* end if */ @@ -293,7 +294,9 @@ htri_t H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *udata) { H5B_t *bt = NULL; + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned idx = 0, lt = 0, rt; /* Final, left & right key indices */ int cmp = 1; /* Key comparison value */ htri_t ret_value; /* Return value */ @@ -310,14 +313,21 @@ H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *u HDassert(type->found); HDassert(H5F_addr_defined(addr)); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* * Perform a binary search to locate the child which contains * the thing for which we're searching. 
*/ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") - shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); rt = bt->nchildren; while(lt < rt && cmp) { @@ -382,6 +392,7 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_t *old_bt, unsigned *old_bt_flags, { H5P_genplist_t *dx_plist; /* Data transfer property list */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned new_bt_flags = H5AC__NO_FLAGS_SET; H5B_t *new_bt = NULL; unsigned nleft, nright; /* Number of keys in left & right halves */ @@ -461,7 +472,10 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_t *old_bt, unsigned *old_bt_flags, */ if(H5B_create(f, dxpl_id, shared->type, udata, new_addr_p/*out*/) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create B-tree") - if(NULL == (new_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, *new_addr_p, shared->type, udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.type = shared->type; + cache_udata.rc_shared = old_bt->rc_shared; + if(NULL == (new_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, *new_addr_p, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree") new_bt->level = old_bt->level; @@ -500,8 +514,12 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_t *old_bt, unsigned *old_bt_flags, if(H5F_addr_defined(old_bt->right)) { H5B_t *tmp_bt; + H5B_cache_ud_t cache_udata2; /* User-data for metadata cache callback */ - if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, old_bt->right, shared->type, udata, H5AC_WRITE))) + cache_udata2.f = f; + cache_udata2.type = shared->type; + cache_udata2.rc_shared = old_bt->rc_shared; + if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, old_bt->right, &cache_udata2, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load right sibling") tmp_bt->left = *new_addr_p; @@ -551,7 +569,9 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, unsigned level; H5B_t *bt; H5B_t *new_bt; /* Copy of B-tree info */ + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ H5B_ins_t my_ins = H5B_INS_ERROR; herr_t ret_value = SUCCEED; @@ -570,11 +590,18 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, HGOTO_DONE(SUCCEED) HDassert(H5B_INS_RIGHT == my_ins); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. 
count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* the current root */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to locate root of B-tree") - shared=(H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); level = bt->level; @@ -586,7 +613,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, bt = NULL; /* the new node */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child, type, udata, H5AC_READ))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load new node") if(!rt_key_changed) @@ -606,7 +633,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move root") /* update the new child's left pointer */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child, type, udata, H5AC_WRITE))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load new child") bt->left = old_root; @@ -620,7 +647,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, * at the new location -QAK */ /* Bring the old root into the cache if it's not already */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_WRITE))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load new child") /* Make certain the old root info is marked as dirty before moving it, */ @@ -785,7 +812,9 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type { unsigned bt_flags = H5AC__NO_FLAGS_SET, twin_flags = H5AC__NO_FLAGS_SET; H5B_t *bt = NULL, *twin = NULL; + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned lt = 0, idx = 0, rt; /* Left, final & right index values */ int cmp = -1; /* Key comparison value */ haddr_t child_addr = HADDR_UNDEF; @@ -812,15 +841,22 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type *lt_key_changed = FALSE; *rt_key_changed = FALSE; + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, H5B_INS_ERROR, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* * Use a binary search to find the child that will receive the new * data. When the search completes IDX points to the child that * should get the new data. 
*/ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node") - shared=(H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); rt = bt->nchildren; while(lt < rt && cmp) { @@ -1004,7 +1040,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type if(bt->nchildren == shared->two_k) { if(H5B_split(f, dxpl_id, bt, &bt_flags, addr, idx, udata, new_node_p/*out*/) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, H5B_INS_ERROR, "unable to split node") - if(NULL == (twin = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, *new_node_p, type, udata, H5AC_WRITE))) + if(NULL == (twin = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, *new_node_p, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node") if(idx < bt->nchildren) { tmp_bt = bt; @@ -1076,6 +1112,9 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add H5B_operator_t op, void *udata) { H5B_t *bt = NULL; /* Pointer to current B-tree node */ + H5RC_t *rc_shared; /* Ref-counted shared info */ + H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ uint8_t *native = NULL; /* Array of keys in native format */ haddr_t *child = NULL; /* Array of child pointers */ herr_t ret_value; /* Return value */ @@ -1091,8 +1130,17 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add HDassert(op); HDassert(udata); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. 
count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* Protect the initial/current node */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load B-tree node") if(bt->level > 0) { @@ -1108,14 +1156,9 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, H5_ITER_ERROR, "unable to list B-tree node") } /* end if */ else { - H5B_shared_t *shared; /* Pointer to shared B-tree info */ unsigned nchildren; /* Number of child pointers */ haddr_t next_addr; /* Address of next node to the right */ - /* Get the shared B-tree information */ - shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); - /* Allocate space for a copy of the native records & child pointers */ if(NULL == (native = H5FL_BLK_MALLOC(native_block, shared->sizeof_keys))) HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, H5_ITER_ERROR, "memory allocation failed for shared B-tree native records") @@ -1161,7 +1204,7 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add if(H5F_addr_defined(next_addr)) { /* Protect the next node to the right */ addr = next_addr; - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "B-tree node") /* Cache information from this node */ @@ -1267,7 +1310,9 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type { H5B_t *bt = NULL, *sibling = NULL; unsigned bt_flags = H5AC__NO_FLAGS_SET; + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned idx = 0, lt = 0, rt; /* Final, left & right indices */ int cmp = 1; /* Key comparison value */ H5B_ins_t ret_value = H5B_INS_ERROR; @@ -1283,14 +1328,21 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type HDassert(udata); HDassert(rt_key && rt_key_changed); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, H5B_INS_ERROR, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* * Perform a binary search to locate the child which contains the thing * for which we're searching. 
*/ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load B-tree node") - shared=(H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); rt = bt->nchildren; while(lt < rt && cmp) { @@ -1389,7 +1441,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type * "critical" for any child in its node to maintain this * consistency (and avoid breaking key/child consistency) */ if(H5F_addr_defined(bt->left)) { - if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, type, udata, H5AC_WRITE))) + if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node from tree") /* Copy right-most key from deleted node to right-most key @@ -1406,7 +1458,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type sibling = NULL; /* Make certain future references will be caught */ } /* end if */ if(H5F_addr_defined(bt->right)) { - if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, type, udata, H5AC_WRITE))) + if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to unlink node from tree") /* Copy left-most key from deleted node to left-most key in @@ -1526,15 +1578,13 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type HDassert(level > 0); /* Update the rightmost key in the left sibling */ - if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, - bt->left, type, udata, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to unlink node from tree") + if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node") HDmemcpy(H5B_NKEY(sibling, shared, sibling->nchildren), H5B_NKEY(bt, shared, 0), type->sizeof_nkey); - if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt->left, sibling, - H5AC__DIRTIED_FLAG) < 0) + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt->left, sibling, H5AC__DIRTIED_FLAG) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node from tree") sibling = NULL; /* Make certain future references will be caught */ } /* end if */ @@ -1543,15 +1593,13 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type HDassert(level > 0); /* Update the lefttmost key in the right sibling */ - if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, - bt->right, type, udata, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to unlink node from tree") + if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node") HDmemcpy(H5B_NKEY(sibling, shared, 0), H5B_NKEY(bt, shared, bt->nchildren), type->sizeof_nkey); - if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt->right, sibling, - H5AC__DIRTIED_FLAG) < 0) + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, bt->right, sibling, H5AC__DIRTIED_FLAG) < 0) HGOTO_ERROR(H5E_BTREE, 
H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node from tree") sibling = NULL; /* Make certain future references will be caught */ } /* end else */ @@ -1561,7 +1609,7 @@ done: HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node") FUNC_LEAVE_NOAPI(ret_value) -} +} /* end H5B_remove_helper() */ /*------------------------------------------------------------------------- @@ -1589,7 +1637,7 @@ H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void uint8_t *rt_key = (uint8_t*)_rt_key; /*right key*/ hbool_t lt_key_changed = FALSE; /*left key changed?*/ hbool_t rt_key_changed = FALSE; /*right key changed?*/ - herr_t ret_value=SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5B_remove, FAIL) @@ -1629,7 +1677,9 @@ herr_t H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *udata) { H5B_t *bt = NULL; /* B-tree node being operated on */ + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1640,11 +1690,18 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void HDassert(type); HDassert(H5F_addr_defined(addr)); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* Lock this B-tree node into memory for now */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") - shared=(H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); /* Iterate over all children in tree, deleting them */ if(bt->level > 0) { @@ -1871,7 +1928,9 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad const H5B_info_ud_t *info_udata) { H5B_t *bt = NULL; /* Pointer to current B-tree node */ + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned level; /* Node level */ size_t sizeof_rnode; /* Size of raw (disk) node */ haddr_t next_addr; /* Address of next node to the right */ @@ -1890,17 +1949,22 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad HDassert(info_udata->bt_info); HDassert(info_udata->udata); - /* Protect the initial/current node */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, info_udata->udata, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") - - /* Get the shared B-tree information */ - shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, info_udata->udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. 
count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); HDassert(shared); /* Get the raw node size for iteration */ sizeof_rnode = shared->sizeof_rnode; + /* Protect the initial/current node */ + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") + /* Cache information from this node */ left_child = bt->child[0]; next_addr = bt->right; @@ -1922,7 +1986,7 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad while(H5F_addr_defined(next_addr)) { /* Protect the next node to the right */ addr = next_addr; - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, info_udata->udata, H5AC_READ))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "B-tree node") /* Cache information from this node */ @@ -2008,7 +2072,7 @@ done: /*------------------------------------------------------------------------- * Function: H5B_valid * - * Purpose: Attempt to load a b-tree node. + * Purpose: Attempt to load a B-tree node. * * Return: Non-negative on success/Negative on failure * @@ -2020,8 +2084,11 @@ done: htri_t H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr) { - H5B_t *bt = NULL; /* The btree */ - htri_t ret_value = SUCCEED; /* Return value */ + H5B_t *bt = NULL; /* The B-tree */ + H5RC_t *rc_shared; /* Ref-counted shared info */ + H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ + htri_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5B_valid, FAIL) @@ -2034,9 +2101,20 @@ H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr) if(!H5F_addr_defined(addr)) HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "address is undefined") - /* Protect the node */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, NULL, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, NULL))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + + /* + * Load the tree node. 
+ */ + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node") done: /* Release the node */ @@ -2046,6 +2124,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_valid() */ + /*------------------------------------------------------------------------- * Function: H5B_node_dest * @@ -130,6 +130,7 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat { H5B2_t *bt2 = NULL; /* Pointer to the B-tree */ H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ haddr_t hdr_addr; /* B-tree header address */ H5B2_t *ret_value; /* Return value */ @@ -153,7 +154,9 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "memory allocation failed for v2 B-tree info") /* Look up the B-tree header */ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, NULL, ctx_udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.ctx_udata = ctx_udata; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header") /* Point v2 B-tree wrapper at header and bump it's ref count */ @@ -201,6 +204,7 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata) { H5B2_t *bt2 = NULL; /* Pointer to the B-tree */ H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ H5B2_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_open) @@ -210,7 +214,9 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata) HDassert(H5F_addr_defined(addr)); /* Look up the B-tree header */ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, NULL, ctx_udata, H5AC_READ))) + cache_udata.f = f; + cache_udata.ctx_udata = ctx_udata; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header") /* Check for pending heap deletion */ @@ -494,8 +500,8 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, &(curr_node_ptr.node_nrec), hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Locate record */ cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx); @@ -665,8 +671,8 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, &(curr_node_ptr.node_nrec), hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, 
curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Sanity check index */ HDassert(idx < leaf->nrec); @@ -1048,13 +1054,13 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, } /* end while */ { - unsigned leaf_flags = H5AC__NO_FLAGS_SET; H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ + unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting the leaf node */ hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */ /* Lock B-tree leaf node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, &(curr_node_ptr.node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Locate record */ cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx); @@ -1166,8 +1172,8 @@ H5B2_close(H5B2_t *bt2, hid_t dxpl_id) /* Lock the v2 B-tree header into memory */ /* (OK to pass in NULL for callback context, since we know the header must be in the cache) */ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, NULL, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load v2 B-tree header") + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header") /* Set the shared v2 B-tree header's file context for this operation */ hdr->f = bt2->f; @@ -1228,6 +1234,7 @@ H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata, H5B2_remove_t op, void *op_data) { H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5B2_delete, FAIL) @@ -1240,7 +1247,9 @@ H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata, #ifdef QAK HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr); #endif /* QAK */ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, NULL, ctx_udata, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.ctx_udata = ctx_udata; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header") /* Remember the callback & context for later */ diff --git a/src/H5B2cache.c b/src/H5B2cache.c index 16c0da3..b485afb 100644 --- a/src/H5B2cache.c +++ b/src/H5B2cache.c @@ -69,17 +69,17 @@ /********************/ /* Metadata cache callbacks */ -static H5B2_hdr_t *H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata); +static H5B2_hdr_t *H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5B2_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_hdr_t *hdr, unsigned UNUSED * flags_ptr); static herr_t H5B2_cache_hdr_dest(H5F_t *f, H5B2_hdr_t *hdr); static herr_t H5B2_cache_hdr_clear(H5F_t *f, H5B2_hdr_t *hdr, hbool_t destroy); static herr_t H5B2_cache_hdr_size(const H5F_t *f, const H5B2_hdr_t *hdr, size_t *size_ptr); -static H5B2_internal_t 
*H5B2_cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, void *udata2); +static H5B2_internal_t *H5B2_cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5B2_cache_internal_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_internal_t *i, unsigned UNUSED * flags_ptr); static herr_t H5B2_cache_internal_dest(H5F_t *f, H5B2_internal_t *internal); static herr_t H5B2_cache_internal_clear(H5F_t *f, H5B2_internal_t *i, hbool_t destroy); static herr_t H5B2_cache_internal_size(const H5F_t *f, const H5B2_internal_t *i, size_t *size_ptr); -static H5B2_leaf_t *H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, void *_hdr); +static H5B2_leaf_t *H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5B2_cache_leaf_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B2_leaf_t *l, unsigned UNUSED * flags_ptr); static herr_t H5B2_cache_leaf_dest(H5F_t *f, H5B2_leaf_t *leaf); static herr_t H5B2_cache_leaf_clear(H5F_t *f, H5B2_leaf_t *l, hbool_t destroy); @@ -147,13 +147,13 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{ *------------------------------------------------------------------------- */ static H5B2_hdr_t * -H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, - void *ctx_udata) +H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { + H5B2_hdr_t *hdr = NULL; /* B-tree header */ + H5B2_hdr_cache_ud_t *udata = (H5B2_hdr_cache_ud_t *)_udata; H5B2_create_t cparam; /* B-tree creation parameters */ H5B2_subid_t id; /* ID of B-tree class, as found in file */ uint16_t depth; /* Depth of B-tree */ - H5B2_hdr_t *hdr = NULL; /* B-tree header */ size_t size; /* Header size */ uint32_t stored_chksum; /* Stored metadata checksum value */ uint32_t computed_chksum; /* Computed metadata checksum value */ @@ -168,6 +168,7 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); + HDassert(udata); /* Allocate new B-tree header and reset cache info */ if(NULL == (hdr = H5B2_hdr_alloc(f))) @@ -203,7 +204,7 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud /* B-tree class */ id = (H5B2_subid_t)*p++; if(id >= H5B2_NUM_BTREE_ID) - HGOTO_ERROR(H5E_BTREE, H5E_BADTYPE, NULL, "invalid B-tree type") + HGOTO_ERROR(H5E_BTREE, H5E_BADTYPE, NULL, "incorrect B-tree type") /* Node size (in bytes) */ UINT32DECODE(p, cparam.node_size); @@ -219,9 +220,9 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud cparam.merge_percent = *p++; /* Root node pointer */ - H5F_addr_decode(f, (const uint8_t **)&p, &(hdr->root.addr)); + H5F_addr_decode(udata->f, (const uint8_t **)&p, &(hdr->root.addr)); UINT16DECODE(p, hdr->root.node_nrec); - H5F_DECODE_LENGTH(f, p, hdr->root.all_nrec); + H5F_DECODE_LENGTH(udata->f, p, hdr->root.all_nrec); /* Metadata checksum */ UINT32DECODE(p, stored_chksum); @@ -238,7 +239,7 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud /* Initialize B-tree header info */ cparam.cls = H5B2_client_class_g[id]; - if(H5B2_hdr_init(f, hdr, &cparam, ctx_udata, depth) < 0) + if(H5B2_hdr_init(udata->f, hdr, &cparam, udata->ctx_udata, depth) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't initialize B-tree header info") /* Set the B-tree header's address */ @@ -494,9 +495,9 @@ H5B2_cache_hdr_size(const H5F_t UNUSED *f, const H5B2_hdr_t 
*hdr, size_t *size_p *------------------------------------------------------------------------- */ static H5B2_internal_t * -H5B2_cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata, void UNUSED *udata2) +H5B2_cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - const H5B2_int_load_ud1_t *udata = (const H5B2_int_load_ud1_t *)_udata; /* Pointer to user data */ + H5B2_internal_cache_ud_t *udata = (H5B2_internal_cache_ud_t *)_udata; /* Pointer to user data */ H5B2_internal_t *internal = NULL; /* Internal node read */ const uint8_t *p; /* Pointer into raw data buffer */ uint8_t *native; /* Pointer to native record info */ @@ -575,7 +576,7 @@ H5B2_cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_uda int_node_ptr = internal->node_ptrs; for(u = 0; u < (unsigned)(internal->nrec + 1); u++) { /* Decode node pointer */ - H5F_addr_decode(f, (const uint8_t **)&p, &(int_node_ptr->addr)); + H5F_addr_decode(udata->f, (const uint8_t **)&p, &(int_node_ptr->addr)); UINT64DECODE_VAR(p, int_node_ptr->node_nrec, udata->hdr->max_nrec_size); if(udata->depth > 1) UINT64DECODE_VAR(p, int_node_ptr->all_nrec, udata->hdr->node_info[udata->depth - 1].cum_max_nrec_size) @@ -837,10 +838,9 @@ H5B2_cache_internal_size(const H5F_t UNUSED *f, const H5B2_internal_t *internal, *------------------------------------------------------------------------- */ static H5B2_leaf_t * -H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, void *_hdr) +H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - const uint16_t *nrec = (const uint16_t *)_nrec; - H5B2_hdr_t *hdr = (H5B2_hdr_t *)_hdr; /* B-tree header information */ + H5B2_leaf_cache_ud_t *udata = (H5B2_leaf_cache_ud_t *)_udata; H5B2_leaf_t *leaf = NULL; /* Pointer to lead node loaded */ const uint8_t *p; /* Pointer into raw data buffer */ uint8_t *native; /* Pointer to native keys */ @@ -854,7 +854,7 @@ H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, v /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(hdr); + HDassert(udata); /* Allocate new leaf node and reset cache info */ if(NULL == (leaf = H5FL_MALLOC(H5B2_leaf_t))) @@ -862,20 +862,20 @@ H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, v HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t)); /* Set the B-tree header's file context for this operation */ - hdr->f = f; + udata->hdr->f = udata->f; /* Increment ref. count on B-tree header */ - if(H5B2_hdr_incr(hdr) < 0) + if(H5B2_hdr_incr(udata->hdr) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment ref. 
count on B-tree header") /* Share B-tree header information */ - leaf->hdr = hdr; + leaf->hdr = udata->hdr; /* Read header from disk */ - if(H5F_block_read(f, H5FD_MEM_BTREE, addr, hdr->node_size, dxpl_id, hdr->page) < 0) + if(H5F_block_read(udata->f, H5FD_MEM_BTREE, addr, udata->hdr->node_size, dxpl_id, udata->hdr->page) < 0) HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree leaf node") - p = hdr->page; + p = udata->hdr->page; /* Magic number */ if(HDmemcmp(p, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC)) @@ -887,36 +887,36 @@ H5B2_cache_leaf_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrec, v HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "wrong B-tree leaf node version") /* B-tree type */ - if(*p++ != (uint8_t)hdr->cls->id) + if(*p++ != (uint8_t)udata->hdr->cls->id) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "incorrect B-tree type") /* Allocate space for the native keys in memory */ - if(NULL == (leaf->leaf_native = (uint8_t *)H5FL_FAC_MALLOC(hdr->node_info[0].nat_rec_fac))) + if(NULL == (leaf->leaf_native = (uint8_t *)H5FL_FAC_MALLOC(udata->hdr->node_info[0].nat_rec_fac))) HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "memory allocation failed for B-tree leaf native keys") /* Set the number of records in the leaf */ - leaf->nrec = *nrec; + leaf->nrec = udata->nrec; /* Deserialize records for leaf node */ native = leaf->leaf_native; for(u = 0; u < leaf->nrec; u++) { /* Decode record */ - if((hdr->cls->decode)(p, native, hdr->cb_ctx) < 0) + if((udata->hdr->cls->decode)(p, native, udata->hdr->cb_ctx) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, NULL, "unable to decode B-tree record") /* Move to next record */ - p += hdr->rrec_size; - native += hdr->cls->nrec_size; + p += udata->hdr->rrec_size; + native += udata->hdr->cls->nrec_size; } /* end for */ /* Compute checksum on internal node */ - computed_chksum = H5_checksum_metadata(hdr->page, (size_t)(p - (const uint8_t *)hdr->page), 0); + computed_chksum = H5_checksum_metadata(udata->hdr->page, (size_t)(p - (const uint8_t *)udata->hdr->page), 0); /* Metadata checksum */ UINT32DECODE(p, stored_chksum); /* Sanity check parsing */ - HDassert((size_t)(p - (const uint8_t *)hdr->page) <= hdr->node_size); + HDassert((size_t)(p - (const uint8_t *)udata->hdr->page) <= udata->hdr->node_size); /* Verify checksum */ if(stored_chksum != computed_chksum) diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c index f8a1fc9..cf7301d 100644 --- a/src/H5B2dbg.c +++ b/src/H5B2dbg.c @@ -94,6 +94,7 @@ H5B2_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, void *dbg_ctx = NULL; /* v2 B-tree debugging context */ unsigned u; /* Local index variable */ char temp_str[128]; /* Temporary string, for formatting */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5B2_hdr_debug, FAIL) @@ -120,7 +121,9 @@ H5B2_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, /* * Load the B-tree header. 
*/ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, type, dbg_ctx, H5AC_READ))) + cache_udata.f = f; + cache_udata.ctx_udata = dbg_ctx; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header") /* Set file pointer for this B-tree operation */ @@ -206,6 +209,7 @@ H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, void *dbg_ctx = NULL; /* v2 B-tree debugging context */ unsigned u; /* Local index variable */ char temp_str[128]; /* Temporary string, for formatting */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5B2_int_debug, FAIL) @@ -234,7 +238,9 @@ H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, /* * Load the B-tree header. */ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, type, dbg_ctx, H5AC_READ))) + cache_udata.f = f; + cache_udata.ctx_udata = dbg_ctx; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header") /* Set file pointer for this B-tree operation */ @@ -331,6 +337,7 @@ H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, { H5B2_hdr_t *hdr = NULL; /* B-tree header */ H5B2_leaf_t *leaf = NULL; /* B-tree leaf node */ + H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */ void *dbg_ctx = NULL; /* v2 B-tree debugging context */ unsigned u; /* Local index variable */ char temp_str[128]; /* Temporary string, for formatting */ @@ -362,8 +369,10 @@ H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, /* * Load the B-tree header. 
*/ - if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, type, dbg_ctx, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header") + cache_udata.f = f; + cache_udata.ctx_udata = dbg_ctx; + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree header") /* Set file pointer for this B-tree operation */ hdr->f = f; @@ -371,8 +380,8 @@ H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, /* * Load the B-tree leaf node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_LEAF, addr, &nrec, hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, addr, nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Print opening message */ HDfprintf(stream, "%*sv2 B-tree Leaf Node...\n", indent, ""); diff --git a/src/H5B2int.c b/src/H5B2int.c index f7f8995..91459a0 100644 --- a/src/H5B2int.c +++ b/src/H5B2int.c @@ -184,6 +184,7 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *cur H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */ uint16_t mid_record; /* Index of "middle" record in current node */ uint16_t old_node_nrec; /* Number of records in internal node split */ + unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_split1) @@ -215,9 +216,9 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *cur /* Protect both leafs */ if(NULL == (left_int = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (right_int = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* More setup for child nodes */ left_child = left_int; @@ -243,10 +244,10 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *cur right_addr = internal->node_ptrs[idx + 1].addr; /* Protect both leafs */ - if(NULL == (left_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, left_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (right_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, right_addr, &(internal->node_ptrs[idx + 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE))) + 
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for child nodes */ left_child = left_leaf; @@ -276,6 +277,10 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *cur /* Copy "middle" record to internal node */ HDmemcpy(H5B2_INT_NREC(internal, hdr, idx), H5B2_NAT_NREC(left_native, hdr, mid_record), hdr->cls->nrec_size); + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG; + /* Update record counts in child nodes */ internal->node_ptrs[idx].node_nrec = *left_nrec = mid_record; internal->node_ptrs[idx + 1].node_nrec = *right_nrec = (uint16_t)(old_node_nrec - (mid_record + 1)); @@ -328,13 +333,13 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *cur } /* end else */ #endif /* H5B2_DEBUG */ +done: /* Release child nodes (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + if(left_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, left_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_split1 */ @@ -356,7 +361,7 @@ done: herr_t H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id) { - H5B2_internal_t *new_root; /* Pointer to new root node */ + H5B2_internal_t *new_root = NULL; /* Pointer to new root node */ unsigned new_root_flags = H5AC__NO_FLAGS_SET; /* Cache flags for new root node */ H5B2_node_ptr_t old_root_ptr; /* Old node pointer to root node in B-tree */ size_t sz_max_nrec; /* Temporary variable for range checking */ @@ -399,7 +404,7 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id) /* Protect new root node */ if(NULL == (new_root = H5B2_protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set first node pointer in root node to old root node pointer info */ new_root->node_ptrs[0] = old_root_ptr; @@ -408,11 +413,11 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id) if(H5B2_split1(hdr, dxpl_id, hdr->depth, &(hdr->root), NULL, new_root, &new_root_flags, 0) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split old root node") +done: /* Release new root node (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, hdr->root.addr, new_root, new_root_flags) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree internal node") + if(new_root && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, hdr->root.addr, new_root, new_root_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree internal node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_split_root() */ @@ -437,11 +442,12 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, { const H5AC_class_t 
*child_class; /* Pointer to child node's class info */ haddr_t left_addr, right_addr; /* Addresses of left & right child nodes */ - void *left_child, *right_child; /* Pointers to child nodes */ + void *left_child = NULL, *right_child = NULL; /* Pointers to child nodes */ uint16_t *left_nrec, *right_nrec; /* Pointers to child # of records */ uint8_t *left_native, *right_native; /* Pointers to childs' native records */ H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */ hssize_t left_moved_nrec = 0, right_moved_nrec = 0; /* Number of records moved, for internal redistrib */ + unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_redistribute2) @@ -462,9 +468,9 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock left & right B-tree child nodes */ if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for child nodes */ left_child = left_internal; @@ -486,10 +492,10 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, left_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (right_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, right_addr, &(internal->node_ptrs[idx + 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for child nodes */ left_child = left_leaf; @@ -553,6 +559,10 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update number of records in child nodes */ *left_nrec = (uint16_t)(*left_nrec + move_nrec); *right_nrec = new_right_nrec; + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG; } /* end if */ else { /* Moving record from left node to right */ @@ -596,6 +606,10 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update number of records in child nodes */ *left_nrec = new_left_nrec; *right_nrec = (uint16_t)(*right_nrec + move_nrec); + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG; } /* end else */ /* Update # of 
records in child nodes */ @@ -624,13 +638,13 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, } /* end else */ #endif /* H5B2_DEBUG */ +done: /* Release child nodes (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(left_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, left_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_redistribute2 */ @@ -653,6 +667,8 @@ static herr_t H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx) { + H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL; /* Pointers to childs' node pointer info */ + H5B2_node_ptr_t *middle_node_ptrs = NULL; /* Pointers to childs' node pointer info */ const H5AC_class_t *child_class; /* Pointer to child node's class info */ haddr_t left_addr, right_addr; /* Addresses of left & right child nodes */ haddr_t middle_addr; /* Address of middle child node */ @@ -662,10 +678,10 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, uint16_t *middle_nrec; /* Pointers to middle child # of records */ uint8_t *left_native, *right_native; /* Pointers to childs' native records */ uint8_t *middle_native; /* Pointers to middle child's native records */ - H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL; /* Pointers to childs' node pointer info */ - H5B2_node_ptr_t *middle_node_ptrs = NULL; /* Pointers to childs' node pointer info */ hssize_t left_moved_nrec = 0, right_moved_nrec = 0; /* Number of records moved, for internal split */ hssize_t middle_moved_nrec = 0; /* Number of records moved, for internal split */ + unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ + unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_redistribute3) @@ -689,11 +705,11 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock B-tree child nodes */ if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (middle_internal = H5B2_protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE))) - 
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* More setup for child nodes */ left_child = left_internal; @@ -721,12 +737,12 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, left_addr, &(internal->node_ptrs[idx - 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (middle_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, middle_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (right_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, right_addr, &(internal->node_ptrs[idx + 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (middle_leaf = H5B2_protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for child nodes */ left_child = left_leaf; @@ -795,6 +811,10 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec - moved_middle_nrec); + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + middle_child_flags |= H5AC__DIRTIED_FLAG; } /* end if */ /* Move records into right node */ @@ -834,6 +854,10 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec - right_nrec_move); + + /* Mark nodes as dirty */ + middle_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG; } /* end if */ /* Move records out of left node */ @@ -873,6 +897,10 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec + left_nrec_move); + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + middle_child_flags |= H5AC__DIRTIED_FLAG; } /* end if */ /* Move records out of right node */ @@ -908,6 +936,10 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Slide the node pointers in right node down */ HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[right_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)(new_right_nrec + 1)); } /* end if */ + + /* Mark nodes as dirty */ + middle_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG; } /* end if */ /* Update # of records in nodes */ @@ -991,15 +1023,15 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, } /* end else */ #endif /* H5B2_DEBUG */ +done: /* Unlock child 
nodes (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, middle_addr, middle_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(left_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, left_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(middle_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, middle_addr, middle_child, middle_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_redistribute3 */ @@ -1030,6 +1062,7 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, uint16_t *left_nrec, *right_nrec; /* Pointers to left & right child # of records */ uint8_t *left_native, *right_native; /* Pointers to left & right children's native records */ H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */ + unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_merge2) @@ -1052,9 +1085,9 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock left & right B-tree child nodes */ if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* More setup for accessing child node information */ left_child = left_internal; @@ -1076,10 +1109,10 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, left_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (right_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, right_addr, &(internal->node_ptrs[idx + 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + 
if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for accessing child node information */ left_child = left_leaf; @@ -1104,6 +1137,10 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Update # of records in left node */ *left_nrec = (uint16_t)(*left_nrec + *right_nrec + 1); + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; } /* end block */ /* Update # of records in child nodes */ @@ -1139,15 +1176,15 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_assert_leaf(hdr, left_child); #endif /* H5B2_DEBUG */ +done: /* Unlock left node (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(left_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, left_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") /* Delete right node & remove from cache (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_merge2() */ @@ -1184,6 +1221,8 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */ H5B2_node_ptr_t *middle_node_ptrs = NULL;/* Pointer to child's node pointer info */ hsize_t middle_moved_nrec; /* Number of records moved, for internal split */ + unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ + unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_merge3) @@ -1208,11 +1247,11 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock B-tree child nodes */ if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (middle_internal = H5B2_protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal 
node") /* More setup for accessing child node information */ left_child = left_internal; @@ -1240,12 +1279,12 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, left_addr, &(internal->node_ptrs[idx - 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (middle_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, middle_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") - if(NULL == (right_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, right_addr, &(internal->node_ptrs[idx + 1].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (middle_leaf = H5B2_protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for accessing child node information */ left_child = left_leaf; @@ -1290,13 +1329,17 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, for(u = 0; u < middle_nrec_move; u++) middle_moved_nrec += middle_node_ptrs[u].all_nrec; - /* Slide the node pointers in right node down */ + /* Slide the node pointers in middle node down */ HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[middle_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)((unsigned)(*middle_nrec + 1) - middle_nrec_move)); } /* end if */ /* Update # of records in left & middle nodes */ *left_nrec = (uint16_t)(*left_nrec + middle_nrec_move); *middle_nrec = (uint16_t)(*middle_nrec - middle_nrec_move); + + /* Mark nodes as dirty */ + left_child_flags |= H5AC__DIRTIED_FLAG; + middle_child_flags |= H5AC__DIRTIED_FLAG; } /* end block */ /* Redistribute records into middle node */ @@ -1309,11 +1352,15 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Move node pointers also if this is an internal node */ if(depth > 1) - /* Copy node pointers from middle node into left node */ + /* Copy node pointers from right node into middle node */ HDmemcpy(&(middle_node_ptrs[*middle_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); /* Update # of records in middle node */ *middle_nrec = (uint16_t)(*middle_nrec + (*right_nrec + 1)); + + /* Mark nodes as dirty */ + middle_child_flags |= H5AC__DIRTIED_FLAG; + right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; } /* end block */ /* Update # of records in child nodes */ @@ -1355,17 +1402,17 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, } /* end else */ #endif /* H5B2_DEBUG */ +done: /* Unlock left & middle nodes (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release 
B-tree child node") - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, middle_addr, middle_child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(left_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, left_addr, left_child, left_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(middle_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, middle_addr, middle_child, middle_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") /* Delete right node & remove from cache (marked as dirty) */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_merge3() */ @@ -1414,7 +1461,7 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock B-tree child nodes */ if(NULL == (child_internal = H5B2_protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* More setup for accessing child node information */ child = child_internal; @@ -1428,8 +1475,8 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, child_addr = internal->node_ptrs[idx].addr; /* Lock B-tree child node */ - if(NULL == (child_leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, child_class, child_addr, &(internal->node_ptrs[idx].node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (child_leaf = H5B2_protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* More setup for accessing child node information */ child = child_leaf; @@ -1452,11 +1499,11 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_assert_leaf(hdr, child); #endif /* H5B2_DEBUG */ +done: /* Unlock child node */ - if(H5AC_unprotect(hdr->f, dxpl_id, child_class, child_addr, child, H5AC__DIRTIED_FLAG) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + if(child && H5AC_unprotect(hdr->f, dxpl_id, child_class, child_addr, child, H5AC__DIRTIED_FLAG) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") -done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2_swap_leaf */ @@ -1481,7 +1528,7 @@ H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, H5B2_leaf_t *leaf; /* Pointer to leaf node */ int cmp; /* Comparison value of records */ unsigned idx; /* Location of record which matches key */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_insert_leaf) @@ -1491,8 +1538,8 @@ H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, HDassert(H5F_addr_defined(curr_node_ptr->addr)); /* Lock current B-tree node */ - if(NULL == (leaf = 
(H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, &(curr_node_ptr->node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Must have a leaf node with enough space to insert a record now */ HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec); @@ -1557,7 +1604,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_internal_t *internal; /* Pointer to internal node */ unsigned internal_flags = H5AC__NO_FLAGS_SET; unsigned idx; /* Location of record which matches key */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_insert_internal) @@ -1569,7 +1616,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock current B-tree node */ if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Split or redistribute child node pointers, if necessary */ { @@ -1686,8 +1733,8 @@ done: herr_t H5B2_create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr) { - H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */ - herr_t ret_value = SUCCEED; + H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_create_leaf) @@ -1739,6 +1786,46 @@ done: /*------------------------------------------------------------------------- + * Function: H5B2_protect_leaf + * + * Purpose: "Protect" an leaf node in the metadata cache + * + * Return: Pointer to leaf node on success/NULL on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * May 5 2010 + * + *------------------------------------------------------------------------- + */ +H5B2_leaf_t * +H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec, + H5AC_protect_t rw) +{ + H5B2_leaf_cache_ud_t udata; /* User-data for callback */ + H5B2_leaf_t *ret_value; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5B2_protect_leaf) + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(H5F_addr_defined(addr)); + + /* Set up user data for callback */ + udata.f = hdr->f; + udata.hdr = hdr; + H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t) + + /* Protect the leaf node */ + if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, rw))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree leaf node") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2_protect_leaf() */ + + +/*------------------------------------------------------------------------- * Function: H5B2_create_internal * * Purpose: Creates empty internal node of a B-tree and update node pointer @@ -1756,8 +1843,8 @@ static herr_t H5B2_create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr, unsigned depth) { - H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */ - herr_t ret_value = SUCCEED; + H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_create_internal) @@ -1834,7 +1921,7 @@ H5B2_internal_t * H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec, unsigned depth, H5AC_protect_t rw) { - H5B2_int_load_ud1_t udata; /* User data to pass through to cache 'load' callback */ + H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */ H5B2_internal_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_protect_internal) @@ -1845,13 +1932,14 @@ H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, HDassert(depth > 0); /* Set up user data for callback */ + udata.f = hdr->f; udata.hdr = hdr; H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t) H5_ASSIGN_OVERFLOW(/* To: */ udata.depth, /* From: */ depth, /* From: */ unsigned, /* To: */ uint16_t) /* Protect the internal node */ - if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, NULL, rw))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree internal node") + if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, rw))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree internal node") done: FUNC_LEAVE_NOAPI(ret_value) @@ -1885,7 +1973,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, uint8_t *native = NULL; /* Pointers to copy of node's native records */ H5B2_node_ptr_t *node_ptrs = NULL; /* Pointers to node's node pointers */ unsigned u; /* Local index */ - herr_t ret_value = H5_ITER_CONT; /* Iterator return value */ + herr_t ret_value = H5_ITER_CONT; /* Iterator return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_iterate_node) @@ -1900,7 +1988,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock the current B-tree node */ if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set up information about current node */ curr_node_class = H5AC_BT2_INT; @@ -1918,8 +2006,8 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_leaf_t *leaf; /* Pointer to leaf node */ /* Lock the current B-tree node */ - if(NULL == (leaf = 
(H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node->addr, &(curr_node->node_nrec), hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Set up information about current node */ curr_node_class = H5AC_BT2_LEAF; @@ -1963,9 +2051,9 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, done: /* Release the node pointers & native records, if they were copied */ if(node_ptrs) - H5FL_FAC_FREE(hdr->node_info[depth].node_ptr_fac, node_ptrs); + node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(hdr->node_info[depth].node_ptr_fac, node_ptrs); if(native) - H5FL_FAC_FREE(hdr->node_info[depth].nat_rec_fac, native); + native = (uint8_t *)H5FL_FAC_FREE(hdr->node_info[depth].nat_rec_fac, native); FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_iterate_node() */ @@ -1992,7 +2080,7 @@ H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ unsigned idx; /* Location of record which matches key */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_remove_leaf) @@ -2003,8 +2091,8 @@ H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, /* Lock current B-tree node */ leaf_addr = curr_node_ptr->addr; - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, &(curr_node_ptr->node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Sanity check number of records */ HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); @@ -2077,7 +2165,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased, haddr_t internal_addr; /* Address of internal node */ size_t merge_nrec; /* Number of records to merge node at */ hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_remove_internal) @@ -2091,7 +2179,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased, /* Lock current B-tree node */ internal_addr = curr_node_ptr->addr; if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Determine the correct number of records to merge at */ merge_nrec = hdr->node_info[depth - 1].merge_nrec; @@ -2273,7 +2361,7 @@ H5B2_remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_leaf_t *leaf; /* Pointer to leaf node */ haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_remove_leaf_by_idx) @@ -2284,8 +2372,8 @@ H5B2_remove_leaf_by_idx(H5B2_hdr_t 
*hdr, hid_t dxpl_id, /* Lock B-tree leaf node */ leaf_addr = curr_node_ptr->addr; - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, &(curr_node_ptr->node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Sanity check number of records */ HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); @@ -2357,7 +2445,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t internal_addr; /* Address of internal node */ size_t merge_nrec; /* Number of records to merge node at */ hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_remove_internal_by_idx) @@ -2371,7 +2459,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, /* Lock current B-tree node */ internal_addr = curr_node_ptr->addr; if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") HDassert(internal->nrec == curr_node_ptr->node_nrec); HDassert(depth == hdr->depth || internal->nrec > 1); @@ -2619,8 +2707,8 @@ H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_pt { H5B2_leaf_t *leaf; /* Pointer to leaf node */ unsigned idx; /* Location of record which matches key */ - int cmp = 0; /* Comparison value of records */ - herr_t ret_value = SUCCEED; + int cmp = 0; /* Comparison value of records */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_neighbor_leaf) @@ -2631,8 +2719,8 @@ H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_pt HDassert(op); /* Lock current B-tree node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, &(curr_node_ptr->node_nrec), hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Locate node pointer for child */ cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx); @@ -2706,7 +2794,7 @@ H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, H5B2_internal_t *internal; /* Pointer to internal node */ unsigned idx; /* Location of record which matches key */ int cmp = 0; /* Comparison value of records */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_neighbor_internal) @@ -2719,7 +2807,7 @@ H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock current B-tree node */ if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Locate node pointer for child */ cmp = 
H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx); @@ -2776,9 +2864,9 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data) { const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */ - void *node = NULL; /* Pointers to current node */ + void *node = NULL; /* Pointers to current node */ uint8_t *native; /* Pointers to node's native records */ - herr_t ret_value = SUCCEED; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5B2_delete_node) @@ -2792,7 +2880,7 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock the current B-tree node */ if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set up information about current node */ curr_node_class = H5AC_BT2_INT; @@ -2801,15 +2889,15 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Descend into children */ for(u = 0; u < internal->nrec + (unsigned)1; u++) - if(H5B2_delete_node(hdr, dxpl_id, depth - 1, &(internal->node_ptrs[u]), op, op_data) < 0) + if(H5B2_delete_node(hdr, dxpl_id, (depth - 1), &(internal->node_ptrs[u]), op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node descent failed") } /* end if */ else { H5B2_leaf_t *leaf; /* Pointer to leaf node */ /* Lock the current B-tree node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node->addr, &(curr_node->node_nrec), hdr, H5AC_WRITE))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree leaf node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_WRITE))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Set up information about current node */ curr_node_class = H5AC_BT2_LEAF; @@ -2868,7 +2956,7 @@ H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, /* Lock the current B-tree node */ if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */ if(depth > 1) { @@ -2887,7 +2975,7 @@ H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, done: if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node->addr, internal, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_node_size() */ @@ -2920,11 +3008,11 @@ H5B2_internal_free(H5B2_internal_t *internal) /* Release internal node's native key buffer */ if(internal->int_native) - H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].nat_rec_fac, internal->int_native); + internal->int_native = (uint8_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].nat_rec_fac, internal->int_native); /* Release internal node's node pointer buffer */ if(internal->node_ptrs) - 
H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].node_ptr_fac, internal->node_ptrs); + internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].node_ptr_fac, internal->node_ptrs); /* Decrement ref. count on B-tree header */ if(H5B2_hdr_decr(internal->hdr) < 0) @@ -2965,7 +3053,7 @@ H5B2_leaf_free(H5B2_leaf_t *leaf) /* Release leaf's native key buffer */ if(leaf->leaf_native) - H5FL_FAC_FREE(leaf->hdr->node_info[0].nat_rec_fac, leaf->leaf_native); + leaf->leaf_native = (uint8_t *)H5FL_FAC_FREE(leaf->hdr->node_info[0].nat_rec_fac, leaf->leaf_native); /* Decrement ref. count on B-tree header */ if(H5B2_hdr_decr(leaf->hdr) < 0) diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h index f2f4e61..35c2eb1 100644 --- a/src/H5B2pkg.h +++ b/src/H5B2pkg.h @@ -207,12 +207,26 @@ struct H5B2_t { H5F_t *f; /* Pointer to file for v2 B-tree */ }; -/* User data for metadata cache 'load' callback */ -typedef struct { - H5B2_hdr_t *hdr; /* Pointer to the [pinned] v2 B-tree header */ - uint16_t nrec; /* Number of records in node to load */ - uint16_t depth; /* Depth of node to load */ -} H5B2_int_load_ud1_t; +/* Callback info for loading a free space header into the cache */ +typedef struct H5B2_hdr_cache_ud_t { + H5F_t *f; /* File that v2 b-tree header is within */ + void *ctx_udata; /* User-data for protecting */ +} H5B2_hdr_cache_ud_t; + +/* Callback info for loading a free space internal node into the cache */ +typedef struct H5B2_internal_cache_ud_t { + H5F_t *f; /* File that v2 b-tree header is within */ + H5B2_hdr_t *hdr; /* v2 B-tree header */ + unsigned nrec; /* Number of records in node to load */ + unsigned depth; /* Depth of node to load */ +} H5B2_internal_cache_ud_t; + +/* Callback info for loading a free space leaf node into the cache */ +typedef struct H5B2_leaf_cache_ud_t { + H5F_t *f; /* File that v2 b-tree header is within */ + H5B2_hdr_t *hdr; /* v2 B-tree header */ + unsigned nrec; /* Number of records in node to load */ +} H5B2_leaf_cache_ud_t; #ifdef H5B2_TESTING /* Node information for testing */ @@ -268,6 +282,10 @@ H5_DLL size_t H5B2_hdr_fuse_decr(H5B2_hdr_t *hdr); H5_DLL herr_t H5B2_hdr_dirty(H5B2_hdr_t *hdr); H5_DLL herr_t H5B2_hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id); +/* Routines for operating on leaf nodes */ +H5B2_leaf_t *H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, + unsigned nrec, H5AC_protect_t rw); + /* Routines for operating on internal nodes */ H5_DLL H5B2_internal_t *H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec, unsigned depth, H5AC_protect_t rw); diff --git a/src/H5B2stat.c b/src/H5B2stat.c index 0ee404e..a15ff11 100644 --- a/src/H5B2stat.c +++ b/src/H5B2stat.c @@ -86,22 +86,14 @@ herr_t H5B2_stat_info(H5B2_t *bt2, H5B2_stat_t *info) { - H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5B2_stat_info) /* Check arguments. 
*/ HDassert(info); - /* Set the shared v2 B-tree header's file context for this operation */ - bt2->hdr->f = bt2->f; - - /* Get the v2 B-tree header */ - hdr = bt2->hdr; - /* Get information about the B-tree */ - info->depth = hdr->depth; - info->nrecords = hdr->root.all_nrec; + info->depth = bt2->hdr->depth; + info->nrecords = bt2->hdr->root.all_nrec; FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B2_stat_info() */ diff --git a/src/H5B2test.c b/src/H5B2test.c index 222279b..a604e51 100644 --- a/src/H5B2test.c +++ b/src/H5B2test.c @@ -471,8 +471,8 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, &(curr_node_ptr.node_nrec), hdr, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Locate record */ cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx); diff --git a/src/H5Bcache.c b/src/H5Bcache.c index e9210ca..11b98d3 100644 --- a/src/H5Bcache.c +++ b/src/H5Bcache.c @@ -55,7 +55,7 @@ /********************/ /* Metadata cache callbacks */ -static H5B_t *H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata); +static H5B_t *H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5B_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *b, unsigned UNUSED * flags_ptr); static herr_t H5B_dest(H5F_t *f, H5B_t *bt); static herr_t H5B_clear(H5F_t *f, H5B_t *b, hbool_t destroy); @@ -97,10 +97,10 @@ const H5AC_class_t H5AC_BT[1] = {{ *------------------------------------------------------------------------- */ static H5B_t * -H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata) +H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - const H5B_class_t *type = (const H5B_class_t *) _type; - H5B_t *bt = NULL; + H5B_t *bt = NULL; /* Pointer to the deserialized B-tree node */ + H5B_cache_ud_t *udata = (H5B_cache_ud_t *)_udata; /* User data for callback */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ const uint8_t *p; /* Pointer into raw data buffer */ uint8_t *native; /* Pointer to native keys */ @@ -112,16 +112,14 @@ H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata) /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(type); - HDassert(type->get_shared); + HDassert(udata); if(NULL == (bt = H5FL_MALLOC(H5B_t))) HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't allocate B-tree struct") HDmemset(&bt->cache_info, 0, sizeof(H5AC_info_t)); /* Set & increment the ref-counted "shared" B-tree information for the node */ - if(NULL == (bt->rc_shared = (type->get_shared)(f, udata))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't retrieve B-tree node buffer") + bt->rc_shared = udata->rc_shared; H5RC_INC(bt->rc_shared); /* Get a pointer to the shared info, for convenience */ @@ -146,7 +144,7 @@ H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata) p += 4; /* node type and level */ - if(*p++ != (uint8_t)type->id) + if(*p++ != (uint8_t)udata->type->id) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "incorrect B-tree node type") bt->level = *p++; @@ -154,26 
+152,26 @@ H5B_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_type, void *udata) UINT16DECODE(p, bt->nchildren); /* sibling pointers */ - H5F_addr_decode(f, (const uint8_t **)&p, &(bt->left)); - H5F_addr_decode(f, (const uint8_t **)&p, &(bt->right)); + H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->left)); + H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->right)); /* the child/key pairs */ native = bt->native; for(u = 0; u < bt->nchildren; u++) { /* Decode native key value */ - if((type->decode)(shared, p, native) < 0) + if((udata->type->decode)(shared, p, native) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key") p += shared->sizeof_rkey; - native += type->sizeof_nkey; + native += udata->type->sizeof_nkey; /* Decode address value */ - H5F_addr_decode(f, (const uint8_t **)&p, bt->child + u); + H5F_addr_decode(udata->f, (const uint8_t **)&p, bt->child + u); } /* end for */ /* Decode final key */ if(bt->nchildren > 0) { /* Decode native key value */ - if((type->decode)(shared, p, native) < 0) + if((udata->type->decode)(shared, p, native) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key") } /* end if */ diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c index dff2380..3212416 100644 --- a/src/H5Bdbg.c +++ b/src/H5Bdbg.c @@ -59,7 +59,9 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f const H5B_class_t *type, void *udata) { H5B_t *bt = NULL; + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -75,13 +77,20 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f HDassert(fwidth >= 0); HDassert(type); + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* * Load the tree node. */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node") - shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); - HDassert(shared); /* * Print the values. @@ -165,7 +174,9 @@ herr_t H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void *udata) { H5B_t *bt = NULL; + H5RC_t *rc_shared; /* Ref-counted shared info */ H5B_shared_t *shared; /* Pointer to shared B-tree info */ + H5B_cache_ud_t cache_udata; /* User-data for metadata cache callback */ int i, ncell, cmp; static int ncalls = 0; herr_t status; @@ -185,8 +196,17 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void fprintf(H5DEBUG(B), "H5B: debugging B-trees (expensive)\n"); } /* end if */ + /* Get shared info for B-tree */ + if(NULL == (rc_shared = (type->get_shared)(f, udata))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. 
count object") + shared = (H5B_shared_t *)H5RC_GET_OBJ(rc_shared); + HDassert(shared); + /* Initialize the queue */ - bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ); + cache_udata.f = f; + cache_udata.type = type; + cache_udata.rc_shared = rc_shared; + bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ); HDassert(bt); shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); HDassert(shared); @@ -207,7 +227,7 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void * test. */ for(ncell = 0; cur; ncell++) { - bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, type, udata, H5AC_READ); + bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, &cache_udata, H5AC_READ); HDassert(bt); /* Check node header */ diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h index ad88351..46e2b28 100644 --- a/src/H5Bpkg.h +++ b/src/H5Bpkg.h @@ -33,8 +33,7 @@ /* Other private headers needed by this file */ #include "H5ACprivate.h" /* Metadata cache */ -#include "H5FLprivate.h" /* Free Lists */ -#include "H5RCprivate.h" /* Reference counted objects */ +#include "H5FLprivate.h" /* Free Lists */ /**************************/ @@ -62,6 +61,13 @@ typedef struct H5B_t { haddr_t *child; /*2k child pointers */ } H5B_t; +/* Callback info for loading a B-tree node into the cache */ +typedef struct H5B_cache_ud_t { + H5F_t *f; /* File that B-tree node is within */ + const struct H5B_class_t *type; /* Type of tree */ + H5RC_t *rc_shared; /* Ref-counted shared info */ +} H5B_cache_ud_t; + /*****************************/ /* Package Private Variables */ /*****************************/ @@ -82,7 +88,7 @@ H5FL_EXTERN(H5B_t); /******************************/ /* Package Private Prototypes */ /******************************/ -H5_DLL herr_t H5B_node_dest(H5B_t *b); +H5_DLL herr_t H5B_node_dest(H5B_t *bt); #ifdef H5B_DEBUG herr_t H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void *udata); @@ -175,8 +175,7 @@ static void * H5C_load_entry(H5F_t * f, hid_t dxpl_id, const H5C_class_t * type, haddr_t addr, - const void * udata1, - void * udata2, + void * udata, hbool_t skip_file_checks); static herr_t H5C_make_space_in_cache(H5F_t * f, @@ -210,7 +209,7 @@ static herr_t H5C_verify_not_in_index(H5C_t * cache_ptr, #define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS static void *H5C_epoch_marker_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, - const void *udata1, void *udata2); + void *udata); static herr_t H5C_epoch_marker_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing, unsigned *flags_ptr); @@ -242,8 +241,7 @@ static void * H5C_epoch_marker_load(H5F_t UNUSED * f, hid_t UNUSED dxpl_id, haddr_t UNUSED addr, - const void UNUSED * udata1, - void UNUSED * udata2) + void UNUSED * udata) { void * ret_value = NULL; /* Return value */ @@ -336,6 +334,7 @@ done: FUNC_LEAVE_NOAPI(ret_value) } + /*------------------------------------------------------------------------- * Function: H5C_create @@ -2167,7 +2166,7 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr, *------------------------------------------------------------------------- */ herr_t -H5C_insert_entry(H5F_t * f, +H5C_insert_entry(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, const H5C_class_t * type, @@ -2486,7 +2485,7 @@ done: */ #ifdef H5_HAVE_PARALLEL herr_t -H5C_mark_entries_as_clean(H5F_t * f, +H5C_mark_entries_as_clean(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, int32_t ce_array_len, @@ -3398,13 +3397,12 @@ 
done: *------------------------------------------------------------------------- */ void * -H5C_protect(H5F_t * f, +H5C_protect(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, const H5C_class_t * type, haddr_t addr, - const void * udata1, - void * udata2, + void * udata, unsigned flags) { H5C_t * cache_ptr; @@ -3467,8 +3465,7 @@ H5C_protect(H5F_t * f, hit = FALSE; - thing = H5C_load_entry(f, primary_dxpl_id, type, addr, udata1, udata2, - cache_ptr->skip_file_checks); + thing = H5C_load_entry(f, primary_dxpl_id, type, addr, udata, cache_ptr->skip_file_checks); if ( thing == NULL ) { @@ -7704,7 +7701,7 @@ H5C_flush_single_entry(H5F_t * f, hid_t secondary_dxpl_id, const H5C_class_t * type_ptr, haddr_t addr, - unsigned flags, + unsigned flags, hbool_t * first_flush_ptr, hbool_t del_entry_from_slist_on_destroy) { @@ -8123,7 +8120,7 @@ H5C_flush_single_entry(H5F_t * f, if ( cache_ptr->aux_ptr != NULL ) { HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Flush operation occured in the parallel case.") + "resize/rename in serialize occured in parallel case.") } } @@ -8292,8 +8289,7 @@ H5C_load_entry(H5F_t * f, hid_t dxpl_id, const H5C_class_t * type, haddr_t addr, - const void * udata1, - void * udata2, + void * udata, #ifndef NDEBUG hbool_t skip_file_checks) #else /* NDEBUG */ @@ -8315,7 +8311,7 @@ H5C_load_entry(H5F_t * f, HDassert( type->size ); HDassert( H5F_addr_defined(addr) ); - if ( NULL == (thing = (type->load)(f, dxpl_id, addr, udata1, udata2)) ) { + if ( NULL == (thing = (type->load)(f, dxpl_id, addr, udata)) ) { HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "unable to load entry") diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 2316700..e1b9d19 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -134,8 +134,7 @@ typedef enum H5C_notify_action_t { typedef void *(*H5C_load_func_t)(H5F_t *f, hid_t dxpl_id, haddr_t addr, - const void *udata1, - void *udata2); + void *udata); typedef herr_t (*H5C_flush_func_t)(H5F_t *f, hid_t dxpl_id, hbool_t dest, @@ -460,6 +459,37 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * there is no previous item, it should be NULL. * * + * Fields supporting metadata journaling: + * + * last_trans: unit64_t containing the ID of the last transaction in + * which this entry was dirtied. If journaling is disabled, + * or if the entry has never been dirtied in a transaction, + * this field should be set to zero. Once we notice that + * the specified transaction has made it to disk, we will + * reset this field to zero as well. + * + * We must maintain this field, as to avoid messages from + * the future, we must not flush a dirty entry to disk + * until the last transaction in which it was dirtied + * has made it to disk in the journal file. + * + * trans_next: Next pointer in the entries modified in the current + * transaction list. This field should always be null + * unless journaling is enabled, the entry is dirty, + * and last_trans field contains the current transaction + * number. Even if all these conditions are fulfilled, + * the field will still be NULL if this is the last + * entry on the list. + * + * trans_prev: Previous pointer in the entries modified in the current + * transaction list. This field should always be null + * unless journaling is enabled, the entry is dirty, + * and last_trans field contains the current transaction + * number. Even if all these conditions are fulfilled, + * the field will still be NULL if this is the first + * entry on the list. 
+ * + * * Cache entry stats collection fields: * * These fields should only be compiled in when both H5C_COLLECT_CACHE_STATS @@ -1047,11 +1077,10 @@ H5_DLL herr_t H5C_pin_protected_entry(void *thing); H5_DLL void * H5C_protect(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, - const H5C_class_t * type, + const H5C_class_t * type, haddr_t addr, - const void * udata1, - void * udata2, - unsigned flags); + void * udata, + unsigned flags); H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr); @@ -22,7 +22,7 @@ /* Packages needed by this file... */ #include "H5private.h" /* Generic Functions */ #include "H5Aprivate.h" /* Attributes */ -#include "H5ACprivate.h" /* Metadata cache */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Dprivate.h" /* Datasets */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* File access */ diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index 1c246ae..117dfc7 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -26,7 +26,6 @@ #include "H5private.h" /* Generic Functions */ -#include "H5ACprivate.h" /* Metadata cache */ #include "H5Dprivate.h" /* Dataset functions */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fprivate.h" /* File access */ diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h index 85f997e..786599e 100644 --- a/src/H5FLprivate.h +++ b/src/H5FLprivate.h @@ -277,7 +277,7 @@ typedef struct H5FL_arr_head_t { #define H5FL_ARR_CALLOC(t,elem) H5FL_arr_calloc(&(H5FL_ARR_NAME(t)),elem) /* Free an array of type 't' */ -#define H5FL_ARR_FREE(t,obj) H5FL_arr_free(&(H5FL_ARR_NAME(t)),obj) +#define H5FL_ARR_FREE(t,obj) (t *)H5FL_arr_free(&(H5FL_ARR_NAME(t)),obj) /* Re-allocate an array of type 't' */ #define H5FL_ARR_REALLOC(t,obj,new_elem) H5FL_arr_realloc(&(H5FL_ARR_NAME(t)),obj,new_elem) @@ -293,7 +293,7 @@ typedef struct H5FL_arr_head_t { #define H5FL_BARR_DEFINE_STATIC(b,t,m) static H5FL_ARR_DEFINE_COMMON(t,m) = sizeof(b) #define H5FL_ARR_MALLOC(t,elem) H5MM_malloc(H5FL_ARR_NAME(t) + ((elem)*sizeof(t))) #define H5FL_ARR_CALLOC(t,elem) H5MM_calloc(H5FL_ARR_NAME(t) + ((elem)*sizeof(t))) -#define H5FL_ARR_FREE(t,obj) H5MM_xfree(obj) +#define H5FL_ARR_FREE(t,obj) (t *)H5MM_xfree(obj) #define H5FL_ARR_REALLOC(t,obj,new_elem) H5MM_realloc(obj,H5FL_ARR_NAME(t) + ((new_elem)*sizeof(t))) #endif /* H5_NO_ARR_FREE_LISTS */ @@ -33,6 +33,7 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5FSpkg.h" /* File free space */ #include "H5MFprivate.h" /* File memory management */ @@ -55,6 +56,8 @@ /********************/ /* Local Prototypes */ /********************/ + +/* Section info routines */ static herr_t H5FS_sinfo_free_sect_cb(void *item, void *key, void *op_data); static herr_t H5FS_sinfo_free_node_cb(void *item, void *key, void *op_data); @@ -192,7 +195,7 @@ H5FS_open(H5F_t *f, hid_t dxpl_id, haddr_t fs_addr, size_t nclasses, const H5FS_section_class_t *classes[], void *cls_init_udata, hsize_t alignment, hsize_t threshold) { H5FS_t *fspace = NULL; /* New free space structure */ - H5FS_prot_t fs_prot; /* Information for protecting free space manager */ + H5FS_hdr_cache_ud_t cache_udata; /* User-data for metadata cache callback */ H5FS_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI(H5FS_open, NULL) @@ -206,12 +209,13 @@ HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %a, nclasses = %Zu\ HDassert(classes); /* Initialize user data for protecting the free space manager */ - 
fs_prot.nclasses = nclasses; - fs_prot.classes = classes; - fs_prot.cls_init_udata = cls_init_udata; + cache_udata.f = f; + cache_udata.nclasses = nclasses; + cache_udata.classes = classes; + cache_udata.cls_init_udata = cls_init_udata; /* Protect the free space header */ - if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &fs_prot, NULL, H5AC_READ))) + if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, NULL, "unable to load free space header") #ifdef H5FS_DEBUG HDfprintf(stderr, "%s: fspace->sect_addr = %a\n", FUNC, fspace->sect_addr); @@ -259,7 +263,7 @@ herr_t H5FS_delete(H5F_t *f, hid_t dxpl_id, haddr_t fs_addr) { H5FS_t *fspace = NULL; /* Free space header loaded from file */ - H5FS_prot_t fs_prot; /* Temporary information for protecting free space header */ + H5FS_hdr_cache_ud_t cache_udata; /* User-data for metadata cache callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5FS_delete, FAIL) @@ -273,12 +277,13 @@ HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %a\n", FUNC, fs_ad /* Initialize user data for protecting the free space manager */ /* (no class information necessary for delete) */ - fs_prot.nclasses = 0; - fs_prot.classes = NULL; - fs_prot.cls_init_udata = NULL; + cache_udata.f = f; + cache_udata.nclasses = 0; + cache_udata.classes = NULL; + cache_udata.cls_init_udata = NULL; /* Protect the free space header */ - if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &fs_prot, NULL, H5AC_WRITE))) + if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space header") /* Sanity check */ diff --git a/src/H5FScache.c b/src/H5FScache.c index 739f964..2f7a4eb 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -34,6 +34,7 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5FSpkg.h" /* File free space */ #include "H5MFprivate.h" /* File memory management */ @@ -78,12 +79,12 @@ static herr_t H5FS_sinfo_serialize_sect_cb(void *_item, void UNUSED *key, void * static herr_t H5FS_sinfo_serialize_node_cb(void *_item, void UNUSED *key, void *_udata); /* Metadata cache callbacks */ -static H5FS_t *H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2); +static H5FS_t *H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5FS_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_t *fspace, unsigned UNUSED * flags_ptr); static herr_t H5FS_cache_hdr_dest(H5F_t *f, H5FS_t *fspace); static herr_t H5FS_cache_hdr_clear(H5F_t *f, H5FS_t *fspace, hbool_t destroy); static herr_t H5FS_cache_hdr_size(const H5F_t *f, const H5FS_t *fspace, size_t *size_ptr); -static H5FS_sinfo_t *H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2); +static H5FS_sinfo_t *H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5FS_cache_sinfo_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5FS_sinfo_t *sinfo, unsigned UNUSED * flags_ptr); static herr_t H5FS_cache_sinfo_dest(H5F_t *f, H5FS_sinfo_t *sinfo); static herr_t H5FS_cache_sinfo_clear(H5F_t *f, H5FS_sinfo_t *sinfo, hbool_t destroy); 
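[Editor's note — illustrative sketch, not part of the changeset] The H5FS hunks above replace the old pair of opaque 'load' arguments (a const udata plus a second pointer) with a single user-data struct (H5FS_hdr_cache_ud_t) that the caller fills in before H5AC_protect and that the cache callback casts back to reach the file pointer and class information. The following minimal, self-contained C sketch shows the shape of that calling convention; every identifier in it (my_cache_ud_t, my_entry_t, my_load, my_protect) is a hypothetical stand-in, not an HDF5 symbol.

/*
 * Sketch of the single-udata callback pattern: the caller packs its
 * context into one struct, the protect-like routine forwards the opaque
 * pointer, and the load callback casts it back.  Hypothetical names only.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct my_entry_t {
    unsigned nrec;              /* records "deserialized" for the entry */
} my_entry_t;

/* One struct carries everything the load callback needs */
typedef struct my_cache_ud_t {
    const char *file_name;      /* stands in for the H5F_t pointer      */
    unsigned    nrec;           /* stands in for the node record count  */
} my_cache_ud_t;

/* New-style load callback: a single opaque user-data pointer */
typedef void *(*my_load_func_t)(unsigned long addr, void *udata);

static void *
my_load(unsigned long addr, void *_udata)
{
    my_cache_ud_t *udata = (my_cache_ud_t *)_udata;   /* cast back */
    my_entry_t *ent;

    if(NULL == (ent = malloc(sizeof(*ent))))
        return NULL;
    ent->nrec = udata->nrec;
    printf("loaded entry at %#lx from %s, nrec = %u\n",
           addr, udata->file_name, ent->nrec);
    return ent;
}

/* Protect-like wrapper simply forwards the single udata pointer */
static void *
my_protect(my_load_func_t load, unsigned long addr, void *udata)
{
    return load(addr, udata);
}

int
main(void)
{
    my_cache_ud_t cache_udata;  /* caller packs its context once */
    my_entry_t *ent;

    cache_udata.file_name = "example.h5";
    cache_udata.nrec      = 42;

    if(NULL == (ent = my_protect(my_load, 0x1000UL, &cache_udata)))
        return 1;
    free(ent);
    return 0;
}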
@@ -144,10 +145,10 @@ H5FL_BLK_DEFINE_STATIC(sect_block); *------------------------------------------------------------------------- */ static H5FS_t * -H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_fs_prot, void UNUSED *udata2) +H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5FS_t *fspace = NULL; /* Free space header info */ - const H5FS_prot_t *fs_prot = (const H5FS_prot_t *)_fs_prot; /* User data for protecting */ + H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* user data for callback */ size_t size; /* Header size */ H5WB_t *wb = NULL; /* Wrapped buffer for header data */ uint8_t hdr_buf[H5FS_HDR_BUF_SIZE]; /* Buffer for header */ @@ -163,10 +164,10 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_fs_prot, /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(fs_prot); + HDassert(udata); /* Allocate a new free space manager */ - if(NULL == (fspace = H5FS_new(fs_prot->nclasses, fs_prot->classes, fs_prot->cls_init_udata))) + if(NULL == (fspace = H5FS_new(udata->nclasses, udata->classes, udata->cls_init_udata))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Set free space manager's internal information */ @@ -177,7 +178,7 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_fs_prot, HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, NULL, "can't wrap buffer") /* Compute the size of the free space header on disk */ - size = (size_t)H5FS_HEADER_SIZE(f); + size = (size_t)H5FS_HEADER_SIZE(udata->f); /* Get a pointer to a buffer that's large enough for header */ if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, size))) @@ -204,16 +205,16 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_fs_prot, HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "unknown client ID in free space header") /* Total space tracked */ - H5F_DECODE_LENGTH(f, p, fspace->tot_space); + H5F_DECODE_LENGTH(udata->f, p, fspace->tot_space); /* Total # of free space sections tracked */ - H5F_DECODE_LENGTH(f, p, fspace->tot_sect_count); + H5F_DECODE_LENGTH(udata->f, p, fspace->tot_sect_count); /* # of serializable free space sections tracked */ - H5F_DECODE_LENGTH(f, p, fspace->serial_sect_count); + H5F_DECODE_LENGTH(udata->f, p, fspace->serial_sect_count); /* # of ghost free space sections tracked */ - H5F_DECODE_LENGTH(f, p, fspace->ghost_sect_count); + H5F_DECODE_LENGTH(udata->f, p, fspace->ghost_sect_count); /* # of section classes */ /* (only check if we actually have some classes) */ @@ -231,16 +232,16 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_fs_prot, UINT16DECODE(p, fspace->max_sect_addr); /* Max. 
size of section to track */ - H5F_DECODE_LENGTH(f, p, fspace->max_sect_size); + H5F_DECODE_LENGTH(udata->f, p, fspace->max_sect_size); /* Address of serialized free space sections */ - H5F_addr_decode(f, &p, &fspace->sect_addr); + H5F_addr_decode(udata->f, &p, &fspace->sect_addr); /* Size of serialized free space sections */ - H5F_DECODE_LENGTH(f, p, fspace->sect_size); + H5F_DECODE_LENGTH(udata->f, p, fspace->sect_size); /* Allocated size of serialized free space sections */ - H5F_DECODE_LENGTH(f, p, fspace->alloc_sect_size); + H5F_DECODE_LENGTH(udata->f, p, fspace->alloc_sect_size); /* Compute checksum on indirect block */ computed_chksum = H5_checksum_metadata(hdr, (size_t)(p - (const uint8_t *)hdr), 0); @@ -570,10 +571,10 @@ H5FS_cache_hdr_size(const H5F_t *f, const H5FS_t UNUSED *fspace, size_t *size_pt *------------------------------------------------------------------------- */ static H5FS_sinfo_t * -H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, void *_fspace) +H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5FS_sinfo_t *sinfo = NULL; /* Free space section info */ - H5FS_t *fspace = (H5FS_t *)_fspace; /* User data for protecting */ + H5FS_sinfo_cache_ud_t *udata = (H5FS_sinfo_cache_ud_t *)_udata; /* user data for callback */ haddr_t fs_addr; /* Free space header address */ size_t old_sect_size; /* Old section size */ uint8_t *buf = NULL; /* Temporary buffer */ @@ -587,23 +588,23 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(fspace); + HDassert(udata); /* Allocate a new free space section info */ - if(NULL == (sinfo = H5FS_sinfo_new(f, fspace))) + if(NULL == (sinfo = H5FS_sinfo_new(udata->f, udata->fspace))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Sanity check address */ - if(H5F_addr_ne(addr, fspace->sect_addr)) + if(H5F_addr_ne(addr, udata->fspace->sect_addr)) HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "incorrect address for free space sections") /* Allocate space for the buffer to serialize the sections into */ - H5_ASSIGN_OVERFLOW(/* To: */ old_sect_size, /* From: */ fspace->sect_size, /* From: */ hsize_t, /* To: */ size_t); - if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)fspace->sect_size))) + H5_ASSIGN_OVERFLOW(/* To: */ old_sect_size, /* From: */ udata->fspace->sect_size, /* From: */ hsize_t, /* To: */ size_t); + if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)udata->fspace->sect_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Read buffer from disk */ - if(H5F_block_read(f, H5FD_MEM_FSPACE_SINFO, fspace->sect_addr, (size_t)fspace->sect_size, dxpl_id, buf) < 0) + if(H5F_block_read(f, H5FD_MEM_FSPACE_SINFO, udata->fspace->sect_addr, (size_t)udata->fspace->sect_size, dxpl_id, buf) < 0) HGOTO_ERROR(H5E_FSPACE, H5E_READERROR, NULL, "can't read free space sections") /* Deserialize free sections from buffer available */ @@ -619,12 +620,12 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "wrong free space sections version") /* Address of free space header for these sections */ - H5F_addr_decode(f, &p, &fs_addr); - if(H5F_addr_ne(fs_addr, fspace->addr)) + H5F_addr_decode(udata->f, &p, &fs_addr); + if(H5F_addr_ne(fs_addr, udata->fspace->addr)) HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "incorrect header address for free space sections") 
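As the H5FS_cache_sinfo_load() hunk above shows, the section-info loader now reaches the file, the DXPL and the owning free space manager through a single H5FS_sinfo_cache_ud_t (defined in the H5FSpkg.h hunk below). The caller side, per the H5FS_sinfo_lock() hunks further down, fills that struct roughly as in the following sketch; the wrapper name example_fspace_sinfo_protect is invented for this note and the error handling is reduced to a bare return.

#include "H5private.h"      /* Generic Functions */
#include "H5ACprivate.h"    /* Metadata cache: H5AC_protect(), H5AC_FSPACE_SINFO */
#include "H5FSpkg.h"        /* File free space: H5FS_t, H5FS_sinfo_cache_ud_t */

/* Illustrative sketch (hypothetical wrapper), not part of the patch */
static herr_t
example_fspace_sinfo_protect(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
    H5AC_protect_t accmode)
{
    H5FS_sinfo_cache_ud_t cache_udata;  /* User-data for metadata cache callback */

    /* The load callback needs the file, the DXPL for the per-section
     * 'deserialize' callbacks, and the owning free space manager. */
    cache_udata.f       = f;
    cache_udata.dxpl_id = dxpl_id;
    cache_udata.fspace  = fspace;

    if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id,
            H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, accmode)))
        return FAIL;    /* real code raises H5E_FSPACE / H5E_CANTPROTECT here */

    return SUCCEED;
}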
/* Check for any serialized sections */ - if(fspace->serial_sect_count > 0) { + if(udata->fspace->serial_sect_count > 0) { hsize_t old_tot_sect_count; /* Total section count from header */ hsize_t old_serial_sect_count; /* Total serializable section count from header */ hsize_t old_ghost_sect_count; /* Total ghost section count from header */ @@ -632,17 +633,17 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * unsigned sect_cnt_size; /* The size of the section size counts */ /* Compute the size of the section counts */ - sect_cnt_size = H5V_limit_enc_size((uint64_t)fspace->serial_sect_count); + sect_cnt_size = H5V_limit_enc_size((uint64_t)udata->fspace->serial_sect_count); /* Reset the section count, the "add" routine will update it */ - old_tot_sect_count = fspace->tot_sect_count; - old_serial_sect_count = fspace->serial_sect_count; - old_ghost_sect_count = fspace->ghost_sect_count; - old_tot_space = fspace->tot_space; - fspace->tot_sect_count = 0; - fspace->serial_sect_count = 0; - fspace->ghost_sect_count = 0; - fspace->tot_space = 0; + old_tot_sect_count = udata->fspace->tot_sect_count; + old_serial_sect_count = udata->fspace->serial_sect_count; + old_ghost_sect_count = udata->fspace->ghost_sect_count; + old_tot_space = udata->fspace->tot_space; + udata->fspace->tot_sect_count = 0; + udata->fspace->serial_sect_count = 0; + udata->fspace->ghost_sect_count = 0; + udata->fspace->tot_space = 0; /* Walk through the buffer, deserializing sections */ do { @@ -673,27 +674,27 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * /* Call 'deserialize' callback for this section */ des_flags = 0; - HDassert(fspace->sect_cls[sect_type].deserialize); - if(NULL == (new_sect = (*fspace->sect_cls[sect_type].deserialize)(&fspace->sect_cls[sect_type], dxpl_id, p, sect_addr, sect_size, &des_flags))) + HDassert(udata->fspace->sect_cls[sect_type].deserialize); + if(NULL == (new_sect = (*udata->fspace->sect_cls[sect_type].deserialize)(&udata->fspace->sect_cls[sect_type], udata->dxpl_id, p, sect_addr, sect_size, &des_flags))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTDECODE, NULL, "can't deserialize section") /* Update offset in serialization buffer */ - p += fspace->sect_cls[sect_type].serial_size; + p += udata->fspace->sect_cls[sect_type].serial_size; /* Insert section in free space manager, unless requested not to */ if(!(des_flags & H5FS_DESERIALIZE_NO_ADD)) - if(H5FS_sect_add(f, dxpl_id, fspace, new_sect, H5FS_ADD_DESERIALIZING, NULL) < 0) + if(H5FS_sect_add(udata->f, udata->dxpl_id, udata->fspace, new_sect, H5FS_ADD_DESERIALIZING, NULL) < 0) HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, NULL, "can't add section to free space manager") } /* end for */ } while(p < ((buf + old_sect_size) - H5FS_SIZEOF_CHKSUM)); /* Sanity check */ HDassert((size_t)(p - buf) == (old_sect_size - H5FS_SIZEOF_CHKSUM)); - HDassert(old_sect_size == fspace->sect_size); - HDassert(old_tot_sect_count == fspace->tot_sect_count); - HDassert(old_serial_sect_count == fspace->serial_sect_count); - HDassert(old_ghost_sect_count == fspace->ghost_sect_count); - HDassert(old_tot_space == fspace->tot_space); + HDassert(old_sect_size == udata->fspace->sect_size); + HDassert(old_tot_sect_count == udata->fspace->tot_sect_count); + HDassert(old_serial_sect_count == udata->fspace->serial_sect_count); + HDassert(old_ghost_sect_count == udata->fspace->ghost_sect_count); + HDassert(old_tot_space == udata->fspace->tot_space); } /* end if */ /* Compute checksum on indirect block */ @@ -717,7 +718,7 @@ 
done: buf = H5FL_BLK_FREE(sect_block, buf); if(!ret_value && sinfo) if(H5FS_sinfo_dest(sinfo) < 0) - HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space section info") + HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space info") FUNC_LEAVE_NOAPI(ret_value) } /* end H5FS_cache_sinfo_load() */ /*lint !e818 Can't make udata a pointer to const */ diff --git a/src/H5FSdbg.c b/src/H5FSdbg.c index 3342cb9..50d4bbf 100644 --- a/src/H5FSdbg.c +++ b/src/H5FSdbg.c @@ -97,7 +97,7 @@ herr_t H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth) { H5FS_t *fspace = NULL; /* Free space header info */ - H5FS_prot_t fs_prot; /* Information for protecting free space manager */ + H5FS_hdr_cache_ud_t cache_udata; /* User-data for cache callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5FS_debug, FAIL) @@ -112,14 +112,15 @@ H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int HDassert(fwidth >= 0); /* Initialize user data for protecting the free space manager */ - fs_prot.nclasses = 0; - fs_prot.classes = NULL; - fs_prot.cls_init_udata = NULL; + cache_udata.f = f; + cache_udata.nclasses = 0; + cache_udata.classes = NULL; + cache_udata.cls_init_udata = NULL; /* * Load the free space header. */ - if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &fs_prot, NULL, H5AC_READ))) + if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header") /* Print opening message */ @@ -234,8 +235,8 @@ H5FS_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, FILE *stream, int haddr_t fs_addr, haddr_t client_addr) { H5FS_t *fspace = NULL; /* Free space header info */ - H5FS_prot_t fs_prot; /* Information for protecting free space manager */ H5FS_client_t client; /* The client of the free space */ + H5FS_hdr_cache_ud_t cache_udata; /* User-data for cache callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5FS_sects_debug, FAIL) @@ -252,14 +253,15 @@ H5FS_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, FILE *stream, int HDassert(H5F_addr_defined(client_addr)); /* Initialize user data for protecting the free space manager */ - fs_prot.nclasses = 0; - fs_prot.classes = NULL; - fs_prot.cls_init_udata = NULL; + cache_udata.f = f; + cache_udata.nclasses = 0; + cache_udata.classes = NULL; + cache_udata.cls_init_udata = NULL; /* * Load the free space header. 
*/ - if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &fs_prot, NULL, H5AC_READ))) + if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header") /* Retrieve the client id */ diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h index a287d23..2b0eab4 100644 --- a/src/H5FSpkg.h +++ b/src/H5FSpkg.h @@ -93,12 +93,20 @@ /* Package Private Typedefs */ /****************************/ -/* Information for protecting a free space manager header */ -typedef struct H5FS_prot_t { +/* Callback info for loading a free space header into the cache */ +typedef struct H5FS_hdr_cache_ud_t { + H5F_t *f; /* File that free space header is within */ size_t nclasses; /* Number of section classes */ const H5FS_section_class_t **classes; /* Array of section class info */ void *cls_init_udata; /* Pointer to class init user data */ -} H5FS_prot_t; +} H5FS_hdr_cache_ud_t; + +/* Callback info for loading free space section info into the cache */ +typedef struct H5FS_sinfo_cache_ud_t { + H5F_t *f; /* File that free space section info is within */ + H5FS_t *fspace; /* free space manager */ + hid_t dxpl_id; +} H5FS_sinfo_cache_ud_t; /* Free space section bin info */ typedef struct H5FS_bin_t { diff --git a/src/H5FSsection.c b/src/H5FSsection.c index 8a9fab0..dbc76ad 100644 --- a/src/H5FSsection.c +++ b/src/H5FSsection.c @@ -206,6 +206,7 @@ done: static herr_t H5FS_sinfo_lock(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5AC_protect_t accmode) { + H5FS_sinfo_cache_ud_t cache_udata; /* User-data for cache callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5FS_sinfo_lock) @@ -231,7 +232,10 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n" HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNPROTECT, FAIL, "unable to release free space section info") /* Re-protect the section info with read-write access */ - if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, NULL, fspace, H5AC_WRITE))) + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + cache_udata.fspace = fspace; + if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to load free space sections") /* Switch the access mode we have */ @@ -250,7 +254,10 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n" HDfprintf(stderr, "%s: Reading in existing sections, fspace->sect_addr = %a\n", FUNC, fspace->sect_addr); #endif /* H5FS_SINFO_DEBUG */ /* Protect the free space sections */ - if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, NULL, fspace, accmode))) + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + cache_udata.fspace = fspace; + if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, accmode))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to load free space sections") /* Remember that we protected the section info & the access mode */ @@ -558,7 +565,7 @@ H5FS_sect_increase(H5FS_t *fspace, const H5FS_section_class_t *cls, /* Increment amount of space required to serialize all sections */ #ifdef QAK -HDfprintf(stderr, "%s: sinfo->serial_size = %Zu\n", FUNC, sinfo->serial_size); +HDfprintf(stderr, "%s: sinfo->serial_size = 
%Zu\n", FUNC, fspace->sinfo->serial_size); HDfprintf(stderr, "%s: cls->serial_size = %Zu\n", FUNC, cls->serial_size); #endif /* QAK */ fspace->sinfo->serial_size += cls->serial_size; @@ -620,7 +627,7 @@ H5FS_sect_decrease(H5FS_t *fspace, const H5FS_section_class_t *cls) /* Decrement amount of space required to serialize all sections */ #ifdef QAK -HDfprintf(stderr, "%s: fspace->serial_size = %Zu\n", FUNC, fspace->serial_size); +HDfprintf(stderr, "%s: fspace->serial_size = %Zu\n", FUNC, fspace->sinfo->serial_size); HDfprintf(stderr, "%s: cls->serial_size = %Zu\n", FUNC, cls->serial_size); #endif /* QAK */ fspace->sinfo->serial_size -= cls->serial_size; diff --git a/src/H5Fmount.c b/src/H5Fmount.c index f70a7c6..40be54c 100644 --- a/src/H5Fmount.c +++ b/src/H5Fmount.c @@ -21,6 +21,7 @@ /* Packages needed by this file... */ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* File access */ #include "H5Gprivate.h" /* Groups */ diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index 09babd2..33c56f9 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -36,7 +36,7 @@ /* Other private headers needed by this file */ #include "H5private.h" /* Generic Functions */ -#include "H5ACprivate.h" /* Metadata cache */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5FLprivate.h" /* Free Lists */ #include "H5FOprivate.h" /* File objects */ #include "H5FSprivate.h" /* File free space */ @@ -206,7 +206,7 @@ typedef struct H5F_file_t { unsigned long feature_flags; /* VFL Driver feature Flags */ haddr_t maxaddr; /* Maximum address for file */ - H5AC_t *cache; /* The object cache */ + H5AC_t *cache; /* The object cache */ H5AC_cache_config_t mdc_initCacheCfg; /* initial configuration for the */ /* metadata cache. 
This structure is */ diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index d6e89c9..5d77cec 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -320,7 +320,7 @@ H5F_super_read(H5F_t *f, hid_t dxpl_id) rw = H5AC_READ; /* Look up the superblock */ - if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, NULL, &dirtied, rw))) + if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &dirtied, rw))) HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "unable to load superblock") /* Mark the superblock dirty if it was modified during loading or VFD indicated to do so */ diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index 7a0a585..bffb5e0 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -59,7 +59,7 @@ /********************/ /* Metadata cache (H5AC) callbacks */ -static H5F_super_t *H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, void *udata2); +static H5F_super_t *H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5F_super_t *sblock); static herr_t H5F_sblock_dest(H5F_t *f, H5F_super_t * sblock); static herr_t H5F_sblock_clear(H5F_t *f, H5F_super_t *sblock, hbool_t destroy); @@ -110,8 +110,7 @@ H5FL_EXTERN(H5F_super_t); *------------------------------------------------------------------------- */ static H5F_super_t * -H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, const void UNUSED *udata1, - void *udata2/*out*/) +H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata) { H5F_super_t *sblock = NULL; /* File's superblock */ haddr_t base_addr = HADDR_UNDEF; /* Base address of file */ @@ -127,7 +126,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, const void UNUSED size_t variable_size; /*variable sizeof superblock */ uint8_t *p; /* Temporary pointer into encoding buffer */ unsigned super_vers; /* Superblock version */ - hbool_t *dirtied = (hbool_t *)udata2; /* Set up dirtied out value */ + hbool_t *dirtied = (hbool_t *)_udata; /* Set up dirtied out value */ H5F_super_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5F_sblock_load) @@ -135,6 +134,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, const void UNUSED /* check arguments */ HDassert(f); HDassert(H5F_addr_eq(addr, 0)); + HDassert(dirtied); /* Short cuts */ shared = f->shared; @@ -83,6 +83,7 @@ /* Packages needed by this file... 
*/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* File access */ #include "H5Gpkg.h" /* Groups */ diff --git a/src/H5Gcache.c b/src/H5Gcache.c index 1376570..81669d8 100644 --- a/src/H5Gcache.c +++ b/src/H5Gcache.c @@ -64,10 +64,9 @@ /********************/ /* Metadata cache (H5AC) callbacks */ -static H5G_node_t *H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata1, - void *_udata2); +static H5G_node_t *H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, - H5G_node_t *sym, unsigned UNUSED * flags_ptr); + H5G_node_t *sym, unsigned *flags_ptr); static herr_t H5G_node_dest(H5F_t *f, H5G_node_t *sym); static herr_t H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy); static herr_t H5G_node_size(const H5F_t *f, const H5G_node_t *sym, size_t *size_ptr); @@ -121,8 +120,7 @@ H5FL_SEQ_EXTERN(H5G_entry_t); *------------------------------------------------------------------------- */ static H5G_node_t * -H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *_udata1, - void UNUSED * _udata2) +H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata) { H5G_node_t *sym = NULL; size_t size; @@ -139,8 +137,7 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *_udata1 */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(!_udata1); - HDassert(NULL == _udata2); + HDassert(udata); /* * Initialize variables. diff --git a/src/H5Gdeprec.c b/src/H5Gdeprec.c index 61432b6..e9e124f 100644 --- a/src/H5Gdeprec.c +++ b/src/H5Gdeprec.c @@ -41,6 +41,7 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5Gpkg.h" /* Groups */ #include "H5Iprivate.h" /* IDs */ diff --git a/src/H5Gnode.c b/src/H5Gnode.c index 6661058..17378d3 100644 --- a/src/H5Gnode.c +++ b/src/H5Gnode.c @@ -536,7 +536,7 @@ H5G_node_found(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *_lt_key /* * Load the symbol table node for exclusive access. */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table node") /* Get base address of heap */ @@ -642,7 +642,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, /* * Load the symbol node. 
*/ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node") /* Get base address of heap */ @@ -685,7 +685,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, if(H5G_node_create(f, dxpl_id, H5B_INS_FIRST, NULL, NULL, NULL, new_node_p/*out*/) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_INS_ERROR, "unable to split symbol table node") - if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, NULL, NULL, H5AC_WRITE))) + if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, f, H5AC_WRITE))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to split symbol table node") HDmemcpy(snrt->entry, sn->entry + H5F_SYM_LEAF_K(f), @@ -801,7 +801,7 @@ H5G_node_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key/*in,out*/, HDassert(udata && udata->common.heap); /* Load the symbol table */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node") /* "Normal" removal of a single entry from the symbol table node */ @@ -992,7 +992,7 @@ H5G_node_iterate(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t ad HDassert(udata && udata->heap); /* Protect the symbol table node & local heap while we iterate over entries */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node") /* @@ -1069,7 +1069,7 @@ H5G_node_sumup(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t addr HDassert(num_objs); /* Find the object node and add the number of symbol entries. 
*/ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node") *num_objs += sn->nsyms; @@ -1114,7 +1114,7 @@ H5G_node_by_idx(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t add HDassert(udata); /* Get a pointer to the symbol table node */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node"); /* Find the node, locate the object symbol table entry and retrieve the name */ @@ -1252,7 +1252,7 @@ H5G_node_copy(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_t addr, HDassert(udata); /* load the symbol table into memory from the source file */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node") /* get the base address of the heap */ @@ -1387,7 +1387,7 @@ H5G_node_build_table(H5F_t *f, hid_t dxpl_id, const void UNUSED *_lt_key, haddr_ * Save information about the symbol table node since we can't lock it * because we're about to call an application function. */ - if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node") /* Check if the link table needs to be extended */ @@ -1501,7 +1501,7 @@ H5G_node_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, * If we couldn't load the symbol table node, then try loading the * B-tree node. 
*/ - if (NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, NULL, NULL, H5AC_READ))) { + if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) { H5G_bt_common_t udata; /*data to pass through B-tree */ H5E_clear_stack(NULL); /* discard that error */ @@ -152,6 +152,7 @@ H5HF_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam) { H5HF_t *fh = NULL; /* Pointer to new fractal heap */ H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ haddr_t fh_addr; /* Heap header address */ H5HF_t *ret_value; /* Return value */ @@ -174,8 +175,12 @@ HDfprintf(stderr, "%s: Called\n", FUNC); if(NULL == (fh = H5FL_MALLOC(H5HF_t))) HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed for fractal heap info") + /* Set up userdata for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* Lock the heap header into memory */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load fractal heap header") /* Point fractal heap wrapper at header and bump it's ref count */ @@ -223,6 +228,7 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr) { H5HF_t *fh = NULL; /* Pointer to new fractal heap */ H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ H5HF_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI(H5HF_open, NULL) @@ -233,11 +239,15 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr) HDassert(f); HDassert(H5F_addr_defined(fh_addr)); + /* Set up userdata for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* Load the heap header into memory */ #ifdef QAK HDfprintf(stderr, "%s: fh_addr = %a\n", FUNC, fh_addr); #endif /* QAK */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, NULL, NULL, H5AC_READ))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "unable to load fractal heap header") #ifdef QAK HDfprintf(stderr, "%s: hdr->rc = %u, hdr->fspace = %p\n", FUNC, hdr->rc, hdr->fspace); @@ -783,6 +793,7 @@ done: herr_t H5HF_close(H5HF_t *fh, hid_t dxpl_id) { + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ hbool_t pending_delete = FALSE; /* Whether the heap is pending deletion */ haddr_t heap_addr = HADDR_UNDEF; /* Address of heap (for deletion) */ herr_t ret_value = SUCCEED; /* Return value */ @@ -852,8 +863,12 @@ HDfprintf(stderr, "%s; After iterator reset fh->hdr->rc = %Zu\n", FUNC, fh->hdr- if(pending_delete) { H5HF_hdr_t *hdr; /* Another pointer to fractal heap header */ + /* Set up userdata for protect call */ + cache_udata.f = fh->f; + cache_udata.dxpl_id = dxpl_id; + /* Lock the heap header into memory */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(fh->f, dxpl_id, H5AC_FHEAP_HDR, heap_addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(fh->f, dxpl_id, H5AC_FHEAP_HDR, heap_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* Set the shared heap header's file context for this operation */ @@ -889,7 +904,8 @@ herr_t H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr) { 
H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */ - herr_t ret_value = SUCCEED; + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5HF_delete, FAIL) @@ -899,11 +915,15 @@ H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr) HDassert(f); HDassert(H5F_addr_defined(fh_addr)); + /* Set up userdata for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* Lock the heap header into memory */ #ifdef QAK HDfprintf(stderr, "%s: fh_addr = %a\n", FUNC, fh_addr); #endif /* QAK */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* Check for files using shared heap header */ diff --git a/src/H5HFcache.c b/src/H5HFcache.c index 0468c5e..19b192c 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -35,6 +35,7 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5HFpkg.h" /* Fractal heaps */ #include "H5MFprivate.h" /* File memory management */ @@ -78,17 +79,17 @@ static herr_t H5HF_dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dt static herr_t H5HF_dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable); /* Metadata cache (H5AC) callbacks */ -static H5HF_hdr_t *H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2); +static H5HF_hdr_t *H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_hdr_t *hdr, unsigned UNUSED * flags_ptr); static herr_t H5HF_cache_hdr_dest(H5F_t *f, H5HF_hdr_t *hdr); static herr_t H5HF_cache_hdr_clear(H5F_t *f, H5HF_hdr_t *hdr, hbool_t destroy); static herr_t H5HF_cache_hdr_size(const H5F_t *f, const H5HF_hdr_t *hdr, size_t *size_ptr); -static H5HF_indirect_t *H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2); +static H5HF_indirect_t *H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HF_cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_indirect_t *iblock, unsigned UNUSED * flags_ptr); static herr_t H5HF_cache_iblock_dest(H5F_t *f, H5HF_indirect_t *iblock); static herr_t H5HF_cache_iblock_clear(H5F_t *f, H5HF_indirect_t *iblock, hbool_t destroy); static herr_t H5HF_cache_iblock_size(const H5F_t *f, const H5HF_indirect_t *iblock, size_t *size_ptr); -static H5HF_direct_t *H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2); +static H5HF_direct_t *H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HF_direct_t *dblock, unsigned UNUSED * flags_ptr); static herr_t H5HF_cache_dblock_dest(H5F_t *f, H5HF_direct_t *dblock); static herr_t H5HF_cache_dblock_clear(H5F_t *f, H5HF_direct_t *dblock, hbool_t destroy); @@ -259,9 +260,10 @@ H5HF_dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable) *------------------------------------------------------------------------- */ static H5HF_hdr_t * -H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t 
addr, const void UNUSED *udata1, void UNUSED *udata2) +H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5HF_hdr_t *hdr = NULL; /* Fractal heap info */ + H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; size_t size; /* Header size */ H5WB_t *wb = NULL; /* Wrapped buffer for header data */ uint8_t hdr_buf[H5HF_HDR_BUF_SIZE]; /* Buffer for header */ @@ -277,9 +279,10 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); + HDassert(udata); /* Allocate space for the fractal heap data structure */ - if(NULL == (hdr = H5HF_hdr_alloc(f))) + if(NULL == (hdr = H5HF_hdr_alloc(udata->f))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Set the heap header's address */ @@ -325,22 +328,22 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud /* "Huge" object information */ UINT32DECODE(p, hdr->max_man_size); /* Max. size of "managed" objects */ - H5F_DECODE_LENGTH(f, p, hdr->huge_next_id); /* Next ID to use for "huge" object */ - H5F_addr_decode(f, &p, &hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */ + H5F_DECODE_LENGTH(udata->f, p, hdr->huge_next_id); /* Next ID to use for "huge" object */ + H5F_addr_decode(udata->f, &p, &hdr->huge_bt2_addr); /* Address of "huge" object tracker B-tree */ /* "Managed" object free space information */ - H5F_DECODE_LENGTH(f, p, hdr->total_man_free); /* Internal free space in managed direct blocks */ - H5F_addr_decode(f, &p, &hdr->fs_addr); /* Address of free section header */ + H5F_DECODE_LENGTH(udata->f, p, hdr->total_man_free); /* Internal free space in managed direct blocks */ + H5F_addr_decode(udata->f, &p, &hdr->fs_addr); /* Address of free section header */ /* Heap statistics */ - H5F_DECODE_LENGTH(f, p, hdr->man_size); - H5F_DECODE_LENGTH(f, p, hdr->man_alloc_size); - H5F_DECODE_LENGTH(f, p, hdr->man_iter_off); - H5F_DECODE_LENGTH(f, p, hdr->man_nobjs); - H5F_DECODE_LENGTH(f, p, hdr->huge_size); - H5F_DECODE_LENGTH(f, p, hdr->huge_nobjs); - H5F_DECODE_LENGTH(f, p, hdr->tiny_size); - H5F_DECODE_LENGTH(f, p, hdr->tiny_nobjs); + H5F_DECODE_LENGTH(udata->f, p, hdr->man_size); + H5F_DECODE_LENGTH(udata->f, p, hdr->man_alloc_size); + H5F_DECODE_LENGTH(udata->f, p, hdr->man_iter_off); + H5F_DECODE_LENGTH(udata->f, p, hdr->man_nobjs); + H5F_DECODE_LENGTH(udata->f, p, hdr->huge_size); + H5F_DECODE_LENGTH(udata->f, p, hdr->huge_nobjs); + H5F_DECODE_LENGTH(udata->f, p, hdr->tiny_size); + H5F_DECODE_LENGTH(udata->f, p, hdr->tiny_nobjs); /* Managed objects' doubling-table info */ if(H5HF_dtable_decode(hdr->f, &p, &(hdr->man_dtable)) < 0) @@ -380,13 +383,13 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *ud p = buf + filter_info_off; /* Decode the size of a filtered root direct block */ - H5F_DECODE_LENGTH(f, p, hdr->pline_root_direct_size); + H5F_DECODE_LENGTH(udata->f, p, hdr->pline_root_direct_size); /* Decode the filter mask for a filtered root direct block */ UINT32DECODE(p, hdr->pline_root_direct_filter_mask); /* Decode I/O filter information */ - if(NULL == (pline = (H5O_pline_t *)H5O_msg_decode(hdr->f, dxpl_id, NULL, H5O_PLINE_ID, p))) + if(NULL == (pline = (H5O_pline_t *)H5O_msg_decode(hdr->f, udata->dxpl_id, NULL, H5O_PLINE_ID, p))) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode I/O pipeline filters") p += hdr->filter_len; @@ -699,11 +702,10 @@ H5HF_cache_hdr_size(const H5F_t UNUSED *f, const H5HF_hdr_t *hdr, size_t 
*size_p *------------------------------------------------------------------------- */ static H5HF_indirect_t * -H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows, void *_par_info) +H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5HF_hdr_t *hdr; /* Shared fractal heap information */ - const unsigned *nrows = (const unsigned *)_nrows; /* # of rows in indirect block */ - H5HF_parent_t *par_info = (H5HF_parent_t *)_par_info; /* Shared parent information */ + H5HF_iblock_cache_ud_t *udata = (H5HF_iblock_cache_ud_t *)_udata; /* user data for callback */ H5HF_indirect_t *iblock = NULL; /* Indirect block info */ H5WB_t *wb = NULL; /* Wrapped buffer for indirect block data */ uint8_t iblock_buf[H5HF_IBLOCK_BUF_SIZE]; /* Buffer for indirect block */ @@ -720,17 +722,17 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(par_info); + HDassert(udata); /* Allocate space for the fractal heap indirect block */ if(NULL == (iblock = H5FL_CALLOC(H5HF_indirect_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Get the pointer to the shared heap header */ - hdr = par_info->hdr; + hdr = udata->par_info->hdr; /* Set the shared heap header's file context for this operation */ - hdr->f = f; + hdr->f = udata->f; /* Share common heap information */ iblock->hdr = hdr; @@ -739,7 +741,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows /* Set block's internal information */ iblock->rc = 0; - iblock->nrows = *nrows; + iblock->nrows = *udata->nrows; iblock->addr = addr; iblock->nchildren = 0; @@ -771,13 +773,13 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version") /* Address of heap that owns this block */ - H5F_addr_decode(f, &p, &heap_addr); + H5F_addr_decode(udata->f, &p, &heap_addr); if(H5F_addr_ne(heap_addr, hdr->heap_addr)) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block") /* Address of parent block */ - iblock->parent = par_info->iblock; - iblock->par_entry = par_info->entry; + iblock->parent = udata->par_info->iblock; + iblock->par_entry = udata->par_info->entry; if(iblock->parent) { /* Share parent block */ if(H5HF_iblock_incr(iblock->parent) < 0) @@ -812,7 +814,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows iblock->filt_ents = NULL; for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) { /* Decode child block address */ - H5F_addr_decode(f, &p, &(iblock->ents[u].addr)); + H5F_addr_decode(udata->f, &p, &(iblock->ents[u].addr)); /* Check for heap with I/O filters */ if(hdr->filter_len > 0) { @@ -822,7 +824,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows /* Decode extra information for direct blocks */ if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) { /* Size of filtered direct block */ - H5F_DECODE_LENGTH(f, p, iblock->filt_ents[u].size); + H5F_DECODE_LENGTH(udata->f, p, iblock->filt_ents[u].size); /* Sanity check */ /* (either both the address & size are defined or both are @@ -1210,11 +1212,11 @@ H5HF_cache_iblock_size(const H5F_t UNUSED *f, const H5HF_indirect_t *iblock, siz *------------------------------------------------------------------------- */ static H5HF_direct_t * -H5HF_cache_dblock_load(H5F_t *f, 
hid_t dxpl_id, haddr_t addr, const void *_size, void *_par_info) +H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - const size_t *size = (const size_t *)_size; /* Size of block */ + H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* pointer to user data */ H5HF_hdr_t *hdr; /* Shared fractal heap information */ - H5HF_parent_t *par_info = (H5HF_parent_t *)_par_info; /* Pointer to parent information */ + H5HF_parent_t *par_info; /* Pointer to parent information */ H5HF_direct_t *dblock = NULL; /* Direct block info */ const uint8_t *p; /* Pointer into raw data buffer */ haddr_t heap_addr; /* Address of heap header in the file */ @@ -1225,7 +1227,9 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(par_info); + HDassert(udata != NULL); + HDassert(udata->f != NULL); + HDassert(udata->dblock_size > 0); /* Allocate space for the fractal heap direct block */ if(NULL == (dblock = H5FL_MALLOC(H5HF_direct_t))) @@ -1233,10 +1237,11 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, HDmemset(&dblock->cache_info, 0, sizeof(H5AC_info_t)); /* Get the pointer to the shared heap header */ + par_info = (H5HF_parent_t *)(&(udata->par_info)); hdr = par_info->hdr; /* Set the shared heap header's file context for this operation */ - hdr->f = f; + hdr->f = udata->f; /* Share common heap information */ dblock->hdr = hdr; @@ -1244,7 +1249,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header") /* Set block's internal information */ - dblock->size = *size; + dblock->size = udata->dblock_size; dblock->file_size = 0; dblock->blk_off_size = H5HF_SIZEOF_OFFSET_LEN(dblock->size); @@ -1268,7 +1273,6 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, /* Set up parameters to read filtered direct block */ read_size = hdr->pline_root_direct_size; - filter_mask = hdr->pline_root_direct_filter_mask; } /* end if */ else { /* Sanity check */ @@ -1276,7 +1280,6 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, /* Set up parameters to read filtered direct block */ read_size = par_info->iblock->filt_ents[par_info->entry].size; - filter_mask = par_info->iblock->filt_ents[par_info->entry].filter_mask; } /* end else */ /* Allocate buffer to perform I/O filtering on */ @@ -1289,6 +1292,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, /* Push direct block data through I/O filter pipeline */ nbytes = read_size; + filter_mask = udata->filter_mask; if(H5Z_pipeline(&(hdr->pline), H5Z_FLAG_REVERSE, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &read_size, &read_buf) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, NULL, "output pipeline failed") @@ -1320,7 +1324,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version") /* Address of heap that owns this block (just for file integrity checks) */ - H5F_addr_decode(f, &p, &heap_addr); + H5F_addr_decode(udata->f, &p, &heap_addr); if(H5F_addr_ne(heap_addr, hdr->heap_addr)) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block") diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c index 6f476b8..eba253e 100644 --- a/src/H5HFdbg.c +++ 
b/src/H5HFdbg.c @@ -188,6 +188,7 @@ herr_t H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth) { H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5HF_hdr_debug, FAIL) @@ -201,10 +202,14 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, HDassert(indent >= 0); HDassert(fwidth >= 0); + /* Set up user data for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* * Load the fractal heap header. */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, NULL, NULL, H5AC_READ))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* Print opening message */ @@ -394,6 +399,7 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, { H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */ H5HF_direct_t *dblock = NULL; /* Fractal heap direct block info */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ size_t blk_prefix_size; /* Size of prefix for block */ size_t amount_free; /* Amount of free space in block */ uint8_t *marker = NULL; /* Track free space for block */ @@ -412,10 +418,14 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, HDassert(H5F_addr_defined(hdr_addr)); HDassert(block_size > 0); + /* Set up user data for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* * Load the fractal heap header. */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, NULL, NULL, H5AC_READ))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* @@ -526,6 +536,7 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, { H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */ H5HF_indirect_t *iblock = NULL; /* Fractal heap direct block info */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ hbool_t did_protect; /* Whether we protected the indirect block or not */ char temp_str[64]; /* Temporary string, for formatting */ size_t u, v; /* Local index variable */ @@ -544,10 +555,14 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, HDassert(H5F_addr_defined(hdr_addr)); HDassert(nrows > 0); + /* Set up user data for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* * Load the fractal heap header. 
*/ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, NULL, NULL, H5AC_READ))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* @@ -715,6 +730,7 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr, FILE *stream, int indent, int fwidth) { H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */ + H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5HF_sects_debug, FAIL) @@ -728,10 +744,14 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr, HDassert(indent >= 0); HDassert(fwidth >= 0); + /* Set up user data for protect call */ + cache_udata.f = f; + cache_udata.dxpl_id = dxpl_id; + /* * Load the fractal heap header. */ - if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, NULL, NULL, H5AC_READ))) + if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header") /* Initialize the free space information for the heap */ diff --git a/src/H5HFdblock.c b/src/H5HFdblock.c index 8f27bfe..d886f4e 100644 --- a/src/H5HFdblock.c +++ b/src/H5HFdblock.c @@ -438,8 +438,8 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr, size_t dblock_size, H5HF_indirect_t *par_iblock, unsigned par_entry, H5AC_protect_t rw) { - H5HF_parent_t par_info; /* Parent info for loading block */ H5HF_direct_t *dblock; /* Direct block from cache */ + H5HF_dblock_cache_ud_t udata; /* parent and other infor for deserializing direct block */ H5HF_direct_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5HF_man_dblock_protect) @@ -452,12 +452,36 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr, HDassert(dblock_size > 0); /* Set up parent info */ - par_info.hdr = hdr; - par_info.iblock = par_iblock; - par_info.entry = par_entry; + udata.par_info.hdr = hdr; + udata.par_info.iblock = par_iblock; + udata.par_info.entry = par_entry; + + /* set up the file pointer in the user data */ + udata.f = hdr->f; + + /* set up the direct block size */ + udata.dblock_size = dblock_size; + + /* compute the on disk image size -- observe that odi_size and + * dblock_size will be identical if there is no filtering. 
+ */ + if(hdr->filter_len > 0) { + if(par_iblock == NULL) { + udata.filter_mask = hdr->pline_root_direct_filter_mask; + } /* end if */ + else { + /* Sanity check */ + HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, dblock_addr)); + + /* Set up parameters to read filtered direct block */ + udata.filter_mask = par_iblock->filt_ents[par_entry].filter_mask; + } /* end else */ + } /* end if */ + else + udata.filter_mask = 0; /* Protect the direct block */ - if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &dblock_size, &par_info, rw))) + if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, rw))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap direct block") /* Set the return value */ diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c index 468f9c2..54b4ff9 100644 --- a/src/H5HFhdr.c +++ b/src/H5HFhdr.c @@ -670,6 +670,12 @@ H5HF_hdr_dirty(H5HF_hdr_t *hdr) /* Sanity check */ HDassert(hdr); + /* Resize pinned header in cache if I/O filter is present. */ + if(hdr->filter_len > 0) { + if(H5AC_resize_pinned_entry(hdr, (size_t)hdr->heap_size) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap header") + } /* end if */ + /* Mark header as dirty in cache */ if(H5AC_mark_pinned_or_protected_entry_dirty(hdr) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark fractal heap header as dirty") diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c index 66b6390..7b222a4 100644 --- a/src/H5HFiblock.c +++ b/src/H5HFiblock.c @@ -489,6 +489,7 @@ H5HF_man_iblock_root_double(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si haddr_t new_addr; /* New address of indirect block */ hsize_t acc_dblock_free; /* Accumulated free space in direct blocks */ hsize_t next_size; /* The previous value of the "next size" for the new block iterator */ + hsize_t old_iblock_size; /* Old size of indirect block */ unsigned next_row; /* The next row to allocate block in */ unsigned next_entry; /* The previous value of the "next entry" for the new block iterator */ unsigned new_next_entry = 0;/* The new value of the "next entry" for the new block iterator */ @@ -544,6 +545,7 @@ H5HF_man_iblock_root_double(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si * QAK - 3/14/2006 */ /* Free previous indirect block disk space */ + old_iblock_size = iblock->size; if(H5MF_xfree(hdr->f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block file space") } /* end if */ @@ -562,6 +564,12 @@ H5HF_man_iblock_root_double(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap indirect block") } /* end else */ + /* Resize pinned indirect block in the cache, if its changed size */ + if(old_iblock_size != iblock->size) { + if(H5AC_resize_pinned_entry(iblock, (size_t)iblock->size) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap indirect block") + } /* end if */ + /* Move object in cache, if it actually was relocated */ if(H5F_addr_ne(iblock->addr, new_addr)) { if(H5AC_rename(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0) @@ -667,6 +675,7 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id) H5HF_hdr_t *hdr = iblock->hdr; /* Pointer to heap header */ haddr_t new_addr; /* New address of indirect block */ hsize_t acc_dblock_free; /* Accumulated 
free space in direct blocks */ + hsize_t old_size; /* Old size of indirect block */ unsigned max_child_row; /* Row for max. child entry */ unsigned old_nrows; /* Old # of rows */ unsigned new_nrows; /* New # of rows */ @@ -699,6 +708,7 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id) * QAK - 6/12/2006 */ /* Free previous indirect block disk space */ + old_size = iblock->size; if(H5MF_xfree(hdr->f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block file space") } /* end if */ @@ -723,6 +733,12 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id) HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap indirect block") } /* end else */ + /* Resize pinned indirect block in the cache, if it has changed size */ + if(old_size != iblock->size) { + if(H5AC_resize_pinned_entry(iblock, (size_t)iblock->size) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap indirect block") + } /* end if */ + /* Move object in cache, if it actually was relocated */ if(H5F_addr_ne(iblock->addr, new_addr)) { if(H5AC_rename(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0) @@ -1123,13 +1139,20 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr, /* Check for protecting indirect block */ if(must_protect || should_protect) { + H5HF_iblock_cache_ud_t cache_udata; /* User-data for callback */ + /* Set up parent info */ par_info.hdr = hdr; par_info.iblock = par_iblock; par_info.entry = par_entry; + /* Set up user data for protect call */ + cache_udata.f = hdr->f; + cache_udata.par_info = &par_info; + cache_udata.nrows = &iblock_nrows; + /* Protect the indirect block */ - if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &iblock_nrows, &par_info, rw))) + if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, rw))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap indirect block") *did_protect = TRUE; } /* end if */ @@ -1595,11 +1618,11 @@ H5HF_man_iblock_dest(H5HF_indirect_t *iblock) /* Release entry tables */ if(iblock->ents) - H5FL_SEQ_FREE(H5HF_indirect_ent_t, iblock->ents); + iblock->ents = H5FL_SEQ_FREE(H5HF_indirect_ent_t, iblock->ents); if(iblock->filt_ents) - H5FL_SEQ_FREE(H5HF_indirect_filt_ent_t, iblock->filt_ents); + iblock->filt_ents = H5FL_SEQ_FREE(H5HF_indirect_filt_ent_t, iblock->filt_ents); if(iblock->child_iblocks) - H5FL_SEQ_FREE(H5HF_indirect_ptr_t, iblock->child_iblocks); + iblock->child_iblocks = H5FL_SEQ_FREE(H5HF_indirect_ptr_t, iblock->child_iblocks); /* Free fractal heap indirect block info */ iblock = H5FL_FREE(H5HF_indirect_t, iblock); diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h index be52dcd..01600ad 100644 --- a/src/H5HFpkg.h +++ b/src/H5HFpkg.h @@ -468,6 +468,36 @@ typedef struct { hsize_t obj_len; /* Length of object removed (out) */ } H5HF_huge_remove_ud1_t; +/* User data for fractal heap header cache client callback */ +typedef struct H5HF_hdr_cache_ud_t { + H5F_t *f; /* File pointer */ + hid_t dxpl_id; /* DXPL ID for operation (in) */ +} H5HF_hdr_cache_ud_t; + +/* User data for fractal heap indirect block cache client callbacks */ +typedef struct H5HF_iblock_cache_ud_t { + H5HF_parent_t * par_info; /* Parent info */ + H5F_t * f; /* File pointer */ + const unsigned *nrows; /* Number of rows */ +} H5HF_iblock_cache_ud_t; + 
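The par_info and nrows members above are pointers on purpose: as the H5HF_man_iblock_protect() hunk earlier in this file shows, the caller keeps the parent info and the row count on its own stack, and H5HF_cache_iblock_load() dereferences them (iblock->nrows = *udata->nrows). A condensed, illustrative sketch of that caller side follows; the wrapper name example_iblock_protect is invented here, and real code adds the should-protect logic and HGOTO_ERROR handling shown in the hunk.

#include "H5private.h"      /* Generic Functions */
#include "H5ACprivate.h"    /* Metadata cache: H5AC_protect(), H5AC_FHEAP_IBLOCK */
#include "H5HFpkg.h"        /* Fractal heaps: H5HF_hdr_t, H5HF_parent_t, H5HF_iblock_cache_ud_t */

/* Illustrative sketch (hypothetical wrapper), not part of the patch */
static H5HF_indirect_t *
example_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
    unsigned iblock_nrows, H5HF_indirect_t *par_iblock, unsigned par_entry,
    H5AC_protect_t rw)
{
    H5HF_parent_t          par_info;     /* Parent info for loading block */
    H5HF_iblock_cache_ud_t cache_udata;  /* User-data for callback */

    /* Parent linkage that the loaded indirect block will record */
    par_info.hdr    = hdr;
    par_info.iblock = par_iblock;
    par_info.entry  = par_entry;

    /* Both are passed by address; the load callback reads
     * *cache_udata.nrows and cache_udata.par_info->iblock while the
     * protect call is in progress. */
    cache_udata.f        = hdr->f;
    cache_udata.par_info = &par_info;
    cache_udata.nrows    = &iblock_nrows;

    return (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK,
            iblock_addr, &cache_udata, rw);
}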
+/* User data for fractal heap direct block cache client callbacks */ +typedef struct H5HF_dblock_cache_ud_t { + H5HF_parent_t par_info; /* Parent info */ + H5F_t * f; /* File pointer */ + size_t dblock_size; /* size of the direct block, which bears + * no necessary relation to the block + * odi_size -- the size of the on disk + * image of the block. Note that the + * metadata cache is only interested + * in the odi_size, and thus it is this + * value that is passed to the cache in + * calls to it. + */ + unsigned filter_mask; /* Excluded filters for direct block */ +} H5HF_dblock_cache_ud_t; + + /*****************************/ /* Package Private Variables */ /*****************************/ @@ -591,7 +591,7 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/ } /* end if */ } /* end else */ HDassert(H5F_addr_defined(addr)); - if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap") /* Split the free space to make room for the new object */ @@ -654,7 +654,7 @@ H5HG_read(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, void *object/*out*/, HDassert(hobj); /* Load the heap */ - if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, NULL, NULL, H5AC_READ))) + if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "unable to load heap") HDassert(hobj->idx < heap->nused); @@ -736,7 +736,7 @@ H5HG_link(H5F_t *f, hid_t dxpl_id, const H5HG_t *hobj, int adjust) HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file") /* Load the heap */ - if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap") if(adjust != 0) { @@ -799,7 +799,7 @@ H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj) HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file") /* Load the heap */ - if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap") HDassert(hobj->idx < heap->nused); diff --git a/src/H5HGcache.c b/src/H5HGcache.c index 10d683e..74bbd3c 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -63,8 +63,7 @@ /********************/ /* Metadata cache callbacks */ -static H5HG_heap_t *H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, - void *udata2); +static H5HG_heap_t *H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, H5HG_heap_t *heap, unsigned UNUSED * flags_ptr); static herr_t H5HG_dest(H5F_t *f, H5HG_heap_t *heap); @@ -113,8 +112,7 @@ const H5AC_class_t H5AC_GHEAP[1] = {{ *------------------------------------------------------------------------- */ static H5HG_heap_t * -H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * udata1, - void UNUSED * udata2) +H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata) { H5HG_heap_t *heap = NULL; uint8_t *p; @@ -127,8 +125,7 @@ H5HG_load(H5F_t *f, hid_t 
dxpl_id, haddr_t addr, const void UNUSED * udata1, /* check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(!udata1); - HDassert(!udata2); + HDassert(udata); /* Read the initial 4k page */ if(NULL == (heap = H5FL_CALLOC(H5HG_heap_t))) diff --git a/src/H5HGdbg.c b/src/H5HGdbg.c index 07de139..1fc0133 100644 --- a/src/H5HGdbg.c +++ b/src/H5HGdbg.c @@ -73,7 +73,7 @@ H5HG_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, HDassert(indent >= 0); HDassert(fwidth >= 0); - if(NULL == (h = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, NULL, NULL, H5AC_READ))) + if(NULL == (h = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load global heap collection"); fprintf(stream, "%*sGlobal Heap Collection...\n", indent, ""); @@ -455,7 +455,7 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw) prfx_udata.free_block = H5HL_FREE_NULL; /* Protect the local heap prefix */ - if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, NULL, &prfx_udata, rw))) + if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, rw))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap prefix") /* Get the pointer to the heap */ @@ -479,7 +479,7 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw) dblk_udata.loaded = FALSE; /* Protect the local heap data block */ - if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, NULL, &dblk_udata, rw))) + if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, rw))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap data block") /* Pin the prefix, if the data block was loaded from file */ @@ -1071,7 +1071,7 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr) prfx_udata.free_block = H5HL_FREE_NULL; /* Protect the local heap prefix */ - if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, NULL, &prfx_udata, H5AC_WRITE))) + if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix") /* Get the pointer to the heap */ @@ -1088,7 +1088,7 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr) dblk_udata.loaded = FALSE; /* Protect the local heap data block */ - if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, NULL, &dblk_udata, H5AC_WRITE))) + if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap data block") /* Pin the prefix, if the data block was loaded from file */ @@ -1150,7 +1150,7 @@ H5HL_get_size(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t *size) prfx_udata.free_block = H5HL_FREE_NULL; /* Protect the local heap prefix */ - if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, NULL, &prfx_udata, H5AC_READ))) + if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix") /* Get the pointer to the heap */ @@ -1203,7 +1203,7 @@ H5HL_heapsize(H5F_t *f, hid_t dxpl_id, haddr_t addr, hsize_t *heap_size) prfx_udata.free_block = H5HL_FREE_NULL; /* Protect the local heap prefix 
*/ - if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, NULL, &prfx_udata, H5AC_READ))) + if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ))) HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix") /* Get the pointer to the heap */ diff --git a/src/H5HLcache.c b/src/H5HLcache.c index b224343..779486d 100644 --- a/src/H5HLcache.c +++ b/src/H5HLcache.c @@ -69,15 +69,13 @@ /********************/ /* Metadata cache callbacks */ -static void *H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, - const void *udata1, void *udata2); +static void *H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HL_prefix_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing, unsigned *flags_ptr); static herr_t H5HL_prefix_dest(H5F_t *f, void *thing); static herr_t H5HL_prefix_clear(H5F_t *f, void *thing, hbool_t destroy); static herr_t H5HL_prefix_size(const H5F_t *f, const void *thing, size_t *size_ptr); -static void *H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, - const void *udata1, void *udata2); +static void *H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5HL_datablock_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr, void *thing, unsigned *flags_ptr); static herr_t H5HL_datablock_dest(H5F_t *f, void *thing); @@ -168,7 +166,7 @@ H5HL_fl_deserialize(H5HL_t *heap, hsize_t free_block) /* Decode length of this free block */ H5F_DECODE_LENGTH_LEN(p, fl->size, heap->sizeof_size); - if(fl->offset + fl->size > heap->dblk_size) + if((fl->offset + fl->size) > heap->dblk_size) HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, FAIL, "bad heap free list") /* Append node onto list */ @@ -247,24 +245,22 @@ H5HL_fl_serialize(const H5HL_t *heap) *------------------------------------------------------------------------- */ static void * -H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, - void *_udata) +H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - H5HL_t *heap = NULL; /* Local heap */ - H5HL_prfx_t *prfx = NULL; /* Heap prefix deserialized */ - H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for protecting local heap prefix */ + H5HL_t *heap = NULL; /* Local heap */ + H5HL_prfx_t *prfx = NULL; /* Heap prefix deserialized */ + H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */ uint8_t buf[H5HL_SPEC_READ_SIZE]; /* Buffer for decoding */ size_t spec_read_size; /* Size of buffer to speculatively read in */ const uint8_t *p; /* Pointer into decoding buffer */ haddr_t eoa; /* Relative end of file address */ - void *ret_value; /* Return value */ + H5HL_prfx_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5HL_prefix_load) /* check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(!udata1); HDassert(udata); HDassert(udata->sizeof_size > 0); HDassert(udata->sizeof_addr > 0); @@ -291,7 +287,7 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, /* Version */ if(H5HL_VERSION != *p++) - HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong version number in global heap") + HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong version number in local heap") /* Reserved */ p += 3; @@ -609,22 +605,21 @@ H5HL_prefix_size(const H5F_t UNUSED *f, const void *thing, size_t *size_ptr) *------------------------------------------------------------------------- */ static void * 
-H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, - void *_udata) +H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { - H5HL_dblk_t *dblk = NULL; /* Heap data block deserialized */ - H5HL_cache_dblk_ud_t *udata = (H5HL_cache_dblk_ud_t *)_udata; /* User data for protecting local heap data block */ - void *ret_value; /* Return value */ + H5HL_dblk_t *dblk = NULL; /* Local heap data block deserialized */ + H5HL_cache_dblk_ud_t *udata = (H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */ + H5HL_dblk_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5HL_datablock_load) /* check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(!udata1); HDassert(udata); HDassert(udata->heap); HDassert(!udata->heap->single_cache_obj); + HDassert(NULL == udata->heap->dblk); /* Allocate space in memory for the heap data block */ if(NULL == (dblk = H5HL_dblk_new(udata->heap))) @@ -28,6 +28,7 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Dprivate.h" /* Datasets */ #include "H5Eprivate.h" /* Error handling */ #include "H5Gpkg.h" /* Groups */ diff --git a/src/H5Lexternal.c b/src/H5Lexternal.c index 7ccdb68..c5ae92f 100644 --- a/src/H5Lexternal.c +++ b/src/H5Lexternal.c @@ -20,6 +20,7 @@ #define H5_INTERFACE_INIT_FUNC H5L_init_extern_interface #include "H5private.h" /* Generic Functions */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Eprivate.h" /* Error handling */ #include "H5Gpkg.h" /* Groups */ #include "H5Iprivate.h" /* IDs */ @@ -114,8 +114,9 @@ H5MF_init_merge_flags(H5F_t *f) H5MF_aggr_merge_t mapping_type; /* Type of free list mapping */ H5FD_mem_t type; /* Memory type for iteration */ hbool_t all_same; /* Whether all the types map to the same value */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOFUNC(H5MF_init_merge_flags) + FUNC_ENTER_NOAPI(H5MF_init_merge_flags, FAIL) /* check args */ HDassert(f); @@ -192,9 +193,13 @@ H5MF_init_merge_flags(H5F_t *f) /* Merge all allocation types together */ HDmemset(f->shared->fs_aggr_merge, (H5F_FS_MERGE_METADATA | H5F_FS_MERGE_RAWDATA), sizeof(f->shared->fs_aggr_merge)); break; + + default: + HGOTO_ERROR(H5E_RESOURCE, H5E_BADVALUE, FAIL, "invalid mapping type") } /* end switch */ - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5MF_init_merge_flags() */ @@ -1654,7 +1654,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot) udata.common.cont_msg_info = &cont_msg_info; /* Lock the object header into the cache */ - if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, NULL, &udata, prot))) + if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header") /* Check if there are any continuation messages to process */ @@ -1689,7 +1689,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot) /* Bring the chunk into the cache */ /* (which adds to the object header) */ chk_udata.chunk_size = cont_msg_info.msgs[curr_msg].size; - if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, NULL, &chk_udata, prot))) + if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, 
NULL, "unable to load object header chunk") /* Sanity check */ diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 1e7ed9a..d217c74 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -68,15 +68,13 @@ /********************/ /* Metadata cache callbacks */ -static H5O_t *H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata1, - void *_udata2); +static H5O_t *H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5O_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5O_t *oh, unsigned UNUSED * flags_ptr); static herr_t H5O_dest(H5F_t *f, H5O_t *oh); static herr_t H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy); static herr_t H5O_size(const H5F_t *f, const H5O_t *oh, size_t *size_ptr); -static H5O_chunk_proxy_t *H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata1, - void *_udata2); +static H5O_chunk_proxy_t *H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5O_cache_chk_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5O_chunk_proxy_t *chk_proxy, unsigned UNUSED * flags_ptr); static herr_t H5O_cache_chk_dest(H5F_t *f, H5O_chunk_proxy_t *chk_proxy); static herr_t H5O_cache_chk_clear(H5F_t *f, H5O_chunk_proxy_t *chk_proxy, hbool_t destroy); @@ -156,11 +154,10 @@ H5FL_SEQ_DEFINE(H5O_cont_t); *------------------------------------------------------------------------- */ static H5O_t * -H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * _udata1, - void *_udata2) +H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5O_t *oh = NULL; /* Object header read in */ - H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata2; /* User data for callback */ + H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */ H5WB_t *wb = NULL; /* Wrapped buffer for prefix data */ uint8_t read_buf[H5O_SPEC_READ_SIZE]; /* Buffer for speculative read */ const uint8_t *p; /* Pointer into buffer to decode */ @@ -176,7 +173,6 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * _udata1, /* Check arguments */ HDassert(f); HDassert(H5F_addr_defined(addr)); - HDassert(!_udata1); HDassert(udata); HDassert(udata->common.f); HDassert(udata->common.cont_msg_info); @@ -352,7 +348,7 @@ done: /* Release the [possibly partially initialized] object header on errors */ if(!ret_value && oh) if(H5O_free(oh) < 0) - HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, NULL, "unable to destroy object header data") + HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header data") FUNC_LEAVE_NOAPI(ret_value) } /* end H5O_load() */ @@ -641,11 +637,10 @@ H5O_size(const H5F_t UNUSED *f, const H5O_t *oh, size_t *size_ptr) *------------------------------------------------------------------------- */ static H5O_chunk_proxy_t * -H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED * _udata1, - void *_udata2) +H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk proxy object */ - H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata2; /* User data for callback */ + H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */ H5WB_t *wb = NULL; /* Wrapped buffer for prefix data */ uint8_t chunk_buf[H5O_SPEC_READ_SIZE]; /* Buffer for speculative read */ uint8_t *buf; /* Buffer to decode */ diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c index f1668f7..147d9c6 100644 --- a/src/H5Ochunk.c +++ b/src/H5Ochunk.c @@ -184,7 +184,7 @@ H5O_chunk_protect(H5F_t *f, 
hid_t dxpl_id, H5O_t *oh, unsigned idx) chk_udata.chunk_size = oh->chunk[idx].size; /* Get the chunk proxy */ - if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, NULL, &chk_udata, H5AC_WRITE))) + if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk") /* Sanity check */ @@ -301,7 +301,7 @@ H5O_chunk_update_idx(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx) chk_udata.chunk_size = oh->chunk[idx].size; /* Get the chunk proxy */ - if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, NULL, &chk_udata, H5AC_WRITE))) + if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") /* Update index for chunk proxy in cache */ @@ -353,7 +353,7 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx) chk_udata.chunk_size = oh->chunk[idx].size; /* Get the chunk proxy */ - if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, NULL, &chk_udata, H5AC_WRITE))) + if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") /* Sanity check */ diff --git a/src/H5Odbg.c b/src/H5Odbg.c index 8661cfe..81f1e70 100644 --- a/src/H5Odbg.c +++ b/src/H5Odbg.c @@ -511,7 +511,7 @@ H5O_debug_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh, haddr_t addr, FILE *stream, i done: /* Release resources */ if(sequence) - sequence = H5MM_xfree(sequence); + sequence = (unsigned *)H5MM_xfree(sequence); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O_debug_real() */ diff --git a/src/H5Olink.c b/src/H5Olink.c index ed65ee7..0438bb7 100644 --- a/src/H5Olink.c +++ b/src/H5Olink.c @@ -440,7 +440,7 @@ done: if(NULL == ret_value) if(dest) { if(dest->name && dest->name != lnk->name) - dest->name = H5MM_xfree(dest->name); + dest->name = (char *)H5MM_xfree(dest->name); if(NULL == _dest) dest = H5FL_FREE(H5O_link_t ,dest); } /* end if */ diff --git a/src/H5Oname.c b/src/H5Oname.c index 01df68c..f8d3553 100644 --- a/src/H5Oname.c +++ b/src/H5Oname.c @@ -108,7 +108,7 @@ H5O_name_decode(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5O_t UNUSED *open_oh, done: if(NULL == ret_value) { if(mesg) - H5MM_xfree(mesg); + mesg = (H5O_name_t *)H5MM_xfree(mesg); } /* end if */ FUNC_LEAVE_NOAPI(ret_value) @@ -193,7 +193,7 @@ H5O_name_copy(const void *_mesg, void *_dest) done: if(NULL == ret_value) if(dest && NULL == _dest) - dest = H5MM_xfree(dest); + dest = (H5O_name_t *)H5MM_xfree(dest); FUNC_LEAVE_NOAPI(ret_value) } /* end H5O_name_copy() */ @@ -287,11 +287,11 @@ H5S_extent_release(H5S_extent_t *extent) assert(extent); /* Release extent */ - if(extent->type==H5S_SIMPLE) { + if(extent->type == H5S_SIMPLE) { if(extent->size) - H5FL_ARR_FREE(hsize_t,extent->size); + extent->size = H5FL_ARR_FREE(hsize_t, extent->size); if(extent->max) - H5FL_ARR_FREE(hsize_t,extent->max); + extent->max = H5FL_ARR_FREE(hsize_t, extent->max); } /* end if */ done: @@ -356,7 +356,7 @@ H5SM_type_shared(H5F_t *f, unsigned type_id, hid_t dxpl_id) /* Look up the master SOHM table */ if(H5F_addr_defined(f->shared->sohm_addr)) { - if(NULL == (table = (H5SM_master_table_t 
*)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") } /* end if */ else @@ -406,7 +406,7 @@ H5SM_get_fheap_addr(H5F_t *f, hid_t dxpl_id, unsigned type_id, haddr_t *fheap_ad HDassert(fheap_addr); /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Look up index for message type */ @@ -661,7 +661,7 @@ done: if(ret_value == HADDR_UNDEF) { if(list != NULL) { if(list->messages != NULL) - H5FL_ARR_FREE(H5SM_sohm_t, list->messages); + list->messages = H5FL_ARR_FREE(H5SM_sohm_t, list->messages); list = H5FL_FREE(H5SM_list_t, list); } /* end if */ if(addr != HADDR_UNDEF) @@ -805,6 +805,7 @@ static herr_t H5SM_convert_btree_to_list(H5F_t * f, H5SM_index_header_t * header, hid_t dxpl_id) { H5SM_list_t *list = NULL; + H5SM_list_cache_ud_t cache_udata; /* User-data for metadata cache callback */ haddr_t btree_addr; herr_t ret_value = SUCCEED; @@ -822,7 +823,12 @@ H5SM_convert_btree_to_list(H5F_t * f, H5SM_index_header_t * header, hid_t dxpl_i if(HADDR_UNDEF == (header->index_addr = H5SM_create_list(f, header, dxpl_id))) HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "unable to create shared message list") - if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, NULL, header, H5AC_WRITE))) + /* Set up user data for metadata cache callback */ + cache_udata.f = f; + cache_udata.header = header; + + /* Protect the SOHM list */ + if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM list index") /* Delete the B-tree and have messages copy themselves to the @@ -925,7 +931,7 @@ H5SM_can_share(H5F_t *f, hid_t dxpl_id, H5SM_master_table_t *table, if(table) my_table = table; else { - if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") } /* end if */ @@ -1031,7 +1037,7 @@ H5SM_try_share(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned type_id, HGOTO_DONE(FALSE) /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* "complex" sharing checks */ @@ -1166,6 +1172,7 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, { H5SM_list_t *list = NULL; /* List index */ H5SM_mesg_key_t key; /* Key used to search the index */ + H5SM_list_cache_ud_t cache_udata; /* User-data for metadata cache callback */ H5O_shared_t shared; /* Shared H5O message */ hbool_t found = FALSE; /* Was the 
message in the index? */ H5HF_t *fheap = NULL; /* Fractal heap handle */ @@ -1210,8 +1217,12 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, if(header->index_type == H5SM_LIST) { size_t list_pos; /* Position in a list index */ + /* Set up user data for metadata cache callback */ + cache_udata.f = f; + cache_udata.header = header; + /* The index is a list; get it from the cache */ - if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, NULL, header, H5AC_WRITE))) + if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index") /* See if the message is already in the index and get its location. @@ -1421,7 +1432,7 @@ H5SM_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5O_shared_t *sh_mesg) type_id = sh_mesg->msg_type_id; /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_WRITE))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_WRITE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Find the correct index and try to delete from it */ @@ -1682,10 +1693,15 @@ H5SM_delete_from_index(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, /* Try to find the message in the index */ if(header->index_type == H5SM_LIST) { + H5SM_list_cache_ud_t cache_udata; /* User-data for metadata cache callback */ size_t list_pos; /* Position of the message in the list */ + /* Set up user data for metadata cache callback */ + cache_udata.f = f; + cache_udata.header = header; + /* If the index is stored as a list, get it from the cache */ - if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, NULL, header, H5AC_WRITE))) + if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index") /* Find the message in the list */ @@ -1852,7 +1868,7 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id) HDassert(shared->sohm_nindexes > 0 && shared->sohm_nindexes <= H5O_SHMESG_MAX_NINDEXES); /* Read the rest of the SOHM table information from the cache */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Get index conversion limits */ @@ -2009,7 +2025,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id, HDassert(ref_count); /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Find the correct index and find the message in it */ @@ -2040,10 +2056,15 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id, /* Try to find the message in the index */ if(header->index_type == H5SM_LIST) { + H5SM_list_cache_ud_t cache_udata; /* 
User-data for metadata cache callback */ size_t list_pos; /* Position of the message in the list */ + /* Set up user data for metadata cache callback */ + cache_udata.f = f; + cache_udata.header = header; + /* If the index is stored as a list, get it from the cache */ - if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, NULL, header, H5AC_READ))) + if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index") /* Find the message in the list */ @@ -2310,7 +2331,7 @@ H5SM_table_free(H5SM_master_table_t *table) HDassert(table); HDassert(table->indexes); - H5FL_ARR_FREE(H5SM_index_header_t, table->indexes); + table->indexes = H5FL_ARR_FREE(H5SM_index_header_t, table->indexes); table = H5FL_FREE(H5SM_master_table_t, table); @@ -2338,7 +2359,7 @@ H5SM_list_free(H5SM_list_t *list) HDassert(list); HDassert(list->messages); - H5FL_ARR_FREE(H5SM_sohm_t, list->messages); + list->messages = H5FL_ARR_FREE(H5SM_sohm_t, list->messages); list = H5FL_FREE(H5SM_list_t, list); @@ -2397,7 +2418,7 @@ H5SM_table_debug(H5F_t *f, hid_t dxpl_id, haddr_t table_addr, HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, FAIL, "number of indexes must be between 1 and H5O_SHMESG_MAX_NINDEXES") /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") HDfprintf(stream, "%*sShared Message Master Table...\n", indent, ""); @@ -2454,6 +2475,7 @@ H5SM_list_debug(H5F_t *f, hid_t dxpl_id, haddr_t list_addr, H5SM_list_t *list = NULL; /* SOHM index list for message type (if in list form) */ H5SM_index_header_t header; /* A "false" header used to read the list */ + H5SM_list_cache_ud_t cache_udata; /* User-data for metadata cache callback */ unsigned x; /* Counter variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -2477,8 +2499,12 @@ H5SM_list_debug(H5F_t *f, hid_t dxpl_id, haddr_t list_addr, header.index_type = H5SM_LIST; header.index_addr = list_addr; + /* Set up user data for metadata cache callback */ + cache_udata.f = f; + cache_udata.header = &header; + /* Get the list from the cache */ - if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, NULL, &header, H5AC_READ))) + if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, &cache_udata, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index") HDfprintf(stream, "%*sShared Message List Index...\n", indent, ""); @@ -2550,7 +2576,7 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, H5F_info_t *finfo) HDassert(finfo); /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Get SOHM header size */ diff --git a/src/H5SMcache.c b/src/H5SMcache.c index e7c640f..d4a2a39 100644 --- a/src/H5SMcache.c +++ b/src/H5SMcache.c @@ -54,12 +54,12 @@ /********************/ /* Metadata cache (H5AC) callbacks */ -static 
H5SM_master_table_t *H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, void *table); +static H5SM_master_table_t *H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_master_table_t *table); static herr_t H5SM_table_dest(H5F_t *f, H5SM_master_table_t* table); static herr_t H5SM_table_clear(H5F_t *f, H5SM_master_table_t *table, hbool_t destroy); static herr_t H5SM_table_size(const H5F_t *f, const H5SM_master_table_t *table, size_t *size_ptr); -static H5SM_list_t *H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata1, void *udata2); +static H5SM_list_t *H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata); static herr_t H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_list_t *list); static herr_t H5SM_list_dest(H5F_t *f, H5SM_list_t* list); static herr_t H5SM_list_clear(H5F_t *f, H5SM_list_t *list, hbool_t destroy); @@ -114,7 +114,7 @@ const H5AC_class_t H5AC_SOHM_LIST[1] = {{ *------------------------------------------------------------------------- */ static H5SM_master_table_t * -H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, void UNUSED *udata2) +H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata) { H5SM_master_table_t *table = NULL; size_t size; /* Size of SOHM master table on disk */ @@ -460,10 +460,10 @@ H5SM_table_size(const H5F_t *f, const H5SM_master_table_t *table, size_t *size_p *------------------------------------------------------------------------- */ static H5SM_list_t * -H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, void *udata2) +H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata) { H5SM_list_t *list; /* The SOHM list being read in */ - H5SM_index_header_t *header = (H5SM_index_header_t *) udata2; /* Index header for this list */ + H5SM_list_cache_ud_t *udata = (H5SM_list_cache_ud_t *)_udata; /* User data for callback */ H5SM_bt2_ctx_t ctx; /* Message encoding context */ size_t size; /* Size of SOHM list on disk */ H5WB_t *wb = NULL; /* Wrapped buffer for list index data */ @@ -478,7 +478,7 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, FUNC_ENTER_NOAPI_NOINIT(H5SM_list_load) /* Sanity check */ - HDassert(header); + HDassert(udata->header); /* Allocate space for the SOHM list data structure */ if(NULL == (list = H5FL_MALLOC(H5SM_list_t))) @@ -486,17 +486,17 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, HDmemset(&list->cache_info, 0, sizeof(H5AC_info_t)); /* Allocate list in memory as an array*/ - if((list->messages = (H5SM_sohm_t *)H5FL_ARR_MALLOC(H5SM_sohm_t, header->list_max)) == NULL) + if((list->messages = (H5SM_sohm_t *)H5FL_ARR_MALLOC(H5SM_sohm_t, udata->header->list_max)) == NULL) HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "file allocation failed for SOHM list") - list->header = header; + list->header = udata->header; /* Wrap the local buffer for serialized list index info */ if(NULL == (wb = H5WB_wrap(lst_buf, sizeof(lst_buf)))) HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, NULL, "can't wrap buffer") /* Compute the size of the SOHM list on disk */ - size = H5SM_LIST_SIZE(f, header->num_messages); + size = H5SM_LIST_SIZE(udata->f, udata->header->num_messages); /* Get a pointer to a buffer that's large enough for serialized list index */ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size))) @@ -515,11 +515,11 
@@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, p += H5_SIZEOF_MAGIC; /* Read messages into the list array */ - ctx.sizeof_addr = H5F_SIZEOF_ADDR(f); - for(x = 0; x < header->num_messages; x++) { + ctx.sizeof_addr = H5F_SIZEOF_ADDR(udata->f); + for(x = 0; x < udata->header->num_messages; x++) { if(H5SM_message_decode(p, &(list->messages[x]), &ctx) < 0) HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "can't decode shared message") - p += H5SM_SOHM_ENTRY_SIZE(f); + p += H5SM_SOHM_ENTRY_SIZE(udata->f); } /* end for */ /* Read in checksum */ @@ -536,7 +536,7 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void UNUSED *udata1, HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, NULL, "incorrect metadata checksum for shared message list") /* Initialize the rest of the array */ - for(x = header->num_messages; x < header->list_max; x++) + for(x = udata->header->num_messages; x < udata->header->list_max; x++) list->messages[x].location = H5SM_NO_LOC; /* Set return value */ @@ -548,7 +548,7 @@ done: HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer") if(!ret_value && list) { if(list->messages) - H5FL_ARR_FREE(H5SM_sohm_t, list->messages); + list->messages = H5FL_ARR_FREE(H5SM_sohm_t, list->messages); list = H5FL_FREE(H5SM_list_t, list); } /* end if */ diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index b023c97..3aab317 100755 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -32,6 +32,7 @@ #include "H5SMprivate.h" /* Shared Object Header Messages */ /* Other private headers needed by this file */ +#include "H5ACprivate.h" /* Metadata Cache */ #include "H5B2private.h" /* B-trees */ #include "H5HFprivate.h" /* Fractal heaps */ @@ -218,6 +219,12 @@ typedef struct H5SM_bt2_ctx_t { uint8_t sizeof_addr; /* Size of file addresses */ } H5SM_bt2_ctx_t; +/* Callback info for loading a shared message index into the cache */ +typedef struct H5SM_list_cache_ud_t { + H5F_t *f; /* File that shared message index stored as a list is in */ + H5SM_index_header_t *header; /* Index header for this list */ +} H5SM_list_cache_ud_t; + /****************************/ /* Package Variables */ diff --git a/src/H5SMtest.c b/src/H5SMtest.c index 582bc0e..cc8a98f 100644 --- a/src/H5SMtest.c +++ b/src/H5SMtest.c @@ -94,7 +94,7 @@ H5SM_get_mesg_count_test(H5F_t *f, hid_t dxpl_id, unsigned type_id, ssize_t index_num; /* Table index for message type */ /* Look up the master SOHM table */ - if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, NULL, NULL, H5AC_READ))) + if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table") /* Find the correct index for this message type */ diff --git a/src/H5Spoint.c b/src/H5Spoint.c index d2e46cf..24dfe2a 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -645,7 +645,7 @@ done: while(curr) { H5S_pnt_node_t *tmp_node = curr; - curr->pnt = H5MM_xfree(curr->pnt); + curr->pnt = (hsize_t *)H5MM_xfree(curr->pnt); curr = curr->next; tmp_node = H5FL_FREE(H5S_pnt_node_t, tmp_node); } /* end while */ @@ -3165,7 +3165,7 @@ H5T_copy(const H5T_t *old_dt, H5T_copy_t method) /* The object is already open. Free the H5T_shared_t struct * we had been using and use the one that already exists. * Not terribly efficient. 
*/ - H5FL_FREE(H5T_shared_t, new_dt->shared); + new_dt->shared = H5FL_FREE(H5T_shared_t, new_dt->shared); new_dt->shared = reopened_fo; reopened_fo->fo_count++; @@ -3207,7 +3207,7 @@ H5T_copy(const H5T_t *old_dt, H5T_copy_t method) */ /* Only malloc if space has been allocated for members - NAF */ if(new_dt->shared->u.compnd.nalloc > 0) { - new_dt->shared->u.compnd.memb = H5MM_malloc(new_dt->shared->u.compnd.nalloc * + new_dt->shared->u.compnd.memb = (H5T_cmemb_t *)H5MM_malloc(new_dt->shared->u.compnd.nalloc * sizeof(H5T_cmemb_t)); if (NULL==new_dt->shared->u.compnd.memb) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); @@ -3265,9 +3265,9 @@ H5T_copy(const H5T_t *old_dt, H5T_copy_t method) * of each new member with copied values. That is, H5T_copy() is a * deep copy. */ - new_dt->shared->u.enumer.name = H5MM_malloc(new_dt->shared->u.enumer.nalloc * + new_dt->shared->u.enumer.name = (char **)H5MM_malloc(new_dt->shared->u.enumer.nalloc * sizeof(char*)); - new_dt->shared->u.enumer.value = H5MM_malloc(new_dt->shared->u.enumer.nalloc * + new_dt->shared->u.enumer.value = (uint8_t *)H5MM_malloc(new_dt->shared->u.enumer.nalloc * new_dt->shared->size); if(NULL == new_dt->shared->u.enumer.value) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); @@ -3721,7 +3721,7 @@ H5T_set_size(H5T_t *dt, size_t size) H5T_str_t tmp_strpad; /* Temp. strpad info */ /* Get a copy of unsigned char type as the base/parent type */ - if (NULL==(base=H5I_object(H5T_NATIVE_UCHAR))) + if(NULL == (base = (H5T_t *)H5I_object(H5T_NATIVE_UCHAR))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid base datatype"); dt->shared->parent=H5T_copy(base,H5T_COPY_ALL); @@ -3899,8 +3899,8 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset) HGOTO_DONE(1); /* Build an index for each type so the names are sorted */ - if(NULL == (idx1 = H5MM_malloc(dt1->shared->u.compnd.nmembs * sizeof(unsigned))) || - NULL == (idx2 = H5MM_malloc(dt2->shared->u.compnd.nmembs * sizeof(unsigned)))) + if(NULL == (idx1 = (unsigned *)H5MM_malloc(dt1->shared->u.compnd.nmembs * sizeof(unsigned))) || + NULL == (idx2 = (unsigned *)H5MM_malloc(dt2->shared->u.compnd.nmembs * sizeof(unsigned)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed"); for(u = 0; u < dt1->shared->u.compnd.nmembs; u++) idx1[u] = idx2[u] = u; @@ -3977,8 +3977,8 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset) } /* end else */ /* Build an index for each type so the names are sorted */ - if (NULL==(idx1 = H5MM_malloc(dt1->shared->u.enumer.nmembs * sizeof(unsigned))) || - NULL==(idx2 = H5MM_malloc(dt2->shared->u.enumer.nmembs * sizeof(unsigned)))) + if(NULL == (idx1 = (unsigned *)H5MM_malloc(dt1->shared->u.enumer.nmembs * sizeof(unsigned))) || + NULL == (idx2 = (unsigned *)H5MM_malloc(dt2->shared->u.enumer.nmembs * sizeof(unsigned)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed"); for (u=0; u<dt1->shared->u.enumer.nmembs; u++) idx1[u] = u; @@ -4310,7 +4310,7 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name, * Make sure the first entry in the table is the no-op conversion path. 
*/ if(0 == H5T_g.npaths) { - if(NULL == (H5T_g.path = H5MM_malloc(128 * sizeof(H5T_path_t *)))) + if(NULL == (H5T_g.path = (H5T_path_t **)H5MM_malloc(128 * sizeof(H5T_path_t *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path table") H5T_g.apaths = 128; if(NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t))) @@ -4499,7 +4499,7 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name, size_t na = MAX(128, 2 * H5T_g.apaths); H5T_path_t **x; - if(NULL == (x = H5MM_realloc(H5T_g.path, na * sizeof(H5T_path_t*)))) + if(NULL == (x = (H5T_path_t **)H5MM_realloc(H5T_g.path, na * sizeof(H5T_path_t*)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") H5T_g.apaths = na; H5T_g.path = x; diff --git a/src/H5Tenum.c b/src/H5Tenum.c index 6da6931..ec3f2e1 100644 --- a/src/H5Tenum.c +++ b/src/H5Tenum.c @@ -87,7 +87,7 @@ H5Tenum_create(hid_t parent_id) H5TRACE1("i", "i", parent_id); /* Check args */ - if (NULL==(parent=H5I_object_verify(parent_id,H5I_DATATYPE)) || H5T_INTEGER!=parent->shared->type) + if(NULL == (parent = (H5T_t *)H5I_object_verify(parent_id, H5I_DATATYPE)) || H5T_INTEGER != parent->shared->type) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an integer data type") /* Build new type */ @@ -173,9 +173,9 @@ H5Tenum_insert(hid_t type, const char *name, const void *value) H5TRACE3("e", "i*s*x", type, name, value); /* Check args */ - if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE))) + if(NULL == (dt = (H5T_t *)H5I_object_verify(type, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") - if (H5T_ENUM!=dt->shared->type) + if(H5T_ENUM != dt->shared->type) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type") if (!name || !*name) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name specified") @@ -233,13 +233,14 @@ H5T_enum_insert(const H5T_t *dt, const char *name, const void *value) } /* Increase table sizes */ - if (dt->shared->u.enumer.nmembs >= dt->shared->u.enumer.nalloc) { + if(dt->shared->u.enumer.nmembs >= dt->shared->u.enumer.nalloc) { unsigned n = MAX(32, 2*dt->shared->u.enumer.nalloc); - if (NULL==(names=H5MM_realloc(dt->shared->u.enumer.name, n*sizeof(char*)))) + + if(NULL == (names = (char **)H5MM_realloc(dt->shared->u.enumer.name, n * sizeof(char *)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") dt->shared->u.enumer.name = names; - if (NULL==(values=H5MM_realloc(dt->shared->u.enumer.value, n*dt->shared->size))) + if(NULL == (values = (uint8_t *)H5MM_realloc(dt->shared->u.enumer.value, n * dt->shared->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") dt->shared->u.enumer.value = values; dt->shared->u.enumer.nalloc = n; @@ -282,9 +283,9 @@ H5Tget_member_value(hid_t type, unsigned membno, void *value/*out*/) FUNC_ENTER_API(H5Tget_member_value, FAIL) H5TRACE3("e", "iIux", type, membno, value); - if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE))) + if(NULL == (dt = (H5T_t *)H5I_object_verify(type, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") - if (H5T_ENUM!=dt->shared->type) + if(H5T_ENUM != dt->shared->type) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for data type class") if (membno>=dt->shared->u.enumer.nmembs) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid member number") @@ -366,9 +367,9 @@ H5Tenum_nameof(hid_t type, const void *value, char *name/*out*/, size_t size) H5TRACE4("e", "i*xxz", type, value, name, size); /* Check args */ - if 
(NULL==(dt=H5I_object_verify(type,H5I_DATATYPE))) + if(NULL == (dt = (H5T_t *)H5I_object_verify(type, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") - if (H5T_ENUM!=dt->shared->type) + if(H5T_ENUM != dt->shared->type) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type") if (!value) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no value supplied") @@ -457,7 +458,7 @@ H5T_enum_nameof(const H5T_t *dt, const void *value, char *name/*out*/, size_t si HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, NULL, "value is currently not defined") /* Save result name */ - if (!name && NULL==(name=H5MM_malloc(HDstrlen(copied_dt->shared->u.enumer.name[md])+1))) + if(!name && NULL == (name = (char *)H5MM_malloc(HDstrlen(copied_dt->shared->u.enumer.name[md]) + 1))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); HDstrncpy(name, copied_dt->shared->u.enumer.name[md], size); if (HDstrlen(copied_dt->shared->u.enumer.name[md])>=size) @@ -507,9 +508,9 @@ H5Tenum_valueof(hid_t type, const char *name, void *value/*out*/) H5TRACE3("e", "i*sx", type, name, value); /* Check args */ - if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE))) + if(NULL == (dt = (H5T_t *)H5I_object_verify(type, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") - if (H5T_ENUM!=dt->shared->type) + if(H5T_ENUM != dt->shared->type) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type") if (!name || !*name) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name") |
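A second idiom that recurs throughout these hunks is assigning the result of H5MM_xfree, H5FL_FREE and H5FL_ARR_FREE back into the pointer being released (e.g. "dest->name = (char *)H5MM_xfree(dest->name);"), which relies on those helpers returning NULL. A small standalone illustration of why the extra assignment is worthwhile, using a hypothetical xfree() helper rather than the HDF5 routines:

#include <assert.h>
#include <stdlib.h>

/* Hypothetical stand-in for H5MM_xfree(): release the buffer and always
 * return NULL so callers can reset their pointer in the same statement. */
static void *xfree(void *mem)
{
    free(mem);
    return NULL;
}

int main(void)
{
    char *name = (char *)malloc(16);

    /* Without the assignment, "name" would be left dangling after the free;
     * with it, the pointer is nulled in one step, matching the
     * "ptr = (T *)H5MM_xfree(ptr);" pattern used in the diff. */
    name = (char *)xfree(name);
    assert(name == NULL);

    return 0;
}

The explicit pointer casts mirror the (char *), (unsigned *), (H5T_path_t **), etc. casts added to the H5MM_malloc/H5MM_realloc/H5MM_xfree results above, presumably so the sources also compile cleanly under stricter C++-style type checking.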