diff options
author | Quincey Koziol <koziol@hdfgroup.org> | 2008-06-03 19:44:12 (GMT) |
---|---|---|
committer | Quincey Koziol <koziol@hdfgroup.org> | 2008-06-03 19:44:12 (GMT) |
commit | 771bae88881f4ae8102c9553fd10e0938aa3094c (patch) | |
tree | 62ac7bce1e109e1b0b8899ea14da603a2b6e6ffc /src | |
parent | d36f67c0e27a39a54f870c4f917ab2c1754e80d6 (diff) | |
download | hdf5-771bae88881f4ae8102c9553fd10e0938aa3094c.zip hdf5-771bae88881f4ae8102c9553fd10e0938aa3094c.tar.gz hdf5-771bae88881f4ae8102c9553fd10e0938aa3094c.tar.bz2 |
[svn-r15131] Description:
Finish omnibus chunked dataset I/O refactoring, to separate general
actions on chunked datasets from actions that are specific to using the v1
B-tree index.
Cleaned up a few bugs and added some additional tests also.
Tested on:
FreeBSD/32 6.2 (duty) in debug mode
FreeBSD/64 6.2 (liberty) w/C++ & FORTRAN, in debug mode
Linux/32 2.6 (kagiso) w/PGI compilers, w/C++ & FORTRAN, w/threadsafe,
in debug mode
Linux/64-amd64 2.6 (smirom) w/default API=1.6.x, w/C++ & FORTRAN,
in production mode
Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN,
in production mode
Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN,
w/szip filter, in production mode
Mac OS X/32 10.5.2 (amazon) in debug mode
Linux/64-ia64 2.4 (tg-login3) w/parallel, w/FORTRAN, in production mode
Diffstat (limited to 'src')
-rw-r--r-- | src/H5B.c | 469 | ||||
-rw-r--r-- | src/H5Bcache.c | 13 | ||||
-rw-r--r-- | src/H5Bpkg.h | 3 | ||||
-rw-r--r-- | src/H5Bprivate.h | 50 | ||||
-rw-r--r-- | src/H5Dchunk.c | 3047 | ||||
-rw-r--r-- | src/H5Dcompact.c | 50 | ||||
-rw-r--r-- | src/H5Dcontig.c | 70 | ||||
-rw-r--r-- | src/H5Ddbg.c | 2 | ||||
-rw-r--r-- | src/H5Ddeprec.c | 4 | ||||
-rw-r--r-- | src/H5Defl.c | 74 | ||||
-rw-r--r-- | src/H5Dint.c | 263 | ||||
-rw-r--r-- | src/H5Dio.c | 20 | ||||
-rw-r--r-- | src/H5Distore.c | 3210 | ||||
-rw-r--r-- | src/H5Dmpio.c | 46 | ||||
-rw-r--r-- | src/H5Doh.c | 2 | ||||
-rw-r--r-- | src/H5Dpkg.h | 299 | ||||
-rw-r--r-- | src/H5Dprivate.h | 5 | ||||
-rw-r--r-- | src/H5Gname.c | 3 | ||||
-rw-r--r-- | src/H5Gnode.c | 78 | ||||
-rw-r--r-- | src/H5Gstab.c | 35 | ||||
-rw-r--r-- | src/H5Olayout.c | 42 | ||||
-rw-r--r-- | src/H5Oprivate.h | 6 | ||||
-rw-r--r-- | src/H5Ostab.c | 2 | ||||
-rw-r--r-- | src/H5Pint.c | 2 | ||||
-rw-r--r-- | src/H5Pprivate.h | 2 | ||||
-rw-r--r-- | src/H5S.c | 3 | ||||
-rw-r--r-- | src/H5Sprivate.h | 2 |
27 files changed, 4287 insertions(+), 3515 deletions(-)
@@ -128,6 +128,13 @@ /* Local Typedefs */ /******************/ +/* "user data" for iterating over B-tree (collects B-tree metadata size) */ +typedef struct H5B_iter_ud_t { + H5B_info_t *bt_info; /* Information about B-tree */ + void *udata; /* Node type's 'udata' for loading & iterator callback */ +} H5B_info_ud_t; + + /********************/ /* Local Prototypes */ /********************/ @@ -168,13 +175,20 @@ H5FL_DEFINE(H5B_t); /* Library Private Variables */ /*****************************/ -/* Declare a free list to manage the H5B_shared_t struct */ -H5FL_DEFINE(H5B_shared_t); - /*******************/ /* Local Variables */ /*******************/ +/* Declare a free list to manage the H5B_shared_t struct */ +H5FL_DEFINE_STATIC(H5B_shared_t); + +/* Declare a free list to manage the raw page information */ +H5FL_BLK_DEFINE_STATIC(page); + +/* Declare a free list to manage the native key offset sequence information */ +H5FL_SEQ_DEFINE_STATIC(size_t); + + /*------------------------------------------------------------------------- * Function: H5B_create @@ -1143,10 +1157,10 @@ done: /*------------------------------------------------------------------------- - * Function: H5B_iterate + * Function: H5B_iterate_helper * * Purpose: Calls the list callback for each leaf node of the - * B-tree, passing it the UDATA structure. + * B-tree, passing it the caller's UDATA structure. * * Return: Non-negative on success/Negative on failure * @@ -1154,106 +1168,167 @@ done: * matzke@llnl.gov * Jun 23 1997 * - * Modifications: - * Robb Matzke, 1999-04-21 - * The key values are passed to the function which is called. - * - * Robb Matzke, 1999-07-28 - * The ADDR argument is passed by value. - * - * Quincey Koziol, 2002-04-22 - * Changed callback to function pointer from static function - * - * John Mainzer, 6/10/05 - * Modified the function to use the new dirtied parameter of - * of H5AC_unprotect() instead of modifying the is_dirty - * field of the cache info. 
- * *------------------------------------------------------------------------- */ -herr_t -H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, H5B_operator_t op, haddr_t addr, void *udata) +static herr_t +H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, + H5B_operator_t op, void *udata) { H5B_t *bt = NULL; /* Pointer to current B-tree node */ + uint8_t *native = NULL; /* Array of keys in native format */ + haddr_t *child = NULL; /* Array of child pointers */ herr_t ret_value; /* Return value */ - FUNC_ENTER_NOAPI(H5B_iterate, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5B_iterate_helper) /* * Check arguments. */ HDassert(f); HDassert(type); - HDassert(op); HDassert(H5F_addr_defined(addr)); + HDassert(op); HDassert(udata); /* Protect the initial/current node */ if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree node") + HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load B-tree node") if(bt->level > 0) { + haddr_t left_child = bt->child[0]; /* Address of left-most child in node */ + + /* Release current node */ + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, H5_ITER_ERROR, "unable to release B-tree node") + bt = NULL; + /* Keep following the left-most child until we reach a leaf node. 
*/ - if((ret_value = H5B_iterate(f, dxpl_id, type, op, bt->child[0], udata)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node") + if((ret_value = H5B_iterate_helper(f, dxpl_id, type, left_child, op, udata)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, H5_ITER_ERROR, "unable to list B-tree node") } /* end if */ else { + H5B_shared_t *shared; /* Pointer to shared B-tree info */ + unsigned nchildren; /* Number of child pointers */ + haddr_t next_addr; /* Address of next node to the right */ + + /* Get the shared B-tree information */ + shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); + HDassert(shared); + + /* Allocate space for a copy of the native records & child pointers */ + if(NULL == (native = H5FL_BLK_MALLOC(native_block, shared->sizeof_keys))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for shared B-tree native records") + if(NULL == (child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for shared B-tree child addresses") + + /* Cache information from this node */ + nchildren = bt->nchildren; + next_addr = bt->right; + + /* Copy the native keys & child pointers into local arrays */ + HDmemcpy(native, bt->native, shared->sizeof_keys); + HDmemcpy(child, bt->child, (nchildren * sizeof(haddr_t))); + + /* Release current node */ + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, H5_ITER_ERROR, "unable to release B-tree node") + bt = NULL; + /* * We've reached the left-most leaf. Now follow the right-sibling * pointer from leaf to leaf until we've processed all leaves. 
*/ ret_value = H5_ITER_CONT; - while(bt && ret_value == H5_ITER_CONT) { - haddr_t *child; /* Pointer to node's child addresses */ - uint8_t *key; /* Pointer to node's native keys */ + while(ret_value == H5_ITER_CONT) { + haddr_t *curr_child; /* Pointer to node's child addresses */ + uint8_t *curr_native; /* Pointer to node's native keys */ unsigned u; /* Local index variable */ /* * Perform the iteration operator, which might invoke an * application callback. */ - for(u = 0, child = bt->child, key = bt->native; u < bt->nchildren && ret_value == H5_ITER_CONT; u++, child++, key += type->sizeof_nkey) { - ret_value = (*op)(f, dxpl_id, key, *child, key + type->sizeof_nkey, udata); + for(u = 0, curr_child = child, curr_native = native; u < nchildren && ret_value == H5_ITER_CONT; u++, curr_child++, curr_native += type->sizeof_nkey) { + ret_value = (*op)(f, dxpl_id, curr_native, *curr_child, curr_native + type->sizeof_nkey, udata); if(ret_value < 0) HERROR(H5E_BTREE, H5E_CANTLIST, "iterator function failed"); } /* end for */ /* Check for continuing iteration */ if(ret_value == H5_ITER_CONT) { - H5B_t *next_bt; /* Pointer to next B-tree node */ - haddr_t next_addr; /* Address of next node to iterate over */ - - /* Protect the next node to the right, if there is one */ - if(H5F_addr_defined(bt->right)) { + /* Check for another node */ + if(H5F_addr_defined(next_addr)) { + /* Protect the next node to the right */ + addr = next_addr; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, udata, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, H5_ITER_ERROR, "B-tree node") + + /* Cache information from this node */ + nchildren = bt->nchildren; next_addr = bt->right; - if(NULL == (next_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, next_addr, type, udata, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "B-tree node") - } /* end if */ - else { - next_addr = HADDR_UNDEF; - next_bt = NULL; - } /* end if */ - /* Unprotect this node */ - 
if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) { - if(next_bt) { - HDassert(H5F_addr_defined(next_addr)); - if(H5AC_unprotect(f, dxpl_id, H5AC_BT, next_addr, next_bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") - } /* end if */ - HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") - } /* end if */ + /* Copy the native keys & child pointers into local arrays */ + HDmemcpy(native, bt->native, shared->sizeof_keys); + HDmemcpy(child, bt->child, nchildren * sizeof(haddr_t)); - /* Advance to the next node */ - bt = next_bt; - addr = next_addr; + /* Unprotect node */ + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, H5_ITER_ERROR, "unable to release B-tree node") + bt = NULL; + } /* end if */ + else + /* Exit loop */ + break; } /* end if */ - } /* end for */ + } /* end while */ } /* end else */ done: if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") + HDONE_ERROR(H5E_BTREE, H5E_PROTECT, H5_ITER_ERROR, "unable to release B-tree node") + if(native) + (void)H5FL_BLK_FREE(native_block, native); + if(child) + (void)H5FL_SEQ_FREE(haddr_t, child); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B_iterate_helper() */ + + +/*------------------------------------------------------------------------- + * Function: H5B_iterate + * + * Purpose: Calls the list callback for each leaf node of the + * B-tree, passing it the UDATA structure. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Robb Matzke + * matzke@llnl.gov + * Jun 23 1997 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, + H5B_operator_t op, void *udata) +{ + herr_t ret_value; /* Return value */ + + FUNC_ENTER_NOAPI(H5B_iterate, FAIL) + + /* + * Check arguments. + */ + HDassert(f); + HDassert(type); + HDassert(H5F_addr_defined(addr)); + HDassert(op); + HDassert(udata); + + /* Iterate over the B-tree records */ + if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0) + HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed"); FUNC_LEAVE_NOAPI(ret_value) } /* end H5B_iterate() */ @@ -1704,59 +1779,99 @@ done: /*------------------------------------------------------------------------- - * Function: H5B_nodesize - * - * Purpose: Returns the number of bytes needed for this type of - * B-tree node. The size is the size of the header plus - * enough space for 2t child pointers and 2t+1 keys. + * Function: H5B_shared_new * - * If TOTAL_NKEY_SIZE is non-null, what it points to will - * be initialized with the total number of bytes required to - * hold all the key values in native order. + * Purpose: Allocates & constructs a shared v1 B-tree struct for client. * - * Return: Success: Size of node in file. 
+ * Return: Success: non-NULL pointer to struct allocated + * Failure: NULL * - * Failure: 0 - * - * Programmer: Robb Matzke - * matzke@llnl.gov - * Jul 3 1997 - * - * Modifications: + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * May 27 2008 * *------------------------------------------------------------------------- */ -size_t -H5B_nodesize(const H5F_t *f, const H5B_shared_t *shared, - size_t *total_nkey_size/*out*/) +H5B_shared_t * +H5B_shared_new(const H5F_t *f, const H5B_class_t *type, size_t sizeof_rkey) { - size_t size; + H5B_shared_t *shared; /* New shared B-tree struct */ + size_t u; /* Local index variable */ + H5B_shared_t *ret_value; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5B_nodesize) + FUNC_ENTER_NOAPI(H5B_shared_new, NULL) /* * Check arguments. */ - assert(f); - assert(shared); - assert(shared->two_k > 0); - assert(shared->sizeof_rkey > 0); - - /* - * Total native key size. - */ - if (total_nkey_size) - *total_nkey_size = (shared->two_k + 1) * shared->type->sizeof_nkey; + HDassert(type); - /* - * Total node size. 
- */ - size = (H5B_SIZEOF_HDR(f) + /*node header */ + /* Allocate space for the shared structure */ + if(NULL == (shared = H5FL_MALLOC(H5B_shared_t))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "memory allocation failed for shared B-tree info") + + /* Set up the "global" information for this file's groups */ + shared->type = type; + shared->two_k = 2 * H5F_KVALUE(f, type); + shared->sizeof_rkey = sizeof_rkey; + HDassert(shared->sizeof_rkey); + shared->sizeof_keys = (shared->two_k + 1) * type->sizeof_nkey; + shared->sizeof_rnode = (H5B_SIZEOF_HDR(f) + /*node header */ shared->two_k * H5F_SIZEOF_ADDR(f) + /*child pointers */ (shared->two_k + 1) * shared->sizeof_rkey); /*keys */ + HDassert(shared->sizeof_rnode); - FUNC_LEAVE_NOAPI(size) -} + /* Allocate shared buffers */ + if(NULL == (shared->page = H5FL_BLK_MALLOC(page, shared->sizeof_rnode))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "memory allocation failed for B-tree page") +#ifdef H5_CLEAR_MEMORY +HDmemset(shared->page, 0, shared->sizeof_rnode); +#endif /* H5_CLEAR_MEMORY */ + if(NULL == (shared->nkey = H5FL_SEQ_MALLOC(size_t, (size_t)(2 * H5F_KVALUE(f, type) + 1)))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "memory allocation failed for B-tree page") + + /* Initialize the offsets into the native key buffer */ + for(u = 0; u < (2 * H5F_KVALUE(f, type) + 1); u++) + shared->nkey[u] = u * type->sizeof_nkey; + + /* Set return value */ + ret_value = shared; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B_shared_new() */ + + +/*------------------------------------------------------------------------- + * Function: H5B_shared_free + * + * Purpose: Free B-tree shared info + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Tuesday, May 27, 2008 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B_shared_free(void *_shared) +{ + H5B_shared_t *shared = (H5B_shared_t *)_shared; + + 
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5B_shared_free) + + /* Free the raw B-tree node buffer */ + (void)H5FL_BLK_FREE(page, shared->page); + + /* Free the B-tree native key offsets buffer */ + (void)H5FL_SEQ_FREE(size_t, shared->nkey); + + /* Free the shared B-tree info */ + (void)H5FL_FREE(H5B_shared_t, shared); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5B_shared_free() */ /*------------------------------------------------------------------------- @@ -1827,29 +1942,31 @@ done: /*------------------------------------------------------------------------- - * Function: H5B_iterate_size + * Function: H5B_get_info_helper * - * Purpose: Return the amount of storage used for the btree. - * Keep following the left-most child until reaching the leaf node. - * For each level, gather storage for all the nodes on that level. - * For 0 level, also gather storage for the SNODs. + * Purpose: Walks the B-tree nodes, getting information for all of them. * - * Return: Non-negative on success/Negative on failure + * Return: Non-negative on success/Negative on failure * - * Programmer: Vailin Choi - * June 19, 2007 + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Jun 3 2008 * *------------------------------------------------------------------------- */ -herr_t -H5B_iterate_size(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, - H5B_operator_t op, haddr_t addr, H5B_info_ud_t *bh_udata) +static herr_t +H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, + const H5B_info_ud_t *info_udata) { - H5B_t *bt = NULL; /* Pointer to current B-tree node */ - H5B_shared_t *shared; /* Pointer to shared B-tree info */ - herr_t ret_value = SUCCEED; /* Return value */ + H5B_t *bt = NULL; /* Pointer to current B-tree node */ + H5B_shared_t *shared; /* Pointer to shared B-tree info */ + unsigned level; /* Node level */ + size_t sizeof_rnode; /* Size of raw (disk) node */ + haddr_t next_addr; /* Address of next node to the right */ + haddr_t left_child; /* Address of 
left-most child in node */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5B_iterate_size, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5B_get_info_helper) /* * Check arguments. @@ -1857,61 +1974,123 @@ H5B_iterate_size(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, HDassert(f); HDassert(type); HDassert(H5F_addr_defined(addr)); - HDassert(bh_udata); + HDassert(info_udata); + HDassert(info_udata->bt_info); + HDassert(info_udata->udata); /* Protect the initial/current node */ - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, bh_udata->udata, H5AC_READ))) + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, info_udata->udata, H5AC_READ))) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree node") - shared = H5RC_GET_OBJ(bt->rc_shared); + + /* Get the shared B-tree information */ + shared = (H5B_shared_t *)H5RC_GET_OBJ(bt->rc_shared); HDassert(shared); - /* Keep following the left-most child until we reach a leaf node. */ - if(bt->level > 0) - if(H5B_iterate_size(f, dxpl_id, type, op, bt->child[0], bh_udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node") + /* Get the raw node size for iteration */ + sizeof_rnode = shared->sizeof_rnode; - /* Iterate through all nodes at this level of the tree */ - while(bt) { - haddr_t next_addr; /* Address of next node to iterate over */ + /* Cache information from this node */ + left_child = bt->child[0]; + next_addr = bt->right; + level = bt->level; - /* for leaf node with callback, add in the space pointed to by each key */ - /* (currently only used for symbol table nodes) */ - if(bt->level == 0 && op) { - haddr_t *child; /* Pointer to node's child addresses */ - uint8_t *key; /* Pointer to node's native keys */ - unsigned u; /* Local index variable */ + /* Update B-tree info */ + info_udata->bt_info->size += sizeof_rnode; + info_udata->bt_info->num_nodes++; - for(u = 0, child = bt->child, key = bt->native; u < bt->nchildren; 
u++, child++, key += type->sizeof_nkey) - if((*op)(f, dxpl_id, key, *child, key + type->sizeof_nkey, bh_udata->btree_size) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "iterator function failed") - } /* end if */ + /* Release current node */ + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") + bt = NULL; - /* count the size of this node */ - *(bh_udata->btree_size) += H5B_nodesize(f, shared, NULL); + /* + * Follow the right-sibling pointer from node to node until we've + * processed all nodes. + */ + while(H5F_addr_defined(next_addr)) { + /* Protect the next node to the right */ + addr = next_addr; + if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, info_udata->udata, H5AC_READ))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "B-tree node") - /* Get the address of the next node to the right */ + /* Cache information from this node */ next_addr = bt->right; - /* Unprotect current node */ - if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") + /* Update B-tree info */ + info_udata->bt_info->size += sizeof_rnode; + info_udata->bt_info->num_nodes++; - /* Protect bt's next node to the right, if there is one */ - if(H5F_addr_defined(next_addr)) { - addr = next_addr; - if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, type, bh_udata->udata, H5AC_READ))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "B-tree node") - } /* end if */ - else - bt = NULL; + /* Unprotect node */ + if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") + bt = NULL; } /* end while */ + /* Check for another "row" of B-tree nodes to iterate over */ + if(level > 0) { + /* Keep following the left-most child until we reach a leaf node. 
*/ + if(H5B_get_info_helper(f, dxpl_id, type, left_child, info_udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node") + } /* end if */ + done: if(bt && H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, H5AC__NO_FLAGS_SET) < 0) HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree node") FUNC_LEAVE_NOAPI(ret_value) -} /* end H5B_iterate_size() */ +} /* end H5B_get_info_helper() */ + + +/*------------------------------------------------------------------------- + * Function: H5B_get_info + * + * Purpose: Return the amount of storage used for the btree. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * June 19, 2007 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, + H5B_info_t *bt_info, H5B_operator_t op, void *udata) +{ + H5B_info_ud_t info_udata; /* User-data for B-tree size iteration */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5B_get_info, FAIL) + + /* + * Check arguments. 
+ */ + HDassert(f); + HDassert(type); + HDassert(bt_info); + HDassert(H5F_addr_defined(addr)); + HDassert(udata); + + /* Portably initialize B-tree info struct */ + HDmemset(bt_info, 0, sizeof(*bt_info)); + + /* Set up internal user-data for the B-tree 'get info' helper routine */ + info_udata.bt_info = bt_info; + info_udata.udata = udata; + + /* Iterate over the B-tree nodes */ + if(H5B_get_info_helper(f, dxpl_id, type, addr, &info_udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_BADITER, FAIL, "B-tree iteration failed") + + /* Iterate over the B-tree records, making any "leaf" callbacks */ + /* (Only if operator defined) */ + if(op) + if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0) + HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed"); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B_get_info() */ /*------------------------------------------------------------------------- diff --git a/src/H5Bcache.c b/src/H5Bcache.c index 2748963..b129925 100644 --- a/src/H5Bcache.c +++ b/src/H5Bcache.c @@ -397,10 +397,8 @@ static herr_t H5B_compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr) { H5B_shared_t *shared; /* Pointer to shared B-tree info */ - size_t size; - herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT(H5B_compute_size) + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5B_compute_size) /* check arguments */ HDassert(f); @@ -411,13 +409,8 @@ H5B_compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr) HDassert(shared->type); HDassert(size_ptr); - /* Check node's size */ - if ((size = H5B_nodesize(f, shared, NULL)) == 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "H5B_nodesize() failed") - /* Set size value */ - *size_ptr = size; + *size_ptr = shared->sizeof_rnode; -done: - FUNC_LEAVE_NOAPI(ret_value) + FUNC_LEAVE_NOAPI(SUCCEED) } /* H5B_compute_size() */ diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h index cd79cf7..7f1fd37 100644 --- a/src/H5Bpkg.h +++ b/src/H5Bpkg.h @@ -74,6 +74,7 @@ H5FL_EXTERN(H5B_t); 
/******************************/ /* Package Private Prototypes */ /******************************/ -herr_t H5B_dest(H5F_t *f, H5B_t *b); +H5_DLL herr_t H5B_dest(H5F_t *f, H5B_t *b); #endif /*_H5Bpkg_H*/ + diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index 4dfea84..18a2c71 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -124,42 +124,40 @@ typedef struct H5B_class_t { herr_t (*debug_key)(FILE*, H5F_t*, hid_t, int, int, const void*, const void*); } H5B_class_t; -/* "user data" for iterating over B-tree when collecting B-tree metadata size */ -typedef struct H5B_info_ud_t { - void *udata; /* Node type's 'udata' for loading */ - hsize_t *btree_size; /* Accumulated size for B-tree metadata */ -} H5B_info_ud_t; +/* Information about B-tree */ +typedef struct H5B_info_t { + hsize_t size; /* Size of B-tree nodes */ + hsize_t num_nodes; /* Number of B-tree nodes */ +} H5B_info_t; + /*****************************/ /* Library-private Variables */ /*****************************/ -/* Declare a free list to manage the H5B_shared_t struct */ -H5FL_EXTERN(H5B_shared_t); - /***************************************/ /* Library-private Function Prototypes */ /***************************************/ -H5_DLL size_t H5B_nodesize(const H5F_t *f, const H5B_shared_t *shared, - size_t *total_nkey_size); -H5_DLL herr_t H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, void *udata, - haddr_t *addr_p/*out*/); -H5_DLL herr_t H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, - void *udata); -H5_DLL herr_t H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, - void *udata); -H5_DLL herr_t H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, H5B_operator_t - op, haddr_t addr, void *udata); -H5_DLL herr_t H5B_iterate_size(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, H5B_operator_t - op, haddr_t addr, H5B_info_ud_t *bh_udata); -H5_DLL herr_t H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, - 
void *udata); -H5_DLL herr_t H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, - void *udata); +H5_DLL herr_t H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + void *udata, haddr_t *addr_p/*out*/); +H5_DLL herr_t H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, void *udata); +H5_DLL herr_t H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, void *udata); +H5_DLL herr_t H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, H5B_operator_t op, void *udata); +H5_DLL herr_t H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, H5B_info_t *bt_info, H5B_operator_t op, void *udata); +H5_DLL herr_t H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, void *udata); +H5_DLL herr_t H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, + haddr_t addr, void *udata); +H5_DLL H5B_shared_t *H5B_shared_new(const H5F_t *f, const H5B_class_t *type, + size_t sizeof_rkey); +H5_DLL herr_t H5B_shared_free(void *_shared); H5_DLL herr_t H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, - int indent, int fwidth, const H5B_class_t *type, - void *udata); + int indent, int fwidth, const H5B_class_t *type, void *udata); #endif /* _H5Bprivate_H */ diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index ba07725..711b91b 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -28,9 +28,7 @@ #include "H5Eprivate.h" /* Error handling */ #include "H5FLprivate.h" /* Free Lists */ #include "H5Iprivate.h" /* IDs */ -#ifdef H5_HAVE_PARALLEL #include "H5MMprivate.h" /* Memory management */ -#endif /* H5_HAVE_PARALLEL */ #include "H5Vprivate.h" /* Vector and array functions */ @@ -39,24 +37,125 @@ /****************/ /* Default skip list height for storing list of chunks */ -#define H5D_DEFAULT_SKIPLIST_HEIGHT 8 +#define H5D_CHUNK_DEFAULT_SKIPLIST_HEIGHT 8 /* Macros for iterating over chunks to operate on */ #define 
H5D_CHUNK_GET_FIRST_NODE(map) (map->use_single ? (H5SL_node_t *)(1) : H5SL_first(map->sel_chunks)) #define H5D_CHUNK_GET_NODE_INFO(map, node) (map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node)) #define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node)) +/* + * Feature: If this constant is defined then every cache preemption and load + * causes a character to be printed on the standard error stream: + * + * `.': Entry was preempted because it has been completely read or + * completely written but not partially read and not partially + * written. This is often a good reason for preemption because such + * a chunk will be unlikely to be referenced in the near future. + * + * `:': Entry was preempted because it hasn't been used recently. + * + * `#': Entry was preempted because another chunk collided with it. This + * is usually a relatively bad thing. If there are too many of + * these then the number of entries in the cache can be increased. + * + * c: Entry was preempted because the file is closing. + * + * w: A chunk read operation was eliminated because the library is + * about to write new values to the entire chunk. This is a good + * thing, especially on files where the chunk size is the same as + * the disk block size, chunks are aligned on disk block boundaries, + * and the operating system can also eliminate a read operation. 
+ */ + +/*#define H5D_CHUNK_DEBUG */ + /******************/ /* Local Typedefs */ /******************/ +/* Callback info for iteration to prune chunks */ +typedef struct H5D_chunk_it_ud1_t { + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + const H5D_chk_idx_info_t *idx_info; /* Chunked index info */ + const H5D_io_info_t *io_info; /* I/O info for dataset operation */ + const hsize_t *dims; /* New dataset dimensions */ + const hsize_t *down_chunks; /* "down" size of number of chunks in each dimension */ + H5SL_t *outside; /* Skip list to hold chunks outside the new dimensions */ + H5S_t *chunk_space; /* Dataspace for a chunk */ + uint32_t elmts_per_chunk;/* Elements in chunk */ + hsize_t *hyper_start; /* Starting location of hyperslab */ + H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ + hbool_t fb_info_init; /* Whether the fill value buffer has been initialized */ +} H5D_chunk_it_ud1_t; + +/* Skip list node for storing chunks to remove during a "prune" iteration */ +typedef struct H5D_chunk_sl_ck_t { + hsize_t index; /* Index of chunk to remove (must be first) */ + H5D_chunk_rec_t rec; /* Chunk record */ +} H5D_chunk_sl_ck_t; + +/* Skip list callback info when destroying list & removing chunks during "prune" */ +typedef struct H5D_chunk_sl_rm_t { + const H5D_chk_idx_info_t *idx_info; /* I/O info for dataset operation */ + const H5O_layout_t *mesg; /* Layout message */ +} H5D_chunk_sl_rm_t; + +/* Callback info for iteration to obtain chunk address and the index of the chunk for all chunks in the B-tree. 
*/ +typedef struct H5D_chunk_id_ud2_t { + /* down */ + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + const hsize_t *down_chunks; /* "down chunk" element counts for chunks */ + + /* up */ + haddr_t *chunk_addr; /* Array of chunk addresses to fill in */ +} H5D_chunk_it_ud2_t; + +/* Callback info for iteration to copy data */ +typedef struct H5D_chunk_it_ud3_t { + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + H5F_t *file_src; /* Source file for copy */ + H5D_chk_idx_info_t *idx_info_dst; /* Dest. chunk index info object */ + void *buf; /* Buffer to hold chunk data for read/write */ + void *bkg; /* Buffer for background information during type conversion */ + size_t buf_size; /* Buffer size */ + hbool_t do_convert; /* Whether to perform type conversions */ + + /* needed for converting variable-length data */ + hid_t tid_src; /* Datatype ID for source datatype */ + hid_t tid_dst; /* Datatype ID for destination datatype */ + hid_t tid_mem; /* Datatype ID for memory datatype */ + H5T_t *dt_src; /* Source datatype */ + H5T_path_t *tpath_src_mem; /* Datatype conversion path from source file to memory */ + H5T_path_t *tpath_mem_dst; /* Datatype conversion path from memory to dest. file */ + void *reclaim_buf; /* Buffer for reclaiming data */ + size_t reclaim_buf_size; /* Reclaim buffer size */ + uint32_t nelmts; /* Number of elements in buffer */ + H5S_t *buf_space; /* Dataspace describing buffer */ + + /* needed for compressed variable-length data */ + H5O_pline_t *pline; /* Filter pipeline */ + + /* needed for copy object pointed by refs */ + H5O_copy_t *cpy_info; /* Copy options */ +} H5D_chunk_it_ud3_t; + +/* Callback info for iteration to dump index */ +typedef struct H5D_chunk_it_ud4_t { + FILE *stream; /* Output stream */ + hbool_t header_displayed; /* Node's header is displayed? 
*/ + unsigned ndims; /* Number of dimensions for chunk/dataset */ +} H5D_chunk_it_ud4_t; + /********************/ /* Local Prototypes */ /********************/ /* Chunked layout operation callbacks */ +static herr_t H5D_chunk_new(H5F_t *f, hid_t dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist); static herr_t H5D_chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t *fm); @@ -74,6 +173,8 @@ static ssize_t H5D_null_readvv(const H5D_io_info_t *io_info, size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); /* Helper routines */ +static void *H5D_chunk_alloc(size_t size, const H5O_pline_t *pline); +static void *H5D_chunk_xfree(void *chk, const H5O_pline_t *pline); static herr_t H5D_free_chunk_info(void *item, void *key, void *opdata); static herr_t H5D_create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t *io_info); @@ -93,6 +194,7 @@ static herr_t H5D_chunk_mem_cb(void *elem, hid_t type_id, unsigned ndims, /* Compact storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_CHUNK[1] = {{ + H5D_chunk_new, H5D_chunk_io_init, H5D_chunk_read, H5D_chunk_write, @@ -115,6 +217,7 @@ const H5D_layout_ops_t H5D_LOPS_NULL[1] = {{ NULL, NULL, NULL, + NULL, #ifdef H5_HAVE_PARALLEL NULL, NULL, @@ -124,9 +227,111 @@ const H5D_layout_ops_t H5D_LOPS_NULL[1] = {{ NULL }}; +/* Declare a free list to manage the H5F_rdcc_ent_ptr_t sequence information */ +H5FL_SEQ_DEFINE_STATIC(H5D_rdcc_ent_ptr_t); + +/* Declare a free list to manage H5F_rdcc_ent_t objects */ +H5FL_DEFINE_STATIC(H5D_rdcc_ent_t); + /* Declare a free list to manage the H5D_chunk_info_t struct */ H5FL_DEFINE(H5D_chunk_info_t); +/* Declare a free list to manage the chunk sequence information */ +H5FL_BLK_DEFINE_STATIC(chunk); + +/* Declare a free list to manage H5D_chunk_sl_ck_t objects */ +H5FL_DEFINE_STATIC(H5D_chunk_sl_ck_t); + + + 
+/*------------------------------------------------------------------------- + * Function: H5D_chunk_new + * + * Purpose: Constructs new chunked layout information for dataset + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D_chunk_new(H5F_t *f, hid_t dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist) +{ + const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */ + hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ + uint64_t chunk_size; /* Size of chunk in bytes */ + unsigned chunk_ndims = 0; /* Dimensionality of chunk */ + int ndims; /* Rank of dataspace */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_new) + + /* Sanity checks */ + HDassert(f); + HDassert(dset); + HDassert(dc_plist); + + /* Retrieve rank of chunks from property list */ + if(H5P_get(dc_plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve chunk dimensions") + + /* Set up layout information */ + if((ndims = H5S_GET_EXTENT_NDIMS(dset->shared->space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get rank") + dset->shared->layout.u.chunk.ndims = (unsigned)ndims + 1; + HDassert((unsigned)(dset->shared->layout.u.chunk.ndims) <= NELMTS(dset->shared->layout.u.chunk.dim)); + + /* Initialize to no address */ + dset->shared->layout.u.chunk.addr = HADDR_UNDEF; + + /* + * Chunked storage allows any type of data space extension, so we + * don't even bother checking. 
+ */ + if(chunk_ndims != (unsigned)ndims) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the data space") + if(dset->shared->dcpl_cache.efl.nused > 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "external storage not supported with chunked layout") + + /* + * The chunk size of a dimension with a fixed size cannot exceed + * the maximum dimension size + */ + if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, dset->shared->layout.u.chunk.dim) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve chunk size") + dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = H5T_get_size(type); + + /* Sanity check dimensions */ + if(H5S_get_simple_extent_dims(dset->shared->space, NULL, max_dim) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to query maximum dimensions") + for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) + if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < dset->shared->layout.u.chunk.dim[u]) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be <= maximum dimension size for fixed-sized dimensions") + + /* Compute the total size of a chunk */ + /* (Use 64-bit value to ensure that we can detect >4GB chunks) */ + for(u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0]; u < dset->shared->layout.u.chunk.ndims; u++) + chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u]; + + /* Check for chunk larger than can be represented in 32-bits */ + /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */ + if(chunk_size > (uint64_t)0xffffffff) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB") + + /* Retain computed chunk size */ + H5_ASSIGN_OVERFLOW(dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t); + + /* Initialize the chunk cache for the dataset */ + if(H5D_chunk_init(f, dxpl_id, dset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize chunk cache") + +done: + 
FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_new() */ /*------------------------------------------------------------------------- @@ -269,7 +474,7 @@ H5D_chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info else { /* Initialize skip list for chunk selections */ if(NULL == dataset->shared->cache.chunk.sel_chunks) { - if(NULL == (dataset->shared->cache.chunk.sel_chunks = H5SL_create(H5SL_TYPE_HSIZE, 0.5, (size_t)H5D_DEFAULT_SKIPLIST_HEIGHT))) + if(NULL == (dataset->shared->cache.chunk.sel_chunks = H5SL_create(H5SL_TYPE_HSIZE, 0.5, (size_t)H5D_CHUNK_DEFAULT_SKIPLIST_HEIGHT))) HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for chunk selections") } /* end if */ fm->sel_chunks = dataset->shared->cache.chunk.sel_chunks; @@ -416,6 +621,7 @@ done: HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") } /* end if */ if(file_space_normalized) { + /* (Casting away const OK -QAK) */ if(H5S_hyper_denormalize_offset((H5S_t *)file_space, old_offset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") } /* end if */ @@ -424,6 +630,71 @@ done: } /* end H5D_chunk_io_init() */ +/*------------------------------------------------------------------------- + * Function: H5D_chunk_alloc + * + * Purpose: Allocate space for a chunk in memory. This routine allocates + * memory space for non-filtered chunks from a block free list + * and uses malloc()/free() for filtered chunks. 
+ * + * Return: Pointer to memory for chunk on success/NULL on failure + * + * Programmer: Quincey Koziol + * April 22, 2004 + * + *------------------------------------------------------------------------- + */ +static void * +H5D_chunk_alloc(size_t size, const H5O_pline_t *pline) +{ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_alloc) + + HDassert(size); + HDassert(pline); + + if(pline->nused > 0) + ret_value = H5MM_malloc(size); + else + ret_value = H5FL_BLK_MALLOC(chunk, size); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D_chunk_alloc() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_xfree + * + * Purpose: Free space for a chunk in memory. This routine allocates + * memory space for non-filtered chunks from a block free list + * and uses malloc()/free() for filtered chunks. + * + * Return: NULL (never fails) + * + * Programmer: Quincey Koziol + * April 22, 2004 + * + *------------------------------------------------------------------------- + */ +static void * +H5D_chunk_xfree(void *chk, const H5O_pline_t *pline) +{ + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_xfree) + + HDassert(pline); + + if(chk) { + if(pline->nused > 0) + H5MM_xfree(chk); + else + (void)H5FL_BLK_FREE(chunk, chk); + } /* end if */ + + FUNC_LEAVE_NOAPI(NULL) +} /* H5D_chunk_xfree() */ + + /*-------------------------------------------------------------------------- NAME H5D_free_chunk_info @@ -672,9 +943,9 @@ H5D_create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t new_chunk_info->coords[fm->f_ndims]=0; /* Insert the new chunk into the skip list */ - if(H5SL_insert(fm->sel_chunks,new_chunk_info,&new_chunk_info->index) < 0) { - H5D_free_chunk_info(new_chunk_info,NULL,NULL); - HGOTO_ERROR(H5E_DATASPACE,H5E_CANTINSERT,FAIL,"can't insert chunk into skip list") + if(H5SL_insert(fm->sel_chunks, new_chunk_info, &new_chunk_info->index) < 0) { + H5D_free_chunk_info(new_chunk_info, NULL, NULL); + 
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into skip list") } /* end if */ /* Get number of elements selected in chunk */ @@ -1100,26 +1371,29 @@ H5D_chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr) *------------------------------------------------------------------------- */ static hbool_t -H5D_chunk_in_cache(const H5D_io_info_t *io_info) +H5D_chunk_in_cache(const H5D_t *dset, const hsize_t *chunk_offset, + hsize_t chunk_idx) { - H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);/*raw data chunk cache*/ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/ hbool_t found = FALSE; /*already in cache? */ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_in_cache) - HDassert(io_info); + /* Sanity checks */ + HDassert(dset); + HDassert(chunk_offset); /* Check if the chunk is in the cache (but hasn't been written to disk yet) */ if(rdcc->nslots > 0) { - unsigned idx = H5D_CHUNK_HASH(io_info->dset->shared, io_info->store->chunk.index); /* Cache entry index */ + unsigned idx = H5D_CHUNK_HASH(dset->shared, chunk_idx); /* Cache entry index */ H5D_rdcc_ent_t *ent = rdcc->slot[idx]; /* Cache entry */ /* Potential match... 
*/ if(ent) { size_t u; /* Local index variable */ - for(u = 0, found = TRUE; u < io_info->dset->shared->layout.u.chunk.ndims; u++) { - if(io_info->store->chunk.offset[u] != ent->offset[u]) { + for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims; u++) { + if(chunk_offset[u] != ent->offset[u]) { found = FALSE; break; } /* end if */ @@ -1211,20 +1485,16 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ void *chunk; /* Pointer to locked chunk buffer */ haddr_t chunk_addr; /* Chunk address on disk */ - H5D_istore_ud1_t udata; /* B-tree pass-through */ + H5D_chunk_ud_t udata; /* B-tree pass-through */ /* Get the actual chunk information from the skip list node */ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); - /* Pass in chunk's coordinates in a union. */ - io_info->store->chunk.offset = chunk_info->coords; - io_info->store->chunk.index = chunk_info->index; - /* Get the address of the chunk in the file */ - chunk_addr = H5D_istore_get_addr(io_info, &udata); + chunk_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata); /* Check for non-existant chunk & skip it if appropriate */ - if(!H5F_addr_defined(chunk_addr) && !H5D_chunk_in_cache(io_info) + if(!H5F_addr_defined(chunk_addr) && !H5D_chunk_in_cache(io_info->dset, chunk_info->coords, chunk_info->index) && skip_missing_chunks) { /* No chunk cached */ chunk = NULL; @@ -1235,14 +1505,15 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, else { /* Load the chunk into cache and lock it. */ if(H5D_chunk_cacheable(io_info, chunk_addr)) { - size_t tmp_src_accessed_bytes; /* Total accessed size in a chunk */ + /* Pass in chunk's coordinates in a union. 
*/ + io_info->store->chunk.offset = chunk_info->coords; + io_info->store->chunk.index = chunk_info->index; /* Compute # of bytes accessed in chunk */ - tmp_src_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; - H5_ASSIGN_OVERFLOW(src_accessed_bytes, tmp_src_accessed_bytes, size_t, uint32_t); + src_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint))) + if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -1272,7 +1543,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed") /* Release the cache lock on the chunk. */ - if(chunk && H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, src_accessed_bytes) < 0) + if(chunk && H5D_chunk_unlock(io_info, FALSE, idx_hint, chunk, src_accessed_bytes) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") /* Advance to next chunk in list */ @@ -1342,25 +1613,23 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ void *chunk; /* Pointer to locked chunk buffer */ haddr_t chunk_addr; /* Chunk address on disk */ - H5D_istore_ud1_t udata; /* B-tree pass-through */ + H5D_chunk_ud_t udata; /* B-tree pass-through */ /* Get the actual chunk information from the skip list node */ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); - /* Pass in chunk's coordinates in a union. */ - io_info->store->chunk.offset = chunk_info->coords; - io_info->store->chunk.index = chunk_info->index; - /* Load the chunk into cache. But if the whole chunk is written, * simply allocate space instead of load the chunk. 
*/ - chunk_addr = H5D_istore_get_addr(io_info, &udata); + chunk_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata); if(H5D_chunk_cacheable(io_info, chunk_addr)) { hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ - size_t tmp_dst_accessed_bytes; /* Total accessed size in a chunk */ + + /* Pass in chunk's coordinates in a union. */ + io_info->store->chunk.offset = chunk_info->coords; + io_info->store->chunk.index = chunk_info->index; /* Compute # of bytes accessed in chunk */ - tmp_dst_accessed_bytes = chunk_info->chunk_points * type_info->dst_type_size; - H5_ASSIGN_OVERFLOW(dst_accessed_bytes, tmp_dst_accessed_bytes, size_t, uint32_t); + dst_accessed_bytes = chunk_info->chunk_points * type_info->dst_type_size; /* Determine if we will access all the data in the chunk */ if(dst_accessed_bytes != ctg_store.contig.dset_size || @@ -1368,7 +1637,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, entire_chunk = FALSE; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D_istore_lock(io_info, &udata, entire_chunk, &idx_hint))) + if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -1397,7 +1666,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") /* Release the cache lock on the chunk. 
*/ - if(chunk && H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, dst_accessed_bytes) < 0) + if(chunk && H5D_chunk_unlock(io_info, TRUE, idx_hint, chunk, dst_accessed_bytes) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") /* Advance to next chunk in list */ @@ -1461,6 +1730,2712 @@ done: /*------------------------------------------------------------------------- + * Function: H5D_chunk_init + * + * Purpose: Initialize the raw data chunk cache for a dataset. This is + * called when the dataset is initialized. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Robb Matzke + * Monday, May 18, 1998 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Convenience pointer to dataset's chunk cache */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_init, FAIL) + + /* Sanity check */ + HDassert(f); + HDassert(dset); + + if(H5F_RDCC_NBYTES(f) > 0 && H5F_RDCC_NELMTS(f) > 0) { + rdcc->nbytes = H5F_RDCC_NBYTES(f); + rdcc->nslots = H5F_RDCC_NELMTS(f); + rdcc->slot = H5FL_SEQ_CALLOC(H5D_rdcc_ent_ptr_t, rdcc->nslots); + if(NULL == rdcc->slot) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") + + /* Reset any cached chunk info for this dataset */ + H5D_chunk_cinfo_cache_reset(&(rdcc->last)); + } /* end if */ + + /* Compose chunked index info struct */ + idx_info.f = f; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Allocate any indexing structures */ + if((dset->shared->layout.u.chunk.ops->init)(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize indexing information") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_init() */ + + 
+/*------------------------------------------------------------------------- + * Function: H5D_chunk_cinfo_cache_reset + * + * Purpose: Reset the cached chunk info + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * November 27, 2007 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_cinfo_cache_reset(H5D_chunk_cached_t *last) +{ + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_cinfo_cache_reset) + + /* Sanity check */ + HDassert(last); + + /* Indicate that the cached info is not valid */ + last->valid = FALSE; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D_chunk_cinfo_cache_reset() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_cinfo_cache_update + * + * Purpose: Update the cached chunk info + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * November 27, 2007 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D_chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata) +{ + unsigned u; /* Local index variable */ + + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_cinfo_cache_update) + + /* Sanity check */ + HDassert(last); + HDassert(udata); + HDassert(udata->common.mesg); + HDassert(udata->common.offset); + + /* Stored the information to cache */ + for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) + last->offset[u] = udata->common.offset[u]; + last->nbytes = udata->nbytes; + last->filter_mask = udata->filter_mask; + last->addr = udata->addr; + + /* Indicate that the cached info is valid */ + last->valid = TRUE; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D_chunk_cinfo_cache_update() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_cinfo_cache_found + * + * Purpose: Look for chunk info in cache + * + * Return: TRUE/FALSE/FAIL + * + * Programmer: 
Quincey Koziol + * November 27, 2007 + * + *------------------------------------------------------------------------- + */ +static hbool_t +H5D_chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata) +{ + hbool_t ret_value = FALSE; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_cinfo_cache_found) + + /* Sanity check */ + HDassert(last); + HDassert(udata); + HDassert(udata->common.mesg); + HDassert(udata->common.offset); + + /* Check if the cached information is what is desired */ + if(last->valid) { + unsigned u; /* Local index variable */ + + /* Check that the offset is the same */ + for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) + if(last->offset[u] != udata->common.offset[u]) + HGOTO_DONE(FALSE) + + /* Retrieve the information from the cache */ + udata->nbytes = last->nbytes; + udata->filter_mask = last->filter_mask; + udata->addr = last->addr; + + /* Indicate that the data was found */ + HGOTO_DONE(TRUE) + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D_chunk_cinfo_cache_found() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_create + * + * Purpose: Creates a new chunked storage index and initializes the + * layout information with information about the storage. The + * layout info should be immediately written to the object header. + * + * Return: Non-negative on success (with the layout information initialized + * and ready to write to an object header). Negative on failure. 
+ * + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_create(H5D_t *dset /*in,out*/, hid_t dxpl_id) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_create, FAIL) + + /* Check args */ + HDassert(dset); + HDassert(H5D_CHUNKED == dset->shared->layout.type); + HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); +#ifndef NDEBUG +{ + unsigned u; /* Local index variable */ + + for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++) + HDassert(dset->shared->layout.u.chunk.dim[u] > 0); +} +#endif + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Create the index for the chunks */ + if((dset->shared->layout.u.chunk.ops->create)(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create chunk index") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_create() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_get_addr + * + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. 
/*-------------------------------------------------------------------------
 * Function:	H5D_chunk_get_addr
 *
 * Purpose:	Look up the file address of the chunk whose logical offset
 *		is CHUNK_OFFSET.  The per-dataset single-entry chunk-info
 *		cache is consulted first; only on a miss is the chunk index
 *		(e.g. the v1 B-tree) queried, and the result (hit or miss)
 *		is recorded back into that cache.  If _UDATA is non-NULL the
 *		full chunk record (size, filter mask, address) is returned
 *		through it as well.
 *
 * Return:	Success:	File address of the chunk
 *		Failure/absent:	HADDR_UNDEF (a chunk with no file storage
 *				allocated yet is not an error)
 *
 * Programmer:	Albert Cheng
 *              June 27, 1998
 *
 *-------------------------------------------------------------------------
 */
haddr_t
H5D_chunk_get_addr(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
    H5D_chunk_ud_t *_udata)
{
    H5D_chunk_ud_t tmp_udata;   /* Local chunk record, used when caller doesn't want one back */
    H5D_chunk_ud_t *udata;      /* Pointer to information about a chunk */
    haddr_t ret_value;          /* Return value */

    FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_get_addr)

    HDassert(dset);
    HDassert(dset->shared->layout.u.chunk.ndims > 0);
    HDassert(chunk_offset);

    /* Check for udata struct to return (fall back to local storage) */
    udata = (_udata != NULL ? _udata : &tmp_udata);

    /* Initialize the information about the chunk we are looking for */
    udata->common.mesg = &(dset->shared->layout);
    udata->common.offset = chunk_offset;
    udata->nbytes = 0;
    udata->filter_mask = 0;
    udata->addr = HADDR_UNDEF;

    /* Check for cached information before hitting the index */
    if(!H5D_chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) {
        H5D_chk_idx_info_t idx_info;        /* Chunked index info */

        /* Compose chunked index info struct */
        idx_info.f = dset->oloc.file;
        idx_info.dxpl_id = dxpl_id;
        idx_info.layout = &dset->shared->layout;

        /* Go get the chunk information from the index */
        if(!H5F_addr_defined((dset->shared->layout.u.chunk.ops->get_addr)(&idx_info, udata))) {
            /* Cache the fact that the chunk is not in the B-tree
             * (negative result is cached too, so repeated lookups of an
             * unallocated chunk don't re-query the index) */
            H5D_chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);

            HGOTO_DONE(HADDR_UNDEF)
        } /* end if */

        /* Cache the information retrieved */
        HDassert(H5F_addr_defined(udata->addr));
        H5D_chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
    } /* end if */

    /* Success!  Set the return value */
    ret_value = udata->addr;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_chunk_get_addr() */


/*-------------------------------------------------------------------------
 * Function:	H5D_chunk_flush_entry
 *
 * Purpose:	Writes a chunk to disk.  If RESET is non-zero then the
 *		entry is cleared -- it's slightly faster to flush a chunk if
 *		the RESET flag is turned on because it results in one fewer
 *		memory copy.
 *
 *		If filters are present, the chunk is pushed through the I/O
 *		pipeline first (into a scratch buffer unless RESET allows the
 *		entry's own buffer to be consumed), then inserted/resized in
 *		the chunk index and written with H5F_block_write().
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Robb Matzke
 *              Thursday, May 21, 1998
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache,
    H5D_rdcc_ent_t *ent, hbool_t reset)
{
    void *buf = NULL;                   /* Temporary buffer */
    hbool_t point_of_no_return = FALSE; /* Set once the entry's buffer has been handed to the pipeline and can't be restored */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_flush_entry)

    HDassert(dset);
    HDassert(dxpl_cache);
    HDassert(ent);
    HDassert(!ent->locked);

    buf = ent->chunk;
    if(ent->dirty) {
        H5D_chunk_ud_t udata;           /* pass through B-tree */
        hbool_t must_insert = FALSE;    /* Whether the chunk must go through the "insert" method */

        /* Set up user data for index callbacks */
        udata.common.mesg = &dset->shared->layout;
        udata.common.offset = ent->offset;
        udata.filter_mask = 0;
        udata.nbytes = ent->chunk_size;
        udata.addr = ent->chunk_addr;

        /* Should the chunk be filtered before writing it to disk? */
        if(dset->shared->dcpl_cache.pline.nused) {
            size_t alloc = ent->alloc_size;     /* Bytes allocated for BUF */
            size_t nbytes;                      /* Chunk size (in bytes) */

            if(!reset) {
                /*
                 * Copy the chunk to a new buffer before running it through
                 * the pipeline because we'll want to save the original buffer
                 * for later.
                 */
                H5_ASSIGN_OVERFLOW(alloc, ent->chunk_size, uint32_t, size_t);
                if(NULL == (buf = H5MM_malloc(alloc)))
                    HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline")
                HDmemcpy(buf, ent->chunk, ent->chunk_size);
            } /* end if */
            else {
                /*
                 * If we are reseting and something goes wrong after this
                 * point then it's too late to recover because we may have
                 * destroyed the original data by calling H5Z_pipeline().
                 * The only safe option is to continue with the reset
                 * even if we can't write the data to disk.
                 */
                point_of_no_return = TRUE;
                ent->chunk = NULL;
            } /* end else */
            H5_ASSIGN_OVERFLOW(nbytes, udata.nbytes, uint32_t, size_t);
            /* Run the chunk through the output filter pipeline (may reallocate BUF) */
            if(H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), dxpl_cache->err_detect,
                    dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0)
                HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
            H5_ASSIGN_OVERFLOW(udata.nbytes, nbytes, size_t, uint32_t);

            /* Indicate that the chunk must go through 'insert' method
             * (filtering may have changed its size, so the index record
             * must be created or resized) */
            must_insert = TRUE;
        } /* end if */
        else if(!H5F_addr_defined(udata.addr))
            /* Indicate that the chunk must go through 'insert' method */
            must_insert = TRUE;

        /* Check if the chunk needs to be 'inserted' (could exist already and
         * the 'insert' operation could resize it)
         */
        if(must_insert) {
            H5D_chk_idx_info_t idx_info;        /* Chunked index info */

            /* Compose chunked index info struct */
            idx_info.f = dset->oloc.file;
            idx_info.dxpl_id = dxpl_id;
            idx_info.layout = &dset->shared->layout;

            /* Create the chunk it if it doesn't exist, or reallocate the chunk
             * if its size changed.
             */
            if((dset->shared->layout.u.chunk.ops->insert)(&idx_info, &udata) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk")

            /* Update the chunk entry's address, in case it was allocated or relocated */
            ent->chunk_addr = udata.addr;
        } /* end if */

        /* Write the data to the file */
        HDassert(H5F_addr_defined(udata.addr));
        if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, dxpl_id, buf) < 0)
            HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")

        /* Cache the chunk's info, in case it's accessed again shortly */
        H5D_chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, &udata);

        /* Mark cache entry as clean */
        ent->dirty = FALSE;

        /* Increment # of flushed entries */
        dset->shared->cache.chunk.stats.nflushes++;
    } /* end if */

    /* Reset the entry, but do not free it or remove it from the list */
    if(reset) {
        point_of_no_return = FALSE;
        if(buf == ent->chunk)
            buf = NULL;
        if(ent->chunk != NULL)
            ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
    } /* end if */

done:
    /* Free the temp buffer only if it's different than the entry chunk */
    if(buf != ent->chunk)
        H5MM_xfree(buf);

    /*
     * If we reached the point of no return then we have no choice but to
     * reset the entry.  This can only happen if RESET is true but the
     * output pipeline failed.  Do not free the entry or remove it from the
     * list.
     */
    if(ret_value < 0 && point_of_no_return) {
        if(ent->chunk)
            ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_flush_entry() */
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Robb Matzke + * Thursday, May 21, 1998 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, + H5D_rdcc_ent_t *ent, hbool_t flush) +{ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_cache_evict) + + HDassert(dset); + HDassert(dxpl_cache); + HDassert(ent); + HDassert(!ent->locked); + HDassert(ent->idx < rdcc->nslots); + + if(flush) { + /* Flush */ + if(H5D_chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") + } /* end if */ + else { + /* Don't flush, just free chunk */ + if(ent->chunk != NULL) + ent->chunk = (uint8_t *)H5D_chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline)); + } /* end else */ + + /* Unlink from list */ + if(ent->prev) + ent->prev->next = ent->next; + else + rdcc->head = ent->next; + if(ent->next) + ent->next->prev = ent->prev; + else + rdcc->tail = ent->prev; + ent->prev = ent->next = NULL; + + /* Remove from cache */ + rdcc->slot[ent->idx] = NULL; + ent->idx = UINT_MAX; + rdcc->nbytes -= ent->chunk_size; + --rdcc->nused; + + /* Free */ + H5FL_FREE(H5D_rdcc_ent_t, ent); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_cache_evict() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_cache_prune + * + * Purpose: Prune the cache by preempting some things until the cache has + * room for something which is SIZE bytes. Only unlocked + * entries are considered for preemption. 
/*-------------------------------------------------------------------------
 * Function:	H5D_chunk_cache_prune
 *
 * Purpose:	Prune the cache by preempting some things until the cache has
 *		room for something which is SIZE bytes.  Only unlocked
 *		entries are considered for preemption.
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Robb Matzke
 *              Thursday, May 21, 1998
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
    const H5D_dxpl_cache_t *dxpl_cache, size_t size)
{
    const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
    /* NOTE(review): TOTAL is the cache's byte count at entry; the loop below
     * evicts until at least SIZE bytes have been freed relative to it.
     * Confirm that rdcc->nbytes is intended as the budget here (elsewhere it
     * is treated as *current* usage). */
    size_t total = rdcc->nbytes;
    const int nmeth = 2;        /* Number of preemption methods */
    int w[1];                   /* Weighting as an interval (only w[0] is ever used: nmeth - 1 == 1) */
    H5D_rdcc_ent_t *p[2], *cur; /* List pointers, one per method */
    H5D_rdcc_ent_t *n[2];       /* List next pointers */
    int nerrors = 0;            /* Accumulated error count during preemptions */
    herr_t ret_value = SUCCEED; /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_cache_prune)

    /*
     * Preemption is accomplished by having multiple pointers (currently two)
     * slide down the list beginning at the head.  Pointer p(N+1) will start
     * traversing the list when pointer pN reaches wN percent of the original
     * list.  In other words, preemption method N gets to consider entries in
     * approximate least recently used order w0 percent before method N+1
     * where 100% means that method N will run to completion before method N+1
     * begins.  The pointers participating in the list traversal are each
     * given a chance at preemption before any of the pointers are advanced.
     */
    w[0] = (int)(rdcc->nused * H5F_RDCC_W0(dset->oloc.file));
    p[0] = rdcc->head;
    p[1] = NULL;

    while((p[0] || p[1]) && (rdcc->nbytes + size) > total) {
        int i;          /* Local index variable */

        /* Introduce new pointers (start method N+1 once method N's weight is used up) */
        for(i = 0; i < nmeth - 1; i++)
            if(0 == w[i])
                p[i + 1] = rdcc->head;

        /* Compute next value for each pointer, before anything is evicted */
        for(i = 0; i < nmeth; i++)
            n[i] = p[i] ? p[i]->next : NULL;

        /* Give each method a chance */
        for(i = 0; i < nmeth && (rdcc->nbytes + size) > total; i++) {
            if(0 == i && p[0] && !p[0]->locked &&
                    ((0 == p[0]->rd_count && 0 == p[0]->wr_count) ||
                     (0 == p[0]->rd_count && p[0]->chunk_size == p[0]->wr_count) ||
                     (p[0]->chunk_size == p[0]->rd_count && 0 == p[0]->wr_count))) {
                /*
                 * Method 0: Preempt entries that have been completely written
                 * and/or completely read but not entries that are partially
                 * written or partially read.
                 */
                cur = p[0];
            } else if(1 == i && p[1] && !p[1]->locked) {
                /*
                 * Method 1: Preempt the entry without regard to
                 * considerations other than being locked.  This is the last
                 * resort preemption.
                 */
                cur = p[1];
            } else {
                /* Nothing to preempt at this point */
                cur = NULL;
            }

            if(cur) {
                int j;          /* Local index variable */

                /* Invalidate any traversal pointers that refer to the entry
                 * about to be evicted, so they aren't dereferenced later */
                for(j = 0; j < nmeth; j++) {
                    if(p[j] == cur)
                        p[j] = NULL;
                    if(n[j] == cur)
                        n[j] = cur->next;
                } /* end for */
                if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, cur, TRUE) < 0)
                    nerrors++;
            } /* end if */
        } /* end for */

        /* Advance pointers */
        for(i = 0; i < nmeth; i++)
            p[i] = n[i];
        for(i = 0; i < nmeth - 1; i++)
            w[i] -= 1;
    } /* end while */

    if(nerrors)
        HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to preempt one or more raw data cache entry")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_cache_prune() */
+ * + * If RELAX is non-zero and the chunk isn't in the cache then + * don't try to read it from the file, but just allocate an + * uninitialized buffer to hold the result. This is intended + * for output functions that are about to overwrite the entire + * chunk. + * + * Return: Success: Ptr to a file chunk. + * + * Failure: NULL + * + * Programmer: Robb Matzke + * Thursday, May 21, 1998 + * + *------------------------------------------------------------------------- + */ +void * +H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, + hbool_t relax, unsigned *idx_hint/*in,out*/) +{ + H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ + const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ + const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ + H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ + hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/ + H5D_rdcc_ent_t *ent = NULL; /*cache entry */ + unsigned idx = 0; /*hash index number */ + hbool_t found = FALSE; /*already in cache? 
*/ + haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */ + size_t chunk_size; /*size of a chunk */ + void *chunk = NULL; /*the file chunk */ + unsigned u; /*counters */ + void *ret_value; /*return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_lock) + + HDassert(io_info); + HDassert(dset); + HDassert(io_info->dxpl_cache); + HDassert(io_info->store); + HDassert(TRUE == H5P_isa_class(io_info->dxpl_id, H5P_DATASET_XFER)); + + /* Get the chunk's size */ + HDassert(layout->u.chunk.size > 0); + H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + + /* Search for the chunk in the cache */ + if(rdcc->nslots > 0) { + idx = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index); + ent = rdcc->slot[idx]; + + if(ent) + for(u = 0, found = TRUE; u < layout->u.chunk.ndims; u++) + if(io_info->store->chunk.offset[u] != ent->offset[u]) { + found = FALSE; + break; + } /* end if */ + } /* end if */ + + if(found) { + /* + * Already in the cache. Count a hit. + */ + rdcc->stats.nhits++; + } /* end if */ + else if(relax) { + /* + * Not in the cache, but we're about to overwrite the whole thing + * anyway, so just allocate a buffer for it but don't initialize that + * buffer with the file contents. Count this as a hit instead of a + * miss because we saved ourselves lots of work. + */ + rdcc->stats.nhits++; + + if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + + /* In the case that some dataset functions look through this data, + * clear it to all 0s. */ + HDmemset(chunk, 0, chunk_size); + } /* end if */ + else { + H5D_chunk_ud_t tmp_udata; /*B-tree pass-through */ + + if(udata != NULL) + chunk_addr = udata->addr; + else { + /* Point at temporary storage for B-tree pass through */ + udata = &tmp_udata; + + /* + * Not in the cache. Read it from the file and count this as a miss + * if it's in the file or an init if it isn't. 
+ */ + chunk_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, io_info->store->chunk.offset, udata); + } /* end else */ + + /* Check if the chunk exists on disk */ + if(H5F_addr_defined(chunk_addr)) { + size_t chunk_alloc = 0; /*allocated chunk size */ + + /* Chunk size on disk isn't [likely] the same size as the final chunk + * size in memory, so allocate memory big enough. */ + H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t); + if(NULL == (chunk = H5D_chunk_alloc(chunk_alloc, pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk") + + if(pline->nused) { + if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect, + io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed") + H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t); + } /* end if */ + + /* Increment # of cache misses */ + rdcc->stats.nmisses++; + } /* end if */ + else { + H5D_fill_value_t fill_status; + +#ifdef OLD_WAY + /* Clear the error stack from not finding the chunk on disk */ + H5E_clear_stack(NULL); +#endif /* OLD_WAY */ + + /* Chunk size on disk isn't [likely] the same size as the final chunk + * size in memory, so allocate memory big enough. 
*/ + if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + + if(H5P_is_fill_value_defined(fill, &fill_status) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined") + + if(fill->fill_time == H5D_FILL_TIME_ALLOC || + (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) { + /* + * The chunk doesn't exist in the file. Replicate the fill + * value throughout the chunk, if the fill value is defined. + */ + + /* Initialize the fill value buffer */ + /* (use the compact dataset storage buffer as the fill value buffer) */ + if(H5D_fill_init(&fb_info, chunk, FALSE, + NULL, NULL, NULL, NULL, + &dset->shared->dcpl_cache.fill, dset->shared->type, + dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info") + fb_info_init = TRUE; + + /* Check for VL datatype & non-default fill value */ + if(fb_info.has_vlen_fill_type) + /* Fill the buffer with VL datatype fill values */ + if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer") + } /* end if */ + else + HDmemset(chunk, 0, chunk_size); + + /* Increment # of creations */ + rdcc->stats.ninits++; + } /* end else */ + } /* end else */ + HDassert(found || chunk_size > 0); + + if(!found && rdcc->nslots > 0 && chunk_size <= rdcc->nbytes && + (!ent || !ent->locked)) { + /* + * Add the chunk to the cache only if the slot is not already locked. + * Preempt enough things from the cache to make room. 
+ */ + if(ent) { + if(H5D_chunk_cache_evict(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache") + } /* end if */ + if(H5D_chunk_cache_prune(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, chunk_size) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache") + + /* Create a new entry */ + ent = H5FL_MALLOC(H5D_rdcc_ent_t); + ent->locked = 0; + ent->dirty = FALSE; + ent->chunk_addr = chunk_addr; + H5_ASSIGN_OVERFLOW(ent->chunk_size, chunk_size, size_t, uint32_t); + ent->alloc_size = chunk_size; + for(u = 0; u < layout->u.chunk.ndims; u++) + ent->offset[u] = io_info->store->chunk.offset[u]; + H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t); + H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t); + ent->chunk = (uint8_t *)chunk; + + /* Add it to the cache */ + HDassert(NULL == rdcc->slot[idx]); + rdcc->slot[idx] = ent; + ent->idx = idx; + rdcc->nbytes += chunk_size; + rdcc->nused++; + + /* Add it to the linked list */ + ent->next = NULL; + if(rdcc->tail) { + rdcc->tail->next = ent; + ent->prev = rdcc->tail; + rdcc->tail = ent; + } /* end if */ + else { + rdcc->head = rdcc->tail = ent; + ent->prev = NULL; + } /* end else */ + found = TRUE; + } else if(!found) { + /* + * The chunk is larger than the entire cache so we don't cache it. + * This is the reason all those arguments have to be repeated for the + * unlock function. + */ + ent = NULL; + idx = UINT_MAX; + } else { + /* + * The chunk is not at the beginning of the cache; move it backward + * by one slot. This is how we implement the LRU preemption + * algorithm. 
+ */ + HDassert(ent); + if(ent->next) { + if(ent->next->next) + ent->next->next->prev = ent; + else + rdcc->tail = ent; + ent->next->prev = ent->prev; + if(ent->prev) + ent->prev->next = ent->next; + else + rdcc->head = ent->next; + ent->prev = ent->next; + ent->next = ent->next->next; + ent->prev->next = ent; + } /* end if */ + } /* end else */ + + /* Lock the chunk into the cache */ + if(ent) { + HDassert(!ent->locked); + ent->locked = TRUE; + chunk = ent->chunk; + } /* end if */ + + if(idx_hint) + *idx_hint = idx; + + /* Set return value */ + ret_value = chunk; + +done: + /* Release the fill buffer info, if it's been initialized */ + if(fb_info_init && H5D_fill_term(&fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, NULL, "Can't release fill buffer info") + + /* Release the chunk allocated, on error */ + if(!ret_value) + if(chunk) + chunk = H5D_chunk_xfree(chunk, pline); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_lock() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_unlock + * + * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and + * OFFSET arguments should be the same as for H5D_chunk_lock(). + * The DIRTY argument should be set to non-zero if the chunk has + * been modified since it was locked. The IDX_HINT argument is + * the returned index hint from the lock operation and BUF is + * the return value from the lock. + * + * The NACCESSED argument should be the number of bytes accessed + * for reading or writing (depending on the value of DIRTY). + * It's only purpose is to provide additional information to the + * preemption policy. 
/*-------------------------------------------------------------------------
 * Function:	H5D_chunk_unlock
 *
 * Purpose:	Unlocks a previously locked chunk.  The LAYOUT, COMP, and
 *		OFFSET arguments should be the same as for H5D_chunk_lock().
 *		The DIRTY argument should be set to non-zero if the chunk has
 *		been modified since it was locked.  The IDX_HINT argument is
 *		the returned index hint from the lock operation and BUF is
 *		the return value from the lock.
 *
 *		The NACCESSED argument should be the number of bytes accessed
 *		for reading or writing (depending on the value of DIRTY).
 *		It's only purpose is to provide additional information to the
 *		preemption policy.
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Robb Matzke
 *              Thursday, May 21, 1998
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_chunk_unlock(const H5D_io_info_t *io_info, hbool_t dirty, unsigned idx_hint,
    void *chunk, uint32_t naccessed)
{
    const H5O_layout_t *layout = &(io_info->dset->shared->layout);      /* Dataset layout */
    const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
    H5D_rdcc_ent_t *ent = NULL;
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_unlock)

    HDassert(io_info);

    if(UINT_MAX == idx_hint) {
        /*
         * It's not in the cache, probably because it's too big.  If it's
         * dirty then flush it to disk.  In any case, free the chunk.
         * Note: we have to copy the layout and filter messages so we
         * don't discard the `const' qualifier.
         */
        if(dirty) {
            H5D_rdcc_ent_t x;   /* Fake cache entry, just for the flush call */

            HDmemset(&x, 0, sizeof(x));
            x.dirty = TRUE;
            HDmemcpy(x.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(x.offset[0]));
            HDassert(layout->u.chunk.size > 0);
            x.chunk_addr = HADDR_UNDEF;
            x.chunk_size = layout->u.chunk.size;
            H5_ASSIGN_OVERFLOW(x.alloc_size, x.chunk_size, uint32_t, size_t);
            x.chunk = (uint8_t *)chunk;

            /* Flush with RESET=TRUE, which also frees the chunk buffer */
            if(H5D_chunk_flush_entry(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, &x, TRUE) < 0)
                HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
        } /* end if */
        else {
            if(chunk)
                chunk = H5D_chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
        } /* end else */
    } /* end if */
    else {
        /* Sanity check */
        HDassert(idx_hint < rdcc->nslots);
        HDassert(rdcc->slot[idx_hint]);
        HDassert(rdcc->slot[idx_hint]->chunk == chunk);

        /*
         * It's in the cache so unlock it.  Update the partial-access byte
         * counters that feed the preemption policy (method 0 in
         * H5D_chunk_cache_prune only evicts fully-read/fully-written chunks).
         */
        ent = rdcc->slot[idx_hint];
        HDassert(ent->locked);
        if(dirty) {
            ent->dirty = TRUE;
            ent->wr_count -= MIN(ent->wr_count, naccessed);
        } /* end if */
        else
            ent->rd_count -= MIN(ent->rd_count, naccessed);
        ent->locked = FALSE;
    } /* end else */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_unlock() */


/*-------------------------------------------------------------------------
 * Function:	H5D_chunk_flush
 *
 * Purpose:	Writes all dirty chunks to disk and optionally preempts them
 *		from the cache (when FLAGS contains H5F_FLUSH_INVALIDATE).
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Robb Matzke
 *              Thursday, May 21, 1998
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D_chunk_flush(H5D_t *dset, hid_t dxpl_id, unsigned flags)
{
    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
    H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache;   /* Data transfer property cache */
    H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
    unsigned nerrors = 0;               /* Count failures but keep going, so all chunks get a chance to flush */
    H5D_rdcc_ent_t *ent, *next;         /* NEXT is saved before a possible eviction frees ENT */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI(H5D_chunk_flush, FAIL)

    /* Fill the DXPL cache values for later use */
    if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")

    /* Loop over all entries in the chunk cache */
    for(ent = rdcc->head; ent; ent = next) {
        next = ent->next;
        if((flags & H5F_FLUSH_INVALIDATE)) {
            if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
                nerrors++;
        } else {
            if(H5D_chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, FALSE) < 0)
                nerrors++;
        }
    } /* end for */
    if(nerrors)
        HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_flush() */
Purpose: Simply counts the number of chunks for a dataset. + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Robb Matzke + * Wednesday, April 21, 1999 + * + *------------------------------------------------------------------------- + */ +/* ARGSUSED */ +static int +H5D_chunk_allocated_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + hsize_t *nbytes = (hsize_t *)_udata; + + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_allocated_cb) + + *(hsize_t *)nbytes += chunk_rec->nbytes; + + FUNC_LEAVE_NOAPI(H5_ITER_CONT) +} /* H5D_chunk_allocated_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_allocated + * + * Purpose: Return the number of bytes allocated in the file for storage + * of raw data in the chunked dataset + * + * Return: Success: Number of bytes stored in all chunks. + * Failure: 0 + * + * Programmer: Quincey Koziol + * Tuesday, May 20, 2008 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */ + H5D_rdcc_ent_t *ent; /* Cache entry */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + hsize_t chunk_bytes = 0; /* Number of bytes allocated for chunks */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_allocated, FAIL) + + HDassert(dset); + + /* Fill the DXPL cache values for later use */ + if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Search for cached chunks that haven't been written out */ + for(ent = rdcc->head; ent; ent = ent->next) { + /* Flush the chunk out to disk, to make certain the size is correct 
later */ + if(H5D_chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, FALSE) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") + } /* end for */ + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Call the index-specific "get all the allocated chunks sizes" routine */ + if((dset->shared->layout.u.chunk.ops->iterate)(&idx_info, H5D_chunk_allocated_cb, &chunk_bytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve allocated chunk information from index") + + /* Set number of bytes for caller */ + *nbytes = chunk_bytes; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_allocated() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_allocate + * + * Purpose: Allocate file space for all chunks that are not allocated yet. + * Return SUCCEED if all needed allocation succeed, otherwise + * FAIL. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Albert Cheng + * June 26, 1998 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + const H5D_chunk_ops_t *ops = dset->shared->layout.u.chunk.ops; /* Chunk operations */ + hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ + size_t orig_chunk_size; /* Original size of chunk in bytes */ + unsigned filter_mask = 0; /* Filter mask for chunks that have them */ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ + const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ + const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ + H5D_fill_value_t fill_status; /* The fill value status */ + hbool_t should_fill = FALSE; /* Whether fill values should be written */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ +#ifdef H5_HAVE_PARALLEL + MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ + int mpi_rank = (-1); /* This process's rank */ + int mpi_code; /* MPI return code */ + hbool_t blocks_written = FALSE; /* Flag to indicate that chunk was actually written */ + hbool_t using_mpi = FALSE; /* Flag to indicate that the file is being accessed with an MPI-capable file driver */ +#endif /* H5_HAVE_PARALLEL */ + hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ + int space_ndims; /* Dataset's space rank */ + hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */ + H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ + hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ + hid_t data_dxpl_id; /* DXPL ID to use 
for raw data I/O operations */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_allocate, FAIL) + + /* Check args */ + HDassert(dset && H5D_CHUNKED == layout->type); + HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + HDassert(H5F_addr_defined(layout->u.chunk.addr)); + HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER)); + + /* Retrieve the dataset dimensions */ + if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info") + space_dim[space_ndims] = layout->u.chunk.dim[space_ndims]; + +#ifdef H5_HAVE_PARALLEL + /* Retrieve MPI parameters */ + if(IS_H5FD_MPI(dset->oloc.file)) { + /* Get the MPI communicator */ + if(MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(dset->oloc.file))) + HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator") + + /* Get the MPI rank */ + if((mpi_rank = H5F_mpi_get_rank(dset->oloc.file)) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank") + + /* Set the MPI-capable file driver flag */ + using_mpi = TRUE; + + /* Use the internal "independent" DXPL */ + data_dxpl_id = H5AC_ind_dxpl_id; + } /* end if */ + else { +#endif /* H5_HAVE_PARALLEL */ + /* Use the DXPL we were given */ + data_dxpl_id = dxpl_id; +#ifdef H5_HAVE_PARALLEL + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + + /* Fill the DXPL cache values for later use */ + if(H5D_get_dxpl_cache(data_dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Get original chunk size */ + H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t); + + /* Check the dataset's fill-value status */ + if(H5P_is_fill_value_defined(fill, &fill_status) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") + + /* If we are filling the dataset on allocation or "if set" and + * the 
fill value _is_ set, _and_ we are not overwriting the new blocks, + * or if there are any pipeline filters defined, + * set the "should fill" flag + */ + if((!full_overwrite && (fill->fill_time == H5D_FILL_TIME_ALLOC || + (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED))) + || pline->nused > 0) + should_fill = TRUE; + + /* Check if fill values should be written to chunks */ + if(should_fill) { + /* Initialize the fill value buffer */ + /* (delay allocating fill buffer for VL datatypes until refilling) */ + /* (casting away const OK - QAK) */ + if(H5D_fill_init(&fb_info, NULL, (hbool_t)(pline->nused > 0), + (H5MM_allocate_t)H5D_chunk_alloc, (void *)pline, + (H5MM_free_t)H5D_chunk_xfree, (void *)pline, + &dset->shared->dcpl_cache.fill, dset->shared->type, + dset->shared->type_id, (size_t)0, orig_chunk_size, data_dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") + fb_info_init = TRUE; + + /* Check if there are filters which need to be applied to the chunk */ + /* (only do this in advance when the chunk info can be re-used (i.e. 
+ * it doesn't contain any non-default VL datatype fill values) + */ + if(!fb_info.has_vlen_fill_type && pline->nused > 0) { + size_t buf_size = orig_chunk_size; + + /* Push the chunk through the filters */ + if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") + } /* end if */ + } /* end if */ + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Reset the chunk offset indices */ + HDmemset(chunk_offset, 0, (layout->u.chunk.ndims * sizeof(chunk_offset[0]))); + + /* Loop over all chunks */ + carry = FALSE; + while(!carry) { + haddr_t chunk_addr; /* Address of chunk */ + int i; /* Local index variable */ + + /* Get the chunk's address */ + chunk_addr = H5D_chunk_get_addr(dset, dxpl_id, chunk_offset, NULL); + + /* Check if the chunk exists yet on disk */ + if(!H5F_addr_defined(chunk_addr)) { + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */ + H5D_rdcc_ent_t *ent; /* Cache entry */ + hbool_t chunk_exists; /* Flag to indicate whether a chunk exists already */ + unsigned u; /* Local index variable */ + + /* Didn't find the chunk on disk */ + chunk_exists = FALSE; + + /* Look for chunk in cache */ + for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) { + /* Assume a match */ + chunk_exists = TRUE; + for(u = 0; u < layout->u.chunk.ndims; u++) + if(ent->offset[u] != chunk_offset[u]) { + chunk_exists = FALSE; /* Reset if no match */ + break; + } /* end if */ + } /* end for */ + + /* Chunk wasn't in cache either, create it now */ + if(!chunk_exists) { + H5D_chunk_ud_t udata; /* B-tree pass-through for creating chunk */ + size_t chunk_size; /* Size of chunk in bytes, possibly filtered */ + + /* Check for VL datatype & non-default fill value */ + if(fb_info_init && 
fb_info.has_vlen_fill_type) { + /* Sanity check */ + HDassert(should_fill); + + /* Fill the buffer with VL datatype fill values */ + if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") + + /* Check if there are filters which need to be applied to the chunk */ + if(pline->nused > 0) { + size_t buf_size = orig_chunk_size; + size_t nbytes = fb_info.fill_buf_size; + + /* Push the chunk through the filters */ + if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") + + /* Keep the number of bytes the chunk turned in to */ + chunk_size = nbytes; + } /* end if */ + else + H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); + } /* end if */ + else + chunk_size = orig_chunk_size; + + /* Initialize the chunk information */ + udata.common.mesg = layout; + udata.common.offset = chunk_offset; + H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t); + udata.filter_mask = filter_mask; + udata.addr = HADDR_UNDEF; + + /* Allocate the chunk with all processes */ + if((ops->insert)(&idx_info, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert record into chunk index") + HDassert(H5F_addr_defined(udata.addr)); + + /* Check if fill values should be written to chunks */ + if(should_fill) { + /* Sanity check */ + HDassert(fb_info_init); + HDassert(udata.nbytes == chunk_size); + +#ifdef H5_HAVE_PARALLEL + /* Check if this file is accessed with an MPI-capable file driver */ + if(using_mpi) { + /* Write the chunks out from only one process */ + /* !! Use the internal "independent" DXPL!! 
-QAK */ + if(H5_PAR_META_WRITE == mpi_rank) + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") + + /* Indicate that blocks are being written */ + blocks_written = TRUE; + } /* end if */ + else { +#endif /* H5_HAVE_PARALLEL */ + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") +#ifdef H5_HAVE_PARALLEL + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + } /* end if */ + + /* Release the fill buffer if we need to re-allocate it each time */ + if(fb_info_init && fb_info.has_vlen_fill_type && pline->nused > 0) + H5D_fill_release(&fb_info); + } /* end if */ + } /* end if */ + + /* Increment indices */ + carry = TRUE; + for(i = (int)(space_ndims - 1); i >= 0; --i) { + chunk_offset[i] += layout->u.chunk.dim[i]; + if(chunk_offset[i] >= space_dim[i]) + chunk_offset[i] = 0; + else { + carry = FALSE; + break; + } /* end else */ + } /* end for */ + } /* end while */ + +#ifdef H5_HAVE_PARALLEL + /* Only need to block at the barrier if we actually initialized a chunk */ + /* using an MPI-capable file driver */ + if(using_mpi && blocks_written) { + /* Wait at barrier to avoid race conditions where some processes are + * still writing out chunks and other processes race ahead to read + * them in, getting bogus data. 
+ */ + if(MPI_SUCCESS != (mpi_code = MPI_Barrier(mpi_comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code) + } /* end if */ +#endif /* H5_HAVE_PARALLEL */ + + /* Reset any cached chunk info for this dataset */ + H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last); + +done: + /* Release the fill buffer info, if it's been initialized */ + if(fb_info_init && H5D_fill_term(&fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_allocate() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_prune_cb + * + * Purpose: Search for chunks that are no longer inside the pruned + * dataset's extent + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu + * March 26, 2002 + * + *------------------------------------------------------------------------- + */ +/* ARGSUSED */ +static int +H5D_chunk_prune_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + H5D_chunk_it_ud1_t *udata = (H5D_chunk_it_ud1_t *)_udata; /* User data */ + H5D_chunk_sl_ck_t *sl_node = NULL; /* Skip list node for chunk to remove */ + unsigned rank; /* Current # of dimensions */ + hbool_t should_delete = FALSE; /* Whether the chunk should be deleted */ + hbool_t needs_fill = FALSE; /* Whether the chunk overlaps the new extent and needs fill valiues */ + unsigned u; /* Local index variable */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_prune_cb) + + /* Figure out what chunks are no longer in use for the specified extent and release them */ + rank = udata->common.mesg->u.chunk.ndims - 1; + for(u = 0; u < rank; u++) + /* The chunk record points to a chunk of storage that contains the + * beginning of the logical address space represented by UDATA. 
+ */ + if(chunk_rec->offset[u] >= udata->dims[u]) { + /* Indicate that the chunk will be deleted */ + should_delete = TRUE; + + /* Break out of loop, we know the chunk is outside the current dimensions */ + break; + } /* end if */ + /* Check for chunk that overlaps new extent and will need fill values */ + else if((chunk_rec->offset[u] + udata->common.mesg->u.chunk.dim[u]) > udata->dims[u]) + /* Indicate that the chunk needs filling */ + /* (but continue in loop, since it could be outside the extent in + * another dimension -QAK) + */ + needs_fill = TRUE; + + /* Check for chunk to delete */ + if(should_delete) { + /* Allocate space for the shared structure */ + if(NULL == (sl_node = H5FL_MALLOC(H5D_chunk_sl_ck_t))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for shared B-tree info") + + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank, chunk_rec->offset, udata->common.mesg->u.chunk.dim, udata->down_chunks, &sl_node->index) < 0) + HGOTO_ERROR(H5E_IO, H5E_BADRANGE, H5_ITER_ERROR, "can't get chunk index") + + /* Store the key for the chunk */ + sl_node->rec = *chunk_rec; + + /* Insert the chunk description in the skip list */ + if(H5SL_insert(udata->outside, sl_node, &sl_node->index) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTINSERT, H5_ITER_ERROR, "can't insert chunk into skip list") + } /* end if */ + /* Check for chunk that overlaps the new dataset dimensions and needs filling */ + else if(needs_fill) { + const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */ + H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ + H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */ + hssize_t sel_nelmts; /* Number of elements in selection */ + hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */ + void *chunk; /* The file chunk */ + unsigned idx_hint; /* Which chunk we're dealing with */ + 
H5D_chunk_ud_t chk_udata; /* User data for locking chunk */ + uint32_t bytes_accessed; /* Bytes accessed in chunk */ + + /* Initialize the fill value buffer, if necessary */ + if(!udata->fb_info_init) { + H5_CHECK_OVERFLOW(udata->elmts_per_chunk, uint32_t, size_t); + if(H5D_fill_init(&udata->fb_info, NULL, FALSE, NULL, NULL, NULL, NULL, + &dset->shared->dcpl_cache.fill, + dset->shared->type, dset->shared->type_id, (size_t)udata->elmts_per_chunk, + io_info->dxpl_cache->max_temp_buf, io_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5_ITER_ERROR, "can't initialize fill buffer info") + udata->fb_info_init = TRUE; + } /* end if */ + + /* Compute the # of elements to leave with existing value, in each dimension */ + for(u = 0; u < rank; u++) { + count[u] = MIN(layout->u.chunk.dim[u], (udata->dims[u] - chunk_rec->offset[u])); + HDassert(count[u] > 0); + } /* end for */ + + /* Select all elements in chunk, to begin with */ + if(H5S_select_all(udata->chunk_space, TRUE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, H5_ITER_ERROR, "unable to select space") + + /* "Subtract out" the elements to keep */ + if(H5S_select_hyperslab(udata->chunk_space, H5S_SELECT_NOTB, udata->hyper_start, NULL, count, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, H5_ITER_ERROR, "unable to select hyperslab") + + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank, chunk_rec->offset, layout->u.chunk.dim, udata->down_chunks, &io_info->store->chunk.index) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5_ITER_ERROR, "can't get chunk index") + + /* Lock the chunk into the cache, to get a pointer to the chunk buffer */ + /* (Casting away const OK -QAK) */ + io_info->store->chunk.offset = (hsize_t *)chunk_rec->offset; + chk_udata.common.mesg = layout; + chk_udata.common.offset = chunk_rec->offset; + chk_udata.nbytes = chunk_rec->nbytes; + chk_udata.filter_mask = chunk_rec->filter_mask; + chk_udata.addr = chunk_rec->chunk_addr; + if(NULL == (chunk = (void 
*)H5D_chunk_lock(udata->io_info, &chk_udata, FALSE, &idx_hint))) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, H5_ITER_ERROR, "unable to lock raw data chunk") + + + /* Fill the selection in the memory buffer */ + /* Use the size of the elements in the chunk directly instead of */ + /* relying on the fill.size, which might be set to 0 if there is */ + /* no fill-value defined for the dataset -QAK */ + + /* Get the number of elements in the selection */ + sel_nelmts = H5S_GET_SELECT_NPOINTS(udata->chunk_space); + HDassert(sel_nelmts >= 0); + H5_CHECK_OVERFLOW(sel_nelmts, hssize_t, size_t); + + /* Check for VL datatype & non-default fill value */ + if(udata->fb_info.has_vlen_fill_type) + /* Re-fill the buffer to use for this I/O operation */ + if(H5D_fill_refill_vl(&udata->fb_info, (size_t)sel_nelmts, io_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, H5_ITER_ERROR, "can't refill fill value buffer") + + /* Create a selection iterator for scattering the elements to memory buffer */ + if(H5S_select_iter_init(&chunk_iter, udata->chunk_space, layout->u.chunk.dim[rank]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, H5_ITER_ERROR, "unable to initialize chunk selection information") + + /* Scatter the data into memory */ + if(H5D_scatter_mem(udata->fb_info.fill_buf, udata->chunk_space, &chunk_iter, (size_t)sel_nelmts, io_info->dxpl_cache, chunk/*out*/) < 0) { + H5S_SELECT_ITER_RELEASE(&chunk_iter); + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, H5_ITER_ERROR, "scatter failed") + } /* end if */ + + /* Release the selection iterator */ + if(H5S_SELECT_ITER_RELEASE(&chunk_iter) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "Can't release selection iterator") + + + /* The number of bytes accessed in the chunk */ + /* (i.e. 
the bytes replaced with fill values) */ + bytes_accessed = sel_nelmts * layout->u.chunk.dim[rank]; + + /* Release lock on chunk */ + if(H5D_chunk_unlock(io_info, TRUE, idx_hint, chunk, bytes_accessed) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, H5_ITER_ERROR, "unable to unlock raw data chunk") + } /* end else-if */ + +done: + if(ret_value != H5_ITER_CONT && sl_node) + H5FL_FREE(H5D_chunk_sl_ck_t, sl_node); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_prune_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_prune_sl_rm_cb + * + * Purpose: Destroy a skip list node for "pruning" chunks, also removes + * the chunk from the index. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol, koziol@hdfgroup.org + * May 3, 2007 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D_chunk_prune_sl_rm_cb(void *item, void UNUSED *key, void *op_data) +{ + H5D_chunk_sl_ck_t *sl_node = (H5D_chunk_sl_ck_t *)item; /* Temporary pointer to chunk to remove */ + H5D_chunk_sl_rm_t *rm_info = (H5D_chunk_sl_rm_t *)op_data; /* Information needed for removing chunk from B-tree */ + H5D_chunk_common_ud_t idx_udata; /* User data for index removal routine */ + herr_t ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_prune_sl_rm_cb) + + /* Sanity checks */ + HDassert(sl_node); + HDassert(rm_info); + + /* Initialize the user data for the index callback */ + idx_udata.mesg = rm_info->mesg; + idx_udata.offset = sl_node->rec.offset; + + /* Remove */ + if((rm_info->idx_info->layout->u.chunk.ops->remove)(rm_info->idx_info, &idx_udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, H5_ITER_ERROR, "unable to remove chunk entry from index") + +done: + H5FL_FREE(H5D_chunk_sl_ck_t, sl_node); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D_chunk_prune_sl_rm_cb() */ + + 
+/*------------------------------------------------------------------------- + * Function: H5D_chunk_prune_by_extent + * + * Purpose: This function searches for chunks that are no longer necessary + * both in the raw data cache and in the chunk index. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu + * Algorithm: Robb Matzke + * March 27, 2002 + * + * The algorithm is: + * + * For chunks that are no longer necessary: + * + * 1. Search in the raw data cache for each chunk + * 2. If found then preempt it from the cache + * 3. Search in the B-tree for each chunk + * 4. If found then remove it from the B-tree and deallocate file storage for the chunk + * + * This example shows a 2d dataset of 90x90 with a chunk size of 20x20. + * + * + * 0 20 40 60 80 90 100 + * 0 +---------+---------+---------+---------+-----+...+ + * |:::::X:::::::::::::: : : | : + * |:::::::X:::::::::::: : : | : Key + * |::::::::::X::::::::: : : | : -------- + * |::::::::::::X::::::: : : | : +-+ Dataset + * 20+::::::::::::::::::::.........:.........:.....+...: | | Extent + * | :::::X::::: : : | : +-+ + * | ::::::::::: : : | : + * | ::::::::::: : : | : ... Chunk + * | :::::::X::: : : | : : : Boundary + * 40+.........:::::::::::.........:.........:.....+...: :.: + * | : : : : | : + * | : : : : | : ... 
Allocated + * | : : : : | : ::: & Filled + * | : : : : | : ::: Chunk + * 60+.........:.........:.........:.........:.....+...: + * | : :::::::X::: : | : X Element + * | : ::::::::::: : | : Written + * | : ::::::::::: : | : + * | : ::::::::::: : | : + * 80+.........:.........:::::::::::.........:.....+...: O Fill Val + * | : : ::::::::::: | : Explicitly + * | : : ::::::X:::: | : Written + * 90+---------+---------+---------+---------+-----+ : + * : : : ::::::::::: : + * 100:.........:.........:.........:::::::::::.........: + * + * + * We have 25 total chunks for this dataset, 5 of which have space + * allocated in the file because they were written to one or more + * elements. These five chunks (and only these five) also have entries in + * the storage B-tree for this dataset. + * + * Now lets say we want to shrink the dataset down to 70x70: + * + * + * 0 20 40 60 70 80 90 100 + * 0 +---------+---------+---------+----+----+-----+...+ + * |:::::X:::::::::::::: : | : | : + * |:::::::X:::::::::::: : | : | : Key + * |::::::::::X::::::::: : | : | : -------- + * |::::::::::::X::::::: : | : | : +-+ Dataset + * 20+::::::::::::::::::::.........:....+....:.....|...: | | Extent + * | :::::X::::: : | : | : +-+ + * | ::::::::::: : | : | : + * | ::::::::::: : | : | : ... Chunk + * | :::::::X::: : | : | : : : Boundary + * 40+.........:::::::::::.........:....+....:.....|...: :.: + * | : : : | : | : + * | : : : | : | : ... 
Allocated + * | : : : | : | : ::: & Filled + * | : : : | : | : ::: Chunk + * 60+.........:.........:.........:....+....:.....|...: + * | : :::::::X::: | : | : X Element + * | : ::::::::::: | : | : Written + * +---------+---------+---------+----+ : | : + * | : ::::::::::: : | : + * 80+.........:.........:::::::::X:.........:.....|...: O Fill Val + * | : : ::::::::::: | : Explicitly + * | : : ::::::X:::: | : Written + * 90+---------+---------+---------+---------+-----+ : + * : : : ::::::::::: : + * 100:.........:.........:.........:::::::::::.........: + * + * + * That means that the nine chunks along the bottom and right side should + * no longer exist. Of those nine chunks, (0,80), (20,80), (40,80), + * (60,80), (80,80), (80,60), (80,40), (80,20), and (80,0), one is actually allocated + * that needs to be released. + * To release the chunks, we traverse the B-tree to obtain a list of unused + * allocated chunks, and then call H5B_remove() for each chunk. + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_io_info_t chk_io_info; /* Chunked I/O info object */ + H5D_storage_t chk_store; /* Chunk storage information */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Cache entries */ + hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */ + H5D_chunk_it_ud1_t udata; /* Chunk index iterator user data */ + hbool_t udata_init = FALSE; /* Whether the chunk index iterator user data has been initialized */ + H5D_chunk_sl_rm_t rm_info; /* User data for skip list 
destroy callback */ + H5S_t *chunk_space = NULL; /* Dataspace for a chunk */ + hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ + hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */ + uint32_t elmts_per_chunk; /* Elements in chunk */ + unsigned rank; /* Current # of dimensions */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_prune_by_extent, FAIL) + + /* Check args */ + HDassert(dset && H5D_CHUNKED == layout->type); + HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + HDassert(H5F_addr_defined(layout->u.chunk.addr)); + HDassert(dxpl_cache); + + /* Fill the DXPL cache values for later use */ + if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Go get the rank & dimensions (including the element size) */ + rank = layout->u.chunk.ndims - 1; + if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") + curr_dims[rank] = layout->u.chunk.dim[rank]; + + /*------------------------------------------------------------------------- + * Figure out what chunks are no longer in use for the specified extent + * and release them from the linked list raw data cache + *------------------------------------------------------------------------- + */ + for(ent = rdcc->head; ent; ent = next) { + /* Get pointer to next extry in cache, in case this one is evicted */ + next = ent->next; + + /* Check for chunk offset outside of new dimensions */ + for(u = 0; u < rank; u++) + if((hsize_t)ent->offset[u] > curr_dims[u]) { + /* Evict the entry from the cache, but do not flush it to disk */ + 
if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, FALSE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") + + /* Break out of loop, chunk is evicted */ + break; + } /* end if */ + } /* end for */ + + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + /* (also compute the number of elements per chunk) */ + /* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */ + elmts_per_chunk = 1; + for(u = 0; u < rank; u++) { + chunks[u] = ((old_dims[u] + layout->u.chunk.dim[u]) - 1) / layout->u.chunk.dim[u]; + elmts_per_chunk *= layout->u.chunk.dim[u]; + chunk_dims[u] = layout->u.chunk.dim[u]; + } /* end for */ + + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank, chunks, down_chunks) < 0) + HGOTO_ERROR(H5E_IO, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + + /* Create a data space for a chunk & set the extent */ + if(NULL == (chunk_space = H5S_create_simple(rank, chunk_dims, NULL))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") + + /* Reset hyperslab start array */ + /* (hyperslabs will always start from origin) */ + HDmemset(hyper_start, 0, sizeof(hyper_start)); + + /* Set up chunked I/O info object, for operations on chunks (in callback) */ + /* (Casting away const OK -QAK) */ + H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL); + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Initialize the user data for the iteration */ + HDmemset(&udata, 0, sizeof udata); + udata.common.mesg = layout; + udata.io_info = &chk_io_info; + udata.idx_info = &idx_info; + udata.dims = curr_dims; + udata.down_chunks = down_chunks; + udata.elmts_per_chunk = elmts_per_chunk; + udata.chunk_space = chunk_space; + udata.hyper_start = hyper_start; + udata_init = TRUE; + + /* Initialize the skip list that will hold 
the chunks outside the dimensions */ + if(NULL == (udata.outside = H5SL_create(H5SL_TYPE_HSIZE, 0.5, (size_t)H5D_CHUNK_DEFAULT_SKIPLIST_HEIGHT))) + HGOTO_ERROR(H5E_IO, H5E_CANTCREATE, FAIL, "can't create skip list for chunks outside new dimensions") + + /* Iterate over the chunks */ + if((dset->shared->layout.u.chunk.ops->iterate)(&idx_info, H5D_chunk_prune_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve prune chunks from index") + + /* Set up user data for skip list callback */ + rm_info.idx_info = &idx_info; + rm_info.mesg = layout; + + /* Destroy the skip list, deleting the chunks in the callback */ + H5SL_destroy(udata.outside, H5D_chunk_prune_sl_rm_cb, &rm_info); + + /* Reset any cached chunk info for this dataset */ + H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last); + +done: + /* Release resources */ + if(chunk_space && H5S_close(chunk_space) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace") + if(udata_init) { + if(udata.fb_info_init && H5D_fill_term(&udata.fb_info) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_prune_by_extent() */ + +#ifdef H5_HAVE_PARALLEL + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_addrmap_cb + * + * Purpose: Callback when obtaining the chunk addresses for all existing chunks + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Kent Yang + * Tuesday, November 15, 2005 + * + *------------------------------------------------------------------------- + */ +static int +H5D_chunk_addrmap_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + H5D_chunk_it_ud2_t *udata = (H5D_chunk_it_ud2_t *)_udata; /* User data for callback */ + unsigned rank = udata->common.mesg->u.chunk.ndims - 1; /* # of dimensions of dataset */ + hsize_t chunk_index; + int ret_value = H5_ITER_CONT; 
/* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_addrmap_cb) + + /* Compute the index for this chunk */ + if(H5V_chunk_index(rank, chunk_rec->offset, udata->common.mesg->u.chunk.dim, udata->down_chunks, &chunk_index) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, H5_ITER_ERROR, "can't get chunk index") + + /* Set it in the userdata to return */ + udata->chunk_addr[chunk_index] = chunk_rec->chunk_addr; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D_chunk_addrmap_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_addrmap + * + * Purpose: Obtain the chunk addresses for all existing chunks + * + * Return: Success: Non-negative on succeed. + * Failure: negative value + * + * Programmer: Kent Yang + * November 15, 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[], + const hsize_t down_chunks[]) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_t *dset = io_info->dset; /* Local pointer to dataset info */ + H5D_chunk_it_ud2_t udata; /* User data for iteration callback */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_addrmap, FAIL) + + HDassert(dset); + HDassert(chunk_addr); + HDassert(down_chunks); + + /* Set up user data for B-tree callback */ + HDmemset(&udata, 0, sizeof(udata)); + udata.common.mesg = &dset->shared->layout; + udata.down_chunks = down_chunks; + udata.chunk_addr = chunk_addr; + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = io_info->dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Iterate over chunks to build mapping of chunk addresses */ + if((dset->shared->layout.u.chunk.ops->iterate)(&idx_info, H5D_chunk_addrmap_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to iterate over chunk index to build address map") + +done: + 
FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_addrmap() */ +#endif /* H5_HAVE_PARALLEL */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_delete + * + * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Thursday, March 20, 2003 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_delete, FAIL) + + /* Compose chunked index info struct */ + idx_info.f = f; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = layout; + + /* Check if the index has been created in the file */ + if(H5F_addr_defined(layout->u.chunk.addr)) { + /* Delete the chunked storage information in the file */ + if((layout->u.chunk.ops->delete)(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk index") + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_delete() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_update_cache + * + * Purpose: Update any cached chunks index values after the dataspace + * size has changed + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, May 29, 2004 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_update_cache(H5D_t *dset, hid_t dxpl_id) +{ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + H5D_rdcc_ent_t *ent, *next; /*cache entry */ + H5D_rdcc_ent_t *old_ent; /* Old cache entry */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = 
&_dxpl_cache; /* Data transfer property cache */ + unsigned rank; /*current # of dimensions */ + hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ + unsigned u; /*counters */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_update_cache, FAIL) + + /* Check args */ + HDassert(dset && H5D_CHUNKED == dset->shared->layout.type); + HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); + + /* Get the rank */ + rank = dset->shared->layout.u.chunk.ndims-1; + HDassert(rank > 0); + + /* 1-D dataset's chunks can't have their index change */ + if(rank == 1) + HGOTO_DONE(SUCCEED) + + /* Go get the dimensions */ + if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") + + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + for(u = 0; u < rank; u++) + chunks[u] = ((curr_dims[u] + dset->shared->layout.u.chunk.dim[u]) - 1) / dset->shared->layout.u.chunk.dim[u]; + + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank, chunks, down_chunks) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + + /* Fill the DXPL cache values for later use */ + if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Recompute the index for each cached chunk that is in a dataset */ + for(ent = rdcc->head; ent; ent = next) { + hsize_t idx; /* Chunk index */ + unsigned old_idx; /* Previous index number */ + + /* Get the pointer to the next cache entry */ + next = ent->next; + + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank, ent->offset, 
dset->shared->layout.u.chunk.dim, down_chunks, &idx) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") + + /* Compute the index for the chunk entry */ + old_idx = ent->idx; /* Save for later */ + ent->idx = H5D_CHUNK_HASH(dset->shared, idx); + + if(old_idx != ent->idx) { + /* Check if there is already a chunk at this chunk's new location */ + old_ent = rdcc->slot[ent->idx]; + if(old_ent != NULL) { + HDassert(old_ent->locked == 0); + + /* Check if we are removing the entry we would walk to next */ + if(old_ent == next) + next = old_ent->next; + + /* Remove the old entry from the cache */ + if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, old_ent, TRUE) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") + } /* end if */ + + /* Insert this chunk into correct location in hash table */ + rdcc->slot[ent->idx] = ent; + + /* Null out previous location */ + rdcc->slot[old_idx] = NULL; + } /* end if */ + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_update_cache() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_copy_cb + * + * Purpose: Copy chunked raw data from source file and insert to the + * index in the destination file + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Peter Cao + * August 20, 2005 + * + *------------------------------------------------------------------------- + */ +static int +H5D_chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + H5D_chunk_it_ud3_t *udata = (H5D_chunk_it_ud3_t *)_udata; /* User data for callback */ + H5D_chunk_ud_t udata_dst; /* User data about new destination chunk */ + hbool_t is_vlen = FALSE; /* Whether datatype is variable-length */ + hbool_t fix_ref = FALSE; /* Whether to fix up references in the dest. 
file */ + + /* General information about chunk copy */ + void *bkg = udata->bkg; /* Background buffer for datatype conversion */ + void *buf = udata->buf; /* Chunk buffer for I/O & datatype conversions */ + size_t buf_size = udata->buf_size; /* Size of chunk buffer */ + H5O_pline_t *pline = udata->pline; /* I/O pipeline for applying filters */ + + /* needed for commpressed variable length data */ + hbool_t has_filters = FALSE; /* Whether chunk has filters */ + size_t nbytes; /* Size of chunk in file (in bytes) */ + H5Z_cb_t cb_struct; /* Filter failure callback struct */ + + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_copy_cb) + + /* Get 'size_t' local value for number of bytes in chunk */ + H5_ASSIGN_OVERFLOW(nbytes, chunk_rec->nbytes, uint32_t, size_t); + + /* Check parameter for type conversion */ + if(udata->do_convert) { + if(H5T_detect_class(udata->dt_src, H5T_VLEN) > 0) + is_vlen = TRUE; + else if((H5T_get_class(udata->dt_src, FALSE) == H5T_REFERENCE) && (udata->file_src != udata->idx_info_dst->f)) + fix_ref = TRUE; + else + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy dataset elements") + } /* end if */ + + /* Check for filtered chunks */ + if(pline && pline->nused) { + has_filters = TRUE; + cb_struct.func = NULL; /* no callback function when failed */ + } /* end if */ + + /* Resize the buf if it is too small to hold the data */ + if(nbytes > buf_size) { + void *new_buf; /* New buffer for data */ + + /* Re-allocate memory for copying the chunk */ + if(NULL == (new_buf = H5MM_realloc(udata->buf, nbytes))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for raw data chunk") + udata->buf = new_buf; + if(udata->bkg) { + if(NULL == (new_buf = H5MM_realloc(udata->bkg, nbytes))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for raw data chunk") + udata->bkg = new_buf; + if(!udata->cpy_info->expand_ref) + HDmemset((uint8_t *)udata->bkg + 
buf_size, 0, (size_t)(nbytes - buf_size)); + + bkg = udata->bkg; + } /* end if */ + + buf = udata->buf; + udata->buf_size = buf_size = nbytes; + } /* end if */ + + /* read chunk data from the source file */ + if(H5F_block_read(udata->file_src, H5FD_MEM_DRAW, chunk_rec->chunk_addr, nbytes, udata->idx_info_dst->dxpl_id, buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk") + + /* Need to uncompress variable-length & reference data elements */ + if(has_filters && (is_vlen || fix_ref)) { + unsigned filter_mask = chunk_rec->filter_mask; + + if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed") + } /* end if */ + + /* Perform datatype conversion, if necessary */ + if(is_vlen) { + H5T_path_t *tpath_src_mem = udata->tpath_src_mem; + H5T_path_t *tpath_mem_dst = udata->tpath_mem_dst; + H5S_t *buf_space = udata->buf_space; + hid_t tid_src = udata->tid_src; + hid_t tid_dst = udata->tid_dst; + hid_t tid_mem = udata->tid_mem; + void *reclaim_buf = udata->reclaim_buf; + size_t reclaim_buf_size = udata->reclaim_buf_size; + + /* Convert from source file to memory */ + H5_CHECK_OVERFLOW(udata->nelmts, uint32_t, size_t); + if(H5T_convert(tpath_src_mem, tid_src, tid_mem, (size_t)udata->nelmts, (size_t)0, (size_t)0, buf, NULL, udata->idx_info_dst->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed") + + /* Copy into another buffer, to reclaim memory later */ + HDmemcpy(reclaim_buf, buf, reclaim_buf_size); + + /* Set background buffer to all zeros */ + HDmemset(bkg, 0, buf_size); + + /* Convert from memory to destination file */ + if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, udata->nelmts, (size_t)0, (size_t)0, buf, bkg, udata->idx_info_dst->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed") + + /* Reclaim 
space from variable length data */ + if(H5D_vlen_reclaim(tid_mem, buf_space, H5P_DATASET_XFER_DEFAULT, reclaim_buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, H5_ITER_ERROR, "unable to reclaim variable-length data") + } /* end if */ + else if(fix_ref) { + /* Check for expanding references */ + /* (background buffer has already been zeroed out, if not expanding) */ + if(udata->cpy_info->expand_ref) { + size_t ref_count; + + /* Determine # of reference elements to copy */ + ref_count = nbytes / H5T_get_size(udata->dt_src); + + /* Copy the reference elements */ + if(H5O_copy_expand_ref(udata->file_src, buf, udata->idx_info_dst->dxpl_id, udata->idx_info_dst->f, bkg, ref_count, H5T_get_ref_type(udata->dt_src), udata->cpy_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy reference attribute") + } /* end if */ + + /* After fix ref, copy the new reference elements to the buffer to write out */ + HDmemcpy(buf, bkg, buf_size); + } /* end if */ + + /* Set up destination chunk callback information for insertion */ + udata_dst.common.mesg = udata->idx_info_dst->layout; + udata_dst.common.offset = chunk_rec->offset; + udata_dst.nbytes = chunk_rec->nbytes; + udata_dst.filter_mask = chunk_rec->filter_mask; + udata_dst.addr = HADDR_UNDEF; + + /* Need to compress variable-length & reference data elements before writing to file */ + if(has_filters && (is_vlen || fix_ref) ) { + if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed") + H5_ASSIGN_OVERFLOW(udata_dst.nbytes, nbytes, size_t, uint32_t); + udata->buf = buf; + udata->buf_size = buf_size; + } /* end if */ + + /* Insert chunk into the destination index */ + if((udata->idx_info_dst->layout->u.chunk.ops->insert)(udata->idx_info_dst, &udata_dst) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk into index") + + /* Write chunk data to destination 
file */ + HDassert(H5F_addr_defined(udata_dst.addr)); + if(H5F_block_write(udata->idx_info_dst->f, H5FD_MEM_DRAW, udata_dst.addr, nbytes, udata->idx_info_dst->dxpl_id, buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write raw data to file") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_copy_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_copy + * + * Purpose: Copy chunked storage from SRC file to DST file. + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Peter Cao + * August 20, 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst, + H5O_layout_t *layout_dst, H5T_t *dt_src, H5O_copy_t *cpy_info, + H5O_pline_t *pline, hid_t dxpl_id) +{ + H5D_chunk_it_ud3_t udata; /* User data for iteration callback */ + H5D_chk_idx_info_t idx_info_dst; /* Dest. 
chunked index info */ + H5D_chk_idx_info_t idx_info_src; /* Source chunked index info */ + H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */ + hid_t tid_src = -1; /* Datatype ID for source datatype */ + hid_t tid_dst = -1; /* Datatype ID for destination datatype */ + hid_t tid_mem = -1; /* Datatype ID for memory datatype */ + size_t buf_size; /* Size of copy buffer */ + size_t reclaim_buf_size; /* Size of reclaim buffer */ + void *buf = NULL; /* Buffer for copying data */ + void *bkg = NULL; /* Buffer for background during type conversion */ + void *reclaim_buf = NULL; /* Buffer for reclaiming data */ + H5S_t *buf_space = NULL; /* Dataspace describing buffer */ + hid_t sid_buf = -1; /* ID for buffer dataspace */ + uint32_t nelmts = 0; /* Number of elements in buffer */ + hbool_t do_convert = FALSE; /* Indicate that type conversions should be performed */ + hbool_t copy_setup_done = FALSE; /* Indicate that 'copy setup' is done */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_copy, FAIL) + + /* Check args */ + HDassert(f_src); + HDassert(f_dst); + HDassert(layout_src && H5D_CHUNKED == layout_src->type); + HDassert(layout_dst && H5D_CHUNKED == layout_dst->type); + HDassert(dt_src); + + /* Compose source & dest chunked index info structs */ + idx_info_src.f = f_src; + idx_info_src.dxpl_id = dxpl_id; + idx_info_src.layout = layout_src; + + idx_info_dst.f = f_dst; + idx_info_dst.dxpl_id = dxpl_id; + idx_info_dst.layout = layout_dst; + + /* Call the index-specific "copy setup" routine */ + if((layout_src->u.chunk.ops->copy_setup)(&idx_info_src, &idx_info_dst) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up index-specific chunk copying information") + HDassert(H5F_addr_defined(layout_dst->u.chunk.addr)); + copy_setup_done = TRUE; + + /* Create datatype ID for src datatype */ + if((tid_src = H5I_register(H5I_DATATYPE, dt_src)) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, 
FAIL, "unable to register source file datatype") + + /* If there's a VLEN source datatype, set up type conversion information */ + if(H5T_detect_class(dt_src, H5T_VLEN) > 0) { + H5T_t *dt_dst; /* Destination datatype */ + H5T_t *dt_mem; /* Memory datatype */ + size_t mem_dt_size; /* Memory datatype size */ + size_t tmp_dt_size; /* Temp. datatype size */ + size_t max_dt_size; /* Max atatype size */ + hsize_t buf_dim; /* Dimension for buffer */ + unsigned u; + + /* create a memory copy of the variable-length datatype */ + if(NULL == (dt_mem = H5T_copy(dt_src, H5T_COPY_TRANSIENT))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy") + if((tid_mem = H5I_register(H5I_DATATYPE, dt_mem)) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register memory datatype") + + /* create variable-length datatype at the destinaton file */ + if(NULL == (dt_dst = H5T_copy(dt_src, H5T_COPY_TRANSIENT))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy") + if(H5T_set_loc(dt_dst, f_dst, H5T_LOC_DISK) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "cannot mark datatype on disk") + if((tid_dst = H5I_register(H5I_DATATYPE, dt_dst)) < 0) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register destination file datatype") + + /* Set up the conversion functions */ + if(NULL == (tpath_src_mem = H5T_path_find(dt_src, dt_mem, NULL, NULL, dxpl_id, FALSE))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and mem datatypes") + if(NULL == (tpath_mem_dst = H5T_path_find(dt_mem, dt_dst, NULL, NULL, dxpl_id, FALSE))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between mem and dst datatypes") + + /* Determine largest datatype size */ + if(0 == (max_dt_size = H5T_get_size(dt_src))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine datatype size") + if(0 == (mem_dt_size = H5T_get_size(dt_mem))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine 
datatype size") + max_dt_size = MAX(max_dt_size, mem_dt_size); + if(0 == (tmp_dt_size = H5T_get_size(dt_dst))) + HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine datatype size") + max_dt_size = MAX(max_dt_size, tmp_dt_size); + + /* Compute the number of elements per chunk */ + nelmts = 1; + for(u = 0; u < (layout_src->u.chunk.ndims - 1); u++) + nelmts *= layout_src->u.chunk.dim[u]; + + /* Create the space and set the initial extent */ + buf_dim = nelmts; + if(NULL == (buf_space = H5S_create_simple((unsigned)1, &buf_dim, NULL))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") + + /* Atomize */ + if((sid_buf = H5I_register(H5I_DATASPACE, buf_space)) < 0) { + H5S_close(buf_space); + HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID") + } /* end if */ + + /* Set initial buffer sizes */ + buf_size = nelmts * max_dt_size; + reclaim_buf_size = nelmts * mem_dt_size; + + /* Allocate memory for reclaim buf */ + if(NULL == (reclaim_buf = H5MM_malloc(reclaim_buf_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") + + /* Indicate that type conversion should be performed */ + do_convert = TRUE; + } /* end if */ + else { + if(H5T_get_class(dt_src, FALSE) == H5T_REFERENCE) { + /* Indicate that type conversion should be performed */ + do_convert = TRUE; + } /* end if */ + + H5_ASSIGN_OVERFLOW(buf_size, layout_src->u.chunk.size, uint32_t, size_t); + reclaim_buf_size = 0; + } /* end else */ + + /* Set up conversion buffer, if appropriate */ + if(do_convert) { + /* Allocate background memory for converting the chunk */ + if(NULL == (bkg = H5MM_malloc(buf_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") + + /* Check for reference datatype and no expanding references & clear background buffer */ + if(!cpy_info->expand_ref && + ((H5T_get_class(dt_src, FALSE) == H5T_REFERENCE) && (f_src != f_dst))) + /* 
Reset value to zero */ + HDmemset(bkg, 0, buf_size); + } /* end if */ + + /* Allocate memory for copying the chunk */ + if(NULL == (buf = H5MM_malloc(buf_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") + + /* Initialize the callback structure for the source */ + HDmemset(&udata, 0, sizeof udata); + udata.common.mesg = layout_src; + udata.file_src = f_src; + udata.idx_info_dst = &idx_info_dst; + udata.buf = buf; + udata.bkg = bkg; + udata.buf_size = buf_size; + udata.tid_src = tid_src; + udata.tid_mem = tid_mem; + udata.tid_dst = tid_dst; + udata.dt_src = dt_src; + udata.do_convert = do_convert; + udata.tpath_src_mem = tpath_src_mem; + udata.tpath_mem_dst = tpath_mem_dst; + udata.reclaim_buf = reclaim_buf; + udata.reclaim_buf_size = reclaim_buf_size; + udata.buf_space = buf_space; + udata.nelmts = nelmts; + udata.pline = pline; + udata.cpy_info = cpy_info; + + /* Iterate over chunks to copy data */ + if((layout_src->u.chunk.ops->iterate)(&idx_info_src, H5D_chunk_copy_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to copy data") + + /* I/O buffers may have been re-allocated */ + buf = udata.buf; + bkg = udata.bkg; + +done: + if(sid_buf > 0) + if(H5I_dec_ref(sid_buf) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary dataspace ID") + if(tid_src > 0) + if(H5I_dec_ref(tid_src) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") + if(tid_dst > 0) + if(H5I_dec_ref(tid_dst) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") + if(tid_mem > 0) + if(H5I_dec_ref(tid_mem) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") + if(buf) + H5MM_xfree(buf); + if(bkg) + H5MM_xfree(bkg); + if(reclaim_buf) + H5MM_xfree(reclaim_buf); + + /* Clean up any index information */ + if(copy_setup_done) + 
if((layout_src->u.chunk.ops->copy_shutdown)(layout_src, layout_dst) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to shut down index copying info") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_copy() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_bh_size + * + * Purpose: Retrieve the amount of index storage for chunked dataset + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * June 8, 2007 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_bh_info(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout, + hsize_t *index_size) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_bh_info, FAIL) + + /* Check args */ + HDassert(f); + HDassert(layout); + HDassert(index_size); + + /* Compose chunked index info struct */ + idx_info.f = f; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = layout; + + /* Get size of index structure */ + if((layout->u.chunk.ops->size)(&idx_info, index_size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve chunk index info") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_bh_info() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_istore_iter_dump + * + * Purpose: If the UDATA.STREAM member is non-null then debugging + * information is written to that stream. 
+ * + * Return: Success: Non-negative + * + * Failure: Negative + * + * Programmer: Robb Matzke + * Wednesday, April 21, 1999 + * + *------------------------------------------------------------------------- + */ +/* ARGSUSED */ +static int +H5D_chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + H5D_chunk_it_ud4_t *udata = (H5D_chunk_it_ud4_t *)_udata; /* User data from caller */ + + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_dump_index_cb) + + if(udata->stream) { + unsigned u; /* Local index variable */ + + /* Print header if not already displayed */ + if(!udata->header_displayed) { + HDfprintf(udata->stream, " Flags Bytes Address Logical Offset\n"); + HDfprintf(udata->stream, " ========== ======== ========== ==============================\n"); + + /* Set flag that the headers has been printed */ + udata->header_displayed = TRUE; + } /* end if */ + + /* Print information about this chunk */ + HDfprintf(udata->stream, " 0x%08x %8Zu %10a [", chunk_rec->filter_mask, chunk_rec->nbytes, chunk_rec->chunk_addr); + for(u = 0; u < udata->ndims; u++) + HDfprintf(udata->stream, "%s%Hd", (u ? ", " : ""), chunk_rec->offset[u]); + HDfputs("]\n", udata->stream); + } /* end if */ + + FUNC_LEAVE_NOAPI(H5_ITER_CONT) +} /* H5D_chunk_dump_index_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_dump_index + * + * Purpose: Prints information about the storage index to the specified + * stream. 
+ * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Robb Matzke + * Wednesday, April 28, 1999 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream) +{ + H5D_chunk_it_ud4_t udata; /* User data for callback */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_dump_index, FAIL) + + /* Sanity check */ + HDassert(dset); + + /* Only display info if stream is defined */ + if(stream) { + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + + /* Display address of index */ + HDfprintf(stream, " Address: %a\n", dset->shared->layout.u.chunk.addr); + + /* Set up user data for callback */ + udata.stream = stream; + udata.header_displayed = FALSE; + udata.ndims = dset->shared->layout.u.chunk.ndims; + + /* Compose chunked index info struct */ + idx_info.f = dset->oloc.file; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Iterate over index and dump chunk info */ + if((dset->shared->layout.u.chunk.ops->iterate)(&idx_info, H5D_chunk_dump_index_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to dump chunk info") + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_dump_index() */ + + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_dest + * + * Purpose: Destroy the entire chunk cache by flushing dirty entries, + * preempting all entries, and freeing the cache itself. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Robb Matzke + * Thursday, May 21, 1998 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_dest(H5F_t *f, hid_t dxpl_id, H5D_t *dset) +{ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Dataset's chunk cache */ + H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Pointer to current & next cache entries */ + int nerrors = 0; /* Accumulated count of errors */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_dest, FAIL) + + HDassert(f); + HDassert(dset); + + /* Fill the DXPL cache values for later use */ + if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Flush all the cached chunks */ + for(ent = rdcc->head; ent; ent = next) { + next = ent->next; + if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) + nerrors++; + } /* end for */ + if(nerrors) + HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") + + /* Release cache structures */ + if(rdcc->slot) + H5FL_SEQ_FREE(H5D_rdcc_ent_ptr_t, rdcc->slot); + HDmemset(rdcc, 0, sizeof(H5D_rdcc_t)); + + /* Compose chunked index info struct */ + idx_info.f = f; + idx_info.dxpl_id = dxpl_id; + idx_info.layout = &dset->shared->layout; + + /* Free any index structures */ + if((dset->shared->layout.u.chunk.ops->dest)(&idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_dest() */ + +#ifdef H5D_CHUNK_DEBUG + +/*------------------------------------------------------------------------- + * Function: H5D_chunk_stats + * + * Purpose: Print 
raw data cache statistics to the debug stream. If + * HEADERS is non-zero then print table column headers, + * otherwise assume that the H5AC layer has already printed them. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Robb Matzke + * Thursday, May 21, 1998 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D_chunk_stats(const H5D_t *dset, hbool_t headers) +{ + H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + double miss_rate; + char ascii[32]; + herr_t ret_value=SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5D_chunk_stats, FAIL) + + if (!H5DEBUG(AC)) + HGOTO_DONE(SUCCEED) + + if (headers) { + fprintf(H5DEBUG(AC), "H5D: raw data cache statistics\n"); + fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s+%-8s\n", + "Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes"); + fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s-%-8s\n", + "-----", "----", "------", "--------", "-----", "-------"); + } + +#ifdef H5AC_DEBUG + if (H5DEBUG(AC)) headers = TRUE; +#endif + + if (headers) { + if (rdcc->nhits>0 || rdcc->nmisses>0) { + miss_rate = 100.0 * rdcc->nmisses / + (rdcc->nhits + rdcc->nmisses); + } else { + miss_rate = 0.0; + } + if (miss_rate > 100) { + sprintf(ascii, "%7d%%", (int) (miss_rate + 0.5)); + } else { + sprintf(ascii, "%7.2f%%", miss_rate); + } + + fprintf(H5DEBUG(AC), " %-18s %8u %8u %7s %8d+%-9ld\n", + "raw data chunks", rdcc->nhits, rdcc->nmisses, ascii, + rdcc->ninits, (long)(rdcc->nflushes)-(long)(rdcc->ninits)); + } + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_chunk_stats() */ +#endif /* H5D_CHUNK_DEBUG */ + + +/*------------------------------------------------------------------------- * Function: H5D_null_readvv * * Purpose: Performs "no-op" I/O operation, advancing through two I/O diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index c3abab6..0458e74 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -57,6 +57,8 @@ /********************/ /* Layout operation 
callbacks */ +static herr_t H5D_compact_new(H5F_t *f, hid_t dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist); static herr_t H5D_compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t *cm); @@ -74,6 +76,7 @@ static ssize_t H5D_compact_writevv(const H5D_io_info_t *io_info, /* Compact storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_COMPACT[1] = {{ + H5D_compact_new, H5D_compact_io_init, H5D_contig_read, H5D_contig_write, @@ -149,6 +152,53 @@ done: /*------------------------------------------------------------------------- + * Function: H5D_compact_new + * + * Purpose: Constructs new compact layout information for dataset + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 + * + *------------------------------------------------------------------------- + */ +/* ARGSUSED */ +static herr_t +H5D_compact_new(H5F_t *f, hid_t UNUSED dxpl_id, H5D_t *dset, + const H5P_genplist_t UNUSED *dc_plist) +{ + hssize_t tmp_size; /* Temporary holder for raw data size */ + hsize_t comp_data_size; /* Size of compact data */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_compact_new) + + /* Sanity checks */ + HDassert(f); + HDassert(dset); + HDassert(dc_plist); + + /* + * Compact dataset is stored in dataset object header message of + * layout. + */ + tmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space) * H5T_get_size(dset->shared->type); + H5_ASSIGN_OVERFLOW(dset->shared->layout.u.compact.size, tmp_size, hssize_t, size_t); + + /* Verify data size is smaller than maximum header message size + * (64KB) minus other layout message fields. 
+ */ + comp_data_size = H5O_MESG_MAX_SIZE - H5O_layout_meta_size(f, &(dset->shared->layout)); + if(dset->shared->layout.u.compact.size > comp_data_size) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "compact dataset size is bigger than header message maximum size") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_compact_new() */ + + +/*------------------------------------------------------------------------- * Function: H5D_compact_io_init * * Purpose: Performs initialization before any sort of I/O on the raw data diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index 1a3fda5..14ad520 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -61,6 +61,8 @@ /********************/ /* Layout operation callbacks */ +static herr_t H5D_contig_new(H5F_t *f, hid_t dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist); static herr_t H5D_contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t *cm); @@ -76,6 +78,7 @@ static herr_t H5D_contig_write_one(H5D_io_info_t *io_info, hsize_t offset, /* Contiguous storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_CONTIG[1] = {{ + H5D_contig_new, H5D_contig_io_init, H5D_contig_read, H5D_contig_write, @@ -102,7 +105,7 @@ H5FL_BLK_EXTERN(type_conv); /*------------------------------------------------------------------------- - * Function: H5D_contig_create + * Function: H5D_contig_alloc * * Purpose: Allocate file space for a contiguously stored dataset * @@ -114,11 +117,11 @@ H5FL_BLK_EXTERN(type_conv); *------------------------------------------------------------------------- */ herr_t -H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) +H5D_contig_alloc(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) { herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_contig_create, FAIL) + FUNC_ENTER_NOAPI(H5D_contig_alloc, FAIL) /* check args */ HDassert(f); @@ -130,7 +133,7 @@ 
H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_contig_create */ +} /* end H5D_contig_alloc */ /*------------------------------------------------------------------------- @@ -355,6 +358,63 @@ H5D_contig_get_addr(const H5D_t *dset) /*------------------------------------------------------------------------- + * Function: H5D_contig_new + * + * Purpose: Constructs new contiguous layout information for dataset + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 + * + *------------------------------------------------------------------------- + */ +/* ARGSUSED */ +static herr_t +H5D_contig_new(H5F_t *f, hid_t UNUSED dxpl_id, H5D_t *dset, + const H5P_genplist_t UNUSED *dc_plist) +{ + const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */ + hssize_t tmp_size; /* Temporary holder for raw data size */ + hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */ + hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ + int ndims; /* Rank of dataspace */ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_contig_new) + + /* Sanity checks */ + HDassert(f); + HDassert(dset); + + /* + * The maximum size of the dataset cannot exceed the storage size. + * Also, only the slowest varying dimension of a simple data space + * can be extendible (currently only for external data storage). 
+ */ + dset->shared->layout.u.contig.addr = HADDR_UNDEF; /* Initialize to no address */ + + /* Check for invalid dataset dimensions */ + if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage") + for(i = 0; i < ndims; i++) + if(max_dim[i] > dim[i]) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible contiguous non-external dataset") + + /* Compute the total size of dataset */ + tmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space) * H5T_get_size(type); + H5_ASSIGN_OVERFLOW(dset->shared->layout.u.contig.size, tmp_size, hssize_t, hsize_t); + + /* Get the sieve buffer size for this dataset */ + dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(f); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_contig_new() */ + + +/*------------------------------------------------------------------------- * Function: H5D_contig_io_init * * Purpose: Performs initialization before any sort of I/O on the raw data @@ -1137,7 +1197,7 @@ H5D_contig_copy(H5F_t *f_src, const H5O_layout_t *layout_src, H5F_t *f_dst, HDassert(dt_src); /* Allocate space for destination raw data */ - if(H5D_contig_create(f_dst, dxpl_id, layout_dst) < 0) + if(H5D_contig_alloc(f_dst, dxpl_id, layout_dst) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to allocate contiguous storage") /* Set up number of bytes to copy, and initial buffer size */ diff --git a/src/H5Ddbg.c b/src/H5Ddbg.c index 4ca5b21..714c2e3 100644 --- a/src/H5Ddbg.c +++ b/src/H5Ddbg.c @@ -113,7 +113,7 @@ H5Ddebug(hid_t dset_id) /* Print B-tree information */ if(H5D_CHUNKED == dset->shared->layout.type) - (void)H5D_istore_dump_btree(dset->oloc.file, H5AC_dxpl_id, stdout, dset->shared->layout.u.chunk.ndims, dset->shared->layout.u.chunk.addr); + (void)H5D_chunk_dump_index(dset, H5AC_dxpl_id, stdout); else if(H5D_CONTIGUOUS == dset->shared->layout.type) HDfprintf(stdout, " %-10s %a\n", "Address:", 
dset->shared->layout.u.contig.addr); diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c index fd1d2e8..a07c86f 100644 --- a/src/H5Ddeprec.c +++ b/src/H5Ddeprec.c @@ -346,13 +346,13 @@ H5D_extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) if(changed) { /* Update the index values for the cached chunks for this dataset */ if(H5D_CHUNKED == dataset->shared->layout.type) - if(H5D_istore_update_cache(dataset, dxpl_id) < 0) + if(H5D_chunk_update_cache(dataset, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") /* Allocate space for the new parts of the dataset, if appropriate */ fill = &dataset->shared->dcpl_cache.fill; if(fill->alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(dataset->oloc.file, dxpl_id, dataset, H5D_ALLOC_EXTEND, FALSE) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value") /* Mark the dataspace as dirty, for later writing to the file */ diff --git a/src/H5Defl.c b/src/H5Defl.c index 7dac29b..15bcd60 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -49,6 +49,8 @@ /********************/ /* Layout operation callbacks */ +static herr_t H5D_efl_new(H5F_t *f, hid_t dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist); static herr_t H5D_efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t *cm); @@ -72,6 +74,7 @@ static herr_t H5D_efl_write(const H5O_efl_t *efl, haddr_t addr, size_t size, /* External File List (EFL) storage layout I/O ops */ const H5D_layout_ops_t H5D_LOPS_EFL[1] = {{ + H5D_efl_new, H5D_efl_io_init, H5D_contig_read, H5D_contig_write, @@ -92,6 +95,77 @@ const H5D_layout_ops_t H5D_LOPS_EFL[1] = {{ /*------------------------------------------------------------------------- + * Function: H5D_efl_new + * + * Purpose: Constructs new EFL layout information for dataset + 
* + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D_efl_new(H5F_t *f, hid_t UNUSED dxpl_id, H5D_t *dset, + const H5P_genplist_t *dc_plist) +{ + const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */ + hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */ + hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ + hssize_t tmp_size; /* Temporary holder for raw data size */ + hsize_t max_points; /* Maximum elements */ + hsize_t max_storage; /* Maximum storage size */ + int ndims; /* Rank of dataspace */ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT(H5D_efl_new) + + /* Sanity checks */ + HDassert(f); + HDassert(dset); + HDassert(dc_plist); + + /* + * The maximum size of the dataset cannot exceed the storage size. + * Also, only the slowest varying dimension of a simple data space + * can be extendible (currently only for external data storage). 
+ */ + dset->shared->layout.u.contig.addr = HADDR_UNDEF; /* Initialize to no address */ + + /* Check for invalid dataset dimensions */ + if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage") + for(i = 1; i < ndims; i++) + if(max_dim[i] > dim[i]) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "only the first dimension can be extendible") + + /* Check for storage overflows */ + max_points = H5S_get_npoints_max(dset->shared->space); + max_storage = H5O_efl_total_size(&dset->shared->dcpl_cache.efl); + if(H5S_UNLIMITED == max_points) { + if(H5O_EFL_UNLIMITED != max_storage) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unlimited data space but finite storage") + } /* end if */ + else if(max_points * H5T_get_size(type) < max_points) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data space * type size overflowed") + else if(max_points * H5T_get_size(type) > max_storage) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data space size exceeds external storage size") + + /* Compute the total size of dataset */ + tmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space) * H5T_get_size(type); + H5_ASSIGN_OVERFLOW(dset->shared->layout.u.contig.size, tmp_size, hssize_t, hsize_t); + + /* Get the sieve buffer size for this dataset */ + dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(f); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D_efl_new() */ + + +/*------------------------------------------------------------------------- * Function: H5D_efl_io_init * * Purpose: Performs initialization before any sort of I/O on the raw data diff --git a/src/H5Dint.c b/src/H5Dint.c index f8577cc..162a14f 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -710,17 +710,21 @@ H5D_set_io_ops(H5D_t *dataset) switch(dataset->shared->layout.type) { case H5D_CONTIGUOUS: if(dataset->shared->dcpl_cache.efl.nused > 0) - dataset->shared->layout_ops = H5D_LOPS_EFL; + 
dataset->shared->layout.ops = H5D_LOPS_EFL; else - dataset->shared->layout_ops = H5D_LOPS_CONTIG; + dataset->shared->layout.ops = H5D_LOPS_CONTIG; break; case H5D_CHUNKED: - dataset->shared->layout_ops = H5D_LOPS_CHUNK; + dataset->shared->layout.ops = H5D_LOPS_CHUNK; + + /* Set the chunk operations */ + /* (Only "istore" indexing type currently supported */ + dataset->shared->layout.u.chunk.ops = H5D_COPS_ISTORE; break; case H5D_COMPACT: - dataset->shared->layout_ops = H5D_LOPS_COMPACT; + dataset->shared->layout.ops = H5D_LOPS_COMPACT; break; default: @@ -831,6 +835,7 @@ H5D_update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset) /* Create an object header for the dataset */ if(H5O_create(file, dxpl_id, ohdr_size, dset->shared->dcpl_id, oloc/*out*/) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset object header") + HDassert(file == dset->oloc.file); /* Get a pointer to the object header itself */ if((oh = H5O_protect(oloc, dxpl_id)) == NULL) @@ -877,7 +882,7 @@ H5D_update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset) * allocation until later. 
*/ if(fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(file, dxpl_id, dset, H5D_ALLOC_CREATE, FALSE) < 0) + if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_CREATE, FALSE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") /* Update external storage message, if it's used */ @@ -1000,13 +1005,10 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, { const H5T_t *type; /* Datatype for dataset */ H5D_t *new_dset = NULL; - int i, ndims; - unsigned chunk_ndims = 0; /* Dimensionality of chunk */ H5P_genplist_t *dc_plist = NULL; /* New Property list */ hbool_t has_vl_type = FALSE; /* Flag to indicate a VL-type for dataset */ - hbool_t chunk_init = FALSE; /* Flag to indicate that chunk information was initialized */ + hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */ H5G_loc_t dset_loc; /* Dataset location */ - unsigned u; /* Local index variable */ H5D_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI(H5D_create, NULL) @@ -1101,10 +1103,6 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, if(IS_H5FD_MPI(file) && pline->nused > 0) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "Parallel I/O does not support filters yet") - /* Chunked datasets are non-default, so retrieve their info here */ - if(H5P_get(dc_plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk dimensions") - /* Get the dataset's external file list information */ if(H5P_get(dc_plist, H5D_CRT_EXT_FILE_LIST_NAME, &new_dset->shared->dcpl_cache.efl) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve external file list") @@ -1125,140 +1123,17 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, if(IS_H5FD_MPI(file)) new_dset->shared->dcpl_cache.fill.alloc_time = H5D_ALLOC_TIME_EARLY; - switch(new_dset->shared->layout.type) { - case H5D_CONTIGUOUS: - { - hssize_t tmp_size; /* 
Temporary holder for raw data size */ - hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */ - hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ - - /* - * The maximum size of the dataset cannot exceed the storage size. - * Also, only the slowest varying dimension of a simple data space - * can be extendible (currently only for external data storage). - */ - new_dset->shared->layout.u.contig.addr = HADDR_UNDEF; /* Initialize to no address */ - - if((ndims = H5S_get_simple_extent_dims(new_dset->shared->space, dim, max_dim)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize contiguous storage") - for(i = 1; i < ndims; i++) - if(max_dim[i] > dim[i]) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "only the first dimension can be extendible") - if(new_dset->shared->dcpl_cache.efl.nused > 0) { - hsize_t max_points = H5S_get_npoints_max(new_dset->shared->space); - hsize_t max_storage = H5O_efl_total_size(&new_dset->shared->dcpl_cache.efl); - - if(H5S_UNLIMITED == max_points) { - if(H5O_EFL_UNLIMITED != max_storage) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unlimited data space but finite storage") - } else if(max_points * H5T_get_size(type) < max_points) { - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "data space * type size overflowed") - } else if(max_points * H5T_get_size(type) > max_storage) { - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size") - } - } /* end if */ - else { - if(ndims > 0 && max_dim[0] > dim[0]) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset") - } /* end else */ - - /* Compute the total size of a chunk */ - tmp_size = H5S_GET_EXTENT_NPOINTS(new_dset->shared->space) * H5T_get_size(new_dset->shared->type); - H5_ASSIGN_OVERFLOW(new_dset->shared->layout.u.contig.size, tmp_size, hssize_t, hsize_t); - - /* Get the sieve buffer size for this dataset */ - 
new_dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(file); - } /* end case */ - break; - - case H5D_CHUNKED: - { - hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ - uint64_t chunk_size; /* Size of chunk in bytes */ - - /* Set up layout information */ - if((ndims = H5S_GET_EXTENT_NDIMS(new_dset->shared->space)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "unable to get rank") - new_dset->shared->layout.u.chunk.ndims = (unsigned)ndims + 1; - HDassert((unsigned)(new_dset->shared->layout.u.chunk.ndims) <= NELMTS(new_dset->shared->layout.u.chunk.dim)); - - /* Initialize to no address */ - new_dset->shared->layout.u.chunk.addr = HADDR_UNDEF; - - /* - * Chunked storage allows any type of data space extension, so we - * don't even bother checking. - */ - if(chunk_ndims != (unsigned)ndims) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space") - if(new_dset->shared->dcpl_cache.efl.nused > 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout") - - /* - * The chunk size of a dimension with a fixed size cannot exceed - * the maximum dimension size - */ - if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, new_dset->shared->layout.u.chunk.dim) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size") - new_dset->shared->layout.u.chunk.dim[new_dset->shared->layout.u.chunk.ndims-1] = H5T_get_size(new_dset->shared->type); - - if(H5S_get_simple_extent_dims(new_dset->shared->space, NULL, max_dim) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to query maximum dimensions") - for(u = 0; u < new_dset->shared->layout.u.chunk.ndims - 1; u++) - if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < new_dset->shared->layout.u.chunk.dim[u]) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions") - - /* Compute the total size of a chunk */ - /* (Use 64-bit value to 
ensure that we can detect >4GB chunks) */ - for(u = 1, chunk_size = (uint64_t)new_dset->shared->layout.u.chunk.dim[0]; u < new_dset->shared->layout.u.chunk.ndims; u++) - chunk_size *= (uint64_t)new_dset->shared->layout.u.chunk.dim[u]; - - /* Check for chunk larger than can be represented in 32-bits */ - /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */ - if(chunk_size > (uint64_t)0xffffffff) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be < 4GB") - - /* Retain computed chunk size */ - H5_ASSIGN_OVERFLOW(new_dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t); - - /* Initialize the chunk cache for the dataset */ - if(H5D_istore_init(file, new_dset) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize chunk cache") - - /* Indicate that the chunk information was initialized */ - chunk_init = TRUE; - } /* end case */ - break; - - case H5D_COMPACT: - { - hssize_t tmp_size; /* Temporary holder for raw data size */ - hsize_t comp_data_size; - - /* - * Compact dataset is stored in dataset object header message of - * layout. - */ - tmp_size = H5S_GET_EXTENT_NPOINTS(space) * H5T_get_size(new_dset->shared->type); - H5_ASSIGN_OVERFLOW(new_dset->shared->layout.u.compact.size, tmp_size, hssize_t, size_t); - - /* Verify data size is smaller than maximum header message size - * (64KB) minus other layout message fields. 
- */ - comp_data_size = H5O_MESG_MAX_SIZE - H5O_layout_meta_size(file, &(new_dset->shared->layout)); - if(new_dset->shared->layout.u.compact.size > comp_data_size) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size") - } /* end case */ - break; - - default: - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "not implemented yet") - } /* end switch */ /*lint !e788 All appropriate cases are covered */ - /* Set the dataset's I/O operations */ if(H5D_set_io_ops(new_dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize I/O operations") + /* Create the layout information for the new dataset */ + if((new_dset->shared->layout.ops->new)(file, dxpl_id, new_dset, dc_plist) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize layout information") + + /* Indicate that the layout information was initialized */ + layout_init = TRUE; + /* Update the dataset's object header info. */ if(H5D_update_oh_info(file, dxpl_id, new_dset) != SUCCEED) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't update the metadata cache") @@ -1277,8 +1152,8 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, done: if(!ret_value && new_dset && new_dset->shared) { if(new_dset->shared) { - if(new_dset->shared->layout.type == H5D_CHUNKED && chunk_init) { - if(H5D_istore_dest(new_dset,H5AC_dxpl_id) < 0) + if(new_dset->shared->layout.type == H5D_CHUNKED && layout_init) { + if(H5D_chunk_dest(file, dxpl_id, new_dset) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "unable to destroy chunk cache") } /* end if */ if(new_dset->shared->space) { @@ -1482,6 +1357,30 @@ H5D_open_oid(H5D_t *dataset, hid_t dxpl_id) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to read data layout message") if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &dataset->shared->layout.type) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout") + + /* Get the external file list message, which 
might not exist. Space is + * also undefined when space allocate time is H5D_ALLOC_TIME_LATE. */ + if((dataset->shared->layout.type == H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) + || (dataset->shared->layout.type == H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) { + if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_EFL_ID, dxpl_id)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists") + if(msg_exists) { + /* Retrieve the EFL message */ + if(NULL == H5O_msg_read(&(dataset->oloc), H5O_EFL_ID, &dataset->shared->dcpl_cache.efl, dxpl_id)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message") + + /* Set the EFL info in the property list */ + if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->shared->dcpl_cache.efl) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set external file list") + + /* Set the dataset's I/O operations */ + dataset->shared->layout.ops = H5D_LOPS_EFL; + } /* end if */ + } /* end if */ + + /* Sanity check that the layout operations are set up */ + HDassert(dataset->shared->layout.ops); + switch(dataset->shared->layout.type) { case H5D_CONTIGUOUS: /* Compute the size of the contiguous storage for versions of the @@ -1516,7 +1415,7 @@ H5D_open_oid(H5D_t *dataset, hid_t dxpl_id) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk size") /* Initialize the chunk cache for the dataset */ - if(H5D_istore_init(dataset->oloc.file, dataset) < 0) + if(H5D_chunk_init(dataset->oloc.file, dxpl_id, dataset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize chunk cache") } break; @@ -1584,27 +1483,6 @@ H5D_open_oid(H5D_t *dataset, hid_t dxpl_id) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set allocation time state") } /* end if */ - /* Get the external file list message, which might not exist. Space is - * also undefined when space allocate time is H5D_ALLOC_TIME_LATE. 
*/ - if((dataset->shared->layout.type == H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) - || (dataset->shared->layout.type == H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) { - if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_EFL_ID, dxpl_id)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists") - if(msg_exists) { - /* Retrieve the EFL message */ - if(NULL == H5O_msg_read(&(dataset->oloc), H5O_EFL_ID, &dataset->shared->dcpl_cache.efl, dxpl_id)) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message") - - /* Set the EFL info in the property list */ - if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->shared->dcpl_cache.efl) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set external file list") - } /* end if */ - } /* end if */ - - /* Set the dataset's I/O operations */ - if(H5D_set_io_ops(dataset) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize I/O operations") - /* * Make sure all storage is properly initialized. 
* This is important only for parallel I/O where the space must @@ -1614,7 +1492,7 @@ H5D_open_oid(H5D_t *dataset, hid_t dxpl_id) && ((dataset->shared->layout.type == H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type == H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) && IS_H5FD_MPI(dataset->oloc.file)) { - if(H5D_alloc_storage(dataset->oloc.file, dxpl_id, dataset, H5D_ALLOC_OPEN, FALSE) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_OPEN, FALSE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file storage") } /* end if */ @@ -1671,9 +1549,9 @@ H5D_close(H5D_t *dataset) HDassert(dataset->shared->fo_count >0); /* Dump debugging info */ -#ifdef H5D_ISTORE_DEBUG - H5D_istore_stats(dataset, FALSE); -#endif /* H5F_ISTORE_DEBUG */ +#ifdef H5D_CHUNK_DEBUG + H5D_chunk_stats(dataset, FALSE); +#endif /* H5D_CHUNK_DEBUG */ dataset->shared->fo_count--; if(dataset->shared->fo_count == 0) { @@ -1714,7 +1592,7 @@ H5D_close(H5D_t *dataset) } /* end if */ /* Flush and destroy chunks in the cache */ - if(H5D_istore_dest(dataset, H5AC_dxpl_id) < 0) + if(H5D_chunk_dest(dataset->oloc.file, H5AC_dxpl_id, dataset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache") break; @@ -1899,10 +1777,11 @@ H5D_get_file(const H5D_t *dset) *------------------------------------------------------------------------- */ herr_t -H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_alloc_t time_alloc, +H5D_alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_alloc, hbool_t full_overwrite) { - struct H5O_layout_t *layout; /* The dataset's layout information */ + H5F_t *f = dset->oloc.file; /* The dataset's file pointer */ + H5O_layout_t *layout; /* The dataset's layout information */ hbool_t must_init_space = FALSE; /* Flag to indicate that space should be initialized */ hbool_t addr_set = FALSE; /* Flag to indicate 
that the dataset's storage address was set */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1910,8 +1789,8 @@ H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_alloc FUNC_ENTER_NOAPI_NOINIT(H5D_alloc_storage) /* check args */ - HDassert(f); HDassert(dset); + HDassert(f); /* If the data is stored in external files, don't set an address for the layout * We assume that external storage is already @@ -1925,7 +1804,7 @@ H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_alloc case H5D_CONTIGUOUS: if(!H5F_addr_defined(layout->u.contig.addr)) { /* Reserve space in the file for the entire array */ - if(H5D_contig_create(f, dxpl_id, layout/*out*/) < 0) + if(H5D_contig_alloc(f, dxpl_id, layout/*out*/) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage") /* Indicate that we set the storage addr */ @@ -1939,7 +1818,7 @@ H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_alloc case H5D_CHUNKED: if(!H5F_addr_defined(layout->u.chunk.addr)) { /* Create the root of the B-tree that describes chunked storage */ - if(H5D_istore_create(f, dxpl_id, layout/*out*/) < 0) + if(H5D_chunk_create(dset /*in,out*/, dxpl_id) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage") /* Indicate that we set the storage addr */ @@ -2079,7 +1958,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) * Allocate file space * for all chunks now and initialize each chunk with the fill value. 
*/ - if(H5D_istore_allocate(dset, dxpl_id, full_overwrite) < 0) + if(H5D_chunk_allocate(dset, dxpl_id, full_overwrite) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset") break; @@ -2122,7 +2001,8 @@ H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id) if(dset->shared->layout.u.chunk.addr == HADDR_UNDEF) ret_value = 0; else - ret_value = H5D_istore_allocated(dset, dxpl_id); + if(H5D_chunk_allocated(dset, dxpl_id, &ret_value) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't retrieve chunked dataset allocated size") break; case H5D_CONTIGUOUS: @@ -2428,7 +2308,7 @@ done: * Function: H5D_set_extent * * Purpose: Based on H5D_extend, allows change to a lower dimension, - * calls H5S_set_extent and H5D_istore_prune_by_extent instead + * calls H5S_set_extent and H5D_chunk_prune_by_extent instead * * Return: Non-negative on success, negative on failure * @@ -2491,12 +2371,12 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) */ /* Update the index values for the cached chunks for this dataset */ if(H5D_CHUNKED == dset->shared->layout.type) - if(H5D_istore_update_cache(dset, dxpl_id) < 0) + if(H5D_chunk_update_cache(dset, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") /* Allocate space for the new parts of the dataset, if appropriate */ if(expand && dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY) - if(H5D_alloc_storage(dset->oloc.file, dxpl_id, dset, H5D_ALLOC_EXTEND, FALSE) < 0) + if(H5D_alloc_storage(dset, dxpl_id, H5D_ALLOC_EXTEND, FALSE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset storage") @@ -2506,24 +2386,9 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) *------------------------------------------------------------------------- */ if(shrink && H5D_CHUNKED == dset->shared->layout.type) { - H5D_io_info_t io_info; /* Dataset I/O info */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer 
property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") - - /* Construct dataset I/O info */ - H5D_BUILD_IO_INFO_RD(&io_info, dset, dxpl_cache, dxpl_id, NULL, NULL); - /* Remove excess chunks */ - if(H5D_istore_prune_by_extent(&io_info, curr_dims) < 0) + if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks ") - - /* Reset the elements outsize the new dimensions, but in existing chunks */ - if(H5D_istore_initialize_by_extent(&io_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to initialize chunks ") } /* end if */ /* Mark the dataspace as dirty, for later writing to the file */ @@ -2611,7 +2476,7 @@ H5D_flush_real(H5D_t *dataset, hid_t dxpl_id, unsigned flags) case H5D_CHUNKED: /* Flush the raw data cache */ - if(H5D_istore_flush(dataset, dxpl_id, flags & H5F_FLUSH_INVALIDATE) < 0) + if(H5D_chunk_flush(dataset, dxpl_id, flags & H5F_FLUSH_INVALIDATE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush raw data cache") break; diff --git a/src/H5Dio.c b/src/H5Dio.c index b43ec04..92d88f4 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -392,7 +392,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, || dataset->shared->layout.type == H5D_COMPACT); /* Call storage method's I/O initialization routine */ - if(io_info.layout_ops.init && (*io_info.layout_ops.init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) + if(io_info.layout_ops.io_init && (*io_info.layout_ops.io_init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") io_op_init = TRUE; @@ -408,7 +408,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t 
*mem_space, done: /* Shut down the I/O op information */ - if(io_op_init && io_info.layout_ops.term && (*io_info.layout_ops.term)(&fm) < 0) + if(io_op_init && io_info.layout_ops.io_term && (*io_info.layout_ops.io_term)(&fm) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") #ifdef H5_HAVE_PARALLEL /* Shut down io_info struct */ @@ -553,7 +553,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, full_overwrite = (hsize_t)file_nelmts == nelmts ? TRUE : FALSE; /* Allocate storage */ - if(H5D_alloc_storage(dataset->oloc.file, dxpl_id, dataset, H5D_ALLOC_WRITE, full_overwrite) < 0) + if(H5D_alloc_storage(dataset, dxpl_id, H5D_ALLOC_WRITE, full_overwrite) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") } /* end if */ @@ -567,7 +567,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, #endif /*H5_HAVE_PARALLEL*/ /* Call storage method's I/O initialization routine */ - if(io_info.layout_ops.init && (*io_info.layout_ops.init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) + if(io_info.layout_ops.io_init && (*io_info.layout_ops.io_init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") io_op_init = TRUE; @@ -598,7 +598,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, done: /* Shut down the I/O op information */ - if(io_op_init && io_info.layout_ops.term && (*io_info.layout_ops.term)(&fm) < 0) + if(io_op_init && io_info.layout_ops.io_term && (*io_info.layout_ops.io_term)(&fm) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") #ifdef H5_HAVE_PARALLEL /* Shut down io_info struct */ @@ -646,11 +646,11 @@ H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, io_info->store = store; /* Set I/O operations to initial values */ - io_info->layout_ops = *dset->shared->layout_ops; + 
io_info->layout_ops = *dset->shared->layout.ops; /* Set the "high-level" I/O operations for the dataset */ - io_info->io_ops.multi_read = dset->shared->layout_ops->ser_read; - io_info->io_ops.multi_write = dset->shared->layout_ops->ser_write; + io_info->io_ops.multi_read = dset->shared->layout.ops->ser_read; + io_info->io_ops.multi_write = dset->shared->layout.ops->ser_write; /* Set the I/O operations for reading/writing single blocks on disk */ if(type_info->is_xform_noop && type_info->is_conv_noop) { @@ -887,8 +887,8 @@ H5D_ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, /* Check if we can use the optimized parallel I/O routines */ if(opt == TRUE) { /* Override the I/O op pointers to the MPI-specific routines */ - io_info->io_ops.multi_read = dset->shared->layout_ops->par_read; - io_info->io_ops.multi_write = dset->shared->layout_ops->par_write; + io_info->io_ops.multi_read = dset->shared->layout.ops->par_read; + io_info->io_ops.multi_write = dset->shared->layout.ops->par_write; io_info->io_ops.single_read = H5D_mpio_select_read; io_info->io_ops.single_write = H5D_mpio_select_write; } /* end if */ diff --git a/src/H5Distore.c b/src/H5Distore.c index aed5add..b6005db 100644 --- a/src/H5Distore.c +++ b/src/H5Distore.c @@ -73,44 +73,36 @@ /****************/ /* - * Feature: If this constant is defined then every cache preemption and load - * causes a character to be printed on the standard error stream: - * - * `.': Entry was preempted because it has been completely read or - * completely written but not partially read and not partially - * written. This is often a good reason for preemption because such - * a chunk will be unlikely to be referenced in the near future. - * - * `:': Entry was preempted because it hasn't been used recently. - * - * `#': Entry was preempted because another chunk collided with it. This - * is usually a relatively bad thing. If there are too many of - * these then the number of entries in the cache can be increased. 
- * - * c: Entry was preempted because the file is closing. - * - * w: A chunk read operation was eliminated because the library is - * about to write new values to the entire chunk. This is a good - * thing, especially on files where the chunk size is the same as - * the disk block size, chunks are aligned on disk block boundaries, - * and the operating system can also eliminate a read operation. - */ - -/*#define H5D_ISTORE_DEBUG */ - -/* * Given a B-tree node return the dimensionality of the chunks pointed to by * that node. */ #define H5D_ISTORE_NDIMS(X) (((X)->sizeof_rkey-8)/8) -#define H5D_ISTORE_DEFAULT_SKIPLIST_HEIGHT 8 - /******************/ /* Local Typedefs */ /******************/ /* + * B-tree key. A key contains the minimum logical N-dimensional coordinates and + * the logical size of the chunk to which this key refers. The + * fastest-varying dimension is assumed to reference individual bytes of the + * array, so a 100-element 1-d array of 4-byte integers would really be a 2-d + * array with the slow varying dimension of size 100 and the fast varying + * dimension of size 4 (the storage dimensionality has very little to do with + * the real dimensionality). + * + * Only the first few values of the OFFSET and SIZE fields are actually + * stored on disk, depending on the dimensionality. + * + * The chunk's file address is part of the B-tree and not part of the key. + */ +typedef struct H5D_istore_key_t { + size_t nbytes; /*size of stored data */ + hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/ + unsigned filter_mask; /*excluded filters */ +} H5D_istore_key_t; + +/* * Data exchange structure for indexed storage nodes. This structure is * passed through the B-link tree layer to the methods for the objects * to which the B-link tree points for operations which require no @@ -118,99 +110,25 @@ * * (Just an alias for the "common" info). 
*/ -typedef H5D_istore_bt_ud_common_t H5D_istore_ud0_t; - -/* B-tree callback info for iteration to total allocated space */ -typedef struct H5D_istore_it_ud1_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - hsize_t total_storage; /*output from iterator */ -} H5D_istore_it_ud1_t; - -/* B-tree callback info for iteration to dump node's info */ -typedef struct H5D_istore_it_ud2_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - FILE *stream; /*debug output stream */ - hbool_t header_displayed; /* Node's header is displayed? */ -} H5D_istore_it_ud2_t; - -/* B-tree callback info for iteration to prune chunks */ -typedef struct H5D_istore_it_ud3_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - const hsize_t *dims; /* New dataset dimensions */ - const hsize_t *down_chunks; /* "down" size of number of chunks in each dimension */ - H5SL_t *outside; /* Skip list to hold chunks outside the new dimensions */ -} H5D_istore_it_ud3_t; - -/* B-tree callback info for iteration to copy data */ -typedef struct H5D_istore_it_ud4_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - H5F_t *file_src; /* Source file for copy */ - haddr_t addr_dst; /* Address of dest. 
B-tree */ - void *buf; /* Buffer to hold chunk data for read/write */ - void *bkg; /* Buffer for background information during type conversion */ - size_t buf_size; /* Buffer size */ - hbool_t do_convert; /* Whether to perform type conversions */ - - /* needed for converting variable-length data */ - hid_t tid_src; /* Datatype ID for source datatype */ - hid_t tid_dst; /* Datatype ID for destination datatype */ - hid_t tid_mem; /* Datatype ID for memory datatype */ - H5T_t *dt_src; /* Source datatype */ - H5T_path_t *tpath_src_mem; /* Datatype conversion path from source file to memory */ - H5T_path_t *tpath_mem_dst; /* Datatype conversion path from memory to dest. file */ - void *reclaim_buf; /* Buffer for reclaiming data */ - size_t reclaim_buf_size; /* Reclaim buffer size */ - uint32_t nelmts; /* Number of elements in buffer */ - H5S_t *buf_space; /* Dataspace describing buffer */ - - /* needed for compressed variable-length data */ - H5O_pline_t *pline; /* Filter pipeline */ - - /* needed for copy object pointed by refs */ - H5F_t *file_dst; /* Destination file for copy */ - H5O_copy_t *cpy_info; /* Copy options */ -} H5D_istore_it_ud4_t; - -/* B-tree callback info for iteration to obtain chunk address and the index of the chunk for all chunks in the B-tree. 
*/ -typedef struct H5D_istore_it_ud5_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - const hsize_t *down_chunks; - haddr_t *chunk_addr; -} H5D_istore_it_ud5_t; - -/* Skip list node for storing chunks to remove during an iteration */ -typedef struct H5D_istore_sl_ck_t { - hsize_t index; /* Index of chunk to remove (must be first) */ - H5D_istore_key_t key; /* Chunk key */ -} H5D_istore_sl_ck_t; - -/* Skip list callback info when destroying list & removing chunks */ -typedef struct H5D_istore_sl_rm_t { - H5F_t *f; /* Pointer to file for B-tree */ - hid_t dxpl_id; /* DXPL to use */ - const H5O_layout_t *mesg; /* Layout message */ -} H5D_istore_sl_rm_t; +typedef H5D_chunk_common_ud_t H5D_istore_ud0_t; + +/* B-tree callback info for iteration over chunks */ +typedef struct H5D_istore_it_ud_t { + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + H5D_chunk_cb_func_t cb; /* Chunk callback routine */ + void *udata; /* User data for chunk callback routine */ +} H5D_istore_it_ud_t; + /********************/ /* Local Prototypes */ /********************/ -static void *H5D_istore_chunk_alloc(size_t size, const H5O_pline_t *pline); -static void *H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline); -static herr_t H5D_istore_shared_create (const H5F_t *f, H5O_layout_t *layout); -static herr_t H5D_istore_shared_free (void *page); +static herr_t H5D_istore_shared_create(const H5F_t *f, H5O_layout_t *layout); /* B-tree iterator callbacks */ -static int H5D_istore_iter_chunkmap(H5F_t *f, hid_t dxpl_id, const void *left_key, haddr_t addr, - const void *right_key, void *_udata); -static int H5D_istore_iter_allocated(H5F_t *f, hid_t dxpl_id, const void *left_key, haddr_t addr, - const void *right_key, void *_udata); -static int H5D_istore_iter_dump(H5F_t *f, hid_t dxpl_id, const void *left_key, haddr_t addr, - const void *right_key, void *_udata); -static int H5D_istore_prune_check(H5F_t *f, hid_t 
dxpl_id, const void *_lt_key, haddr_t addr, - const void *_rt_key, void *_udata); -static int H5D_istore_iter_copy(H5F_t *f, hid_t dxpl_id, const void *_lt_key, haddr_t addr, - const void *_rt_key, void *_udata); +static int H5D_istore_idx_iterate_cb(H5F_t *f, hid_t dxpl_id, const void *left_key, + haddr_t addr, const void *right_key, void *_udata); /* B-tree callbacks */ static H5RC_t *H5D_istore_get_shared(const H5F_t *f, const void *_udata); @@ -239,6 +157,51 @@ static herr_t H5D_istore_debug_key(FILE *stream, H5F_t *f, hid_t dxpl_id, int indent, int fwidth, const void *key, const void *udata); +/* Chunked layout indexing callbacks */ +static herr_t H5D_istore_idx_init(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D_istore_idx_create(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D_istore_idx_insert(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +static haddr_t H5D_istore_idx_get_addr(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +static int H5D_istore_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); +static herr_t H5D_istore_idx_remove(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_common_ud_t *udata); +static herr_t H5D_istore_idx_delete(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D_istore_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst); +static herr_t H5D_istore_idx_copy_shutdown(H5O_layout_t *layout_src, + H5O_layout_t *layout_dst); +static herr_t H5D_istore_idx_size(const H5D_chk_idx_info_t *idx_info, + hsize_t *size); +static herr_t H5D_istore_idx_dest(const H5D_chk_idx_info_t *idx_info); + + +/*********************/ +/* Package Variables */ +/*********************/ + +/* v1 B-tree indexed chunk I/O ops */ +const H5D_chunk_ops_t H5D_COPS_ISTORE[1] = {{ + H5D_istore_idx_init, + H5D_istore_idx_create, + H5D_istore_idx_insert, + H5D_istore_idx_get_addr, + H5D_istore_idx_iterate, + H5D_istore_idx_remove, + 
H5D_istore_idx_delete, + H5D_istore_idx_copy_setup, + H5D_istore_idx_copy_shutdown, + H5D_istore_idx_size, + H5D_istore_idx_dest +}}; + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + /* inherits B-tree like properties from H5B */ H5B_class_t H5B_ISTORE[1] = {{ H5B_ISTORE_ID, /*id */ @@ -257,36 +220,11 @@ H5B_class_t H5B_ISTORE[1] = {{ H5D_istore_debug_key, /*debug */ }}; -/*********************/ -/* Package Variables */ -/*********************/ - -/*****************************/ -/* Library Private Variables */ -/*****************************/ /*******************/ /* Local Variables */ /*******************/ -/* Declare a free list to manage H5F_rdcc_ent_t objects */ -H5FL_DEFINE_STATIC(H5D_rdcc_ent_t); - -/* Declare a free list to manage the H5F_rdcc_ent_ptr_t sequence information */ -H5FL_SEQ_DEFINE_STATIC(H5D_rdcc_ent_ptr_t); - -/* Declare a free list to manage the chunk sequence information */ -H5FL_BLK_DEFINE_STATIC(chunk); - -/* Declare a free list to manage the native key offset sequence information */ -H5FL_SEQ_DEFINE_STATIC(size_t); - -/* Declare a free list to manage the raw page information */ -H5FL_BLK_DEFINE_STATIC(chunk_page); - -/* Declare a free list to manage H5D_istore_sl_ck_t objects */ -H5FL_DEFINE_STATIC(H5D_istore_sl_ck_t); - /*------------------------------------------------------------------------- * Function: H5D_istore_get_shared @@ -580,7 +518,7 @@ H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, { H5D_istore_key_t *lt_key = (H5D_istore_key_t *) _lt_key; H5D_istore_key_t *rt_key = (H5D_istore_key_t *) _rt_key; - H5D_istore_ud1_t *udata = (H5D_istore_ud1_t *) _udata; + H5D_chunk_ud_t *udata = (H5D_chunk_ud_t *) _udata; unsigned u; herr_t ret_value = SUCCEED; /* Return value */ @@ -658,7 +596,7 @@ static herr_t H5D_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void *_lt_key, void *_udata) { - H5D_istore_ud1_t *udata = (H5D_istore_ud1_t *) _udata; + 
H5D_chunk_ud_t *udata = (H5D_chunk_ud_t *) _udata; const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *) _lt_key; unsigned u; herr_t ret_value = SUCCEED; /* Return value */ @@ -726,7 +664,7 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key, H5D_istore_key_t *lt_key = (H5D_istore_key_t *) _lt_key; H5D_istore_key_t *md_key = (H5D_istore_key_t *) _md_key; H5D_istore_key_t *rt_key = (H5D_istore_key_t *) _rt_key; - H5D_istore_ud1_t *udata = (H5D_istore_ud1_t *) _udata; + H5D_chunk_ud_t *udata = (H5D_chunk_ud_t *) _udata; int cmp; unsigned u; H5B_ins_t ret_value; @@ -825,738 +763,43 @@ done: /*------------------------------------------------------------------------- - * Function: H5D_istore_iter_allocated - * - * Purpose: Simply counts the number of chunks for a dataset. - * - * Return: Success: Non-negative - * - * Failure: Negative - * - * Programmer: Robb Matzke - * Wednesday, April 21, 1999 - * - *------------------------------------------------------------------------- - */ -/* ARGSUSED */ -static int -H5D_istore_iter_allocated (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt_key, haddr_t UNUSED addr, - const void UNUSED *_rt_key, void *_udata) -{ - H5D_istore_it_ud1_t *udata = (H5D_istore_it_ud1_t *)_udata; - const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_iter_allocated) - - udata->total_storage += lt_key->nbytes; - - FUNC_LEAVE_NOAPI(H5_ITER_CONT) -} /* H5D_istore_iter_allocated() */ - -/*------------------------------------------------------------------------- - * Function: H5D_istore_iter_chunkmap - * - * Purpose: obtain chunk address and the corresponding index - * - * Return: Success: Non-negative - * - * Failure: Negative - * - * Programmer: Kent Yang - * Tuesday, November 15, 2005 - * - *------------------------------------------------------------------------- - */ -/* ARGSUSED */ -static int -H5D_istore_iter_chunkmap (H5F_t UNUSED *f, hid_t UNUSED 
dxpl_id, const void *_lt_key, haddr_t addr, - const void UNUSED *_rt_key, void *_udata) -{ - H5D_istore_it_ud5_t *udata = (H5D_istore_it_ud5_t *)_udata; - const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; - unsigned rank = udata->common.mesg->u.chunk.ndims - 1; - hsize_t chunk_index; - int ret_value = H5_ITER_CONT; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_chunkmap) - - if(H5V_chunk_index(rank, lt_key->offset, udata->common.mesg->u.chunk.dim, udata->down_chunks, &chunk_index) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") - - udata->chunk_addr[chunk_index] = addr; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5D_istore_iter_chunkmap() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_iter_dump - * - * Purpose: If the UDATA.STREAM member is non-null then debugging - * information is written to that stream. - * - * Return: Success: Non-negative - * - * Failure: Negative - * - * Programmer: Robb Matzke - * Wednesday, April 21, 1999 - * - *------------------------------------------------------------------------- - */ -/* ARGSUSED */ -static int -H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt_key, haddr_t UNUSED addr, - const void UNUSED *_rt_key, void *_udata) -{ - H5D_istore_it_ud2_t *udata = (H5D_istore_it_ud2_t *)_udata; - const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; - unsigned u; - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_iter_dump) - - if(udata->stream) { - if(!udata->header_displayed) { - HDfprintf(udata->stream, " Flags Bytes Address Logical Offset\n"); - HDfprintf(udata->stream, " ========== ======== ========== ==============================\n"); - - /* Set flag that the headers has been printed */ - udata->header_displayed = TRUE; - } /* end if */ - HDfprintf(udata->stream, " 0x%08x %8Zu %10a [", lt_key->filter_mask, lt_key->nbytes, addr); - for(u = 0; u < 
udata->common.mesg->u.chunk.ndims; u++) - HDfprintf(udata->stream, "%s%Hd", (u ? ", " : ""), lt_key->offset[u]); - HDfputs("]\n", udata->stream); - } /* end if */ - - FUNC_LEAVE_NOAPI(H5_ITER_CONT) -} /* H5D_istore_iter_dump() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_iter_copy - * - * Purpose: copy chunked raw data from source file and insert to the - * B-tree node in the destination file - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Peter Cao - * August 20, 2005 - * - *------------------------------------------------------------------------- - */ -static int -H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key, - haddr_t addr_src, const void UNUSED *_rt_key, void *_udata) -{ - H5D_istore_it_ud4_t *udata = (H5D_istore_it_ud4_t *)_udata; - const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; - H5D_istore_ud1_t udata_dst; /* User data about new destination chunk */ - hbool_t is_vlen = FALSE; - hbool_t fix_ref = FALSE; - - /* General information about chunk copy */ - void *bkg = udata->bkg; - void *buf = udata->buf; - size_t buf_size = udata->buf_size; - H5O_pline_t *pline = udata->pline; - - /* needed for commpressed variable length data */ - hbool_t is_compressed = FALSE; - H5Z_EDC_t edc_read = H5Z_NO_EDC; - size_t nbytes; /* Size of chunk (in bytes) */ - H5Z_cb_t cb_struct; - - int ret_value = H5_ITER_CONT; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_copy) - - /* Get 'size_t' local value for number of bytes in chunk */ - H5_ASSIGN_OVERFLOW(nbytes, lt_key->nbytes, uint32_t, size_t); - - /* Check parameter for type conversion */ - if(udata->do_convert) { - if(H5T_detect_class(udata->dt_src, H5T_VLEN) > 0) - is_vlen = TRUE; - else if((H5T_get_class(udata->dt_src, FALSE) == H5T_REFERENCE) && (udata->file_src != udata->file_dst)) - fix_ref = TRUE; - else - HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy 
dataset elements") - } /* end if */ - - /* Check for filtered chunks */ - if(pline && pline->nused) { - is_compressed = TRUE; - cb_struct.func = NULL; /* no callback function when failed */ - } /* end if */ - - /* Resize the buf if it is too small to hold the data */ - if(nbytes > buf_size) { - void *new_buf; /* New buffer for data */ - - /* Re-allocate memory for copying the chunk */ - if(NULL == (new_buf = H5MM_realloc(udata->buf, nbytes))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for raw data chunk") - udata->buf = new_buf; - if(udata->bkg) { - if(NULL == (new_buf = H5MM_realloc(udata->bkg, nbytes))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for raw data chunk") - udata->bkg = new_buf; - if(!udata->cpy_info->expand_ref) - HDmemset((uint8_t *)udata->bkg + buf_size, 0, (size_t)(nbytes - buf_size)); - - bkg = udata->bkg; - } /* end if */ - - buf = udata->buf; - udata->buf_size = buf_size = nbytes; - } /* end if */ - - /* read chunk data from the source file */ - if(H5F_block_read(f_src, H5FD_MEM_DRAW, addr_src, nbytes, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk") - - /* Need to uncompress variable-length & reference data elements */ - if(is_compressed && (is_vlen || fix_ref)) { - unsigned filter_mask = lt_key->filter_mask; - - if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, edc_read, cb_struct, &nbytes, &buf_size, &buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed") - } /* end if */ - - /* Perform datatype conversion, if necessary */ - if(is_vlen) { - H5T_path_t *tpath_src_mem = udata->tpath_src_mem; - H5T_path_t *tpath_mem_dst = udata->tpath_mem_dst; - H5S_t *buf_space = udata->buf_space; - hid_t tid_src = udata->tid_src; - hid_t tid_dst = udata->tid_dst; - hid_t tid_mem = udata->tid_mem; - void *reclaim_buf = udata->reclaim_buf; - size_t reclaim_buf_size = 
udata->reclaim_buf_size; - - /* Convert from source file to memory */ - H5_CHECK_OVERFLOW(udata->nelmts, uint32_t, size_t); - if(H5T_convert(tpath_src_mem, tid_src, tid_mem, (size_t)udata->nelmts, (size_t)0, (size_t)0, buf, NULL, dxpl_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed") - - /* Copy into another buffer, to reclaim memory later */ - HDmemcpy(reclaim_buf, buf, reclaim_buf_size); - - /* Set background buffer to all zeros */ - HDmemset(bkg, 0, buf_size); - - /* Convert from memory to destination file */ - if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, udata->nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed") - - /* Reclaim space from variable length data */ - if(H5D_vlen_reclaim(tid_mem, buf_space, H5P_DATASET_XFER_DEFAULT, reclaim_buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADITER, H5_ITER_ERROR, "unable to reclaim variable-length data") - } /* end if */ - else if(fix_ref) { - /* Check for expanding references */ - /* (background buffer has already been zeroed out, if not expanding) */ - if(udata->cpy_info->expand_ref) { - size_t ref_count; - - /* Determine # of reference elements to copy */ - ref_count = nbytes / H5T_get_size(udata->dt_src); - - /* Copy the reference elements */ - if(H5O_copy_expand_ref(f_src, buf, dxpl_id, udata->file_dst, bkg, ref_count, H5T_get_ref_type(udata->dt_src), udata->cpy_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy reference attribute") - } /* end if */ - - /* After fix ref, copy the new reference elements to the buffer to write out */ - HDmemcpy(buf, bkg, buf_size); - } /* end if */ - - /* Set up destination chunk callback information for insertion */ - udata_dst.common.mesg = udata->common.mesg; /* Share this pointer for a short while */ - udata_dst.common.offset = lt_key->offset; - udata_dst.nbytes = lt_key->nbytes; - udata_dst.filter_mask = lt_key->filter_mask; 
- udata_dst.addr = HADDR_UNDEF; - - /* Need to compress variable-length & reference data elements before writing to file */ - if(is_compressed && (is_vlen || fix_ref) ) { - if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), edc_read, - cb_struct, &nbytes, &buf_size, &buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed") - H5_ASSIGN_OVERFLOW(udata_dst.nbytes, nbytes, size_t, uint32_t); - udata->buf = buf; - udata->buf_size = buf_size; - } /* end if */ - - /* Insert chunk into the destination Btree */ - if(H5B_insert(udata->file_dst, dxpl_id, H5B_ISTORE, udata->addr_dst, &udata_dst) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, H5_ITER_ERROR, "unable to allocate chunk") - - /* Write chunk data to destination file */ - HDassert(H5F_addr_defined(udata_dst.addr)); - if(H5F_block_write(udata->file_dst, H5FD_MEM_DRAW, udata_dst.addr, nbytes, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write raw data to file") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_iter_copy() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_cinfo_cache_reset - * - * Purpose: Reset the cached chunk info - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * November 27, 2007 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D_istore_cinfo_cache_reset(H5D_chunk_cached_t *last) -{ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_reset) - - /* Sanity check */ - HDassert(last); - - /* Indicate that the cached info is not valid */ - last->valid = FALSE; - - FUNC_LEAVE_NOAPI(SUCCEED) -} /* H5D_istore_cinfo_cache_reset() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_cinfo_cache_update - * - * Purpose: Update the cached chunk info - * - * Return: Non-negative on success/Negative on failure - * - 
* Programmer: Quincey Koziol - * November 27, 2007 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D_istore_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_istore_ud1_t *udata) -{ - unsigned u; /* Local index variable */ - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_update) - - /* Sanity check */ - HDassert(last); - HDassert(udata); - HDassert(udata->common.mesg); - HDassert(udata->common.offset); - - /* Stored the information to cache */ - for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) - last->offset[u] = udata->common.offset[u]; - last->nbytes = udata->nbytes; - last->filter_mask = udata->filter_mask; - last->addr = udata->addr; - - /* Indicate that the cached info is valid */ - last->valid = TRUE; - - FUNC_LEAVE_NOAPI(SUCCEED) -} /* H5D_istore_cinfo_cache_update() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_cinfo_cache_found - * - * Purpose: Look for chunk info in cache - * - * Return: TRUE/FALSE/FAIL - * - * Programmer: Quincey Koziol - * November 27, 2007 - * - *------------------------------------------------------------------------- - */ -static hbool_t -H5D_istore_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_istore_ud1_t *udata) -{ - hbool_t ret_value = FALSE; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_cinfo_cache_found) - - /* Sanity check */ - HDassert(last); - HDassert(udata); - HDassert(udata->common.mesg); - HDassert(udata->common.offset); - - /* Check if the cached information is what is desired */ - if(last->valid) { - unsigned u; /* Local index variable */ - - /* Check that the offset is the same */ - for(u = 0; u < udata->common.mesg->u.chunk.ndims; u++) - if(last->offset[u] != udata->common.offset[u]) - HGOTO_DONE(FALSE) - - /* Retrieve the information from the cache */ - udata->nbytes = last->nbytes; - udata->filter_mask = last->filter_mask; - udata->addr = last->addr; - 
- /* Indicate that the data was found */ - HGOTO_DONE(TRUE) - } /* end if */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5D_istore_cinfo_cache_found() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_init - * - * Purpose: Initialize the raw data chunk cache for a dataset. This is - * called when the dataset is initialized. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Monday, May 18, 1998 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_init(const H5F_t *f, const H5D_t *dset) -{ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_init, FAIL) - - if(H5F_RDCC_NBYTES(f) > 0 && H5F_RDCC_NELMTS(f) > 0) { - rdcc->nbytes = H5F_RDCC_NBYTES(f); - rdcc->nslots = H5F_RDCC_NELMTS(f); - rdcc->slot = H5FL_SEQ_CALLOC (H5D_rdcc_ent_ptr_t,rdcc->nslots); - if(NULL==rdcc->slot) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") - - /* Reset any cached chunk info for this dataset */ - H5D_istore_cinfo_cache_reset(&(rdcc->last)); - } /* end if */ - - /* Allocate the shared structure */ - if(H5D_istore_shared_create(f, &dset->shared->layout) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_init() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_flush_entry - * - * Purpose: Writes a chunk to disk. If RESET is non-zero then the - * entry is cleared -- it's slightly faster to flush a chunk if - * the RESET flag is turned on because it results in one fewer - * memory copy. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_t reset) -{ - void *buf = NULL; /*temporary buffer */ - hbool_t point_of_no_return = FALSE; - herr_t ret_value = SUCCEED; /*return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_flush_entry) - - assert(io_info); - assert(io_info->dset); - assert(ent); - assert(!ent->locked); - - buf = ent->chunk; - if(ent->dirty) { - H5D_istore_ud1_t udata; /*pass through B-tree */ - - /* Set up user data for B-tree callbacks */ - udata.common.mesg = &io_info->dset->shared->layout; - udata.common.offset = ent->offset; - udata.filter_mask = 0; - udata.nbytes = ent->chunk_size; - udata.addr = HADDR_UNDEF; - - /* Should the chunk be filtered before writing it to disk? */ - if(io_info->dset->shared->dcpl_cache.pline.nused) { - size_t alloc = ent->alloc_size; /* Bytes allocated for BUF */ - size_t nbytes; /* Chunk size (in bytes) */ - - if(!reset) { - /* - * Copy the chunk to a new buffer before running it through - * the pipeline because we'll want to save the original buffer - * for later. - */ - H5_ASSIGN_OVERFLOW(alloc, ent->chunk_size, uint32_t, size_t); - if(NULL == (buf = H5MM_malloc(alloc))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline") - HDmemcpy(buf, ent->chunk, alloc); - } /* end if */ - else { - /* - * If we are reseting and something goes wrong after this - * point then it's too late to recover because we may have - * destroyed the original data by calling H5Z_pipeline(). - * The only safe option is to continue with the reset - * even if we can't write the data to disk. 
- */ - point_of_no_return = TRUE; - ent->chunk = NULL; - } /* end else */ - H5_ASSIGN_OVERFLOW(nbytes, udata.nbytes, uint32_t, size_t); - if(H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), io_info->dxpl_cache->err_detect, - io_info->dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed") - H5_ASSIGN_OVERFLOW(udata.nbytes, nbytes, size_t, uint32_t); - } /* end if */ - - /* - * Create the chunk it if it doesn't exist, or reallocate the chunk if - * its size changed. Then write the data into the file. - */ - if(H5B_insert(io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, &udata)<0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk") - H5_CHECK_OVERFLOW(udata.nbytes, uint32_t, size_t); - if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, udata.addr, (size_t)udata.nbytes, io_info->dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") - - /* Cache the chunk's info, in case it's accessed again shortly */ - H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata); - - /* Mark cache entry as clean */ - ent->dirty = FALSE; -#ifdef H5D_ISTORE_DEBUG - io_info->dset->shared->cache.chunk.nflushes++; -#endif /* H5D_ISTORE_DEBUG */ - } /* end if */ - - /* Reset, but do not free or removed from list */ - if(reset) { - point_of_no_return = FALSE; - if(buf == ent->chunk) - buf = NULL; - if(ent->chunk != NULL) - ent->chunk = (uint8_t *)H5D_istore_chunk_xfree(ent->chunk, &(io_info->dset->shared->dcpl_cache.pline)); - } /* end if */ - -done: - /* Free the temp buffer only if it's different than the entry chunk */ - if(buf != ent->chunk) - H5MM_xfree(buf); - - /* - * If we reached the point of no return then we have no choice but to - * reset the entry. This can only happen if RESET is true but the - * output pipeline failed. 
Do not free the entry or remove it from the - * list. - */ - if(ret_value < 0 && point_of_no_return) { - if(ent->chunk) - ent->chunk = (uint8_t *)H5D_istore_chunk_xfree(ent->chunk, &(io_info->dset->shared->dcpl_cache.pline)); - } /* end if */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_flush_entry() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_preempt - * - * Purpose: Preempts the specified entry from the cache, flushing it to - * disk if necessary. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D_istore_preempt(const H5D_io_info_t *io_info, H5D_rdcc_ent_t * ent, hbool_t flush) -{ - H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); - herr_t ret_value=SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_preempt) - - assert(io_info); - assert(ent); - assert(!ent->locked); - assert(ent->idx < rdcc->nslots); - - if(flush) { - /* Flush */ - if(H5D_istore_flush_entry(io_info, ent, TRUE) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") - } /* end if */ - else { - /* Don't flush, just free chunk */ - if(ent->chunk != NULL) - ent->chunk = (uint8_t *)H5D_istore_chunk_xfree(ent->chunk, &(io_info->dset->shared->dcpl_cache.pline)); - } /* end else */ - - /* Unlink from list */ - if(ent->prev) - ent->prev->next = ent->next; - else - rdcc->head = ent->next; - if(ent->next) - ent->next->prev = ent->prev; - else - rdcc->tail = ent->prev; - ent->prev = ent->next = NULL; - - /* Remove from cache */ - rdcc->slot[ent->idx] = NULL; - ent->idx = UINT_MAX; - rdcc->nbytes -= ent->chunk_size; - --rdcc->nused; - - /* Free */ - H5FL_FREE(H5D_rdcc_ent_t, ent); - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_preempt() */ - - 
-/*------------------------------------------------------------------------- - * Function: H5D_istore_flush - * - * Purpose: Writes all dirty chunks to disk and optionally preempts them - * from the cache. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_flush(H5D_t *dset, hid_t dxpl_id, unsigned flags) -{ - H5D_io_info_t io_info; /* Temporary I/O info object */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - unsigned nerrors = 0; - H5D_rdcc_ent_t *ent, *next; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_flush, FAIL) - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") - - /* Construct dataset I/O info */ - H5D_BUILD_IO_INFO_WRT(&io_info, dset, dxpl_cache, dxpl_id, NULL, NULL); - - /* Loop over all entries in the chunk cache */ - for(ent = rdcc->head; ent; ent = next) { - next = ent->next; - if((flags & H5F_FLUSH_INVALIDATE)) { - if(H5D_istore_preempt(&io_info, ent, TRUE) < 0) - nerrors++; - } else { - if(H5D_istore_flush_entry(&io_info, ent, FALSE) < 0) - nerrors++; - } - } /* end for */ - if(nerrors) - HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_flush() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_dest + * Function: H5D_istore_remove * - * Purpose: Destroy the entire chunk cache by flushing dirty entries, - * preempting all entries, and freeing the cache itself. 
+ * Purpose: Removes chunks that are no longer necessary in the B-tree. * * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 + * Programmer: Robb Matzke + * Pedro Vicente, pvn@ncsa.uiuc.edu + * March 28, 2002 * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_dest(H5D_t *dset, hid_t dxpl_id) +/* ARGSUSED */ +static H5B_ins_t +H5D_istore_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out */ , + hbool_t *lt_key_changed /*out */ , + void UNUSED * _udata /*in,out */ , + void UNUSED * _rt_key /*in,out */ , + hbool_t *rt_key_changed /*out */ ) { - H5D_io_info_t io_info; /* Temporary I/O info object */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - int nerrors = 0; - H5D_rdcc_ent_t *ent = NULL, *next = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_dest, FAIL) - - HDassert(dset); - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") - - /* Construct dataset I/O info */ - H5D_BUILD_IO_INFO_WRT(&io_info, dset, dxpl_cache, dxpl_id, NULL, NULL); + H5D_istore_key_t *lt_key = (H5D_istore_key_t *)_lt_key; + H5B_ins_t ret_value=H5B_INS_REMOVE; /* Return value */ - /* Flush all the cached chunks */ - for(ent = rdcc->head; ent; ent = next) { -#ifdef H5D_ISTORE_DEBUG - HDfputc('c', stderr); - HDfflush(stderr); -#endif - next = ent->next; - if(H5D_istore_preempt(&io_info, ent, TRUE) < 0) - nerrors++; - } /* end for */ - if(nerrors) - HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_remove) - if(rdcc->slot) - H5FL_SEQ_FREE(H5D_rdcc_ent_ptr_t, rdcc->slot); - 
HDmemset(rdcc, 0, sizeof(H5D_rdcc_t)); + /* Remove raw data chunk from file */ + H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t); + if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk") - /* Free the raw B-tree node buffer */ - if(dset->shared->layout.u.chunk.btree_shared == NULL) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") - if(H5RC_DEC(dset->shared->layout.u.chunk.btree_shared) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") + /* Mark keys as unchanged */ + *lt_key_changed = FALSE; + *rt_key_changed = FALSE; done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_dest() */ +} /* end H5D_istore_remove() */ /*------------------------------------------------------------------------- @@ -1572,41 +815,28 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_istore_shared_create (const H5F_t *f, H5O_layout_t *layout) +H5D_istore_shared_create(const H5F_t *f, H5O_layout_t *layout) { H5B_shared_t *shared; /* Shared B-tree node info */ - size_t u; /* Local index variable */ - herr_t ret_value=SUCCEED; /* Return value */ + size_t sizeof_rkey; /* Size of raw (disk) key */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_istore_shared_create) - /* Allocate space for the shared structure */ - if(NULL==(shared=H5FL_MALLOC(H5B_shared_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info") - - /* Set up the "global" information for this file's groups */ - shared->type= H5B_ISTORE; - shared->two_k=2*H5F_KVALUE(f,H5B_ISTORE); - shared->sizeof_rkey = 4 + /*storage size */ - 4 + /*filter mask */ - layout->u.chunk.ndims*8; /*dimension indices */ - assert(shared->sizeof_rkey); - shared->sizeof_rnode = H5B_nodesize(f, shared, &shared->sizeof_keys); - assert(shared->sizeof_rnode); - 
if(NULL==(shared->page=H5FL_BLK_MALLOC(chunk_page,shared->sizeof_rnode))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page") -#ifdef H5_CLEAR_MEMORY -HDmemset(shared->page, 0, shared->sizeof_rnode); -#endif /* H5_CLEAR_MEMORY */ - if(NULL==(shared->nkey=H5FL_SEQ_MALLOC(size_t,(size_t)(2*H5F_KVALUE(f,H5B_ISTORE)+1)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page") - - /* Initialize the offsets into the native key buffer */ - for(u=0; u<(2*H5F_KVALUE(f,H5B_ISTORE)+1); u++) - shared->nkey[u]=u*H5B_ISTORE[0].sizeof_nkey; + /* Set the raw key size */ + sizeof_rkey = 4 + /*storage size */ + 4 + /*filter mask */ + layout->u.chunk.ndims * 8; /*dimension indices */ + + /* Allocate & initialize global info for the shared structure */ + if(NULL == (shared = H5B_shared_new(f, H5B_ISTORE, sizeof_rkey))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info") + + /* Set up the "local" information for this dataset's chunks */ + /* <none> */ /* Make shared B-tree info reference counted */ - if(NULL==(layout->u.chunk.btree_shared=H5RC_create(shared,H5D_istore_shared_free))) + if(NULL == (layout->u.chunk.btree_shared = H5RC_create(shared, H5B_shared_free))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info") done: @@ -1615,538 +845,49 @@ done: /*------------------------------------------------------------------------- - * Function: H5D_istore_shared_free + * Function: H5D_istore_idx_init * - * Purpose: Free B-tree shared info - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Thursday, July 8, 2004 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D_istore_shared_free (void *_shared) -{ - H5B_shared_t *shared = (H5B_shared_t *)_shared; - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_shared_free) - - /* Free the raw 
B-tree node buffer */ - (void)H5FL_BLK_FREE(chunk_page, shared->page); - - /* Free the B-tree native key offsets buffer */ - H5FL_SEQ_FREE(size_t, shared->nkey); - - /* Free the shared B-tree info */ - H5FL_FREE(H5B_shared_t, shared); - - FUNC_LEAVE_NOAPI(SUCCEED) -} /* end H5D_istore_shared_free() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_prune - * - * Purpose: Prune the cache by preempting some things until the cache has - * room for something which is SIZE bytes. Only unlocked - * entries are considered for preemption. + * Purpose: Initialize the indexing information for a dataset. * * Return: Non-negative on success/Negative on failure * * Programmer: Robb Matzke - * Thursday, May 21, 1998 + * Monday, May 18, 1998 * *------------------------------------------------------------------------- */ static herr_t -H5D_istore_prune (const H5D_io_info_t *io_info, size_t size) -{ - int i, j, nerrors=0; - const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); - size_t total = rdcc->nbytes; - const int nmeth=2; /*number of methods */ - int w[1]; /*weighting as an interval */ - H5D_rdcc_ent_t *p[2], *cur; /*list pointers */ - H5D_rdcc_ent_t *n[2]; /*list next pointers */ - herr_t ret_value=SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_prune) - - /* - * Preemption is accomplished by having multiple pointers (currently two) - * slide down the list beginning at the head. Pointer p(N+1) will start - * traversing the list when pointer pN reaches wN percent of the original - * list. In other words, preemption method N gets to consider entries in - * approximate least recently used order w0 percent before method N+1 - * where 100% means tha method N will run to completion before method N+1 - * begins. The pointers participating in the list traversal are each - * given a chance at preemption before any of the pointers are advanced. 
- */ - w[0] = (int)(rdcc->nused * H5F_RDCC_W0(io_info->dset->oloc.file)); - p[0] = rdcc->head; - p[1] = NULL; - - while ((p[0] || p[1]) && rdcc->nbytes+size>total) { - - /* Introduce new pointers */ - for(i = 0; i < (nmeth - 1); i++) - if(0 == w[i]) - p[i + 1] = rdcc->head; - - /* Compute next value for each pointer */ - for(i = 0; i < nmeth; i++) - n[i] = p[i] ? p[i]->next : NULL; - - /* Give each method a chance */ - for(i = 0; i < nmeth && (rdcc->nbytes + size) > total; i++) { - if(0 == i && p[0] && !p[0]->locked && - ((0 == p[0]->rd_count && 0 == p[0]->wr_count) || - (0 == p[0]->rd_count && p[0]->chunk_size == p[0]->wr_count) || - (p[0]->chunk_size == p[0]->rd_count && 0 == p[0]->wr_count))) { - /* - * Method 0: Preempt entries that have been completely written - * and/or completely read but not entries that are partially - * written or partially read. - */ - cur = p[0]; -#ifdef H5D_ISTORE_DEBUG - HDputc('.', stderr); - HDfflush(stderr); -#endif - - } else if (1==i && p[1] && !p[1]->locked) { - /* - * Method 1: Preempt the entry without regard to - * considerations other than being locked. This is the last - * resort preemption. - */ - cur = p[1]; -#ifdef H5D_ISTORE_DEBUG - HDputc(':', stderr); - HDfflush(stderr); -#endif - - } else { - /* Nothing to preempt at this point */ - cur= NULL; - } - - if (cur) { - for (j=0; j<nmeth; j++) { - if (p[j]==cur) - p[j] = NULL; - if (n[j]==cur) - n[j] = cur->next; - } - if (H5D_istore_preempt(io_info, cur, TRUE)<0) - nerrors++; - } - } - - /* Advance pointers */ - for (i=0; i<nmeth; i++) - p[i] = n[i]; - for (i=0; i<nmeth-1; i++) - w[i] -= 1; - } - - if (nerrors) - HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to preempt one or more raw data cache entry") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_prune() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_lock - * - * Purpose: Return a pointer to a dataset chunk. 
The pointer points - * directly into the chunk cache and should not be freed - * by the caller but will be valid until it is unlocked. The - * input value IDX_HINT is used to speed up cache lookups and - * it's output value should be given to H5F_istore_unlock(). - * IDX_HINT is ignored if it is out of range, and if it points - * to the wrong entry then we fall back to the normal search - * method. - * - * If RELAX is non-zero and the chunk isn't in the cache then - * don't try to read it from the file, but just allocate an - * uninitialized buffer to hold the result. This is intended - * for output functions that are about to overwrite the entire - * chunk. - * - * Return: Success: Ptr to a file chunk. - * - * Failure: NULL - * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 - * - *------------------------------------------------------------------------- - */ -void * -H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata, - hbool_t relax, unsigned *idx_hint/*in,out*/) -{ - H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ - const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ - const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ - const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ - H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ - hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/ - H5D_rdcc_ent_t *ent = NULL; /*cache entry */ - unsigned idx = 0; /*hash index number */ - hbool_t found = FALSE; /*already in cache? 
*/ - size_t chunk_size; /*size of a chunk */ - void *chunk = NULL; /*the file chunk */ - unsigned u; /*counters */ - void *ret_value; /*return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_lock) - - HDassert(io_info); - HDassert(dset); - HDassert(io_info->dxpl_cache); - HDassert(io_info->store); - HDassert(TRUE == H5P_isa_class(io_info->dxpl_id, H5P_DATASET_XFER)); - - /* Get the chunk's size */ - HDassert(layout->u.chunk.size > 0); - H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); - - /* Search for the chunk in the cache */ - if(rdcc->nslots > 0) { - idx = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index); - ent = rdcc->slot[idx]; - - if(ent) - for(u = 0, found = TRUE; u < layout->u.chunk.ndims; u++) - if(io_info->store->chunk.offset[u] != ent->offset[u]) { - found = FALSE; - break; - } /* end if */ - } /* end if */ - - if(found) { - /* - * Already in the cache. Count a hit. - */ -#ifdef H5D_ISTORE_DEBUG - rdcc->nhits++; -#endif /* H5D_ISTORE_DEBUG */ - } /* end if */ - else if(relax) { - /* - * Not in the cache, but we're about to overwrite the whole thing - * anyway, so just allocate a buffer for it but don't initialize that - * buffer with the file contents. Count this as a hit instead of a - * miss because we saved ourselves lots of work. - */ -#ifdef H5D_ISTORE_DEBUG - HDputc('w', stderr); - HDfflush(stderr); - rdcc->nhits++; -#endif - if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_size, pline))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") - - /* In the case that some dataset functions look through this data, - * clear it to all 0s. */ - HDmemset(chunk, 0, chunk_size); - } /* end if */ - else { - H5D_istore_ud1_t tmp_udata; /*B-tree pass-through */ - haddr_t chunk_addr; /* Address of chunk on disk */ - - if(udata!=NULL) - chunk_addr = udata->addr; - else { - /* Point at temporary storage for B-tree pass through */ - udata = &tmp_udata; - - /* - * Not in the cache. 
Read it from the file and count this as a miss - * if it's in the file or an init if it isn't. - */ - chunk_addr = H5D_istore_get_addr(io_info, udata); - } /* end else */ - - /* Check if the chunk exists on disk */ - if(H5F_addr_defined(chunk_addr)) { - size_t chunk_alloc; /* Allocated chunk size */ - - /* Chunk size on disk isn't [likely] the same size as the final chunk - * size in memory, so allocate memory big enough. */ - H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t); - if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_alloc, pline))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") - if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk") - - if(pline->nused) { - if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect, - io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed") - H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t); - } /* end if */ -#ifdef H5D_ISTORE_DEBUG - rdcc->nmisses++; -#endif /* H5D_ISTORE_DEBUG */ - } else { - H5D_fill_value_t fill_status; - -#ifdef OLD_WAY - /* Clear the error stack from not finding the chunk on disk */ - H5E_clear_stack(NULL); -#endif /* OLD_WAY */ - - /* Chunk size on disk isn't [likely] the same size as the final chunk - * size in memory, so allocate memory big enough. 
*/ - if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_size, pline))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") - - if(H5P_is_fill_value_defined(fill, &fill_status) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined") - - if(fill->fill_time == H5D_FILL_TIME_ALLOC || - (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) { - /* - * The chunk doesn't exist in the file. Replicate the fill - * value throughout the chunk, if the fill value is defined. - */ - - /* Initialize the fill value buffer */ - /* (use the compact dataset storage buffer as the fill value buffer) */ - if(H5D_fill_init(&fb_info, chunk, FALSE, - NULL, NULL, NULL, NULL, - &dset->shared->dcpl_cache.fill, dset->shared->type, - dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info") - fb_info_init = TRUE; - - /* Check for VL datatype & non-default fill value */ - if(fb_info.has_vlen_fill_type) - /* Fill the buffer with VL datatype fill values */ - if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer") - } /* end if */ - else - HDmemset(chunk, 0, chunk_size); -#ifdef H5D_ISTORE_DEBUG - rdcc->ninits++; -#endif /* H5D_ISTORE_DEBUG */ - } /* end else */ - } /* end else */ - HDassert(found || chunk_size > 0); - - if(!found && rdcc->nslots > 0 && chunk_size <= rdcc->nbytes && - (!ent || !ent->locked)) { - /* - * Add the chunk to the cache only if the slot is not already locked. - * Preempt enough things from the cache to make room. 
- */ - if(ent) { -#ifdef H5D_ISTORE_DEBUG - HDputc('#', stderr); - HDfflush(stderr); -#endif - if(H5D_istore_preempt(io_info, ent, TRUE) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache") - } /* end if */ - if(H5D_istore_prune(io_info, chunk_size) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache") - - /* Create a new entry */ - ent = H5FL_MALLOC(H5D_rdcc_ent_t); - ent->locked = 0; - ent->dirty = FALSE; - H5_ASSIGN_OVERFLOW(ent->chunk_size, chunk_size, size_t, uint32_t); - ent->alloc_size = chunk_size; - for(u = 0; u < layout->u.chunk.ndims; u++) - ent->offset[u] = io_info->store->chunk.offset[u]; - H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t); - H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t); - ent->chunk = (uint8_t *)chunk; - - /* Add it to the cache */ - HDassert(NULL == rdcc->slot[idx]); - rdcc->slot[idx] = ent; - ent->idx = idx; - rdcc->nbytes += chunk_size; - rdcc->nused++; - - /* Add it to the linked list */ - ent->next = NULL; - if(rdcc->tail) { - rdcc->tail->next = ent; - ent->prev = rdcc->tail; - rdcc->tail = ent; - } /* end if */ - else { - rdcc->head = rdcc->tail = ent; - ent->prev = NULL; - } /* end else */ - found = TRUE; - } else if(!found) { - /* - * The chunk is larger than the entire cache so we don't cache it. - * This is the reason all those arguments have to be repeated for the - * unlock function. - */ - ent = NULL; - idx = UINT_MAX; - } else { - /* - * The chunk is not at the beginning of the cache; move it backward - * by one slot. This is how we implement the LRU preemption - * algorithm. 
- */ - HDassert(ent); - if(ent->next) { - if(ent->next->next) - ent->next->next->prev = ent; - else - rdcc->tail = ent; - ent->next->prev = ent->prev; - if(ent->prev) - ent->prev->next = ent->next; - else - rdcc->head = ent->next; - ent->prev = ent->next; - ent->next = ent->next->next; - ent->prev->next = ent; - } /* end if */ - } /* end else */ - - /* Lock the chunk into the cache */ - if(ent) { - HDassert(!ent->locked); - ent->locked = TRUE; - chunk = ent->chunk; - } /* end if */ - - if(idx_hint) - *idx_hint = idx; - - /* Set return value */ - ret_value = chunk; - -done: - /* Release the fill buffer info, if it's been initialized */ - if(fb_info_init && H5D_fill_term(&fb_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, NULL, "Can't release fill buffer info") - - /* Release the chunk allocated, on error */ - if(!ret_value) - if(chunk) - chunk = H5D_istore_chunk_xfree(chunk, pline); - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_lock() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_unlock - * - * Purpose: Unlocks a previously locked chunk. The LAYOUT, COMP, and - * OFFSET arguments should be the same as for H5F_rdcc_lock(). - * The DIRTY argument should be set to non-zero if the chunk has - * been modified since it was locked. The IDX_HINT argument is - * the returned index hint from the lock operation and BUF is - * the return value from the lock. - * - * The NACCESSED argument should be the number of bytes accessed - * for reading or writing (depending on the value of DIRTY). - * It's only purpose is to provide additional information to the - * preemption policy. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Robb Matzke - * Thursday, May 21, 1998 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_unlock(const H5D_io_info_t *io_info, - hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed) +H5D_istore_idx_init(const H5D_chk_idx_info_t *idx_info) { - const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */ - const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); - H5D_rdcc_ent_t *ent = NULL; - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_unlock) + herr_t ret_value = SUCCEED; /* Return value */ - assert(io_info); + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_init) - if(UINT_MAX == idx_hint) { - /* - * It's not in the cache, probably because it's too big. If it's - * dirty then flush it to disk. In any case, free the chunk. - * Note: we have to copy the layout and filter messages so we - * don't discard the `const' qualifier. 
- */ - if (dirty) { - H5D_rdcc_ent_t x; - - HDmemset(&x, 0, sizeof(x)); - x.dirty = TRUE; - HDassert(sizeof(x.offset[0]) == sizeof(io_info->store->chunk.offset[0])); - HDmemcpy(x.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(x.offset[0])); - HDassert(layout->u.chunk.size > 0); - x.chunk_size = layout->u.chunk.size; - H5_ASSIGN_OVERFLOW(x.alloc_size, x.chunk_size, uint32_t, size_t); - x.chunk = (uint8_t *)chunk; - - if(H5D_istore_flush_entry(io_info, &x, TRUE) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") - } /* end if */ - else { - if(chunk) - chunk = H5D_istore_chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline)); - } /* end else */ - } /* end if */ - else { - /* Sanity check */ - HDassert(idx_hint < rdcc->nslots); - HDassert(rdcc->slot[idx_hint]); - HDassert(rdcc->slot[idx_hint]->chunk == chunk); + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); - /* - * It's in the cache so unlock it. - */ - ent = rdcc->slot[idx_hint]; - HDassert(ent->locked); - if(dirty) { - ent->dirty = TRUE; - ent->wr_count -= MIN(ent->wr_count, naccessed); - } /* end if */ - else - ent->rd_count -= MIN(ent->rd_count, naccessed); - ent->locked = FALSE; - } /* end else */ + /* Allocate the shared structure */ + if(H5D_istore_shared_create(idx_info->f, idx_info->layout) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_unlock() */ +} /* end H5D_istore_idx_init() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_create + * Function: H5D_istore_idx_create * * Purpose: Creates a new indexed-storage B-tree and initializes the - * istore struct with information about the storage. The + * layout struct with information about the storage. The * struct should be immediately written to the object header. 
* - * This function must be called before passing ISTORE to any of + * This function must be called before passing LAYOUT to any of * the other indexed storage functions! * - * Return: Non-negative on success (with the ISTORE argument initialized + * Return: Non-negative on success (with the LAYOUT argument initialized * and ready to write to an object header). Negative on failure. * * Programmer: Robb Matzke @@ -2154,138 +895,70 @@ done: * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */) +static herr_t +H5D_istore_idx_create(const H5D_chk_idx_info_t *idx_info) { - H5D_istore_ud0_t udata; -#ifndef NDEBUG - unsigned u; -#endif - herr_t ret_value = SUCCEED; /* Return value */ + H5D_istore_ud0_t udata; /* User data for B-tree callback */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_istore_create, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_create) /* Check args */ - HDassert(f); - HDassert(layout && H5D_CHUNKED == layout->type); - HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); -#ifndef NDEBUG - for(u = 0; u < layout->u.chunk.ndims; u++) - HDassert(layout->u.chunk.dim[u] > 0); -#endif + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); /* Initialize "user" data for B-tree callbacks, etc. 
*/ - udata.mesg = layout; + udata.mesg = idx_info->layout; - if(H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->u.chunk.addr)/*out*/) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "can't create B-tree") + /* Create the v1 B-tree for the chunk index */ + if(H5B_create(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, &udata, &(idx_info->layout->u.chunk.addr)/*out*/) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create B-tree") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_create() */ +} /* end H5D_istore_idx_create() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_allocated - * - * Purpose: Return the number of bytes allocated in the file for storage - * of raw data under the specified B-tree (ADDR is the address - * of the B-tree). + * Function: H5D_istore_idx_insert * - * Return: Success: Number of bytes stored in all chunks. + * Purpose: Create the chunk it if it doesn't exist, or reallocate the + * chunk if its size changed. 
* - * Failure: 0 + * Return: Non-negative on success/Negative on failure * * Programmer: Robb Matzke - * Wednesday, April 21, 1999 - * - *------------------------------------------------------------------------- - */ -hsize_t -H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id) -{ - H5D_io_info_t io_info; /* Temporary I/O info object */ - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ - H5D_rdcc_ent_t *ent; /*cache entry */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ - H5D_istore_it_ud1_t udata; - hsize_t ret_value; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_allocated, 0) - - HDassert(dset); - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(dxpl_id,&dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache") - - /* Construct dataset I/O info */ - H5D_BUILD_IO_INFO_WRT(&io_info, dset, dxpl_cache, dxpl_id, NULL, NULL); - - /* Search for cached chunks that haven't been written out */ - for(ent = rdcc->head; ent; ent = ent->next) { - /* Flush the chunk out to disk, to make certain the size is correct later */ - if (H5D_istore_flush_entry(&io_info, ent, FALSE) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer") - } /* end for */ - - HDmemset(&udata, 0, sizeof udata); - udata.common.mesg = &dset->shared->layout; - if(H5B_iterate(dset->oloc.file, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree") - - /* Set return value */ - ret_value = udata.total_storage; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_allocated() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_chunkmap - * - * Purpose: obtain the chunk address and corresponding chunk index - * - * 
Return: Success: Non-negative on succeed. - * - * Failure: negative value - * - * Programmer: Kent Yang - * November 15, 2005 + * Thursday, May 21, 1998 * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_chunkmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[], - const hsize_t down_chunks[]) +static herr_t +H5D_istore_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) { - H5D_t *dset = io_info->dset; /* Local pointer to dataset info */ - H5D_istore_it_ud5_t udata; - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_istore_chunkmap, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_insert) - HDassert(dset); - - /* Set up user data for B-tree callback */ - HDmemset(&udata, 0, sizeof(udata)); - udata.common.mesg = &dset->shared->layout; - udata.down_chunks = down_chunks; - udata.chunk_addr = chunk_addr; + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); + HDassert(udata); - /* Build mapping of chunk addresses and indices */ - if(H5B_iterate(dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, H5D_istore_iter_chunkmap, dset->shared->layout.u.chunk.addr, &udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to iterate over chunk B-tree") + /* + * Create the chunk it if it doesn't exist, or reallocate the chunk if + * its size changed. + */ + if(H5B_insert(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, idx_info->layout->u.chunk.addr, udata) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_chunkmap() */ +} /* H5D_istore_idx_insert() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_get_addr + * Function: H5D_istore_idx_get_addr * * Purpose: Get the file address of a chunk if file space has been * assigned. 
Save the retrieved information in the udata @@ -2298,985 +971,169 @@ done: * *------------------------------------------------------------------------- */ -haddr_t -H5D_istore_get_addr(const H5D_io_info_t *io_info, H5D_istore_ud1_t *_udata) +static haddr_t +H5D_istore_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) { - H5D_istore_ud1_t tmp_udata; /* Information about a chunk */ - H5D_istore_ud1_t *udata; /* Pointer to information about a chunk */ haddr_t ret_value; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_get_addr) - - HDassert(io_info); - HDassert(io_info->dset); - HDassert(io_info->dset->shared->layout.u.chunk.ndims > 0); - HDassert(io_info->store->chunk.offset); - - /* Check for udata struct to return */ - udata = (_udata != NULL ? _udata : &tmp_udata); - - /* Initialize the information about the chunk we are looking for */ - udata->common.mesg = &(io_info->dset->shared->layout); - udata->common.offset = io_info->store->chunk.offset; - udata->nbytes = 0; - udata->filter_mask = 0; - udata->addr = HADDR_UNDEF; - - /* Check for cached information */ - if(!H5D_istore_cinfo_cache_found(&io_info->dset->shared->cache.chunk.last, udata)) { - /* Go get the chunk information */ - if(H5B_find(io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, udata) < 0) { - /* Note: don't push error on stack, leave that to next higher level, - * since many times the B-tree is searched in order to determine - * if a chunk exists in the B-tree or not. 
-QAK - */ + FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_idx_get_addr) + + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); + HDassert(idx_info->layout->u.chunk.ndims > 0); + HDassert(udata); + + /* Go get the chunk information */ + if(H5B_find(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, idx_info->layout->u.chunk.addr, udata) < 0) { + /* Note: don't push error on stack, leave that to next higher level, + * since many times the B-tree is searched in order to determine + * if a chunk exists in the B-tree or not. -QAK + */ #ifdef OLD_WAY - H5E_clear_stack(NULL); + H5E_clear_stack(NULL); - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, HADDR_UNDEF, "Can't locate chunk info") + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, HADDR_UNDEF, "Can't locate chunk info") #else /* OLD_WAY */ - /* Cache the fact that the chunk is not in the B-tree */ - H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, udata); - - HGOTO_DONE(HADDR_UNDEF) + HGOTO_DONE(HADDR_UNDEF) #endif /* OLD_WAY */ - } /* end if */ - - /* Cache the information retrieved */ - HDassert(H5F_addr_defined(udata->addr)); - H5D_istore_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, udata); - } /* end else */ + } /* end if */ /* Success! Set the return value */ ret_value = udata->addr; done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5D_istore_get_addr() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_chunk_alloc - * - * Purpose: Allocate space for a chunk in memory. This routine allocates - * memory space for non-filtered chunks from a block free list - * and uses malloc()/free() for filtered chunks. 
- * - * Return: Pointer to memory for chunk on success/NULL on failure - * - * Programmer: Quincey Koziol - * April 22, 2004 - * - *------------------------------------------------------------------------- - */ -static void * -H5D_istore_chunk_alloc(size_t size, const H5O_pline_t *pline) -{ - void *ret_value = NULL; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_chunk_alloc) - - HDassert(size); - HDassert(pline); - - if(pline->nused > 0) - ret_value = H5MM_malloc(size); - else - ret_value = H5FL_BLK_MALLOC(chunk, size); - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5D_istore_chunk_alloc() */ +} /* H5D_istore_idx_get_addr() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_chunk_xfree + * Function: H5D_istore_idx_iterate_cb * - * Purpose: Free space for a chunk in memory. This routine allocates - * memory space for non-filtered chunks from a block free list - * and uses malloc()/free() for filtered chunks. + * Purpose: Translate the B-tree specific chunk record into a generic + * form and make the callback to the generic chunk callback + * routine. * - * Return: NULL (never fails) + * Return: Success: Non-negative + * Failure: Negative * * Programmer: Quincey Koziol - * April 22, 2004 - * - *------------------------------------------------------------------------- - */ -static void * -H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline) -{ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_chunk_xfree) - - HDassert(pline); - - if(chk) { - if(pline->nused > 0) - H5MM_xfree(chk); - else - (void)H5FL_BLK_FREE(chunk, chk); - } /* end if */ - - FUNC_LEAVE_NOAPI(NULL) -} /* H5D_istore_chunk_xfree() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_allocate - * - * Purpose: Allocate file space for all chunks that are not allocated yet. - * Return SUCCEED if all needed allocation succeed, otherwise - * FAIL. 
- * - * Return: Non-negative on success/Negative on failure - * - * Note: Current implementation relies on cache_size being 0, - * thus no chunk is cached and written to disk immediately - * when a chunk is unlocked (via H5F_istore_unlock) - * This should be changed to do a direct flush independent - * of the cache value. - * - * Programmer: Albert Cheng - * June 26, 1998 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) -{ - H5D_io_info_t io_info; /* Dataset I/O info */ - H5D_storage_t store; /* Dataset storage information */ - hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ - size_t orig_chunk_size; /* Original size of chunk in bytes */ - unsigned filter_mask = 0; /* Filter mask for chunks that have them */ - const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ - const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ - const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ - H5D_fill_value_t fill_status; /* The fill value status */ - hbool_t should_fill = FALSE; /* Whether fill values should be written */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ -#ifdef H5_HAVE_PARALLEL - MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ - int mpi_rank = (-1); /* This process's rank */ - int mpi_code; /* MPI return code */ - hbool_t blocks_written = FALSE; /* Flag to indicate that chunk was actually written */ - hbool_t using_mpi = FALSE; /* Flag to indicate that the file is being accessed with an MPI-capable file driver */ -#endif /* H5_HAVE_PARALLEL */ - hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ - int space_ndims; /* Dataset's space rank */ - hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* 
Dataset's dataspace dimensions */ - H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ - hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ - hid_t data_dxpl_id; /* DXPL ID to use for raw data I/O operations */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_allocate, FAIL) - - /* Check args */ - HDassert(dset && H5D_CHUNKED == layout->type); - HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); - HDassert(H5F_addr_defined(layout->u.chunk.addr)); - HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER)); - - /* Retrieve the dataset dimensions */ - if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info") - space_dim[space_ndims] = layout->u.chunk.dim[space_ndims]; - -#ifdef H5_HAVE_PARALLEL - /* Retrieve MPI parameters */ - if(IS_H5FD_MPI(dset->oloc.file)) { - /* Get the MPI communicator */ - if(MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(dset->oloc.file))) - HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator") - - /* Get the MPI rank */ - if((mpi_rank = H5F_mpi_get_rank(dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank") - - /* Set the MPI-capable file driver flag */ - using_mpi = TRUE; - - /* Use the internal "independent" DXPL */ - data_dxpl_id = H5AC_ind_dxpl_id; - } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ - /* Use the DXPL we were given */ - data_dxpl_id = dxpl_id; -#ifdef H5_HAVE_PARALLEL - } /* end else */ -#endif /* H5_HAVE_PARALLEL */ - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(data_dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") - - /* Get original chunk size */ - H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t); - - /* Check the dataset's 
fill-value status */ - if(H5P_is_fill_value_defined(fill, &fill_status) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") - - /* If we are filling the dataset on allocation or "if set" and - * the fill value _is_ set, _and_ we are not overwriting the new blocks, - * or if there are any pipeline filters defined, - * set the "should fill" flag - */ - if((!full_overwrite && (fill->fill_time == H5D_FILL_TIME_ALLOC || - (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED))) - || pline->nused > 0) - should_fill = TRUE; - - /* Check if fill values should be written to chunks */ - if(should_fill) { - /* Initialize the fill value buffer */ - /* (delay allocating fill buffer for VL datatypes until refilling) */ - /* (casting away const OK - QAK) */ - if(H5D_fill_init(&fb_info, NULL, (hbool_t)(pline->nused > 0), - (H5MM_allocate_t)H5D_istore_chunk_alloc, (void *)pline, - (H5MM_free_t)H5D_istore_chunk_xfree, (void *)pline, - &dset->shared->dcpl_cache.fill, dset->shared->type, - dset->shared->type_id, (size_t)0, orig_chunk_size, data_dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") - fb_info_init = TRUE; - - /* Check if there are filters which need to be applied to the chunk */ - /* (only do this in advance when the chunk info can be re-used (i.e. 
- * it doesn't contain any non-default VL datatype fill values) - */ - if(!fb_info.has_vlen_fill_type && pline->nused > 0) { - size_t buf_size = orig_chunk_size; - - /* Push the chunk through the filters */ - if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") - } /* end if */ - } /* end if */ - - /* Set up dataset I/O info */ - store.chunk.offset = chunk_offset; - H5D_BUILD_IO_INFO_WRT(&io_info, dset, dxpl_cache, data_dxpl_id, &store, NULL); - - /* Reset the chunk offset indices */ - HDmemset(chunk_offset, 0, (layout->u.chunk.ndims * sizeof(chunk_offset[0]))); - - /* Loop over all chunks */ - carry = FALSE; - while(!carry) { - int i; /* Local index variable */ - - /* Check if the chunk exists yet on disk */ - if(!H5F_addr_defined(H5D_istore_get_addr(&io_info, NULL))) { - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */ - H5D_rdcc_ent_t *ent; /* Cache entry */ - hbool_t chunk_exists; /* Flag to indicate whether a chunk exists already */ - unsigned u; /* Local index variable */ - - /* Didn't find the chunk on disk */ - chunk_exists = FALSE; - - /* Look for chunk in cache */ - for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) { - /* Assume a match */ - chunk_exists = TRUE; - for(u = 0; u < layout->u.chunk.ndims; u++) - if(ent->offset[u] != chunk_offset[u]) { - chunk_exists = FALSE; /* Reset if no match */ - break; - } /* end if */ - } /* end for */ - - /* Chunk wasn't in cache either, create it now */ - if(!chunk_exists) { - H5D_istore_ud1_t udata; /* B-tree pass-through for creating chunk */ - size_t chunk_size; /* Size of chunk in bytes, possibly filtered */ - - /* Check for VL datatype & non-default fill value */ - if(fb_info_init && fb_info.has_vlen_fill_type) { - /* Sanity check */ - HDassert(should_fill); - - /* Fill the buffer with VL datatype fill values */ 
- if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") - - /* Check if there are filters which need to be applied to the chunk */ - if(pline->nused > 0) { - size_t buf_size = orig_chunk_size; - size_t nbytes = fb_info.fill_buf_size; - - /* Push the chunk through the filters */ - if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") - - /* Keep the number of bytes the chunk turned in to */ - chunk_size = nbytes; - } /* end if */ - else - H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t); - } /* end if */ - else - chunk_size = orig_chunk_size; - - /* Initialize the chunk information */ - udata.common.mesg = layout; - udata.common.offset = chunk_offset; - H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t); - udata.filter_mask = filter_mask; - udata.addr = HADDR_UNDEF; - - /* Allocate the chunk with all processes */ - if(H5B_insert(dset->oloc.file, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk") - - /* Check if fill values should be written to chunks */ - if(should_fill) { - /* Sanity check */ - HDassert(fb_info_init); - -#ifdef H5_HAVE_PARALLEL - /* Check if this file is accessed with an MPI-capable file driver */ - if(using_mpi) { - /* Write the chunks out from only one process */ - /* !! Use the internal "independent" DXPL!! 
-QAK */ - if(H5_PAR_META_WRITE == mpi_rank) { - HDassert(udata.nbytes == chunk_size); - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") - } /* end if */ - - /* Indicate that blocks are being written */ - blocks_written = TRUE; - } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ - HDassert(udata.nbytes == chunk_size); - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") -#ifdef H5_HAVE_PARALLEL - } /* end else */ -#endif /* H5_HAVE_PARALLEL */ - } /* end if */ - - /* Release the fill buffer if we need to re-allocate it each time */ - if(fb_info_init && fb_info.has_vlen_fill_type && pline->nused > 0) - H5D_fill_release(&fb_info); - } /* end if */ - } /* end if */ - - /* Increment indices */ - carry = TRUE; - for(i = (int)(space_ndims - 1); i >= 0; --i) { - chunk_offset[i] += layout->u.chunk.dim[i]; - if(chunk_offset[i] >= space_dim[i]) - chunk_offset[i] = 0; - else { - carry = FALSE; - break; - } /* end else */ - } /* end for */ - } /* end while */ - -#ifdef H5_HAVE_PARALLEL - /* Only need to block at the barrier if we actually initialized a chunk */ - /* using an MPI-capable file driver */ - if(using_mpi && blocks_written) { - /* Wait at barrier to avoid race conditions where some processes are - * still writing out chunks and other processes race ahead to read - * them in, getting bogus data. 
- */ - if(MPI_SUCCESS != (mpi_code = MPI_Barrier(mpi_comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code) - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - - /* Reset any cached chunk info for this dataset */ - H5D_istore_cinfo_cache_reset(&dset->shared->cache.chunk.last); - -done: - /* Release the fill buffer info, if it's been initialized */ - if(fb_info_init && H5D_fill_term(&fb_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_allocate() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_prune_check - * - * Purpose: Search for chunks that are no longer necessary in the B-tree. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu - * March 26, 2002 + * Tuesday, May 20, 2008 * *------------------------------------------------------------------------- */ /* ARGSUSED */ static int -H5D_istore_prune_check(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, - const void *_lt_key, haddr_t UNUSED addr, const void UNUSED *_rt_key, +H5D_istore_idx_iterate_cb(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, + const void *_lt_key, haddr_t addr, const void UNUSED *_rt_key, void *_udata) { - H5D_istore_it_ud3_t *udata = (H5D_istore_it_ud3_t *)_udata; - const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; - unsigned rank; /*current # of dimensions */ - unsigned u; - int ret_value = H5_ITER_CONT; /* Return value */ - - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_prune_check) - - /* Figure out what chunks are no longer in use for the specified extent and release them */ - rank = udata->common.mesg->u.chunk.ndims - 1; - for(u = 0; u < rank; u++) - /* The LT_KEY is the left key (the one that describes the chunk). It points to a chunk of - * storage that contains the beginning of the logical address space represented by UDATA. 
- */ - if((hsize_t)lt_key->offset[u] > udata->dims[u]) { - H5D_istore_sl_ck_t *sl_node; /* Skip list node for chunk to remove */ - - /* Allocate space for the shared structure */ - if(NULL == (sl_node = H5FL_MALLOC(H5D_istore_sl_ck_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for shared B-tree info") + H5D_istore_it_ud_t *udata = (H5D_istore_it_ud_t *)_udata; /* User data */ + const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key; /* B-tree key for chunk */ + H5D_chunk_rec_t chunk_rec; /* Generic chunk record for callback */ + int ret_value; /* Return value */ - /* Calculate the index of this chunk */ - if(H5V_chunk_index(rank, lt_key->offset, udata->common.mesg->u.chunk.dim, udata->down_chunks, &sl_node->index) < 0) { - H5FL_FREE(H5D_istore_sl_ck_t, sl_node); - HGOTO_ERROR(H5E_IO, H5E_BADRANGE, H5_ITER_ERROR, "can't get chunk index") - } /* end if */ + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_iterate_cb) - /* Store the key for the chunk */ - sl_node->key = *lt_key; + /* Sanity check for memcpy() */ + HDassert(offsetof(H5D_chunk_rec_t, nbytes) == offsetof(H5D_istore_key_t, nbytes)); + HDassert(offsetof(H5D_chunk_rec_t, offset) == offsetof(H5D_istore_key_t, offset)); + HDassert(offsetof(H5D_chunk_rec_t, filter_mask) == offsetof(H5D_istore_key_t, filter_mask)); - /* Insert the chunk description in the skip list */ - if(H5SL_insert(udata->outside, sl_node, &sl_node->index) < 0) { - H5FL_FREE(H5D_istore_sl_ck_t, sl_node); - HGOTO_ERROR(H5E_IO, H5E_CANTINSERT, H5_ITER_ERROR, "can't insert chunk into skip list") - } /* end if */ + /* Compose generic chunk record for callback */ + HDmemcpy(&chunk_rec, lt_key, sizeof(*lt_key)); + chunk_rec.chunk_addr = addr; - /* Break out of loop, we know the chunk is outside the current dimensions */ - break; - } /* end if */ + /* Make "generic chunk" callback */ + if((ret_value = (udata->cb)(&chunk_rec, udata->udata)) < 0) + HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk 
iterator callback"); -done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_prune_check() */ +} /* H5D_istore_idx_iterate_cb() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_prune_remove + * Function: H5D_istore_idx_iterate * - * Purpose: Destroy a skip list node for "pruning" chunks, also removes - * the chunk from the B-tree. + * Purpose: Iterate over the chunks in the B-tree index, making a callback + * for each one. * * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol, koziol@hdfgroup.org - * May 3, 2007 + * Programmer: Quincey Koziol + * Tuesday, May 20, 2008 * *------------------------------------------------------------------------- */ -static herr_t -H5D_istore_prune_remove(void *item, void UNUSED *key, void *op_data) +static int +H5D_istore_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata) { - H5D_istore_sl_ck_t *sl_node = (H5D_istore_sl_ck_t *)item; /* Temporary pointer to chunk to remove */ - H5D_istore_sl_rm_t *rm_info = (H5D_istore_sl_rm_t *)op_data; /* Information needed for removing chunk from B-tree */ - H5D_istore_ud0_t bt_udata; /* User data for B-tree removal routine */ - herr_t ret_value = H5_ITER_CONT; /* Return value */ + H5D_istore_it_ud_t udata; /* User data for B-tree iterator callback */ + int ret_value; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_prune_remove) + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_iterate) - /* Sanity checks */ - HDassert(sl_node); - HDassert(rm_info); - - /* Initialize the user data for the B-tree callback */ - bt_udata.mesg = rm_info->mesg; - bt_udata.offset = sl_node->key.offset; - - /* Remove */ - if(H5B_remove(rm_info->f, rm_info->dxpl_id, H5B_ISTORE, rm_info->mesg->u.chunk.addr, &bt_udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, H5_ITER_ERROR, "unable to remove entry") - - /* Free the chunk checking node */ - H5FL_FREE(H5D_istore_sl_ck_t, sl_node); - -done: - 
FUNC_LEAVE_NOAPI(ret_value) -} /* H5D_istore_prune_remove() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_prune_by_extent - * - * Purpose: This function searches for chunks that are no longer necessary both in the - * raw data cache and in the B-tree. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu - * Algorithm: Robb Matzke - * March 27, 2002 - * - * The algorithm is: - * - * For chunks that are no longer necessary: - * - * 1. Search in the raw data cache for each chunk - * 2. If found then preempt it from the cache - * 3. Search in the B-tree for each chunk - * 4. If found then remove it from the B-tree and deallocate file storage for the chunk - * - * This example shows a 2d dataset of 90x90 with a chunk size of 20x20. - * - * - * 0 20 40 60 80 90 100 - * 0 +---------+---------+---------+---------+-----+...+ - * |:::::X:::::::::::::: : : | : - * |:::::::X:::::::::::: : : | : Key - * |::::::::::X::::::::: : : | : -------- - * |::::::::::::X::::::: : : | : +-+ Dataset - * 20+::::::::::::::::::::.........:.........:.....+...: | | Extent - * | :::::X::::: : : | : +-+ - * | ::::::::::: : : | : - * | ::::::::::: : : | : ... Chunk - * | :::::::X::: : : | : : : Boundary - * 40+.........:::::::::::.........:.........:.....+...: :.: - * | : : : : | : - * | : : : : | : ... 
Allocated - * | : : : : | : ::: & Filled - * | : : : : | : ::: Chunk - * 60+.........:.........:.........:.........:.....+...: - * | : :::::::X::: : | : X Element - * | : ::::::::::: : | : Written - * | : ::::::::::: : | : - * | : ::::::::::: : | : - * 80+.........:.........:::::::::::.........:.....+...: O Fill Val - * | : : ::::::::::: | : Explicitly - * | : : ::::::X:::: | : Written - * 90+---------+---------+---------+---------+-----+ : - * : : : ::::::::::: : - * 100:.........:.........:.........:::::::::::.........: - * - * - * We have 25 total chunks for this dataset, 5 of which have space - * allocated in the file because they were written to one or more - * elements. These five chunks (and only these five) also have entries in - * the storage B-tree for this dataset. - * - * Now lets say we want to shrink the dataset down to 70x70: - * - * - * 0 20 40 60 70 80 90 100 - * 0 +---------+---------+---------+----+----+-----+...+ - * |:::::X:::::::::::::: : | : | : - * |:::::::X:::::::::::: : | : | : Key - * |::::::::::X::::::::: : | : | : -------- - * |::::::::::::X::::::: : | : | : +-+ Dataset - * 20+::::::::::::::::::::.........:....+....:.....|...: | | Extent - * | :::::X::::: : | : | : +-+ - * | ::::::::::: : | : | : - * | ::::::::::: : | : | : ... Chunk - * | :::::::X::: : | : | : : : Boundary - * 40+.........:::::::::::.........:....+....:.....|...: :.: - * | : : : | : | : - * | : : : | : | : ... 
Allocated - * | : : : | : | : ::: & Filled - * | : : : | : | : ::: Chunk - * 60+.........:.........:.........:....+....:.....|...: - * | : :::::::X::: | : | : X Element - * | : ::::::::::: | : | : Written - * +---------+---------+---------+----+ : | : - * | : ::::::::::: : | : - * 80+.........:.........:::::::::X:.........:.....|...: O Fill Val - * | : : ::::::::::: | : Explicitly - * | : : ::::::X:::: | : Written - * 90+---------+---------+---------+---------+-----+ : - * : : : ::::::::::: : - * 100:.........:.........:.........:::::::::::.........: - * - * - * That means that the nine chunks along the bottom and right side should - * no longer exist. Of those nine chunks, (0,80), (20,80), (40,80), - * (60,80), (80,80), (80,60), (80,40), (80,20), and (80,0), one is actually allocated - * that needs to be released. - * To release the chunks, we traverse the B-tree to obtain a list of unused - * allocated chunks, and then call H5B_remove() for each chunk. - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_prune_by_extent(const H5D_io_info_t *io_info, const hsize_t *old_dims) -{ - H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ - const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ - H5D_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */ - H5D_istore_it_ud3_t udata; /*B-tree pass-through */ - H5D_istore_sl_rm_t rm_info; /* User data for skip list destroy callback */ - hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ - hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ - hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ - unsigned rank; /* Current # of dimensions */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_prune_by_extent, FAIL) + HDassert(idx_info); + HDassert(idx_info->f); + 
HDassert(idx_info->layout); + HDassert(chunk_cb); + HDassert(chunk_udata); - /* Check args */ - HDassert(io_info); - HDassert(dset && H5D_CHUNKED == dset->shared->layout.type); - HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - HDassert(H5F_addr_defined(dset->shared->layout.u.chunk.addr)); - - /* Go get the rank & dimensions */ - rank = dset->shared->layout.u.chunk.ndims - 1; - if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") - - /*------------------------------------------------------------------------- - * Figure out what chunks are no longer in use for the specified extent - * and release them from the linked list raw data cache - *------------------------------------------------------------------------- - */ - for(ent = rdcc->head; ent; ent = next) { - /* Get pointer to next extry in cache, in case this one is evicted */ - next = ent->next; - - /* Check for chunk offset outside of new dimensions */ - for(u = 0; u < rank; u++) - if((hsize_t)ent->offset[u] > curr_dims[u]) { -#ifdef H5D_ISTORE_DEBUG - HDfputs("cache:remove:[", stderr); - for(u = 0; u < rank; u++) - HDfprintf(stderr, "%s%Hd", (u ? 
", " : ""), ent->offset[u]); - HDfputs("]\n", stderr); -#endif - - /* Preempt the entry from the cache, but do not flush it to disk */ - if(H5D_istore_preempt(io_info, ent, FALSE) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to preempt chunk") - - /* Break out of loop, chunk is evicted */ - break; - } /* end if */ - } /* end for */ - - /* Round up to the next integer # of chunks, to accomodate partial chunks */ - for(u = 0; u < rank; u++) - chunks[u] = ((old_dims[u] + dset->shared->layout.u.chunk.dim[u]) - 1) / dset->shared->layout.u.chunk.dim[u]; - - /* Get the "down" sizes for each dimension */ - if(H5V_array_down(rank, chunks, down_chunks) < 0) - HGOTO_ERROR(H5E_IO, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") - - /* Initialize the user data for the iteration */ + /* Initialize userdata */ HDmemset(&udata, 0, sizeof udata); - udata.common.mesg = &dset->shared->layout; - udata.dims = curr_dims; - udata.down_chunks = down_chunks; - - /* Initialize the skip list that will hold the chunks outside the dimensions */ - if(NULL == (udata.outside = H5SL_create(H5SL_TYPE_HSIZE, 0.5, (size_t)H5D_ISTORE_DEFAULT_SKIPLIST_HEIGHT))) - HGOTO_ERROR(H5E_IO, H5E_CANTCREATE, FAIL, "can't create skip list for chunks outside new dimensions") - - /* Iterate over chunks in dataset, creating a list of chunks which are - * now completely outside the dataset's dimensions. - * - * Note: It would be more efficient to create a new B-tree routine that - * performed a "remove if" operation on the B-tree and remove all - * the chunks that were outside the dataset's dimensions through - * that routine. However, that's a fair amount of work and it's - * unlikely that shrinking a dataset is a performance critical - * operation. 
- QAK - */ - if(H5B_iterate(dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, H5D_istore_prune_check, dset->shared->layout.u.chunk.addr, &udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to iterate over B-tree") - - /* Set up user data for skip list callback */ - rm_info.f = dset->oloc.file; - rm_info.dxpl_id = io_info->dxpl_id; - rm_info.mesg = &dset->shared->layout; + udata.common.mesg = idx_info->layout; + udata.cb = chunk_cb; + udata.udata = chunk_udata; - /* Destroy the skip list, deleting the chunks in the callback */ - H5SL_destroy(udata.outside, H5D_istore_prune_remove, &rm_info); + /* Iterate over existing chunks */ + if((ret_value = H5B_iterate(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, idx_info->layout->u.chunk.addr, H5D_istore_idx_iterate_cb, &udata)) < 0) + HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over chunk B-tree"); - /* Reset any cached chunk info for this dataset */ - H5D_istore_cinfo_cache_reset(&dset->shared->cache.chunk.last); - -done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_prune_by_extent() */ +} /* end H5D_istore_idx_iterate() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_remove + * Function: H5D_istore_idx_remove * - * Purpose: Removes chunks that are no longer necessary in the B-tree. + * Purpose: Remove chunk from v1 B-tree index. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke - * Pedro Vicente, pvn@ncsa.uiuc.edu - * March 28, 2002 + * Programmer: Quincey Koziol + * Thursday, May 22, 2008 * *------------------------------------------------------------------------- */ -/* ARGSUSED */ -static H5B_ins_t -H5D_istore_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out */ , - hbool_t *lt_key_changed /*out */ , - void UNUSED * _udata /*in,out */ , - void UNUSED * _rt_key /*in,out */ , - hbool_t *rt_key_changed /*out */ ) +static herr_t +H5D_istore_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata) { - H5D_istore_key_t *lt_key = (H5D_istore_key_t *)_lt_key; - H5B_ins_t ret_value = H5B_INS_REMOVE; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT(H5D_istore_remove) - - /* Remove raw data chunk from file */ - H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t); - if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk") + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_remove) - /* Mark keys as unchanged */ - *lt_key_changed = FALSE; - *rt_key_changed = FALSE; - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_remove() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_initialize_by_extent - * - * Purpose: This function searches for chunks that have to be initialized with the fill - * value both in the raw data cache and in the B-tree. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu - * April 4, 2002 - * - * Comments: - * - * (See the example of H5D_istore_prune_by_extent) - * Next, there are seven chunks where the database extent boundary is - * within the chunk. We find those seven just like we did with the previous nine. 
- * Fot the ones that are allocated we initialize the part that lies outside the boundary - * with the fill value. - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_initialize_by_extent(H5D_io_info_t *io_info) -{ - H5S_t *space_chunk = NULL; /* Dataspace for a chunk */ - H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ - const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ - H5D_storage_t store; /* Dataset storage information */ - H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ - hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ - hsize_t dset_dims[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */ - hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /* Current chunk dimensions */ - hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Logical location of the chunks */ - hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */ - hsize_t nchunks[H5O_LAYOUT_NDIMS]; /* Current number of chunks in each dimension */ - hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ - uint32_t elmts_per_chunk; /* Elements in a chunk */ - int srank; /* # of chunk dimensions (signed) */ - unsigned rank; /* # of chunk dimensions */ - hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_initialize_by_extent, FAIL) + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); + HDassert(udata); - /* Check args */ - HDassert(io_info); - HDassert(io_info->dset && H5D_CHUNKED == layout->type); - HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); - HDassert(H5F_addr_defined(layout->u.chunk.addr)); - - /* Go get the rank & dimensions */ - if((srank = H5S_get_simple_extent_dims(dset->shared->space, 
dset_dims, NULL)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") - H5_ASSIGN_OVERFLOW(rank, srank, int, unsigned); - - /* Set size of lowest chunk dimension (the dataset element size) */ - dset_dims[rank] = layout->u.chunk.dim[rank]; - - /* Compute the number of chunks in dataset & the # of elements in a chunk */ - /* (round up to the next integer # of chunks, to accomodate partial chunks) */ - for(u = 0, elmts_per_chunk = 1; u < rank; u++) { - nchunks[u] = ((dset_dims[u] - 1) / layout->u.chunk.dim[u]) + 1; - elmts_per_chunk *= layout->u.chunk.dim[u]; - } /* end for */ - - /* Get the "down" sizes for each dimension */ - if(H5V_array_down(rank, nchunks, down_chunks) < 0) - HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") - - /* Create a data space for a chunk & set the extent */ - for(u = 0; u < rank; u++) - chunk_dims[u] = layout->u.chunk.dim[u]; - if(NULL == (space_chunk = H5S_create_simple(rank, chunk_dims, NULL))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") - - /* Point to local dataset storage info */ - HDassert(io_info->store == NULL); /* Make certain we aren't blowing anything away */ - io_info->store = &store; - - /* Reset hyperslab start array */ - HDmemset(hyper_start, 0, sizeof(hyper_start)); - - /* Initialize current chunk offset to the origin (0, 0, 0, ...) */ - HDmemset(chunk_offset, 0, sizeof(chunk_offset)); - - /* Loop over all chunks */ - carry = FALSE; - while(!carry) { - hbool_t found; /* Initialize this entry */ - int i; /* Local index variable */ - - /* - * Figure out what chunks have to be initialized. 
These are the chunks where the dataspace - * extent boundary is within the chunk - */ - found = FALSE; - for(u = 0; u < rank; u++) - if((chunk_offset[u] + layout->u.chunk.dim[u]) > dset_dims[u]) { - found = TRUE; - break; - } /* end if */ - - if(found) { - H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */ - hssize_t nelmts; /* Number of data elements */ - hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */ - uint8_t *chunk; /* The file chunk */ - unsigned idx_hint; /* Which chunk we're dealing with */ - hsize_t bytes_accessed; /* Bytes accessed in chunk */ - - /* Initialize the fill value buffer, if necessary */ - if(!fb_info_init) { - H5_CHECK_OVERFLOW(elmts_per_chunk, uint32_t, size_t); - if(H5D_fill_init(&fb_info, NULL, FALSE, NULL, NULL, NULL, NULL, - &dset->shared->dcpl_cache.fill, - dset->shared->type, dset->shared->type_id, (size_t)elmts_per_chunk, - io_info->dxpl_cache->max_temp_buf, io_info->dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") - fb_info_init = TRUE; - } /* end if */ - - /* Compute the # of elements to leave with existing value, in each dimension */ - for(u = 0; u < rank; u++) - count[u] = MIN(layout->u.chunk.dim[u], (dset_dims[u] - chunk_offset[u])); - -#ifdef H5D_ISTORE_DEBUG - HDfputs("cache:initialize:offset:[", stdout); - for(u = 0; u < rank; u++) - HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); - HDfputs("]", stdout); - HDfputs(":count:[", stdout); - for(u = 0; u < rank; u++) - HDfprintf(stdout, "%s%Hd", u ? 
", " : "", count[u]); - HDfputs("]\n", stdout); -#endif - - /* Select all elements in chunk, to begin with */ - if(H5S_select_all(space_chunk, TRUE) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space") - - /* "Subtract out" the elements to keep */ - if(H5S_select_hyperslab(space_chunk, H5S_SELECT_NOTB, hyper_start, NULL, count, NULL) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select hyperslab") - - /* Calculate the index of this chunk */ - if(H5V_chunk_index(rank, chunk_offset, layout->u.chunk.dim, down_chunks, &store.chunk.index) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") - - /* Lock the chunk into the cache, to get a pointer to the chunk buffer */ - store.chunk.offset = chunk_offset; - if(NULL == (chunk = (uint8_t *)H5D_istore_lock(io_info, NULL, FALSE, &idx_hint))) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk") - - - /* Fill the selection in the memory buffer */ - /* Use the size of the elements in the chunk directly instead of */ - /* relying on the fill.size, which might be set to 0 if there is */ - /* no fill-value defined for the dataset -QAK */ - - /* Get the number of elements in the selection */ - nelmts = H5S_GET_SELECT_NPOINTS(space_chunk); - HDassert(nelmts >= 0); - H5_CHECK_OVERFLOW(nelmts, hssize_t, size_t); - - /* Check for VL datatype & non-default fill value */ - if(fb_info.has_vlen_fill_type) - /* Re-fill the buffer to use for this I/O operation */ - if(H5D_fill_refill_vl(&fb_info, (size_t)nelmts, io_info->dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") - - /* Create a selection iterator for scattering the elements to memory buffer */ - H5_CHECK_OVERFLOW(layout->u.chunk.dim[rank], uint32_t, size_t); - if(H5S_select_iter_init(&chunk_iter, space_chunk, (size_t)layout->u.chunk.dim[rank]) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunk selection information") - - /* 
Scatter the data into memory */ - if(H5D_scatter_mem(fb_info.fill_buf, space_chunk, &chunk_iter, (size_t)nelmts, io_info->dxpl_cache, chunk/*out*/) < 0) { - H5S_SELECT_ITER_RELEASE(&chunk_iter); - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "scatter failed") - } /* end if */ - - /* Release the selection iterator */ - if(H5S_SELECT_ITER_RELEASE(&chunk_iter) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") - - - /* The number of bytes accessed in the chunk */ - /* (i.e. the bytes replaced with fill values) */ - bytes_accessed = nelmts * layout->u.chunk.dim[rank]; - - /* Release lock on chunk */ - H5_CHECK_OVERFLOW(bytes_accessed, hsize_t, uint32_t); - if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (uint32_t)bytes_accessed) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk") - } /* end if */ - - /* Increment indices */ - carry = TRUE; - for(i = (int)(rank - 1); i >= 0; --i) { - chunk_offset[i] += layout->u.chunk.dim[i]; - if(chunk_offset[i] >= dset_dims[i]) - chunk_offset[i] = 0; - else { - carry = FALSE; - break; - } /* end else */ - } /* end for */ - } /* end while */ + /* Remove the chunk from the v1 B-tree index and release the space for the + * chunk (in the B-tree callback). 
+ */ + if(H5B_remove(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, idx_info->layout->u.chunk.addr, udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to remove chunk entry") done: - /* Release resources */ - if(space_chunk && H5S_close(space_chunk) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace") - if(fb_info_init && H5D_fill_term(&fb_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_initialize_by_extent() */ +} /* H5D_istore_idx_remove() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_delete + * Function: H5D_istore_idx_delete * - * Purpose: Delete raw data storage for entire dataset (i.e. all chunks) + * Purpose: Delete v1 B-tree index and raw data storage for entire dataset + * (i.e. all chunks) * * Return: Success: Non-negative * Failure: negative @@ -3286,365 +1143,123 @@ done: * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_delete(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout) +static herr_t +H5D_istore_idx_delete(const H5D_chk_idx_info_t *idx_info) { - herr_t ret_value=SUCCEED; /* Return value */ + H5O_layout_t tmp_layout; /* Local copy of layout info */ + H5D_istore_ud0_t udata; /* User data for B-tree iterator call */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_istore_delete, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_delete) - /* Check if the B-tree has been created in the file */ - if(H5F_addr_defined(layout->u.chunk.addr)) { - H5O_layout_t tmp_layout=*layout;/* Local copy of layout info */ - H5D_istore_ud0_t udata; /* User data for B-tree iterator call */ + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); + HDassert(H5F_addr_defined(idx_info->layout->u.chunk.addr)); - /* Set up user data for B-tree deletion */ - 
HDmemset(&udata, 0, sizeof udata); - udata.mesg = &tmp_layout; + /* Set up user data for B-tree deletion */ + HDmemset(&udata, 0, sizeof udata); + tmp_layout = *idx_info->layout; + udata.mesg = &tmp_layout; - /* Allocate the shared structure */ - if(H5D_istore_shared_create(f, &tmp_layout)<0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") + /* Set up the shared structure */ + if(H5D_istore_shared_create(idx_info->f, &tmp_layout) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") - /* Delete entire B-tree */ - if(H5B_delete(f, dxpl_id, H5B_ISTORE, tmp_layout.u.chunk.addr, &udata)<0) - HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree") + /* Delete entire B-tree */ + if(H5B_delete(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, tmp_layout.u.chunk.addr, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk B-tree") - /* Free the raw B-tree node buffer */ - if(tmp_layout.u.chunk.btree_shared==NULL) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") - if(H5RC_DEC(tmp_layout.u.chunk.btree_shared)<0) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") - } /* end if */ + /* Free the raw B-tree node buffer */ + if(NULL == tmp_layout.u.chunk.btree_shared) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "ref-counted page nil") + if(H5RC_DEC(tmp_layout.u.chunk.btree_shared) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_delete() */ +} /* end H5D_istore_idx_delete() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_update_cache + * Function: H5D_istore_idx_copy_setup * - * Purpose: Update any cached chunks index values after the dataspace - * size has changed + * Purpose: Set up any necessary information for copying chunks * - * Return: 
Success: Non-negative - * Failure: negative + * Return: Non-negative on success/Negative on failure * * Programmer: Quincey Koziol - * Saturday, May 29, 2004 + * Thursday, May 29, 2008 * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id) +static herr_t +H5D_istore_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst) { - H5D_io_info_t io_info; /* Temporary I/O info object */ - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ - H5D_rdcc_ent_t *ent, *next; /*cache entry */ - H5D_rdcc_ent_t *old_ent; /* Old cache entry */ - H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ - H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ - unsigned rank; /*current # of dimensions */ - hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ - hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ - hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ - unsigned u; /*counters */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_update_cache, FAIL) - - /* Check args */ - HDassert(dset && H5D_CHUNKED == dset->shared->layout.type); - HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - - /* Get the rank */ - rank = dset->shared->layout.u.chunk.ndims-1; - HDassert(rank > 0); + herr_t ret_value = SUCCEED; /* Return value */ - /* 1-D dataset's chunks can't have their index change */ - if(rank == 1) - HGOTO_DONE(SUCCEED) + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_copy_setup) - /* Go get the dimensions */ - if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions") + HDassert(idx_info_src); + HDassert(idx_info_src->f); + 
HDassert(idx_info_src->layout); + HDassert(idx_info_dst); + HDassert(idx_info_dst->f); + HDassert(idx_info_dst->layout); + HDassert(!H5F_addr_defined(idx_info_dst->layout->u.chunk.addr)); - /* Round up to the next integer # of chunks, to accomodate partial chunks */ - for(u = 0; u < rank; u++) - chunks[u] = ((curr_dims[u] + dset->shared->layout.u.chunk.dim[u]) - 1) / dset->shared->layout.u.chunk.dim[u]; - - /* Get the "down" sizes for each dimension */ - if(H5V_array_down(rank, chunks, down_chunks) < 0) - HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") - - /* Fill the DXPL cache values for later use */ - if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") - - /* Construct dataset I/O info */ - H5D_BUILD_IO_INFO_WRT(&io_info, dset, dxpl_cache, dxpl_id, NULL, NULL); - - /* Recompute the index for each cached chunk that is in a dataset */ - for(ent = rdcc->head; ent; ent = next) { - hsize_t idx; /* Chunk index */ - unsigned old_idx; /* Previous index number */ - - /* Get the pointer to the next cache entry */ - next = ent->next; - - /* Calculate the index of this chunk */ - if(H5V_chunk_index(rank, ent->offset, dset->shared->layout.u.chunk.dim, down_chunks, &idx) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") - - /* Compute the index for the chunk entry */ - old_idx = ent->idx; /* Save for later */ - ent->idx = H5D_CHUNK_HASH(dset->shared, idx); - - if(old_idx != ent->idx) { - /* Check if there is already a chunk at this chunk's new location */ - old_ent = rdcc->slot[ent->idx]; - if(old_ent != NULL) { - HDassert(old_ent->locked == 0); - - /* Check if we are removing the entry we would walk to next */ - if(old_ent == next) - next = old_ent->next; - - /* Remove the old entry from the cache */ - if(H5D_istore_preempt(&io_info, old_ent, TRUE) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") - } /* end 
if */ + /* Create shared B-tree info for each file */ + if(H5D_istore_shared_create(idx_info_src->f, idx_info_src->layout) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for source shared B-tree info") + if(H5D_istore_shared_create(idx_info_dst->f, idx_info_dst->layout) < 0) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for destination shared B-tree info") - /* Insert this chunk into correct location in hash table */ - rdcc->slot[ent->idx] = ent; - - /* Null out previous location */ - rdcc->slot[old_idx] = NULL; - } /* end if */ - } /* end for */ + /* Create the root of the B-tree that describes chunked storage in the dest. file */ + if(H5D_istore_idx_create(idx_info_dst) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5F_istore_update_cache() */ +} /* end H5D_istore_idx_copy_setup() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_copy + * Function: H5D_istore_idx_copy_shutdown * - * Purpose: copy an indexed storage B-tree from SRC file to DST file. + * Purpose: Shutdown any information from copying chunks * - * Return: Non-negative on success (with the ISTORE argument initialized - * and ready to write to an object header). Negative on failure. 
+ * Return: Non-negative on success/Negative on failure * - * Programmer: Peter Cao - * August 20, 2005 + * Programmer: Quincey Koziol + * Thursday, May 29, 2008 * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst, - H5O_layout_t *layout_dst, H5T_t *dt_src, H5O_copy_t *cpy_info, H5O_pline_t *pline, hid_t dxpl_id) +static herr_t +H5D_istore_idx_copy_shutdown(H5O_layout_t *layout_src, H5O_layout_t *layout_dst) { - H5D_istore_it_ud4_t udata; - H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */ - hid_t tid_src = -1; /* Datatype ID for source datatype */ - hid_t tid_dst = -1; /* Datatype ID for destination datatype */ - hid_t tid_mem = -1; /* Datatype ID for memory datatype */ - size_t buf_size; /* Size of copy buffer */ - size_t reclaim_buf_size; /* Size of reclaim buffer */ - void *buf = NULL; /* Buffer for copying data */ - void *bkg = NULL; /* Buffer for background during type conversion */ - void *reclaim_buf = NULL; /* Buffer for reclaiming data */ - H5S_t *buf_space = NULL; /* Dataspace describing buffer */ - hid_t sid_buf = -1; /* ID for buffer dataspace */ - uint32_t nelmts = 0; /* Number of elements in buffer */ - hbool_t do_convert = FALSE; /* Indicate that type conversions should be performed */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_copy, FAIL) - - /* Check args */ - HDassert(f_src); - HDassert(f_dst); - HDassert(layout_src && H5D_CHUNKED == layout_src->type); - HDassert(layout_dst && H5D_CHUNKED == layout_dst->type); - HDassert(dt_src); - - /* Create datatype ID for src datatype */ - if((tid_src = H5I_register(H5I_DATATYPE, dt_src)) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register source file datatype") - - /* Create shared B-tree info for each file */ - if(H5D_istore_shared_create(f_src, layout_src) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, 
FAIL, "can't create wrapper for shared B-tree info") - if(H5D_istore_shared_create(f_dst, layout_dst) < 0) - HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") - - /* Check if we need to create the B-tree in the dest. file */ - if(layout_dst->u.chunk.addr == HADDR_UNDEF) { - /* Create the root of the B-tree that describes chunked storage */ - if(H5D_istore_create(f_dst, dxpl_id, layout_dst) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage") - } /* end if */ - - /* If there's a VLEN source datatype, set up type conversion information */ - if(H5T_detect_class(dt_src, H5T_VLEN) > 0) { - H5T_t *dt_dst; /* Destination datatype */ - H5T_t *dt_mem; /* Memory datatype */ - size_t mem_dt_size; /* Memory datatype size */ - size_t tmp_dt_size; /* Temp. datatype size */ - size_t max_dt_size; /* Max atatype size */ - hsize_t buf_dim; /* Dimension for buffer */ - unsigned u; - - /* create a memory copy of the variable-length datatype */ - if(NULL == (dt_mem = H5T_copy(dt_src, H5T_COPY_TRANSIENT))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy") - if((tid_mem = H5I_register(H5I_DATATYPE, dt_mem)) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register memory datatype") - - /* create variable-length datatype at the destinaton file */ - if(NULL == (dt_dst = H5T_copy(dt_src, H5T_COPY_TRANSIENT))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy") - if(H5T_set_loc(dt_dst, f_dst, H5T_LOC_DISK) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "cannot mark datatype on disk") - if((tid_dst = H5I_register(H5I_DATATYPE, dt_dst)) < 0) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register destination file datatype") - - /* Set up the conversion functions */ - if(NULL == (tpath_src_mem = H5T_path_find(dt_src, dt_mem, NULL, NULL, dxpl_id, FALSE))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and mem 
datatypes") - if(NULL == (tpath_mem_dst = H5T_path_find(dt_mem, dt_dst, NULL, NULL, dxpl_id, FALSE))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between mem and dst datatypes") - - /* Determine largest datatype size */ - if(0 == (max_dt_size = H5T_get_size(dt_src))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine datatype size") - if(0 == (mem_dt_size = H5T_get_size(dt_mem))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine datatype size") - max_dt_size = MAX(max_dt_size, mem_dt_size); - if(0 == (tmp_dt_size = H5T_get_size(dt_dst))) - HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to determine datatype size") - max_dt_size = MAX(max_dt_size, tmp_dt_size); - - /* Compute the number of elements per chunk */ - nelmts = 1; - for(u = 0; u < (layout_src->u.chunk.ndims - 1); u++) - nelmts *= layout_src->u.chunk.dim[u]; - - /* Create the space and set the initial extent */ - buf_dim = nelmts; - if(NULL == (buf_space = H5S_create_simple((unsigned)1, &buf_dim, NULL))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace") - - /* Atomize */ - if((sid_buf = H5I_register(H5I_DATASPACE, buf_space)) < 0) { - H5S_close(buf_space); - HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID") - } /* end if */ - - /* Set initial buffer sizes */ - buf_size = nelmts * max_dt_size; - reclaim_buf_size = nelmts * mem_dt_size; - - /* Allocate memory for reclaim buf */ - if(NULL == (reclaim_buf = H5MM_malloc(reclaim_buf_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") - - /* Indicate that type conversion should be performed */ - do_convert = TRUE; - } /* end if */ - else { - if(H5T_get_class(dt_src, FALSE) == H5T_REFERENCE) { - /* Indicate that type conversion should be performed */ - do_convert = TRUE; - } /* end if */ - - H5_ASSIGN_OVERFLOW(buf_size, layout_src->u.chunk.size, uint32_t, size_t); - reclaim_buf_size = 
0; - } /* end else */ - - /* Set up conversion buffer, if appropriate */ - if(do_convert) { - /* Allocate background memory for converting the chunk */ - if(NULL == (bkg = H5MM_malloc(buf_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") - - /* Check for reference datatype and no expanding references & clear background buffer */ - if(!cpy_info->expand_ref && - ((H5T_get_class(dt_src, FALSE) == H5T_REFERENCE) && (f_src != f_dst))) - /* Reset value to zero */ - HDmemset(bkg, 0, buf_size); - } /* end if */ + herr_t ret_value = SUCCEED; /* Return value */ - /* Allocate memory for copying the chunk */ - if(NULL == (buf = H5MM_malloc(buf_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_copy_shutdown) - /* Initialize the callback structure for the source */ - HDmemset(&udata, 0, sizeof udata); - udata.common.mesg = layout_src; - udata.file_src = f_src; - udata.addr_dst = layout_dst->u.chunk.addr; - udata.buf = buf; - udata.bkg = bkg; - udata.buf_size = buf_size; - udata.tid_src = tid_src; - udata.tid_mem = tid_mem; - udata.tid_dst = tid_dst; - udata.dt_src = dt_src; - udata.do_convert = do_convert; - udata.tpath_src_mem = tpath_src_mem; - udata.tpath_mem_dst = tpath_mem_dst; - udata.reclaim_buf = reclaim_buf; - udata.reclaim_buf_size = reclaim_buf_size; - udata.buf_space = buf_space; - udata.nelmts = nelmts; - udata.pline = pline; - udata.file_dst = f_dst; - udata.cpy_info = cpy_info; - - /* copy the chunked data by iteration */ - if(H5B_iterate(f_src, dxpl_id, H5B_ISTORE, H5D_istore_iter_copy, layout_src->u.chunk.addr, &udata) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to iterate over chunk B-tree") - - /* I/O buffers may have been re-allocated */ - buf = udata.buf; - bkg = udata.bkg; - -done: - if(sid_buf > 0) - if(H5I_dec_ref(sid_buf) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary 
dataspace ID") - if(tid_src > 0) - if(H5I_dec_ref(tid_src) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") - if(tid_dst > 0) - if(H5I_dec_ref(tid_dst) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") - if(tid_mem > 0) - if(H5I_dec_ref(tid_mem) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") - if(buf) - H5MM_xfree(buf); - if(bkg) - H5MM_xfree(bkg); - if(reclaim_buf) - H5MM_xfree(reclaim_buf); + HDassert(layout_src); + HDassert(layout_dst); + /* Decrement refcount on shared B-tree info */ if(H5RC_DEC(layout_src->u.chunk.btree_shared) < 0) - HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") - + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to decrement ref-counted page") if(H5RC_DEC(layout_dst->u.chunk.btree_shared) < 0) - HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to decrement ref-counted page") +done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_copy() */ +} /* end H5D_istore_idx_copy_shutdown() */ /*------------------------------------------------------------------------- - * Function: H5D_istore_bh_size + * Function: H5D_istore_idx_size * * Purpose: Retrieve the amount of B-tree storage for chunked dataset * @@ -3657,88 +1272,53 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_istore_bh_info(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout, hsize_t *btree_size) +H5D_istore_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) { - H5D_istore_it_ud1_t udata; /* User-data for loading istore nodes */ - H5B_info_ud_t bh_udata; /* User-data for B-tree size iteration */ - herr_t ret_value = SUCCEED; + H5D_istore_ud0_t udata; /* User-data for loading istore nodes */ + H5B_info_t bt_info; /* B-tree info */ + hbool_t shared_init = FALSE; /* Whether shared 
B-tree info is initialized */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_istore_bh_info, FAIL) + FUNC_ENTER_NOAPI(H5D_istore_idx_size, FAIL) /* Check args */ - HDassert(f); - HDassert(layout); - HDassert(btree_size); + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); + HDassert(index_size); /* Initialize the shared info for the B-tree traversal */ - if(H5D_istore_shared_create(f, layout) < 0) + if(H5D_istore_shared_create(idx_info->f, idx_info->layout) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") + shared_init = TRUE; /* Initialize istore node user-data */ HDmemset(&udata, 0, sizeof udata); - udata.common.mesg = layout; + udata.mesg = idx_info->layout; - /* Iterate over B-tree, accumulating metadata size */ - bh_udata.udata = &udata; - bh_udata.btree_size = btree_size; - if(H5B_iterate_size(f, dxpl_id, H5B_ISTORE, NULL, layout->u.chunk.addr, &bh_udata) < 0) + /* Get metadata information for B-tree */ + if(H5B_get_info(idx_info->f, idx_info->dxpl_id, H5B_ISTORE, idx_info->layout->u.chunk.addr, &bt_info, NULL, &udata) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to iterate over chunk B-tree") -done: - if(layout->u.chunk.btree_shared == NULL) - HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") - if(H5RC_DEC(layout->u.chunk.btree_shared) < 0) - HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_bh_info() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_istore_dump_btree - * - * Purpose: Prints information about the storage B-tree to the specified - * stream. 
- * - * Return: Success: Non-negative - * - * Failure: negative - * - * Programmer: Robb Matzke - * Wednesday, April 28, 1999 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, haddr_t addr) -{ - H5O_layout_t layout; - H5D_istore_it_ud2_t udata; - herr_t ret_value=SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_dump_btree, FAIL) - - HDmemset(&udata, 0, sizeof udata); - layout.u.chunk.ndims = ndims; - udata.common.mesg = &layout; - udata.stream = stream; - if(stream) - HDfprintf(stream, " Address: %a\n", addr); - if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_iter_dump, addr, &udata)<0) - HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree") + /* Set the size of the B-tree */ + *index_size = bt_info.size; done: + if(shared_init) { + if(idx_info->layout->u.chunk.btree_shared == NULL) + HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") + if(H5RC_DEC(idx_info->layout->u.chunk.btree_shared) < 0) + HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_dump_btree() */ +} /* end H5D_istore_idx_size() */ -#ifdef H5D_ISTORE_DEBUG /*------------------------------------------------------------------------- - * Function: H5D_istore_stats + * Function: H5D_istore_idx_dest * - * Purpose: Print raw data cache statistics to the debug stream. If - * HEADERS is non-zero then print table column headers, - * otherwise assume that the H5AC layer has already printed them. + * Purpose: Release indexing information in memory. 
* * Return: Non-negative on success/Negative on failure * @@ -3747,53 +1327,26 @@ done: * *------------------------------------------------------------------------- */ -herr_t -H5D_istore_stats (H5D_t *dset, hbool_t headers) +static herr_t +H5D_istore_idx_dest(const H5D_chk_idx_info_t *idx_info) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); - double miss_rate; - char ascii[32]; - herr_t ret_value=SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_istore_stats, FAIL) - - if (!H5DEBUG(AC)) - HGOTO_DONE(SUCCEED) - - if (headers) { - fprintf(H5DEBUG(AC), "H5D: raw data cache statistics\n"); - fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s+%-8s\n", - "Layer", "Hits", "Misses", "MissRate", "Inits", "Flushes"); - fprintf(H5DEBUG(AC), " %-18s %8s %8s %8s %8s-%-8s\n", - "-----", "----", "------", "--------", "-----", "-------"); - } + herr_t ret_value = SUCCEED; /* Return value */ -#ifdef H5AC_DEBUG - if (H5DEBUG(AC)) headers = TRUE; -#endif + FUNC_ENTER_NOAPI_NOINIT(H5D_istore_idx_dest) - if (headers) { - if (rdcc->nhits>0 || rdcc->nmisses>0) { - miss_rate = 100.0 * rdcc->nmisses / - (rdcc->nhits + rdcc->nmisses); - } else { - miss_rate = 0.0; - } - if (miss_rate > 100) { - sprintf(ascii, "%7d%%", (int) (miss_rate + 0.5)); - } else { - sprintf(ascii, "%7.2f%%", miss_rate); - } + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->layout); - fprintf(H5DEBUG(AC), " %-18s %8u %8u %7s %8d+%-9ld\n", - "raw data chunks", rdcc->nhits, rdcc->nmisses, ascii, - rdcc->ninits, (long)(rdcc->nflushes)-(long)(rdcc->ninits)); - } + /* Free the raw B-tree node buffer */ + if(idx_info->layout->u.chunk.btree_shared == NULL) + HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") + if(H5RC_DEC(idx_info->layout->u.chunk.btree_shared) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D_istore_stats() */ -#endif /* H5D_ISTORE_DEBUG */ +} /* end H5D_istore_idx_dest() */ 
/*------------------------------------------------------------------------- @@ -3812,17 +1365,20 @@ herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int fwidth, unsigned ndims) { - H5O_layout_t layout; H5D_istore_ud0_t udata; /* B-tree user data */ + H5O_layout_t layout; /* Layout information for B-tree callback */ + hbool_t shared_init = FALSE; /* Whether B-tree shared info is initialized */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5D_istore_debug,FAIL) + FUNC_ENTER_NOAPI(H5D_istore_debug, FAIL) + /* Set up "fake" layout info */ layout.u.chunk.ndims = ndims; /* Allocate the shared structure */ - if(H5D_istore_shared_create(f, &layout)<0) + if(H5D_istore_shared_create(f, &layout) < 0) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info") + shared_init = TRUE; /* Set up B-tree user data */ HDmemset(&udata, 0, sizeof udata); @@ -3830,13 +1386,15 @@ H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden (void)H5B_debug(f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata); - /* Free the raw B-tree node buffer */ - if(layout.u.chunk.btree_shared==NULL) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") - if(H5RC_DEC(layout.u.chunk.btree_shared)<0) - HGOTO_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") - done: + if(shared_init) { + /* Free the raw B-tree node buffer */ + if(layout.u.chunk.btree_shared == NULL) + HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil") + if(H5RC_DEC(layout.u.chunk.btree_shared) < 0) + HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_istore_debug() */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index dd0c602..a87c9d3 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -822,7 +822,6 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type * 
equivalent of compressed contiguous datasets - QAK] */ if(total_chunks == 1) { - H5D_storage_t chk_store; /* Temporary storage info for chunk address lookup */ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk in file dataset's dataspace */ H5SL_node_t *chunk_node; /* Pointer to chunk node for selection */ H5S_t *fspace; /* Dataspace describing chunk & selection in it */ @@ -833,10 +832,7 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type HDmemset(coords, 0, sizeof(coords)); /* Look up address of chunk */ - io_info->store = &chk_store; - chk_store.chunk.offset = coords; - chk_store.chunk.index = 0; - if(HADDR_UNDEF == (ctg_store.contig.dset_addr = H5D_istore_get_addr(io_info, NULL))) + if(HADDR_UNDEF == (ctg_store.contig.dset_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, coords, NULL))) HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list") /* Check for this process having selection in this chunk */ @@ -968,7 +964,7 @@ if(H5DEBUG(D)) total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t) * total_chunks); /* Retrieve chunk address map */ - if(H5D_istore_chunkmap(io_info, total_chunk_addr_array, fm->down_chunks) < 0) + if(H5D_chunk_addrmap(io_info, total_chunk_addr_array, fm->down_chunks) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") /* Get chunk with lowest address */ @@ -1181,7 +1177,7 @@ if(H5DEBUG(D)) /* Check if this process has somethign to do with this chunk */ if(chunk_info) { H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ - H5D_istore_ud1_t udata; /* B-tree pass-through */ + H5D_chunk_ud_t udata; /* B-tree pass-through */ void *chunk; /* Pointer to the data chunk in cache */ uint32_t accessed_bytes; /* Total accessed size in a chunk */ unsigned idx_hint = 0; /* Cache index hint */ @@ -1197,17 +1193,15 @@ if(H5DEBUG(D)) /* Load the chunk into cache. 
But if the whole chunk is written, * simply allocate space instead of load the chunk. */ - if(HADDR_UNDEF == (caddr = H5D_istore_get_addr(io_info, &udata))) + if(HADDR_UNDEF == (caddr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata))) HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list") /* Load the chunk into cache and lock it. */ if(H5D_chunk_cacheable(io_info, caddr)) { hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ - size_t tmp_accessed_bytes; /* Total accessed size in a chunk */ /* Compute # of bytes accessed in chunk */ - tmp_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; - H5_ASSIGN_OVERFLOW(accessed_bytes, tmp_accessed_bytes, size_t, uint32_t); + accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; /* Determine if we will access all the data in the chunk */ if(((io_info->op_type == H5D_IO_OP_WRITE) && (accessed_bytes != ctg_store.contig.dset_size)) @@ -1215,7 +1209,7 @@ if(H5DEBUG(D)) entire_chunk = FALSE; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D_istore_lock(io_info, &udata, entire_chunk, &idx_hint))) + if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -1247,7 +1241,7 @@ if(H5DEBUG(D)) } /* end else */ /* Release the cache lock on the chunk. 
*/ - if(chunk && H5D_istore_unlock(io_info, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0) + if(chunk && H5D_chunk_unlock(io_info, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") } /* end if */ #else /* !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS) */ @@ -1377,7 +1371,7 @@ if(H5DEBUG(D)) { while(chunk_node) { H5D_chunk_info_t *chunk_info; /* chunk information */ haddr_t chunk_addr; /* Address of chunk in file */ - H5D_istore_ud1_t udata; /* B-tree pass-through */ + H5D_chunk_ud_t udata; /* B-tree pass-through */ hbool_t make_ind, make_coll; /* Flags to indicate that the MPI mode should change */ /* Get the actual chunk information from the skip list node */ @@ -1414,7 +1408,7 @@ if(H5DEBUG(D)) { #endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */ /* Retrieve the chunk's address */ - if(HADDR_UNDEF == (chunk_addr = H5D_istore_get_addr(io_info, &udata))) + if(HADDR_UNDEF == (chunk_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata))) HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list") /* Independent I/O */ @@ -1431,11 +1425,9 @@ if(H5DEBUG(D)) { /* Load the chunk into cache and lock it. 
*/ if(H5D_chunk_cacheable(io_info, chunk_addr)) { hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ - size_t tmp_accessed_bytes; /* Total accessed size in a chunk */ /* Compute # of bytes accessed in chunk */ - tmp_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; - H5_ASSIGN_OVERFLOW(accessed_bytes, tmp_accessed_bytes, size_t, uint32_t); + accessed_bytes = chunk_info->chunk_points * type_info->src_type_size; /* Determine if we will access all the data in the chunk */ if(((io_info->op_type == H5D_IO_OP_WRITE) && (accessed_bytes != ctg_store.contig.dset_size)) @@ -1443,7 +1435,7 @@ if(H5DEBUG(D)) { entire_chunk = FALSE; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D_istore_lock(io_info, &udata, entire_chunk, &idx_hint))) + if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -1476,7 +1468,7 @@ if(H5DEBUG(D)) { /* Release the cache lock on the chunk. */ if(chunk) - if(H5D_istore_unlock(io_info, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0) + if(H5D_chunk_unlock(io_info, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") } /* end if */ else { /*collective I/O */ @@ -1657,7 +1649,6 @@ H5D_sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm, H5D_chunk_info_t *chunk_info; /* Current chunking info. of this node. 
*/ haddr_t chunk_addr; /* Current chunking address of this node */ haddr_t *total_chunk_addr_array = NULL; /* The array of chunk address for the total number of chunk */ - H5D_storage_t store; /*union of EFL and chunk pointer in file space */ hbool_t do_sort = FALSE; /* Whether the addresses need to be sorted */ int bsearch_coll_chunk_threshold; int many_chunk_opt = H5D_OBTAIN_ONE_CHUNK_ADDR_IND; @@ -1706,7 +1697,7 @@ if(H5DEBUG(D)) if((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0) HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank") if(mpi_rank == 0) { - if(H5D_istore_chunkmap(io_info, total_chunk_addr_array, fm->down_chunks) < 0) + if(H5D_chunk_addrmap(io_info, total_chunk_addr_array, fm->down_chunks) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") } /* end if */ @@ -1715,9 +1706,6 @@ if(H5DEBUG(D)) HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code) } /* end if */ - /* Set dataset storage for I/O info */ - io_info->store = &store; - /* Start at first node in chunk skip list */ i = 0; if(NULL == (chunk_node = H5SL_first(fm->sel_chunks))) @@ -1729,9 +1717,7 @@ if(H5DEBUG(D)) HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list") if(many_chunk_opt == H5D_OBTAIN_ONE_CHUNK_ADDR_IND) { - store.chunk.offset = chunk_info->coords; - store.chunk.index = chunk_info->index; - if(HADDR_UNDEF == (chunk_addr = H5D_istore_get_addr(io_info, NULL))) + if(HADDR_UNDEF == (chunk_addr = H5D_chunk_get_addr(io_info->dset, io_info->dxpl_id, chunk_info->coords, NULL))) HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list") } /* end if */ else @@ -1846,7 +1832,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm, #if defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) && defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS) chunk_opt_mode = (H5FD_mpio_chunk_opt_t)H5P_peek_unsigned(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME); if((chunk_opt_mode == 
H5FD_MPIO_CHUNK_MULTI_IO) || (percent_nproc_per_chunk == 0)) { - if(H5D_istore_chunkmap(io_info, chunk_addr, fm->down_chunks) < 0) + if(H5D_chunk_addrmap(io_info, chunk_addr, fm->down_chunks) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address"); for(ic = 0; ic < total_chunks; ic++) assign_io_mode[ic] = H5D_CHUNK_IO_MODE_COL; @@ -1903,7 +1889,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm, #endif /* calculating the chunk address */ - if(H5D_istore_chunkmap(io_info, chunk_addr, fm->down_chunks) < 0) { + if(H5D_chunk_addrmap(io_info, chunk_addr, fm->down_chunks) < 0) { HDfree(nproc_per_chunk); #if !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS) HDfree(ind_this_chunk); diff --git a/src/H5Doh.c b/src/H5Doh.c index 37f011b..e99f9ee 100644 --- a/src/H5Doh.c +++ b/src/H5Doh.c @@ -361,7 +361,7 @@ H5O_dset_bh_info(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info) /* Check for chunked dataset storage */ if((layout.type == H5D_CHUNKED) && H5F_addr_defined(layout.u.chunk.addr)) - if(H5D_istore_bh_info(f, dxpl_id, &layout, &(bh_info->index_size)) < 0) + if(H5D_chunk_bh_info(f, dxpl_id, &layout, &(bh_info->index_size)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "can't determine chunked dataset btree info") done: diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 1b20ad8..33f628d 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -100,7 +100,9 @@ struct H5D_io_info_t; struct H5D_chunk_map_t; /* Function pointers for I/O on particular types of dataset layouts */ -typedef herr_t (*H5D_layout_init_func_t)(const struct H5D_io_info_t *io_info, +typedef herr_t (*H5D_layout_new_func_t)(H5F_t *f, hid_t dxpl_id, + H5D_t *dset, const H5P_genplist_t *dc_plist); +typedef herr_t (*H5D_layout_io_init_func_t)(const struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, struct H5D_chunk_map_t *cm); @@ -116,11 +118,12 @@ 
typedef ssize_t (*H5D_layout_readvv_func_t)(const struct H5D_io_info_t *io_info, typedef ssize_t (*H5D_layout_writevv_func_t)(const struct H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); -typedef herr_t (*H5D_layout_term_func_t)(const struct H5D_chunk_map_t *cm); +typedef herr_t (*H5D_layout_io_term_func_t)(const struct H5D_chunk_map_t *cm); /* Typedef for grouping layout I/O routines */ typedef struct H5D_layout_ops_t { - H5D_layout_init_func_t init; /* I/O initialization routine */ + H5D_layout_new_func_t new; /* Layout constructor for new datasets */ + H5D_layout_io_init_func_t io_init; /* I/O initialization routine */ H5D_layout_read_func_t ser_read; /* High-level I/O routine for reading data in serial */ H5D_layout_write_func_t ser_write; /* High-level I/O routine for writing data in serial */ #ifdef H5_HAVE_PARALLEL @@ -129,7 +132,7 @@ typedef struct H5D_layout_ops_t { #endif /* H5_HAVE_PARALLEL */ H5D_layout_readvv_func_t readvv; /* Low-level I/O routine for reading data */ H5D_layout_writevv_func_t writevv; /* Low-level I/O routine for writing data */ - H5D_layout_term_func_t term; /* I/O shutdown routine */ + H5D_layout_io_term_func_t io_term; /* I/O shutdown routine */ } H5D_layout_ops_t; /* Function pointers for either multiple or single block I/O access */ @@ -201,6 +204,96 @@ typedef struct H5D_io_info_t { } u; } H5D_io_info_t; + +/******************/ +/* Chunk typedefs */ +/******************/ + +/* Typedef for chunked dataset index operation info */ +typedef struct H5D_chk_idx_info_t { + H5F_t *f; /* File pointer for operation */ + hid_t dxpl_id; /* DXPL ID for operation */ + H5O_layout_t *layout; /* Layout info for chunks */ +} H5D_chk_idx_info_t; + +/* + * "Generic" chunk record. Each chunk is keyed by the minimum logical + * N-dimensional coordinates and the datatype size of the chunk. 
+ * The fastest-varying dimension is assumed to reference individual bytes of + * the array, so a 100-element 1-D array of 4-byte integers would really be a + * 2-D array with the slow varying dimension of size 100 and the fast varying + * dimension of size 4 (the storage dimensionality has very little to do with + * the real dimensionality). + * + * The chunk's file address, filter mask and size on disk are not key values. + */ +typedef struct H5D_chunk_rec_t { + uint32_t nbytes; /* Size of stored data */ + hsize_t offset[H5O_LAYOUT_NDIMS]; /* Logical offset to start*/ + unsigned filter_mask; /* Excluded filters */ + haddr_t chunk_addr; /* Address of chunk in file */ +} H5D_chunk_rec_t; + +/* + * Common data exchange structure for indexed storage nodes. This structure is + * passed through the indexing layer to the methods for the objects + * to which the index points. + */ +typedef struct H5D_chunk_common_ud_t { + /* downward */ + const H5O_layout_t *mesg; /*layout message */ + const hsize_t *offset; /*logical offset of chunk*/ +} H5D_chunk_common_ud_t; + +/* B-tree callback info for various operations */ +typedef struct H5D_chunk_ud_t { + H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */ + + /* Upward */ + uint32_t nbytes; /*size of stored data */ + unsigned filter_mask; /*excluded filters */ + haddr_t addr; /*file address of chunk */ +} H5D_chunk_ud_t; + +/* Typedef for "generic" chunk callbacks */ +typedef int (*H5D_chunk_cb_func_t)(const H5D_chunk_rec_t *chunk_rec, + void *udata); + +/* Typedefs for chunk operations */ +typedef herr_t (*H5D_chunk_init_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_create_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_insert_func_t)(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +typedef haddr_t (*H5D_chunk_get_addr_func_t)(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +typedef int 
(*H5D_chunk_iterate_func_t)(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); +typedef herr_t (*H5D_chunk_remove_func_t)(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_common_ud_t *udata); +typedef herr_t (*H5D_chunk_delete_func_t)(const H5D_chk_idx_info_t *idx_info); +typedef herr_t (*H5D_chunk_copy_setup_func_t)(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst); +typedef herr_t (*H5D_chunk_copy_shutdown_func_t)(H5O_layout_t *layout_src, + H5O_layout_t *layout_dst); +typedef herr_t (*H5D_chunk_size_func_t)(const H5D_chk_idx_info_t *idx_info, + hsize_t *idx_size); +typedef herr_t (*H5D_chunk_dest_func_t)(const H5D_chk_idx_info_t *idx_info); + +/* Typedef for grouping chunk I/O routines */ +typedef struct H5D_chunk_ops_t { + H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ + H5D_chunk_create_func_t create; /* Routine to create chunk index */ + H5D_chunk_insert_func_t insert; /* Routine to insert a chunk into an index */ + H5D_chunk_get_addr_func_t get_addr; /* Routine to retrieve address of chunk in file */ + H5D_chunk_iterate_func_t iterate; /* Routine to iterate over chunks */ + H5D_chunk_remove_func_t remove; /* Routine to remove a chunk from an index */ + H5D_chunk_delete_func_t delete; /* Routine to delete index & all chunks from file*/ + H5D_chunk_copy_setup_func_t copy_setup; /* Routine to perform any necessary setup for copying chunks */ + H5D_chunk_copy_shutdown_func_t copy_shutdown; /* Routine to perform any necessary shutdown for copying chunks */ + H5D_chunk_size_func_t size; /* Routine to get size of indexing information */ + H5D_chunk_dest_func_t dest; /* Routine to destroy indexing information in memory */ +} H5D_chunk_ops_t; + /* Structure holding information about a chunk's selection for mapping */ typedef struct H5D_chunk_info_t { hsize_t index; /* "Index" of chunk in dataset */ @@ -212,6 +305,40 @@ typedef struct H5D_chunk_info_t { unsigned 
mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ } H5D_chunk_info_t; +/* Main structure holding the mapping between file chunks and memory */ +typedef struct H5D_chunk_map_t { + H5O_layout_t *layout; /* Dataset layout information*/ + hsize_t nelmts; /* Number of elements selected in file & memory dataspaces */ + + const H5S_t *file_space; /* Pointer to the file dataspace */ + unsigned f_ndims; /* Number of dimensions for file dataspace */ + hsize_t f_dims[H5O_LAYOUT_NDIMS]; /* File dataspace dimensions */ + + const H5S_t *mem_space; /* Pointer to the memory dataspace */ + H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */ + H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */ + unsigned m_ndims; /* Number of dimensions for memory dataspace */ + H5S_sel_type msel_type; /* Selection type in memory */ + + H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */ + + H5S_t *single_space; /* Dataspace for single chunk */ + H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */ + hbool_t use_single; /* Whether I/O is on a single element */ + + hsize_t last_index; /* Index of last chunk operated on */ + H5D_chunk_info_t *last_chunk_info; /* Pointer to last chunk's info */ + + hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */ + hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ + +#ifdef H5_HAVE_PARALLEL + hsize_t total_chunks; /* Number of chunks covered by dataspace */ + H5D_chunk_info_t **select_chunk; /* Store the information about whether this chunk is selected or not */ +#endif /* H5_HAVE_PARALLEL */ +} H5D_chunk_map_t; + /* Cached information about a particular chunk */ typedef struct H5D_chunk_cached_t{ hbool_t valid; /*whether cache info is valid*/ @@ -223,12 +350,12 @@ typedef struct 
H5D_chunk_cached_t{ /* The raw data chunk cache */ typedef struct H5D_rdcc_t { -#ifdef H5D_ISTORE_DEBUG - unsigned ninits; /* Number of chunk creations */ - unsigned nhits; /* Number of cache hits */ - unsigned nmisses;/* Number of cache misses */ - unsigned nflushes;/* Number of cache flushes */ -#endif /* H5D_ISTORE_DEBUG */ + struct { + unsigned ninits; /* Number of chunk creations */ + unsigned nhits; /* Number of cache hits */ + unsigned nmisses;/* Number of cache misses */ + unsigned nflushes;/* Number of cache flushes */ + } stats; size_t nbytes; /* Current cached raw data in bytes */ size_t nslots; /* Number of chunk slots allocated */ struct H5D_rdcc_ent_t *head; /* Head of doubly linked list */ @@ -266,7 +393,6 @@ typedef struct H5D_shared_t { hid_t dcpl_id; /* dataset creation property id */ H5D_dcpl_cache_t dcpl_cache; /* Cached DCPL values */ H5O_layout_t layout; /* data layout */ - const H5D_layout_ops_t *layout_ops; /* Pointer to data layout I/O operations */ hbool_t checked_filters;/* TRUE if dataset passes can_apply check */ /* Buffered/cached information for types of raw data storage*/ @@ -295,40 +421,6 @@ typedef enum { } H5D_time_alloc_t; -/* Main structure holding the mapping between file chunks and memory */ -typedef struct H5D_chunk_map_t { - H5O_layout_t *layout; /* Dataset layout information*/ - hsize_t nelmts; /* Number of elements selected in file & memory dataspaces */ - - const H5S_t *file_space; /* Pointer to the file dataspace */ - unsigned f_ndims; /* Number of dimensions for file dataspace */ - hsize_t f_dims[H5O_LAYOUT_NDIMS]; /* File dataspace dimensions */ - - const H5S_t *mem_space; /* Pointer to the memory dataspace */ - H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */ - H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */ - unsigned m_ndims; /* Number of dimensions for memory dataspace */ - H5S_sel_type msel_type; /* Selection type in memory */ - - H5SL_t *sel_chunks; /* Skip list 
containing information for each chunk selected */ - - H5S_t *single_space; /* Dataspace for single chunk */ - H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */ - hbool_t use_single; /* Whether I/O is on a single element */ - - hsize_t last_index; /* Index of last chunk operated on */ - H5D_chunk_info_t *last_chunk_info; /* Pointer to last chunk's info */ - - hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */ - hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ - hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ - -#ifdef H5_HAVE_PARALLEL - hsize_t total_chunks; /* Number of chunks covered by dataspace */ - H5D_chunk_info_t **select_chunk; /* Store the information about whether this chunk is selected or not */ -#endif /* H5_HAVE_PARALLEL */ -} H5D_chunk_map_t; - /* Typedef for dataset creation operation */ typedef struct { hid_t type_id; /* Datatype for dataset */ @@ -361,51 +453,6 @@ typedef struct { hbool_t has_vlen_fill_type; /* Whether the datatype for the fill value has a variable-length component */ } H5D_fill_buf_info_t; -/*************************/ -/* For chunk lock */ -/*************************/ -/* - * B-tree key. A key contains the minimum logical N-dimensional address and - * the logical size of the chunk to which this key refers. The - * fastest-varying dimension is assumed to reference individual bytes of the - * array, so a 100-element 1-d array of 4-byte integers would really be a 2-d - * array with the slow varying dimension of size 100 and the fast varying - * dimension of size 4 (the storage dimensionality has very little to do with - * the real dimensionality). - * - * Only the first few values of the OFFSET and SIZE fields are actually - * stored on disk, depending on the dimensionality. - * - * The chunk's file address is part of the B-tree and not part of the key. 
- */ -typedef struct H5D_istore_key_t { - uint32_t nbytes; /*size of stored data */ - hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/ - unsigned filter_mask; /*excluded filters */ -} H5D_istore_key_t; - - -/* - * Common data exchange structure for indexed storage nodes. This structure is - * passed through the B-link tree layer to the methods for the objects - * to which the B-link tree points. - */ -typedef struct H5D_istore_bt_ud_common_t { - /* downward */ - const H5O_layout_t *mesg; /*layout message */ - const hsize_t *offset; /*logical offset of chunk*/ -} H5D_istore_bt_ud_common_t; - -/* B-tree callback info for various operations */ -typedef struct H5D_istore_ud1_t { - H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */ - - /* Upward */ - uint32_t nbytes; /*size of stored data */ - unsigned filter_mask; /*excluded filters */ - haddr_t addr; /*file address of chunk */ -} H5D_istore_ud1_t; - /* Internal data structure for computing variable-length dataset's total size */ typedef struct { hid_t dataset_id; /* ID of the dataset we are working on */ @@ -424,6 +471,7 @@ typedef struct H5D_rdcc_ent_t { hsize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */ uint32_t rd_count; /*bytes remaining to be read */ uint32_t wr_count; /*bytes remaining to be written */ + haddr_t chunk_addr; /*address of chunk in file */ uint32_t chunk_size; /*size of a chunk */ size_t alloc_size; /*amount allocated for the chunk */ uint8_t *chunk; /*the unfiltered chunk data */ @@ -439,12 +487,15 @@ typedef H5D_rdcc_ent_t *H5D_rdcc_ent_ptr_t; /* For free lists */ /*****************************/ extern H5D_dxpl_cache_t H5D_def_dxpl_cache; -/* Storage layout classes */ +/* Storage layout class I/O operations */ H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_CONTIG[1]; H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_EFL[1]; H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_COMPACT[1]; H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_CHUNK[1]; +/* Chunked layout operations */ 
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_ISTORE[1]; + /******************************/ /* Package Private Prototypes */ @@ -457,7 +508,7 @@ H5_DLL H5D_t *H5D_create_named(const H5G_loc_t *loc, const char *name, hid_t dapl_id, hid_t dxpl_id); H5_DLL herr_t H5D_get_space_status(H5D_t *dset, H5D_space_status_t *allocation, hid_t dxpl_id); -H5_DLL herr_t H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_alloc_t time_alloc, +H5_DLL herr_t H5D_alloc_storage(H5D_t *dset, hid_t dxpl_id, H5D_time_alloc_t time_alloc, hbool_t full_overwrite); H5_DLL hsize_t H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id); H5_DLL haddr_t H5D_get_offset(const H5D_t *dset); @@ -490,7 +541,7 @@ H5_DLL herr_t H5D_scatgath_write(const H5D_io_info_t *io_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space); /* Functions that operate on contiguous storage */ -H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); +H5_DLL herr_t H5D_contig_alloc(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); H5_DLL herr_t H5D_contig_fill(H5D_t *dset, hid_t dxpl_id); H5_DLL haddr_t H5D_contig_get_addr(const H5D_t *dset); H5_DLL herr_t H5D_contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, @@ -510,45 +561,41 @@ H5_DLL herr_t H5D_contig_copy(H5F_t *f_src, const H5O_layout_t *layout_src, H5F_ /* Functions that operate on chunked dataset storage */ H5_DLL hbool_t H5D_chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr); +H5_DLL herr_t H5D_chunk_cinfo_cache_reset(H5D_chunk_cached_t *last); +H5_DLL herr_t H5D_chunk_create(H5D_t *dset /*in,out*/, hid_t dxpl_id); +H5_DLL herr_t H5D_chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset); +H5_DLL haddr_t H5D_chunk_get_addr(const H5D_t *dset, hid_t dxpl_id, + const hsize_t *chunk_offset, H5D_chunk_ud_t *udata); +H5_DLL void *H5D_chunk_lock(const H5D_io_info_t *io_info, + H5D_chunk_ud_t *udata, hbool_t relax, unsigned *idx_hint/*in,out*/); +H5_DLL herr_t H5D_chunk_unlock(const H5D_io_info_t 
*io_info, + hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed); +H5_DLL herr_t H5D_chunk_flush(H5D_t *dset, hid_t dxpl_id, unsigned flags); +H5_DLL herr_t H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes); +H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite); +H5_DLL herr_t H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, + const hsize_t *old_dims); +#ifdef H5_HAVE_PARALLEL +H5_DLL herr_t H5D_chunk_addrmap(const H5D_io_info_t *io_info, + haddr_t chunk_addr[], const hsize_t down_chunks[]); +#endif /* H5_HAVE_PARALLEL */ +H5_DLL herr_t H5D_chunk_update_cache(H5D_t *dset, hid_t dxpl_id); +H5_DLL herr_t H5D_chunk_copy(H5F_t *f_src, H5O_layout_t *layout_src, + H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *src_dtype, + H5O_copy_t *cpy_info, H5O_pline_t *pline, hid_t dxpl_id); +H5_DLL herr_t H5D_chunk_bh_info(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout, + hsize_t *btree_size); +H5_DLL herr_t H5D_chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream); +H5_DLL herr_t H5D_chunk_dest(H5F_t *f, hid_t dxpl_id, H5D_t *dset); +#ifdef H5D_CHUNK_DEBUG +H5_DLL herr_t H5D_chunk_stats(const H5D_t *dset, hbool_t headers); +#endif /* H5D_CHUNK_DEBUG */ /* Functions that operate on compact dataset storage */ H5_DLL herr_t H5D_compact_fill(H5D_t *dset, hid_t dxpl_id); H5_DLL herr_t H5D_compact_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *src_dtype, H5O_copy_t *cpy_info, hid_t dxpl_id); -/* Functions that operate on indexed storage */ -/* forward reference for collective-chunk IO use */ -struct H5D_istore_ud1_t; /*define in H5Distore.c*/ -H5_DLL herr_t H5D_istore_init (const H5F_t *f, const H5D_t *dset); -H5_DLL herr_t H5D_istore_flush (H5D_t *dset, hid_t dxpl_id, unsigned flags); -H5_DLL herr_t H5D_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); -H5_DLL herr_t H5D_istore_dest (H5D_t *dset, hid_t dxpl_id); -H5_DLL herr_t H5D_istore_allocate (H5D_t *dset, 
hid_t dxpl_id, - hbool_t full_overwrite); -H5_DLL hsize_t H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id); -H5_DLL herr_t H5D_istore_bh_info(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout, - hsize_t *btree_size); -H5_DLL herr_t H5D_istore_prune_by_extent(const H5D_io_info_t *io_info, - const hsize_t *old_dims); -H5_DLL herr_t H5D_istore_initialize_by_extent(H5D_io_info_t *io_info); -H5_DLL herr_t H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id); -H5_DLL herr_t H5D_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, - haddr_t addr); -H5_DLL herr_t H5D_istore_chunkmap(const H5D_io_info_t *io_info, - haddr_t chunk_addr[], const hsize_t down_chunks[]); -#ifdef H5D_ISTORE_DEBUG -H5_DLL herr_t H5D_istore_stats (H5D_t *dset, hbool_t headers); -#endif /* H5D_ISTORE_DEBUG */ -H5_DLL haddr_t H5D_istore_get_addr(const H5D_io_info_t *io_info, - struct H5D_istore_ud1_t *_udata); -H5_DLL herr_t H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, - H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *src_dtype, - H5O_copy_t *cpy_info, H5O_pline_t *pline, hid_t dxpl_id); -H5_DLL void * H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata, - hbool_t relax, unsigned *idx_hint/*in,out*/); -H5_DLL herr_t H5D_istore_unlock(const H5D_io_info_t *io_info, - hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed); - /* Functions that perform fill value operations on datasets */ H5_DLL herr_t H5D_fill(const void *fill, const H5T_t *fill_type, void *buf, const H5T_t *buf_type, const H5S_t *space, hid_t dxpl_id); diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 8cf134f..f1180a3 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -154,9 +154,10 @@ H5_DLL herr_t H5D_vlen_reclaim(hid_t type_id, H5S_t *space, hid_t plist_id, H5_DLL herr_t H5D_contig_delete(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout); +/* Functions that operate on chunked storage */ +H5_DLL herr_t H5D_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); + 
/* Functions that operate on indexed storage */ -H5_DLL herr_t H5D_istore_delete(H5F_t *f, hid_t dxpl_id, - const H5O_layout_t *layout); H5_DLL herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int fwidth, unsigned ndims); diff --git a/src/H5Gname.c b/src/H5Gname.c index f5db1d0..1efb91e 100644 --- a/src/H5Gname.c +++ b/src/H5Gname.c @@ -67,9 +67,6 @@ typedef struct H5G_gnba_iter_t { /* Declare extern the PQ free list for the wrapped strings */ H5FL_BLK_EXTERN(str_buf); -/* Declare the free list to manage haddr_t's */ -H5FL_DEFINE_STATIC(haddr_t); - /* PRIVATE PROTOTYPES */ static htri_t H5G_common_path(const H5RS_str_t *fullpath_r, const H5RS_str_t *prefix_r); static H5RS_str_t *H5G_build_fullpath(const char *prefix, const char *name); diff --git a/src/H5Gnode.c b/src/H5Gnode.c index 92d5717..f494f99 100644 --- a/src/H5Gnode.c +++ b/src/H5Gnode.c @@ -74,7 +74,6 @@ typedef struct H5G_node_t { /* PRIVATE PROTOTYPES */ static herr_t H5G_node_serialize(H5F_t *f, H5G_node_t *sym, size_t size, uint8_t *buf); static size_t H5G_node_size_real(const H5F_t *f); -static herr_t H5G_node_shared_free(void *shared); /* Metadata cache callbacks */ static H5G_node_t *H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_udata1, @@ -146,12 +145,6 @@ H5FL_DEFINE_STATIC(H5G_node_t); /* Declare a free list to manage sequences of H5G_entry_t's */ H5FL_SEQ_DEFINE_STATIC(H5G_entry_t); -/* Declare a free list to manage the native key offset sequence information */ -H5FL_SEQ_DEFINE_STATIC(size_t); - -/* Declare a free list to manage the raw page information */ -H5FL_BLK_DEFINE_STATIC(grp_page); - /*------------------------------------------------------------------------- * Function: H5G_node_get_shared @@ -1590,39 +1583,26 @@ herr_t H5G_node_init(H5F_t *f) { H5B_shared_t *shared; /* Shared B-tree node info */ - size_t u; /* Local index variable */ + size_t sizeof_rkey; /* Size of raw (disk) key */ herr_t ret_value = SUCCEED; /* Return 
value */ FUNC_ENTER_NOAPI(H5G_node_init, FAIL); /* Check arguments. */ - assert(f); + HDassert(f); - /* Allocate space for the shared structure */ - if(NULL==(shared=H5FL_MALLOC(H5B_shared_t))) - HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info") - - /* Set up the "global" information for this file's groups */ - shared->type= H5B_SNODE; - shared->two_k=2*H5F_KVALUE(f,H5B_SNODE); - shared->sizeof_rkey = H5F_SIZEOF_SIZE(f); /*the name offset */ - assert(shared->sizeof_rkey); - shared->sizeof_rnode = H5B_nodesize(f, shared, &shared->sizeof_keys); - assert(shared->sizeof_rnode); - if(NULL==(shared->page=H5FL_BLK_MALLOC(grp_page,shared->sizeof_rnode))) - HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page") -#ifdef H5_CLEAR_MEMORY -HDmemset(shared->page, 0, shared->sizeof_rnode); -#endif /* H5_CLEAR_MEMORY */ - if(NULL==(shared->nkey=H5FL_SEQ_MALLOC(size_t,(size_t)(2*H5F_KVALUE(f,H5B_SNODE)+1)))) - HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page") - - /* Initialize the offsets into the native key buffer */ - for(u=0; u<(2*H5F_KVALUE(f,H5B_SNODE)+1); u++) - shared->nkey[u]=u*H5B_SNODE->sizeof_nkey; + /* Set the raw key size */ + sizeof_rkey = H5F_SIZEOF_SIZE(f); /*name offset */ + + /* Allocate & initialize global info for the shared structure */ + if(NULL == (shared = H5B_shared_new(f, H5B_SNODE, sizeof_rkey))) + HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info") + + /* Set up the "local" information for this file's groups */ + /* <none> */ /* Make shared B-tree info reference counted */ - if(NULL==(f->shared->grp_btree_shared=H5RC_create(shared,H5G_node_shared_free))) + if(NULL == (f->shared->grp_btree_shared = H5RC_create(shared, H5B_shared_free))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info") done: @@ -1662,40 +1642,6 @@ H5G_node_close(const H5F_t 
*f) /*------------------------------------------------------------------------- - * Function: H5G_node_shared_free - * - * Purpose: Free B-tree shared info - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Thursday, July 8, 2004 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ -static herr_t -H5G_node_shared_free (void *_shared) -{ - H5B_shared_t *shared = (H5B_shared_t *)_shared; - - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5G_node_shared_free) - - /* Free the raw B-tree node buffer */ - H5FL_BLK_FREE(grp_page,shared->page); - - /* Free the B-tree native key offsets buffer */ - H5FL_SEQ_FREE(size_t,shared->nkey); - - /* Free the shared B-tree info */ - H5FL_FREE(H5B_shared_t,shared); - - FUNC_LEAVE_NOAPI(SUCCEED) -} /* end H5G_node_shared_free() */ - - -/*------------------------------------------------------------------------- * Function: H5G_node_copy * * Purpose: This function gets called during a group iterate operation diff --git a/src/H5Gstab.c b/src/H5Gstab.c index 61af068..a5280b5 100644 --- a/src/H5Gstab.c +++ b/src/H5Gstab.c @@ -518,7 +518,7 @@ H5G_stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order, udata.op_data = op_data; /* Iterate over the group members */ - if((ret_value = H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_iterate, stab.btree_addr, &udata)) < 0) + if((ret_value = H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_iterate, &udata)) < 0) HERROR(H5E_SYM, H5E_CANTNEXT, "iteration operator failed"); /* Check for too high of a starting index (ex post facto :-) */ @@ -535,8 +535,7 @@ H5G_stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order, udata.ltable = <able; /* Iterate over the group members */ - if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_build_table, - stab.btree_addr, &udata) < 0) + if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, 
H5G_node_build_table, &udata) < 0) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to build link table") /* Check for skipping out of bounds */ @@ -595,7 +594,7 @@ H5G_stab_count(H5O_loc_t *oloc, hsize_t *num_objs, hid_t dxpl_id) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address") /* Iterate over the group members */ - if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_sumup, stab.btree_addr, num_objs) < 0) + if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, num_objs) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed") done: @@ -616,10 +615,12 @@ done: *------------------------------------------------------------------------- */ herr_t -H5G_stab_bh_size(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab, H5_ih_info_t *bh_info) +H5G_stab_bh_size(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab, + H5_ih_info_t *bh_info) { - H5B_info_ud_t bh_udata; /* User-data for B-tree callbacks */ - herr_t ret_value = SUCCEED; + hsize_t snode_size; /* Symbol table node size */ + H5B_info_t bt_info; /* B-tree node info */ + herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI(H5G_stab_bh_size, FAIL) @@ -628,14 +629,16 @@ H5G_stab_bh_size(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab, H5_ih_info_t * HDassert(stab); HDassert(bh_info); - /* Set up user data for B-tree callback */ - bh_udata.udata = NULL; - bh_udata.btree_size = &(bh_info->index_size); + /* Set up user data for B-tree iteration */ + snode_size = 0; /* Get the B-tree & symbol table node size info */ - if(H5B_iterate_size(f, dxpl_id, H5B_SNODE, H5G_node_iterate_size, stab->btree_addr, &bh_udata) < 0) + if(H5B_get_info(f, dxpl_id, H5B_SNODE, stab->btree_addr, &bt_info, H5G_node_iterate_size, &snode_size) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "iteration operator failed") + /* Add symbol table & B-tree node sizes to index info */ + bh_info->index_size += snode_size + bt_info.size; + /* Get the size of the local heap for the group */ 
if(H5HL_heapsize(f, dxpl_id, stab->heap_addr, &(bh_info->heap_size)) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "iteration operator failed") @@ -723,7 +726,7 @@ H5G_stab_get_name_by_idx(H5O_loc_t *oloc, H5_iter_order_t order, hsize_t n, hsize_t nlinks = 0; /* Number of links in group */ /* Iterate over the symbol table nodes, to count the links */ - if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_sumup, stab.btree_addr, &nlinks) < 0) + if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed") /* Map decreasing iteration order index to increasing iteration order index */ @@ -739,7 +742,7 @@ H5G_stab_get_name_by_idx(H5O_loc_t *oloc, H5_iter_order_t order, hsize_t n, udata.name = NULL; /* Iterate over the group members */ - if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_by_idx, stab.btree_addr, &udata) < 0) + if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed") /* If we don't know the name now, we almost certainly went out of bounds */ @@ -947,7 +950,7 @@ H5G_stab_lookup_by_idx(H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_t n, hsize_t nlinks = 0; /* Number of links in group */ /* Iterate over the symbol table nodes, to count the links */ - if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, H5G_node_sumup, stab.btree_addr, &nlinks) < 0) + if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_sumup, &nlinks) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed") /* Map decreasing iteration order index to increasing iteration order index */ @@ -964,7 +967,7 @@ H5G_stab_lookup_by_idx(H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_t n, udata.found = FALSE; /* Iterate over the group members */ - if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, H5G_node_by_idx, stab.btree_addr, &udata) < 
0) + if(H5B_iterate(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "iteration operator failed") /* If we didn't find the link, we almost certainly went out of bounds */ @@ -1076,7 +1079,7 @@ H5G_stab_get_type_by_idx(H5O_loc_t *oloc, hsize_t idx, hid_t dxpl_id) udata.type = H5G_UNKNOWN; /* Iterate over the group members */ - if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, H5G_node_by_idx, stab.btree_addr, &udata) < 0) + if(H5B_iterate(oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, H5G_node_by_idx, &udata) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5G_UNKNOWN, "iteration operator failed") /* If we don't know the type now, we almost certainly went out of bounds */ diff --git a/src/H5Olayout.c b/src/H5Olayout.c index e750d48..1f371f3 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -136,11 +136,30 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, unsigned UNUSED mesg_flags, p += 5; /* Address */ - if(mesg->type == H5D_CONTIGUOUS) + if(mesg->type == H5D_CONTIGUOUS) { H5F_addr_decode(f, &p, &(mesg->u.contig.addr)); - else if(mesg->type == H5D_CHUNKED) + + /* Set the layout operations */ + mesg->ops = H5D_LOPS_CONTIG; + } /* end if */ + else if(mesg->type == H5D_CHUNKED) { H5F_addr_decode(f, &p, &(mesg->u.chunk.addr)); + /* Set the layout operations */ + mesg->ops = H5D_LOPS_CHUNK; + + /* Set the chunk operations */ + /* (Only "istore" indexing type currently supported */ + mesg->u.chunk.ops = H5D_COPS_ISTORE; + } /* end if */ + else { + /* Sanity check */ + HDassert(mesg->type == H5D_COMPACT); + + /* Set the layout operations */ + mesg->ops = H5D_LOPS_COMPACT; + } /* end else */ + /* Read the size */ if(mesg->type != H5D_CHUNKED) { /* Don't compute size of contiguous storage here, due to possible @@ -185,11 +204,17 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, unsigned UNUSED mesg_flags, HDmemcpy(mesg->u.compact.buf, p, mesg->u.compact.size); p += mesg->u.compact.size; } /* end if 
*/ + + /* Set the layout operations */ + mesg->ops = H5D_LOPS_COMPACT; break; case H5D_CONTIGUOUS: H5F_addr_decode(f, &p, &(mesg->u.contig.addr)); H5F_DECODE_LENGTH(f, p, mesg->u.contig.size); + + /* Set the layout operations */ + mesg->ops = H5D_LOPS_CONTIG; break; case H5D_CHUNKED: @@ -208,6 +233,13 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, unsigned UNUSED mesg_flags, /* Compute chunk size */ for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) mesg->u.chunk.size *= mesg->u.chunk.dim[u]; + + /* Set the layout operations */ + mesg->ops = H5D_LOPS_CHUNK; + + /* Set the chunk operations */ + /* (Only "istore" indexing type currently supported */ + mesg->u.chunk.ops = H5D_COPS_ISTORE; break; default: @@ -576,8 +608,8 @@ H5O_layout_delete(H5F_t *f, hid_t dxpl_id, H5O_t UNUSED *open_oh, void *_mesg) break; case H5D_CHUNKED: /* Chunked blocks on disk */ - /* Free the file space for the raw data */ - if(H5D_istore_delete(f, dxpl_id, mesg) < 0) + /* Free the file space for the index & chunk raw data */ + if(H5D_chunk_delete(f, dxpl_id, mesg) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free raw data") break; @@ -670,7 +702,7 @@ H5O_layout_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst, layout_dst->u.chunk.addr = HADDR_UNDEF; /* create chunked layout */ - if(H5D_istore_copy(file_src, layout_src, file_dst, layout_dst, udata->src_dtype, cpy_info, udata->src_pline, dxpl_id) < 0) + if(H5D_chunk_copy(file_src, layout_src, file_dst, layout_dst, udata->src_dtype, cpy_info, udata->src_pline, dxpl_id) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to copy chunked storage") /* Freed by copy routine */ diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 3c5204f..36ff226 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -326,6 +326,10 @@ typedef struct H5O_efl_t { */ #define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1) +/* Forward declaration of structs used below */ +struct H5D_layout_ops_t; /* Defined in H5Dpkg.h 
*/ +struct H5D_chunk_ops_t; /* Defined in H5Dpkg.h */ + typedef struct H5O_layout_contig_t { haddr_t addr; /* File address of data */ hsize_t size; /* Size of data in bytes */ @@ -337,6 +341,7 @@ typedef struct H5O_layout_chunk_t { uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */ uint32_t size; /* Size of chunk in bytes */ H5RC_t *btree_shared; /* Ref-counted info for B-tree nodes */ + const struct H5D_chunk_ops_t *ops; /* Pointer to chunked layout operations */ } H5O_layout_chunk_t; typedef struct H5O_layout_compact_t { @@ -348,6 +353,7 @@ typedef struct H5O_layout_compact_t { typedef struct H5O_layout_t { H5D_layout_t type; /* Type of layout */ unsigned version; /* Version of message */ + const struct H5D_layout_ops_t *ops; /* Pointer to data layout I/O operations */ union { H5O_layout_contig_t contig; /* Information for contiguous layout */ H5O_layout_chunk_t chunk; /* Information for chunked layout */ diff --git a/src/H5Ostab.c b/src/H5Ostab.c index 978b9c5..231f2ce 100644 --- a/src/H5Ostab.c +++ b/src/H5Ostab.c @@ -386,7 +386,7 @@ H5O_stab_post_copy_file(const H5O_loc_t *src_oloc, const void *mesg_src, H5O_loc udata.cpy_info = cpy_info; /* Iterate over objects in group, copying them */ - if((H5B_iterate(src_oloc->file, dxpl_id, H5B_SNODE, H5G_node_copy, stab_src->btree_addr, &udata)) < 0) + if((H5B_iterate(src_oloc->file, dxpl_id, H5B_SNODE, stab_src->btree_addr, H5G_node_copy, &udata)) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "iteration operator failed") done: diff --git a/src/H5Pint.c b/src/H5Pint.c index 46eb293..233e187 100644 --- a/src/H5Pint.c +++ b/src/H5Pint.c @@ -3556,7 +3556,7 @@ done: REVISION LOG --------------------------------------------------------------------------*/ herr_t -H5P_get(H5P_genplist_t *plist, const char *name, void *value) +H5P_get(const H5P_genplist_t *plist, const char *name, void *value) { H5P_genclass_t *tclass; /* Temporary class pointer */ H5P_genprop_t *prop; /* Temporary property pointer */ diff --git 
a/src/H5Pprivate.h b/src/H5Pprivate.h index a96df21..adb57c1 100644 --- a/src/H5Pprivate.h +++ b/src/H5Pprivate.h @@ -59,7 +59,7 @@ H5_DLL herr_t H5P_init(void); H5_DLL herr_t H5P_close(void *_plist); H5_DLL hid_t H5P_create_id(H5P_genclass_t *pclass); H5_DLL hid_t H5P_copy_plist(H5P_genplist_t *old_plist); -H5_DLL herr_t H5P_get(H5P_genplist_t *plist, const char *name, void *value); +H5_DLL herr_t H5P_get(const H5P_genplist_t *plist, const char *name, void *value); H5_DLL herr_t H5P_set(H5P_genplist_t *plist, const char *name, const void *value); H5_DLL herr_t H5P_insert(H5P_genplist_t *plist, const char *name, size_t size, void *value, H5P_prp_set_func_t prp_set, H5P_prp_get_func_t prp_get, @@ -43,7 +43,6 @@ static herr_t H5S_set_extent_simple (H5S_t *space, unsigned rank, static htri_t H5S_is_simple(const H5S_t *sdim); static herr_t H5S_encode(H5S_t *obj, unsigned char *buf, size_t *nalloc); static H5S_t *H5S_decode(const unsigned char *buf); -static htri_t H5S_extent_equal(const H5S_t *ds1, const H5S_t *ds2); #ifdef H5_HAVE_PARALLEL /* Global vars whose value can be set from environment variable also */ @@ -1933,7 +1932,7 @@ done: DESCRIPTION Compare two dataspaces if their extents are identical. 
--------------------------------------------------------------------------*/ -static htri_t +htri_t H5S_extent_equal(const H5S_t *ds1, const H5S_t *ds2) { unsigned u; /* Local index variable */ diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index 8744872..e78fb51 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -196,7 +196,9 @@ H5_DLL herr_t H5S_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg, FILE *stream H5_DLL int H5S_extend(H5S_t *space, const hsize_t *size); #endif /* H5_NO_DEPRECATED_SYMBOLS */ +/* Operations on dataspace extents */ H5_DLL hsize_t H5S_extent_nelem(const H5S_extent_t *ext); +H5_DLL htri_t H5S_extent_equal(const H5S_t *ds1, const H5S_t *ds2); /* Operations on selections */ H5_DLL herr_t H5S_select_deserialize(H5S_t *space, const uint8_t *buf); |