author      Neil Fortner <nfortne2@hdfgroup.org>   2015-05-21 17:13:16 (GMT)
committer   Neil Fortner <nfortne2@hdfgroup.org>   2015-05-21 17:13:16 (GMT)
commit      948722cde8db7f53866393ada4c08b88b2a91e3e (patch)
tree        afbf15d2ff270b439b9c202d8a89c5d783871270 /src
parent      f7e10b55ab75857e8a5bade01749842b060b5783 (diff)
parent      194d647721cb4f71e6330d11c244b3d7db8309e7 (diff)
download    hdf5-948722cde8db7f53866393ada4c08b88b2a91e3e.zip
            hdf5-948722cde8db7f53866393ada4c08b88b2a91e3e.tar.gz
            hdf5-948722cde8db7f53866393ada4c08b88b2a91e3e.tar.bz2
[svn-r27103] Merge revisions 26780 through 27102 from trunk to vds branch.
Tested: ummon
Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt | 12
-rw-r--r--  src/H5A.c | 26
-rw-r--r--  src/H5AC.c | 2
-rw-r--r--  src/H5ACprivate.h | 51
-rw-r--r--  src/H5Abtree2.c | 132
-rw-r--r--  src/H5Adense.c | 4
-rw-r--r--  src/H5Aint.c | 326
-rw-r--r--  src/H5Apkg.h | 10
-rw-r--r--  src/H5B.c | 110
-rw-r--r--  src/H5B2.c | 78
-rw-r--r--  src/H5B2cache.c | 40
-rw-r--r--  src/H5B2dbg.c | 31
-rw-r--r--  src/H5B2hdr.c | 94
-rw-r--r--  src/H5B2int.c | 536
-rw-r--r--  src/H5B2pkg.h | 86
-rw-r--r--  src/H5B2stat.c | 2
-rw-r--r--  src/H5B2test.c | 110
-rw-r--r--  src/H5Bcache.c | 7
-rw-r--r--  src/H5Bdbg.c | 8
-rw-r--r--  src/H5Bpkg.h | 4
-rw-r--r--  src/H5Bprivate.h | 1
-rw-r--r--  src/H5C.c | 385
-rw-r--r--  src/H5Cpkg.h | 34
-rw-r--r--  src/H5Cprivate.h | 52
-rw-r--r--  src/H5Dbtree.c | 288
-rw-r--r--  src/H5Dchunk.c | 1262
-rw-r--r--  src/H5Dcompact.c | 17
-rw-r--r--  src/H5Dcontig.c | 43
-rw-r--r--  src/H5Ddeprec.c | 75
-rw-r--r--  src/H5Defl.c | 19
-rw-r--r--  src/H5Dint.c | 118
-rw-r--r--  src/H5Dio.c | 15
-rw-r--r--  src/H5Dmpio.c | 21
-rw-r--r--  src/H5Dpkg.h | 47
-rw-r--r--  src/H5Dprivate.h | 2
-rw-r--r--  src/H5EA.c | 14
-rw-r--r--  src/H5EAcache.c | 26
-rw-r--r--  src/H5EAdbg.c | 30
-rw-r--r--  src/H5EAhdr.c | 66
-rw-r--r--  src/H5EApkg.h | 21
-rw-r--r--  src/H5F.c | 12
-rw-r--r--  src/H5FA.c | 20
-rw-r--r--  src/H5FAcache.c | 15
-rw-r--r--  src/H5FAdbg.c | 10
-rw-r--r--  src/H5FAdblock.c | 32
-rw-r--r--  src/H5FAhdr.c | 66
-rw-r--r--  src/H5FApkg.h | 14
-rw-r--r--  src/H5FD.c | 59
-rw-r--r--  src/H5FDcore.c | 4
-rw-r--r--  src/H5FDdirect.c | 2
-rw-r--r--  src/H5FDfamily.c | 42
-rw-r--r--  src/H5FDlog.c | 2
-rw-r--r--  src/H5FDmulti.c | 47
-rw-r--r--  src/H5FDprivate.h | 4
-rw-r--r--  src/H5FDsec2.c | 2
-rw-r--r--  src/H5FS.c | 24
-rw-r--r--  src/H5FScache.c | 18
-rw-r--r--  src/H5FSpkg.h | 4
-rw-r--r--  src/H5Faccum.c | 14
-rw-r--r--  src/H5Fdeprec.c | 2
-rw-r--r--  src/H5Fint.c | 4
-rw-r--r--  src/H5Fio.c | 3
-rw-r--r--  src/H5Fpkg.h | 8
-rw-r--r--  src/H5Fprivate.h | 7
-rw-r--r--  src/H5Fpublic.h | 6
-rw-r--r--  src/H5Fsuper.c | 48
-rw-r--r--  src/H5Fsuper_cache.c | 45
-rw-r--r--  src/H5Gcache.c | 16
-rw-r--r--  src/H5Gdeprec.c | 2
-rw-r--r--  src/H5Glink.c | 2
-rw-r--r--  src/H5HFcache.c | 950
-rw-r--r--  src/H5HFdbg.c | 6
-rw-r--r--  src/H5HFdblock.c | 4
-rw-r--r--  src/H5HFdtable.c | 4
-rw-r--r--  src/H5HFhdr.c | 2
-rw-r--r--  src/H5HFhuge.c | 6
-rw-r--r--  src/H5HLcache.c | 47
-rw-r--r--  src/H5HLpkg.h | 6
-rw-r--r--  src/H5I.c | 71
-rw-r--r--  src/H5MF.c | 2
-rw-r--r--  src/H5O.c | 4
-rw-r--r--  src/H5Oattr.c | 2
-rw-r--r--  src/H5Ocache.c | 90
-rw-r--r--  src/H5Ofill.c | 2
-rw-r--r--  src/H5Omessage.c | 46
-rw-r--r--  src/H5Oprivate.h | 4
-rw-r--r--  src/H5PL.c | 3
-rw-r--r--  src/H5Pencdec.c | 4
-rw-r--r--  src/H5Pgcpl.c | 2
-rw-r--r--  src/H5Rpublic.h | 2
-rw-r--r--  src/H5S.c | 11
-rw-r--r--  src/H5SMcache.c | 35
-rw-r--r--  src/H5Shyper.c | 20
-rw-r--r--  src/H5Smpio.c | 6
-rw-r--r--  src/H5Sselect.c | 4
-rw-r--r--  src/H5T.c | 170
-rw-r--r--  src/H5Tarray.c | 2
-rw-r--r--  src/H5Tconv.c | 18
-rw-r--r--  src/H5Tfloat.c | 2
-rw-r--r--  src/H5Tnative.c | 6
-rw-r--r--  src/H5VM.c | 180
-rw-r--r--  src/H5VMprivate.h | 35
-rw-r--r--  src/H5Zdeflate.c | 6
-rw-r--r--  src/H5Znbit.c | 2
-rw-r--r--  src/H5Zscaleoffset.c | 2
-rw-r--r--  src/H5Zszip.c | 12
-rw-r--r--  src/H5detect.c | 303
-rw-r--r--  src/H5private.h | 7
-rw-r--r--  src/H5public.h | 4
-rw-r--r--  src/H5trace.c | 4
-rw-r--r--  src/Makefile.in | 2
-rw-r--r--  src/libhdf5.settings.in | 2
112 files changed, 3551 insertions, 3278 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index ba345fa..e7b6b0c 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -771,16 +771,8 @@ set (H5_PRIVATE_HEADERS
# Setup the H5Detect utility which generates H5Tinit with platform
# specific type checks inside
#-----------------------------------------------------------------------------
-#add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
-#TARGET_C_PROPERTIES (H5detect STATIC " " " ")
-IF (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
- SET (LOCAL_OPT_FLAG "-O0")
-ELSE (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
- SET (LOCAL_OPT_FLAG " ")
-ENDIF (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
-ADD_EXECUTABLE (H5detect ${HDF5_SRC_DIR}/H5detect.c)
-TARGET_C_PROPERTIES (H5detect STATIC ${LOCAL_OPT_FLAG} " ")
-
+add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
+TARGET_C_PROPERTIES (H5detect STATIC " " " ")
if (MSVC OR MINGW)
target_link_libraries (H5detect "ws2_32.lib")
endif (MSVC OR MINGW)
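
The restored rules above simply build H5detect, the generator described in this hunk's comment: it runs at build time and emits H5Tinit.c with the platform-specific native type information. As a conceptual illustration only (not what H5detect actually emits), a build-time detector of this kind probes the host and prints a generated C source:

    /* Conceptual sketch of a build-time detector, in the spirit of H5detect:
     * probe properties of the build machine and print them as generated C.
     * The real H5detect emits the native datatype descriptions in H5Tinit.c. */
    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        printf("/* generated -- do not edit */\n");
        printf("const unsigned native_int_bits  = %u;\n", (unsigned)(sizeof(int) * CHAR_BIT));
        printf("const unsigned native_long_bits = %u;\n", (unsigned)(sizeof(long) * CHAR_BIT));
        return 0;
    }
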
diff --git a/src/H5A.c b/src/H5A.c
index d474234..a20f730 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -21,7 +21,7 @@
#define H5O_PACKAGE /*suppress error about including H5Opkg */
/* Interface initialization */
-#define H5_INTERFACE_INIT_FUNC H5A_init_interface
+#define H5_INTERFACE_INIT_FUNC H5A__init_interface
/***********/
@@ -125,9 +125,9 @@ done:
/*--------------------------------------------------------------------------
NAME
- H5A_init_interface -- Initialize interface-specific information
+ H5A__init_interface -- Initialize interface-specific information
USAGE
- herr_t H5A_init_interface()
+ herr_t H5A__init_interface()
RETURNS
Non-negative on success/Negative on failure
@@ -136,11 +136,11 @@ DESCRIPTION
--------------------------------------------------------------------------*/
static herr_t
-H5A_init_interface(void)
+H5A__init_interface(void)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/*
* Create attribute ID type.
@@ -150,7 +150,7 @@ H5A_init_interface(void)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5A_init_interface() */
+} /* end H5A__init_interface() */
/*--------------------------------------------------------------------------
@@ -407,7 +407,7 @@ H5Aopen(hid_t loc_id, const char *attr_name, hid_t UNUSED aapl_id)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "unable to load attribute info from object header for attribute: '%s'", attr_name)
/* Finish initializing attribute */
- if(H5A_open_common(&loc, attr) < 0)
+ if(H5A__open_common(&loc, attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "unable to initialize attribute")
/* Register the attribute and get an ID for it */
@@ -595,7 +595,7 @@ H5Awrite(hid_t attr_id, hid_t dtype_id, const void *buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null attribute buffer")
/* Go write the actual data to the attribute */
- if((ret_value = H5A_write(attr, mem_type, buf, H5AC_dxpl_id)) < 0)
+ if((ret_value = H5A__write(attr, mem_type, buf, H5AC_dxpl_id)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_WRITEERROR, FAIL, "unable to write attribute")
done:
@@ -638,7 +638,7 @@ H5Aread(hid_t attr_id, hid_t dtype_id, void *buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null attribute buffer")
/* Go write the actual data to the attribute */
- if((ret_value = H5A_read(attr, mem_type, buf, H5AC_ind_dxpl_id)) < 0)
+ if((ret_value = H5A__read(attr, mem_type, buf, H5AC_ind_dxpl_id)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_READERROR, FAIL, "unable to read attribute")
done:
@@ -799,7 +799,7 @@ H5Aget_name(hid_t attr_id, size_t buf_size, char *buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid buffer")
/* Call private function in turn */
- if(0 > (ret_value = H5A_get_name(my_attr, buf_size, buf)))
+ if(0 > (ret_value = H5A__get_name(my_attr, buf_size, buf)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "can't get attribute name")
done:
@@ -942,7 +942,7 @@ H5Aget_info(hid_t attr_id, H5A_info_t *ainfo)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an attribute")
/* Get the attribute information */
- if(H5A_get_info(attr, ainfo) < 0)
+ if(H5A__get_info(attr, ainfo) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "unable to get attribute info")
done:
@@ -996,7 +996,7 @@ H5Aget_info_by_name(hid_t loc_id, const char *obj_name, const char *attr_name,
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "can't open attribute")
/* Get the attribute information */
- if(H5A_get_info(attr, ainfo) < 0)
+ if(H5A__get_info(attr, ainfo) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "unable to get attribute info")
done:
@@ -1058,7 +1058,7 @@ H5Aget_info_by_idx(hid_t loc_id, const char *obj_name, H5_index_t idx_type,
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "can't open attribute")
/* Get the attribute information */
- if(H5A_get_info(attr, ainfo) < 0)
+ if(H5A__get_info(attr, ainfo) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "unable to get attribute info")
done:
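
The H5A.c hunks above reroute each public entry point (H5Awrite, H5Aread, H5Aget_name, H5Aget_info) to the renamed package-scope worker (H5A__write, H5A__read, H5A__get_name, H5A__get_info), with the workers switching to FUNC_ENTER_STATIC/FUNC_ENTER_PACKAGE. A minimal sketch of that wrapper/worker split, using simplified stand-in types rather than the real HDF5 signatures:

    /* Sketch only: the public wrapper validates arguments, the package-scope
     * "__" worker does the real work, mirroring H5Awrite -> H5A__write above. */
    typedef int herr_t;                 /* stand-in for HDF5's herr_t */
    typedef struct attr_t attr_t;       /* stand-in for an open attribute */

    static herr_t attr__write(attr_t *attr, const void *buf)
    {
        (void)attr;
        (void)buf;                      /* the real worker converts and stores the data */
        return 0;
    }

    herr_t attr_write(attr_t *attr, const void *buf)
    {
        if (!attr || !buf)
            return -1;                  /* argument checks live in the wrapper */
        return attr__write(attr, buf);  /* delegate to the package-scope worker */
    }
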
diff --git a/src/H5AC.c b/src/H5AC.c
index 7ed5047..e6bcbb6 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -598,7 +598,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index ccecd83..cf4c122 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -42,6 +42,7 @@
#define H5AC__TRACE_FILE_ENABLED 0
#endif /* H5_METADATA_TRACE_FILE */
+/* Global metadata tag values */
#define H5AC__INVALID_TAG (haddr_t)0
#define H5AC__IGNORE_TAG (haddr_t)1
#define H5AC__SUPERBLOCK_TAG (haddr_t)2
@@ -339,7 +340,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
H5_DLL herr_t H5AC_init(void);
H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_entry_status(const H5F_t *f, haddr_t addr,
- unsigned * status_ptr);
+ unsigned *status_ptr);
H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t addr, void *thing, unsigned int flags);
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
@@ -349,57 +350,35 @@ H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5AC_unpin_entry(void *thing);
H5_DLL herr_t H5AC_destroy_flush_dependency(void *parent_thing, void *child_thing);
-H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id,
- const H5AC_class_t *type, haddr_t addr,
- void *thing, unsigned flags);
+H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
+ haddr_t addr, void *thing, unsigned flags);
H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_mark_entry_dirty(void *thing);
H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
- haddr_t old_addr, haddr_t new_addr);
-
+ haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
-
H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id,
- const H5AC_class_t *type, haddr_t addr,
- unsigned flags);
-
+ const H5AC_class_t *type, haddr_t addr, unsigned flags);
H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
-
H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
H5_DLL herr_t H5AC_stats(const H5F_t *f);
-
H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
-
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
- H5AC_cache_config_t *config_ptr);
-
-H5_DLL herr_t H5AC_get_cache_size(H5AC_t * cache_ptr,
- size_t * max_size_ptr,
- size_t * min_clean_size_ptr,
- size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr);
-
-H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t * cache_ptr,
- double * hit_rate_ptr);
-
-H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr);
-
+ H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
+ size_t *min_clean_size_ptr, size_t *cur_size_ptr, int32_t *cur_num_entries_ptr);
+H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr);
+H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
- H5AC_cache_config_t *config_ptr);
-
-H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t * config_ptr);
-
-H5_DLL herr_t H5AC_close_trace_file( H5AC_t * cache_ptr);
-
-H5_DLL herr_t H5AC_open_trace_file(H5AC_t * cache_ptr,
- const char * trace_file_name);
+ H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_close_trace_file(H5AC_t *cache_ptr);
+H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name);
H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag);
-
H5_DLL herr_t H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag);
-
H5_DLL herr_t H5AC_ignore_tags(H5F_t * f);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5Abtree2.c b/src/H5Abtree2.c
index 9b7dba6..5e74e55 100644
--- a/src/H5Abtree2.c
+++ b/src/H5Abtree2.c
@@ -79,27 +79,27 @@ typedef struct H5A_fh_ud_cmp_t {
/* v2 B-tree function callbacks */
/* v2 B-tree driver callbacks for 'creation order' index */
-static herr_t H5A_dense_btree2_corder_store(void *native, const void *udata);
-static herr_t H5A_dense_btree2_corder_compare(const void *rec1, const void *rec2);
-static herr_t H5A_dense_btree2_corder_encode(uint8_t *raw, const void *native,
+static herr_t H5A__dense_btree2_corder_store(void *native, const void *udata);
+static herr_t H5A__dense_btree2_corder_compare(const void *rec1, const void *rec2);
+static herr_t H5A__dense_btree2_corder_encode(uint8_t *raw, const void *native,
void *ctx);
-static herr_t H5A_dense_btree2_corder_decode(const uint8_t *raw, void *native,
+static herr_t H5A__dense_btree2_corder_decode(const uint8_t *raw, void *native,
void *ctx);
-static herr_t H5A_dense_btree2_corder_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+static herr_t H5A__dense_btree2_corder_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
int indent, int fwidth, const void *record, const void *_udata);
/* v2 B-tree driver callbacks for 'name' index */
-static herr_t H5A_dense_btree2_name_store(void *native, const void *udata);
-static herr_t H5A_dense_btree2_name_compare(const void *rec1, const void *rec2);
-static herr_t H5A_dense_btree2_name_encode(uint8_t *raw, const void *native,
+static herr_t H5A__dense_btree2_name_store(void *native, const void *udata);
+static herr_t H5A__dense_btree2_name_compare(const void *rec1, const void *rec2);
+static herr_t H5A__dense_btree2_name_encode(uint8_t *raw, const void *native,
void *ctx);
-static herr_t H5A_dense_btree2_name_decode(const uint8_t *raw, void *native,
+static herr_t H5A__dense_btree2_name_decode(const uint8_t *raw, void *native,
void *ctx);
-static herr_t H5A_dense_btree2_name_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+static herr_t H5A__dense_btree2_name_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
int indent, int fwidth, const void *record, const void *_udata);
/* Fractal heap function callbacks */
-static herr_t H5A_dense_fh_name_cmp(const void *obj, size_t obj_len, void *op_data);
+static herr_t H5A__dense_fh_name_cmp(const void *obj, size_t obj_len, void *op_data);
/*********************/
@@ -112,11 +112,11 @@ const H5B2_class_t H5A_BT2_NAME[1]={{ /* B-tree class information */
sizeof(H5A_dense_bt2_name_rec_t), /* Size of native record */
NULL, /* Create client callback context */
NULL, /* Destroy client callback context */
- H5A_dense_btree2_name_store, /* Record storage callback */
- H5A_dense_btree2_name_compare, /* Record comparison callback */
- H5A_dense_btree2_name_encode, /* Record encoding callback */
- H5A_dense_btree2_name_decode, /* Record decoding callback */
- H5A_dense_btree2_name_debug, /* Record debugging callback */
+ H5A__dense_btree2_name_store, /* Record storage callback */
+ H5A__dense_btree2_name_compare, /* Record comparison callback */
+ H5A__dense_btree2_name_encode, /* Record encoding callback */
+ H5A__dense_btree2_name_decode, /* Record decoding callback */
+ H5A__dense_btree2_name_debug, /* Record debugging callback */
NULL, /* Create debugging context */
NULL /* Destroy debugging context */
}};
@@ -128,11 +128,11 @@ const H5B2_class_t H5A_BT2_CORDER[1]={{ /* B-tree class information */
sizeof(H5A_dense_bt2_corder_rec_t),/* Size of native record */
NULL, /* Create client callback context */
NULL, /* Destroy client callback context */
- H5A_dense_btree2_corder_store, /* Record storage callback */
- H5A_dense_btree2_corder_compare, /* Record comparison callback */
- H5A_dense_btree2_corder_encode, /* Record encoding callback */
- H5A_dense_btree2_corder_decode, /* Record decoding callback */
- H5A_dense_btree2_corder_debug, /* Record debugging callback */
+ H5A__dense_btree2_corder_store, /* Record storage callback */
+ H5A__dense_btree2_corder_compare, /* Record comparison callback */
+ H5A__dense_btree2_corder_encode, /* Record encoding callback */
+ H5A__dense_btree2_corder_decode, /* Record decoding callback */
+ H5A__dense_btree2_corder_debug, /* Record debugging callback */
NULL, /* Create debugging context */
NULL /* Destroy debugging context */
}};
@@ -150,7 +150,7 @@ const H5B2_class_t H5A_BT2_CORDER[1]={{ /* B-tree class information */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_fh_name_cmp
+ * Function: H5A__dense_fh_name_cmp
*
* Purpose: Compares the name of a attribute in a fractal heap to another
* name
@@ -164,14 +164,14 @@ const H5B2_class_t H5A_BT2_CORDER[1]={{ /* B-tree class information */
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_fh_name_cmp(const void *obj, size_t UNUSED obj_len, void *_udata)
+H5A__dense_fh_name_cmp(const void *obj, size_t UNUSED obj_len, void *_udata)
{
H5A_fh_ud_cmp_t *udata = (H5A_fh_ud_cmp_t *)_udata; /* User data for 'op' callback */
H5A_t *attr = NULL; /* Pointer to attribute created from heap object */
hbool_t took_ownership = FALSE; /* Whether the "found" operator took ownership of the attribute */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Decode attribute information */
if(NULL == (attr = (H5A_t *)H5O_msg_decode(udata->f, udata->dxpl_id, NULL, H5O_ATTR_ID, (const unsigned char *)obj)))
@@ -200,11 +200,11 @@ done:
H5O_msg_free(H5O_ATTR_ID, attr);
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5A_dense_fh_name_cmp() */
+} /* end H5A__dense_fh_name_cmp() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_name_store
+ * Function: H5A__dense_btree2_name_store
*
* Purpose: Store user information into native record for v2 B-tree
*
@@ -217,12 +217,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_name_store(void *_nrecord, const void *_udata)
+H5A__dense_btree2_name_store(void *_nrecord, const void *_udata)
{
const H5A_bt2_ud_ins_t *udata = (const H5A_bt2_ud_ins_t *)_udata;
H5A_dense_bt2_name_rec_t *nrecord = (H5A_dense_bt2_name_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Copy user information info native record */
nrecord->id = udata->id;
@@ -231,11 +231,11 @@ H5A_dense_btree2_name_store(void *_nrecord, const void *_udata)
nrecord->hash = udata->common.name_hash;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_name_store() */
+} /* H5A__dense_btree2_name_store() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_name_compare
+ * Function: H5A__dense_btree2_name_compare
*
* Purpose: Compare two native information records, according to some key
*
@@ -249,13 +249,13 @@ H5A_dense_btree2_name_store(void *_nrecord, const void *_udata)
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_name_compare(const void *_bt2_udata, const void *_bt2_rec)
+H5A__dense_btree2_name_compare(const void *_bt2_udata, const void *_bt2_rec)
{
const H5A_bt2_ud_common_t *bt2_udata = (const H5A_bt2_ud_common_t *)_bt2_udata;
const H5A_dense_bt2_name_rec_t *bt2_rec = (const H5A_dense_bt2_name_rec_t *)_bt2_rec;
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Sanity check */
HDassert(bt2_udata);
@@ -294,7 +294,7 @@ H5A_dense_btree2_name_compare(const void *_bt2_udata, const void *_bt2_rec)
HDassert(fheap);
/* Check if the user's attribute and the B-tree's attribute have the same name */
- status = H5HF_op(fheap, bt2_udata->dxpl_id, &bt2_rec->id, H5A_dense_fh_name_cmp, &fh_udata);
+ status = H5HF_op(fheap, bt2_udata->dxpl_id, &bt2_rec->id, H5A__dense_fh_name_cmp, &fh_udata);
HDassert(status >= 0);
/* Callback will set comparison value */
@@ -302,11 +302,11 @@ H5A_dense_btree2_name_compare(const void *_bt2_udata, const void *_bt2_rec)
} /* end else */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5A_dense_btree2_name_compare() */
+} /* H5A__dense_btree2_name_compare() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_name_encode
+ * Function: H5A__dense_btree2_name_encode
*
* Purpose: Encode native information into raw form for storing on disk
*
@@ -319,11 +319,11 @@ H5A_dense_btree2_name_compare(const void *_bt2_udata, const void *_bt2_rec)
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_name_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ctx)
+H5A__dense_btree2_name_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ctx)
{
const H5A_dense_bt2_name_rec_t *nrecord = (const H5A_dense_bt2_name_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Encode the record's fields */
HDmemcpy(raw, nrecord->id.id, (size_t)H5O_FHEAP_ID_LEN);
@@ -333,11 +333,11 @@ H5A_dense_btree2_name_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ct
UINT32ENCODE(raw, nrecord->hash)
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_name_encode() */
+} /* H5A__dense_btree2_name_encode() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_name_decode
+ * Function: H5A__dense_btree2_name_decode
*
* Purpose: Decode raw disk form of record into native form
*
@@ -350,11 +350,11 @@ H5A_dense_btree2_name_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ct
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_name_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ctx)
+H5A__dense_btree2_name_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ctx)
{
H5A_dense_bt2_name_rec_t *nrecord = (H5A_dense_bt2_name_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Decode the record's fields */
HDmemcpy(nrecord->id.id, raw, (size_t)H5O_FHEAP_ID_LEN);
@@ -364,11 +364,11 @@ H5A_dense_btree2_name_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ct
UINT32DECODE(raw, nrecord->hash)
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_name_decode() */
+} /* H5A__dense_btree2_name_decode() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_name_debug
+ * Function: H5A__dense_btree2_name_debug
*
* Purpose: Debug native form of record
*
@@ -381,23 +381,23 @@ H5A_dense_btree2_name_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ct
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_name_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+H5A__dense_btree2_name_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
int indent, int fwidth, const void *_nrecord, const void UNUSED *_udata)
{
const H5A_dense_bt2_name_rec_t *nrecord = (const H5A_dense_bt2_name_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
HDfprintf(stream, "%*s%-*s {%016Hx, %02x, %u, %08lx}\n", indent, "", fwidth,
"Record:",
(hsize_t)nrecord->id.val, (unsigned)nrecord->flags, (unsigned)nrecord->corder, (unsigned long)nrecord->hash);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_name_debug() */
+} /* H5A__dense_btree2_name_debug() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_corder_store
+ * Function: H5A__dense_btree2_corder_store
*
* Purpose: Store user information into native record for v2 B-tree
*
@@ -410,12 +410,12 @@ H5A_dense_btree2_name_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dx
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_corder_store(void *_nrecord, const void *_udata)
+H5A__dense_btree2_corder_store(void *_nrecord, const void *_udata)
{
const H5A_bt2_ud_ins_t *udata = (const H5A_bt2_ud_ins_t *)_udata;
H5A_dense_bt2_corder_rec_t *nrecord = (H5A_dense_bt2_corder_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Copy user information info native record */
nrecord->id = udata->id;
@@ -423,11 +423,11 @@ H5A_dense_btree2_corder_store(void *_nrecord, const void *_udata)
nrecord->corder = udata->common.corder;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_corder_store() */
+} /* H5A__dense_btree2_corder_store() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_corder_compare
+ * Function: H5A__dense_btree2_corder_compare
*
* Purpose: Compare two native information records, according to some key
*
@@ -441,13 +441,13 @@ H5A_dense_btree2_corder_store(void *_nrecord, const void *_udata)
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_corder_compare(const void *_bt2_udata, const void *_bt2_rec)
+H5A__dense_btree2_corder_compare(const void *_bt2_udata, const void *_bt2_rec)
{
const H5A_bt2_ud_common_t *bt2_udata = (const H5A_bt2_ud_common_t *)_bt2_udata;
const H5A_dense_bt2_corder_rec_t *bt2_rec = (const H5A_dense_bt2_corder_rec_t *)_bt2_rec;
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Sanity check */
HDassert(bt2_udata);
@@ -462,11 +462,11 @@ H5A_dense_btree2_corder_compare(const void *_bt2_udata, const void *_bt2_rec)
ret_value = 0;
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5A_dense_btree2_corder_compare() */
+} /* H5A__dense_btree2_corder_compare() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_corder_encode
+ * Function: H5A__dense_btree2_corder_encode
*
* Purpose: Encode native information into raw form for storing on disk
*
@@ -479,11 +479,11 @@ H5A_dense_btree2_corder_compare(const void *_bt2_udata, const void *_bt2_rec)
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_corder_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ctx)
+H5A__dense_btree2_corder_encode(uint8_t *raw, const void *_nrecord, void UNUSED *ctx)
{
const H5A_dense_bt2_corder_rec_t *nrecord = (const H5A_dense_bt2_corder_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Encode the record's fields */
HDmemcpy(raw, nrecord->id.id, (size_t)H5O_FHEAP_ID_LEN);
@@ -492,11 +492,11 @@ H5A_dense_btree2_corder_encode(uint8_t *raw, const void *_nrecord, void UNUSED *
UINT32ENCODE(raw, nrecord->corder)
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_corder_encode() */
+} /* H5A__dense_btree2_corder_encode() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_corder_decode
+ * Function: H5A__dense_btree2_corder_decode
*
* Purpose: Decode raw disk form of record into native form
*
@@ -509,11 +509,11 @@ H5A_dense_btree2_corder_encode(uint8_t *raw, const void *_nrecord, void UNUSED *
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_corder_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ctx)
+H5A__dense_btree2_corder_decode(const uint8_t *raw, void *_nrecord, void UNUSED *ctx)
{
H5A_dense_bt2_corder_rec_t *nrecord = (H5A_dense_bt2_corder_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Decode the record's fields */
HDmemcpy(nrecord->id.id, raw, (size_t)H5O_FHEAP_ID_LEN);
@@ -522,11 +522,11 @@ H5A_dense_btree2_corder_decode(const uint8_t *raw, void *_nrecord, void UNUSED *
UINT32DECODE(raw, nrecord->corder)
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_corder_decode() */
+} /* H5A__dense_btree2_corder_decode() */
/*-------------------------------------------------------------------------
- * Function: H5A_dense_btree2_corder_debug
+ * Function: H5A__dense_btree2_corder_debug
*
* Purpose: Debug native form of record
*
@@ -539,17 +539,17 @@ H5A_dense_btree2_corder_decode(const uint8_t *raw, void *_nrecord, void UNUSED *
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_btree2_corder_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+H5A__dense_btree2_corder_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
int indent, int fwidth, const void *_nrecord, const void UNUSED *_udata)
{
const H5A_dense_bt2_corder_rec_t *nrecord = (const H5A_dense_bt2_corder_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
HDfprintf(stream, "%*s%-*s {%016Hx, %02x, %u}\n", indent, "", fwidth,
"Record:",
(hsize_t)nrecord->id.val, (unsigned)nrecord->flags, (unsigned)nrecord->corder);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5A_dense_btree2_corder_debug() */
+} /* H5A__dense_btree2_corder_debug() */
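
The renamed name-index callbacks above keep the existing lookup scheme: each v2 B-tree record carries a 32-bit hash of the attribute name (the UINT32ENCODE/UINT32DECODE of nrecord->hash, computed with H5_checksum_lookup3 at insert time), and H5A__dense_btree2_name_compare only falls back to reading the full name out of the fractal heap (H5HF_op with H5A__dense_fh_name_cmp) when the hashes collide. A rough sketch of that two-stage comparison, with simplified types and a placeholder for the heap lookup:

    /* Sketch only: compare by stored name hash first; resolve hash ties with
     * a full name comparison, which in HDF5 means decoding the heap object. */
    #include <stdint.h>

    struct name_rec { uint32_t hash; /* ...heap ID, flags, creation order... */ };
    struct name_key { uint32_t hash; const char *name; };

    /* Placeholder for the expensive path (H5HF_op + H5A__dense_fh_name_cmp). */
    static int heap_compare_name(const struct name_rec *rec, const char *name)
    {
        (void)rec;
        (void)name;
        return 0;
    }

    int name_index_compare(const struct name_key *key, const struct name_rec *rec)
    {
        if (key->hash < rec->hash) return -1;   /* order records by name hash */
        if (key->hash > rec->hash) return  1;
        /* Hashes match -- only now pay for the full name comparison. */
        return heap_compare_name(rec, key->name);
    }
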
diff --git a/src/H5Adense.c b/src/H5Adense.c
index 0b73a20..674ab5d 100644
--- a/src/H5Adense.c
+++ b/src/H5Adense.c
@@ -538,7 +538,7 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
udata.common.shared_fheap = shared_fheap;
udata.common.name = attr->shared->name;
udata.common.name_hash = H5_checksum_lookup3(attr->shared->name, HDstrlen(attr->shared->name), 0);
- H5_ASSIGN_OVERFLOW(udata.common.flags, mesg_flags, unsigned, uint8_t);
+ H5_CHECKED_ASSIGN(udata.common.flags, uint8_t, mesg_flags, unsigned);
udata.common.corder = attr->shared->crt_idx;
udata.common.found_op = NULL;
udata.common.found_op_data = NULL;
@@ -1082,7 +1082,7 @@ H5A__dense_iterate_bt2_cb(const void *_record, void *_bt2_udata)
H5A_info_t ainfo; /* Info for attribute */
/* Get the attribute information */
- if(H5A_get_info(fh_udata.attr, &ainfo) < 0)
+ if(H5A__get_info(fh_udata.attr, &ainfo) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, H5_ITER_ERROR, "unable to get attribute info")
/* Make the application callback */
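
Throughout this merge, H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) call sites become H5_CHECKED_ASSIGN(dst, dsttype, src, srctype); the destination type now follows the destination directly. The real macro lives in H5private.h and is not part of this diff; purely as an illustration of what such a checked narrowing assignment does, a stand-in could look like:

    /* Illustration only -- NOT the actual H5_CHECKED_ASSIGN definition.
     * A checked assignment verifies the value survives the narrowing
     * conversion before storing it in the destination. */
    #include <assert.h>
    #include <stddef.h>
    #include <sys/types.h>                          /* ssize_t (POSIX) */

    #define CHECKED_ASSIGN(dst, dsttype, src, srctype)                        \
        do {                                                                  \
            srctype checked_src_ = (src);                                     \
            assert(checked_src_ >= 0);          /* counts are non-negative */ \
            (dst) = (dsttype)checked_src_;                                    \
            assert((srctype)(dst) == checked_src_); /* round-trips intact */  \
        } while (0)

    /* Usage mirroring the call sites above (snelmts -> nelmts): */
    size_t to_size(ssize_t n)
    {
        size_t out;
        CHECKED_ASSIGN(out, size_t, n, ssize_t);
        return out;
    }
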
diff --git a/src/H5Aint.c b/src/H5Aint.c
index e28c2cc..9012f6d 100644
--- a/src/H5Aint.c
+++ b/src/H5Aint.c
@@ -92,7 +92,7 @@ typedef struct {
static herr_t H5A__compact_build_table_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
unsigned sequence, unsigned *oh_flags_ptr, void *_udata/*in,out*/);
-static herr_t H5A_dense_build_table_cb(const H5A_t *attr, void *_udata);
+static herr_t H5A__dense_build_table_cb(const H5A_t *attr, void *_udata);
static int H5A__attr_cmp_name_inc(const void *attr1, const void *attr2);
static int H5A__attr_cmp_name_dec(const void *attr1, const void *attr2);
static int H5A__attr_cmp_corder_inc(const void *attr1, const void *attr2);
@@ -255,7 +255,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type,
/* Get # of elements for attribute's dataspace */
if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, NULL, "dataspace is invalid")
- H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);
HDassert(attr->shared->dt_size > 0);
HDassert(attr->shared->ds_size > 0);
@@ -283,13 +283,13 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5A_open_common
+ * Function: H5A__open_common
*
* Purpose:
* Finishes initializing an attributes the open
*
* Usage:
- * herr_t H5A_open_common(loc, name, dxpl_id)
+ * herr_t H5A__open_common(loc, name, dxpl_id)
* const H5G_loc_t *loc; IN: Pointer to group location for object
* H5A_t *attr; IN/OUT: Pointer to attribute to initialize
*
@@ -301,11 +301,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5A_open_common(const H5G_loc_t *loc, H5A_t *attr)
+H5A__open_common(const H5G_loc_t *loc, H5A_t *attr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* check args */
HDassert(loc);
@@ -336,7 +336,7 @@ H5A_open_common(const H5G_loc_t *loc, H5A_t *attr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5A_open_common() */
+} /* H5A__open_common() */
/*-------------------------------------------------------------------------
@@ -383,7 +383,7 @@ H5A_open_by_idx(const H5G_loc_t *loc, const char *obj_name, H5_index_t idx_type,
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, NULL, "unable to load attribute info from object header")
/* Finish initializing attribute */
- if(H5A_open_common(&obj_loc, attr) < 0)
+ if(H5A__open_common(&obj_loc, attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, NULL, "unable to initialize attribute")
/* Set return value */
@@ -448,7 +448,7 @@ H5A_open_by_name(const H5G_loc_t *loc, const char *obj_name, const char *attr_na
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, NULL, "unable to load attribute info from object header")
/* Finish initializing attribute */
- if(H5A_open_common(loc, attr) < 0)
+ if(H5A__open_common(loc, attr) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, NULL, "unable to initialize attribute")
/* Set return value */
@@ -470,11 +470,114 @@ done:
/*--------------------------------------------------------------------------
NAME
- H5A_write
+ H5A__read
+ PURPOSE
+ Actually read in data from an attribute
+ USAGE
+ herr_t H5A__read (attr, mem_type, buf)
+ H5A_t *attr; IN: Attribute to read
+ const H5T_t *mem_type; IN: Memory datatype of buffer
+ void *buf; IN: Buffer for data to read
+ RETURNS
+ Non-negative on success/Negative on failure
+
+ DESCRIPTION
+ This function reads a complete attribute from disk.
+--------------------------------------------------------------------------*/
+herr_t
+H5A__read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id)
+{
+ uint8_t *tconv_buf = NULL; /* datatype conv buffer*/
+ uint8_t *bkg_buf = NULL; /* background buffer */
+ hssize_t snelmts; /* elements in attribute */
+ size_t nelmts; /* elements in attribute*/
+ H5T_path_t *tpath = NULL; /* type conversion info */
+ hid_t src_id = -1, dst_id = -1;/* temporary type atoms*/
+ size_t src_type_size; /* size of source type */
+ size_t dst_type_size; /* size of destination type */
+ size_t buf_size; /* desired buffer size */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(attr);
+ HDassert(mem_type);
+ HDassert(buf);
+
+ /* Create buffer for data to store on disk */
+ if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
+ H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);
+
+ if(nelmts > 0) {
+ /* Get the memory and file datatype sizes */
+ src_type_size = H5T_GET_SIZE(attr->shared->dt);
+ dst_type_size = H5T_GET_SIZE(mem_type);
+
+ /* Check if the attribute has any data yet, if not, fill with zeroes */
+ if(attr->obj_opened && !attr->shared->data)
+ HDmemset(buf, 0, (dst_type_size * nelmts));
+ else { /* Attribute exists and has a value */
+ /* Convert memory buffer into disk buffer */
+ /* Set up type conversion function */
+ if(NULL == (tpath = H5T_path_find(attr->shared->dt, mem_type, NULL, NULL, dxpl_id, FALSE)))
+ HGOTO_ERROR(H5E_ATTR, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dst datatypes")
+
+ /* Check for type conversion required */
+ if(!H5T_path_noop(tpath)) {
+ if((src_id = H5I_register(H5I_DATATYPE, H5T_copy(attr->shared->dt, H5T_COPY_ALL), FALSE)) < 0 ||
+ (dst_id = H5I_register(H5I_DATATYPE, H5T_copy(mem_type, H5T_COPY_ALL), FALSE)) < 0)
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, FAIL, "unable to register types for conversion")
+
+ /* Get the maximum buffer size needed and allocate it */
+ buf_size = nelmts * MAX(src_type_size, dst_type_size);
+ if(NULL == (tconv_buf = H5FL_BLK_MALLOC(attr_buf, buf_size)))
+ HGOTO_ERROR(H5E_ATTR, H5E_NOSPACE, FAIL, "memory allocation failed")
+ if(NULL == (bkg_buf = H5FL_BLK_CALLOC(attr_buf, buf_size)))
+ HGOTO_ERROR(H5E_ATTR, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Copy the attribute data into the buffer for conversion */
+ HDmemcpy(tconv_buf, attr->shared->data, (src_type_size * nelmts));
+
+ /* Perform datatype conversion. */
+ if(H5T_convert(tpath, src_id, dst_id, nelmts, (size_t)0, (size_t)0, tconv_buf, bkg_buf, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTENCODE, FAIL, "datatype conversion failed")
+
+ /* Copy the converted data into the user's buffer */
+ HDmemcpy(buf, tconv_buf, (dst_type_size * nelmts));
+ } /* end if */
+ /* No type conversion necessary */
+ else {
+ HDassert(dst_type_size == src_type_size);
+
+ /* Copy the attribute data into the user's buffer */
+ HDmemcpy(buf, attr->shared->data, (dst_type_size * nelmts));
+ } /* end else */
+ } /* end else */
+ } /* end if */
+
+done:
+ /* Release resources */
+ if(src_id >= 0 && H5I_dec_ref(src_id) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object")
+ if(dst_id >= 0 && H5I_dec_ref(dst_id) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object")
+ if(tconv_buf)
+ tconv_buf = H5FL_BLK_FREE(attr_buf, tconv_buf);
+ if(bkg_buf)
+ bkg_buf = H5FL_BLK_FREE(attr_buf, bkg_buf);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5A__read() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
+ H5A__write
PURPOSE
Actually write out data to an attribute
USAGE
- herr_t H5A_write (attr, mem_type, buf)
+ herr_t H5A__write (attr, mem_type, buf)
H5A_t *attr; IN: Attribute to write
const H5T_t *mem_type; IN: Memory datatype of buffer
const void *buf; IN: Buffer of data to write
@@ -485,7 +588,7 @@ done:
This function writes a complete attribute to disk.
--------------------------------------------------------------------------*/
herr_t
-H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id)
+H5A__write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id)
{
uint8_t *tconv_buf = NULL; /* datatype conv buffer */
hbool_t tconv_owned = FALSE; /* Whether the datatype conv buffer is owned by attribute */
@@ -508,7 +611,7 @@ H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id)
/* Get # of elements for attribute's dataspace */
if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
- H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);
/* If there's actually data elements for the attribute, make a copy of the data passed in */
if(nelmts > 0) {
@@ -579,110 +682,53 @@ done:
bkg_buf = H5FL_BLK_FREE(attr_buf, bkg_buf);
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* H5A_write() */
+} /* H5A__write() */
/*--------------------------------------------------------------------------
NAME
- H5A_read
+ H5A__get_name
PURPOSE
- Actually read in data from an attribute
- USAGE
- herr_t H5A_read (attr, mem_type, buf)
- H5A_t *attr; IN: Attribute to read
- const H5T_t *mem_type; IN: Memory datatype of buffer
- void *buf; IN: Buffer for data to read
+ Private function for H5Aget_name. Gets a copy of the name for an
+ attribute
RETURNS
- Non-negative on success/Negative on failure
-
+ This function returns the length of the attribute's name (which may be
+ longer than 'buf_size') on success or negative for failure.
DESCRIPTION
- This function reads a complete attribute from disk.
+ This function retrieves the name of an attribute for an attribute ID.
+ Up to 'buf_size' characters are stored in 'buf' followed by a '\0' string
+ terminator. If the name of the attribute is longer than 'buf_size'-1,
+ the string terminator is stored in the last position of the buffer to
+ properly terminate the string.
--------------------------------------------------------------------------*/
-herr_t
-H5A_read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id)
+ssize_t
+H5A__get_name(H5A_t *attr, size_t buf_size, char *buf)
{
- uint8_t *tconv_buf = NULL; /* datatype conv buffer*/
- uint8_t *bkg_buf = NULL; /* background buffer */
- hssize_t snelmts; /* elements in attribute */
- size_t nelmts; /* elements in attribute*/
- H5T_path_t *tpath = NULL; /* type conversion info */
- hid_t src_id = -1, dst_id = -1;/* temporary type atoms*/
- size_t src_type_size; /* size of source type */
- size_t dst_type_size; /* size of destination type */
- size_t buf_size; /* desired buffer size */
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(attr);
- HDassert(mem_type);
- HDassert(buf);
-
- /* Create buffer for data to store on disk */
- if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
- HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
- H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
-
- if(nelmts > 0) {
- /* Get the memory and file datatype sizes */
- src_type_size = H5T_GET_SIZE(attr->shared->dt);
- dst_type_size = H5T_GET_SIZE(mem_type);
-
- /* Check if the attribute has any data yet, if not, fill with zeroes */
- if(attr->obj_opened && !attr->shared->data)
- HDmemset(buf, 0, (dst_type_size * nelmts));
- else { /* Attribute exists and has a value */
- /* Convert memory buffer into disk buffer */
- /* Set up type conversion function */
- if(NULL == (tpath = H5T_path_find(attr->shared->dt, mem_type, NULL, NULL, dxpl_id, FALSE)))
- HGOTO_ERROR(H5E_ATTR, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dst datatypes")
-
- /* Check for type conversion required */
- if(!H5T_path_noop(tpath)) {
- if((src_id = H5I_register(H5I_DATATYPE, H5T_copy(attr->shared->dt, H5T_COPY_ALL), FALSE)) < 0 ||
- (dst_id = H5I_register(H5I_DATATYPE, H5T_copy(mem_type, H5T_COPY_ALL), FALSE)) < 0)
- HGOTO_ERROR(H5E_ATTR, H5E_CANTREGISTER, FAIL, "unable to register types for conversion")
+ size_t copy_len, nbytes;
+ ssize_t ret_value;
- /* Get the maximum buffer size needed and allocate it */
- buf_size = nelmts * MAX(src_type_size, dst_type_size);
- if(NULL == (tconv_buf = H5FL_BLK_MALLOC(attr_buf, buf_size)))
- HGOTO_ERROR(H5E_ATTR, H5E_NOSPACE, FAIL, "memory allocation failed")
- if(NULL == (bkg_buf = H5FL_BLK_CALLOC(attr_buf, buf_size)))
- HGOTO_ERROR(H5E_ATTR, H5E_NOSPACE, FAIL, "memory allocation failed")
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
- /* Copy the attribute data into the buffer for conversion */
- HDmemcpy(tconv_buf, attr->shared->data, (src_type_size * nelmts));
+ /* get the real attribute length */
+ nbytes = HDstrlen(attr->shared->name);
+ HDassert((ssize_t)nbytes >= 0); /*overflow, pretty unlikely --rpm*/
- /* Perform datatype conversion. */
- if(H5T_convert(tpath, src_id, dst_id, nelmts, (size_t)0, (size_t)0, tconv_buf, bkg_buf, dxpl_id) < 0)
- HGOTO_ERROR(H5E_ATTR, H5E_CANTENCODE, FAIL, "datatype conversion failed")
+ /* compute the string length which will fit into the user's buffer */
+ copy_len = MIN(buf_size - 1, nbytes);
- /* Copy the converted data into the user's buffer */
- HDmemcpy(buf, tconv_buf, (dst_type_size * nelmts));
- } /* end if */
- /* No type conversion necessary */
- else {
- HDassert(dst_type_size == src_type_size);
+ /* Copy all/some of the name */
+ if(buf && copy_len > 0) {
+ HDmemcpy(buf, attr->shared->name, copy_len);
- /* Copy the attribute data into the user's buffer */
- HDmemcpy(buf, attr->shared->data, (dst_type_size * nelmts));
- } /* end else */
- } /* end else */
+ /* Terminate the string */
+ buf[copy_len]='\0';
} /* end if */
-done:
- /* Release resources */
- if(src_id >= 0 && H5I_dec_ref(src_id) < 0)
- HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object")
- if(dst_id >= 0 && H5I_dec_ref(dst_id) < 0)
- HDONE_ERROR(H5E_ATTR, H5E_CANTDEC, FAIL, "unable to close temporary object")
- if(tconv_buf)
- tconv_buf = H5FL_BLK_FREE(attr_buf, tconv_buf);
- if(bkg_buf)
- bkg_buf = H5FL_BLK_FREE(attr_buf, bkg_buf);
+ /* Set return value */
+ ret_value = (ssize_t)nbytes;
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5A_read() */
+} /* H5A__get_name() */
/*-------------------------------------------------------------------------
@@ -826,54 +872,8 @@ done:
} /* end H5Aget_create_plist() */
-/*--------------------------------------------------------------------------
- NAME
- H5A_get_name
- PURPOSE
- Private function for H5Aget_name. Gets a copy of the name for an
- attribute
- RETURNS
- This function returns the length of the attribute's name (which may be
- longer than 'buf_size') on success or negative for failure.
- DESCRIPTION
- This function retrieves the name of an attribute for an attribute ID.
- Up to 'buf_size' characters are stored in 'buf' followed by a '\0' string
- terminator. If the name of the attribute is longer than 'buf_size'-1,
- the string terminator is stored in the last position of the buffer to
- properly terminate the string.
---------------------------------------------------------------------------*/
-ssize_t
-H5A_get_name(H5A_t *attr, size_t buf_size, char *buf)
-{
- size_t copy_len, nbytes;
- ssize_t ret_value;
-
- FUNC_ENTER_NOAPI_NOERR
-
- /* get the real attribute length */
- nbytes = HDstrlen(attr->shared->name);
- HDassert((ssize_t)nbytes >= 0); /*overflow, pretty unlikely --rpm*/
-
- /* compute the string length which will fit into the user's buffer */
- copy_len = MIN(buf_size - 1, nbytes);
-
- /* Copy all/some of the name */
- if(buf && copy_len > 0) {
- HDmemcpy(buf, attr->shared->name, copy_len);
-
- /* Terminate the string */
- buf[copy_len]='\0';
- } /* end if */
-
- /* Set return value */
- ret_value = (ssize_t)nbytes;
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5A_get_name() */
-
-
/*-------------------------------------------------------------------------
- * Function: H5A_get_info
+ * Function: H5A__get_info
*
* Purpose: Retrieve information about an attribute.
*
@@ -886,7 +886,7 @@ H5A_get_name(H5A_t *attr, size_t buf_size, char *buf)
*-------------------------------------------------------------------------
*/
herr_t
-H5A_get_info(const H5A_t *attr, H5A_info_t *ainfo)
+H5A__get_info(const H5A_t *attr, H5A_info_t *ainfo)
{
FUNC_ENTER_NOAPI_NOERR
@@ -907,7 +907,7 @@ H5A_get_info(const H5A_t *attr, H5A_info_t *ainfo)
} /* end else */
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5A_get_info() */
+} /* end H5A__get_info() */
/*-------------------------------------------------------------------------
@@ -1347,7 +1347,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5A_dense_build_table_cb
+ * Function: H5A__dense_build_table_cb
*
* Purpose: Callback routine for building table of attributes from dense
* attribute storage.
@@ -1362,12 +1362,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_build_table_cb(const H5A_t *attr, void *_udata)
+H5A__dense_build_table_cb(const H5A_t *attr, void *_udata)
{
H5A_dense_bt_ud_t *udata = (H5A_dense_bt_ud_t *)_udata; /* 'User data' passed in */
herr_t ret_value = H5_ITER_CONT; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check arguments */
HDassert(attr);
@@ -1387,7 +1387,7 @@ H5A_dense_build_table_cb(const H5A_t *attr, void *_udata)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5A_dense_build_table_cb() */
+} /* end H5A__dense_build_table_cb() */
/*-------------------------------------------------------------------------
@@ -1453,7 +1453,7 @@ H5A_dense_build_table(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
/* Build iterator operator */
attr_op.op_type = H5A_ATTR_OP_LIB;
- attr_op.u.lib_op = H5A_dense_build_table_cb;
+ attr_op.u.lib_op = H5A__dense_build_table_cb;
/* Iterate over the links in the group, building a table of the link messages */
if(H5A_dense_iterate(f, dxpl_id, (hid_t)0, ainfo, H5_INDEX_NAME,
@@ -1680,7 +1680,7 @@ H5A_attr_iterate_table(const H5A_attr_table_t *atable, hsize_t skip,
*last_attr = skip;
/* Iterate over attribute messages */
- H5_ASSIGN_OVERFLOW(/* To: */ u, /* From: */ skip, /* From: */ hsize_t, /* To: */ size_t)
+ H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t)
for(; u < atable->nattrs && !ret_value; u++) {
/* Check which type of callback to make */
switch(attr_op->op_type) {
@@ -1689,7 +1689,7 @@ H5A_attr_iterate_table(const H5A_attr_table_t *atable, hsize_t skip,
H5A_info_t ainfo; /* Info for attribute */
/* Get the attribute information */
- if(H5A_get_info(atable->attrs[u], &ainfo) < 0)
+ if(H5A__get_info(atable->attrs[u], &ainfo) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, H5_ITER_ERROR, "unable to get attribute info")
/* Make the application callback */
@@ -2012,7 +2012,11 @@ H5A_attr_copy_file(const H5A_t *attr_src, H5F_t *file_dst, hbool_t *recompute_si
*recompute_size = TRUE;
/* Compute the size of the data */
- H5_ASSIGN_OVERFLOW(attr_dst->shared->data_size, H5S_GET_EXTENT_NPOINTS(attr_dst->shared->ds) * H5T_get_size(attr_dst->shared->dt), hssize_t, size_t);
+ /* NOTE: This raises warnings. If we are going to be serious about
+ * expecting overflow here, we should implement testing similar to
+ * that described in CERT bulletins INT30-C and INT32-C.
+ */
+ H5_CHECKED_ASSIGN(attr_dst->shared->data_size, size_t, H5S_GET_EXTENT_NPOINTS(attr_dst->shared->ds) * H5T_get_size(attr_dst->shared->dt), hssize_t);
/* Copy (& convert) the data, if necessary */
if(attr_src->shared->data) {
@@ -2253,7 +2257,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5A_dense_post_copy_file_cb
+ * Function: H5A__dense_post_copy_file_cb
*
* Purpose: Callback routine for copying a dense attribute from SRC to DST.
*
@@ -2267,13 +2271,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5A_dense_post_copy_file_cb(const H5A_t *attr_src, void *_udata)
+H5A__dense_post_copy_file_cb(const H5A_t *attr_src, void *_udata)
{
H5A_dense_file_cp_ud_t *udata = (H5A_dense_file_cp_ud_t *)_udata;
H5A_t *attr_dst = NULL;
herr_t ret_value = H5_ITER_CONT; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check arguments */
HDassert(attr_src);
@@ -2309,7 +2313,7 @@ done:
HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close destination attribute")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5A_dense_post_copy_file_cb() */
+} /* end H5A__dense_post_copy_file_cb() */
/*-------------------------------------------------------------------------
@@ -2350,7 +2354,7 @@ H5A_dense_post_copy_file_all(const H5O_loc_t *src_oloc, const H5O_ainfo_t *ainfo
udata.oloc_dst = dst_oloc;
attr_op.op_type = H5A_ATTR_OP_LIB;
- attr_op.u.lib_op = H5A_dense_post_copy_file_cb;
+ attr_op.u.lib_op = H5A__dense_post_copy_file_cb;
if(H5A_dense_iterate(src_oloc->file, dxpl_id, (hid_t)0, ainfo_src, H5_INDEX_NAME,
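
The relocated H5A__get_name keeps the contract documented in its header: the return value is the full length of the attribute name (which may exceed buf_size), while at most buf_size-1 characters plus a NUL terminator are written to the caller's buffer. A typical caller-side pattern built on that contract, assuming the usual HDF5 public headers and the documented "pass 0/NULL to query the length" behaviour of H5Aget_name:

    /* Sketch of the length-then-copy idiom for attribute names.
     * Assumes hdf5.h and an open attribute ID; error handling trimmed. */
    #include <stdlib.h>
    #include "hdf5.h"

    char *get_attr_name(hid_t attr_id)
    {
        ssize_t len = H5Aget_name(attr_id, 0, NULL);  /* full length, nothing copied */
        if (len < 0)
            return NULL;

        char *name = (char *)malloc((size_t)len + 1); /* room for the terminator */
        if (name && H5Aget_name(attr_id, (size_t)len + 1, name) < 0) {
            free(name);
            name = NULL;
        }
        return name;
    }
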
diff --git a/src/H5Apkg.h b/src/H5Apkg.h
index f587f81..d3fcd53 100644
--- a/src/H5Apkg.h
+++ b/src/H5Apkg.h
@@ -192,12 +192,9 @@ H5_DLL H5A_t *H5A_open_by_name(const H5G_loc_t *loc, const char *obj_name,
const char *attr_name, hid_t lapl_id, hid_t dxpl_id);
H5_DLL H5A_t *H5A_open_by_idx(const H5G_loc_t *loc, const char *obj_name,
H5_index_t idx_type, H5_iter_order_t order, hsize_t n, hid_t lapl_id, hid_t dxpl_id);
-H5_DLL herr_t H5A_open_common(const H5G_loc_t *loc, H5A_t *attr);
-H5_DLL herr_t H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id);
-H5_DLL herr_t H5A_read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id);
-H5_DLL ssize_t H5A_get_name(H5A_t *attr, size_t buf_size, char *buf);
+H5_DLL herr_t H5A__open_common(const H5G_loc_t *loc, H5A_t *attr);
H5_DLL H5A_t *H5A_copy(H5A_t *new_attr, const H5A_t *old_attr);
-H5_DLL herr_t H5A_get_info(const H5A_t *attr, H5A_info_t *ainfo);
+H5_DLL herr_t H5A__get_info(const H5A_t *attr, H5A_info_t *ainfo);
H5_DLL hid_t H5A_get_type(H5A_t *attr);
H5_DLL hid_t H5A_get_space(H5A_t *attr);
H5_DLL hid_t H5A_get_create_plist(H5A_t* attr);
@@ -209,6 +206,9 @@ H5_DLL herr_t H5A_rename_by_name(H5G_loc_t loc, const char *obj_name, const char
const char *new_attr_name, hid_t lapl_id, hid_t dxpl_id);
H5_DLL htri_t H5A_exists_by_name(H5G_loc_t loc, const char *obj_name, const char *attr_name,
hid_t lapl_id, hid_t dxpl_id);
+H5_DLL herr_t H5A__write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id);
+H5_DLL herr_t H5A__read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id);
+H5_DLL ssize_t H5A__get_name(H5A_t *attr, size_t buf_size, char *buf);
/* Attribute "dense" storage routines */
H5_DLL herr_t H5A_dense_create(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo);
diff --git a/src/H5B.c b/src/H5B.c
index 621209f..765a57e 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -134,7 +134,7 @@ typedef struct H5B_iter_ud_t {
} H5B_info_ud_t;
/* Convenience struct for the arguments needed to unprotect a b-tree after a
- * call to H5B_iterate_helper() or H5B_split() */
+ * call to H5B__iterate_helper() or H5B__split() */
typedef struct H5B_ins_ud_t {
H5B_t *bt; /* B-tree */
haddr_t addr; /* B-tree address */
@@ -145,7 +145,7 @@ typedef struct H5B_ins_ud_t {
/********************/
/* Local Prototypes */
/********************/
-static H5B_ins_t H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
+static H5B_ins_t H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
const H5B_class_t *type,
uint8_t *lt_key,
hbool_t *lt_key_changed,
@@ -153,13 +153,13 @@ static H5B_ins_t H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
uint8_t *rt_key,
hbool_t *rt_key_changed,
H5B_ins_ud_t *split_bt_ud/*out*/);
-static herr_t H5B_insert_child(H5B_t *bt, unsigned *bt_flags,
+static herr_t H5B__insert_child(H5B_t *bt, unsigned *bt_flags,
unsigned idx, haddr_t child,
H5B_ins_t anchor, const void *md_key);
-static herr_t H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
+static herr_t H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
unsigned idx, void *udata,
H5B_ins_ud_t *split_bt_ud/*out*/);
-static H5B_t * H5B_copy(const H5B_t *old_bt);
+static H5B_t * H5B__copy(const H5B_t *old_bt);
/*********************/
@@ -258,7 +258,7 @@ H5B_create(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, void *udata,
if(H5AC_insert_entry(f, dxpl_id, H5AC_BT, *addr_p, bt, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree root node to cache")
#ifdef H5B_DEBUG
- H5B_assert(f, dxpl_id, *addr_p, shared->type, udata);
+ H5B__assert(f, dxpl_id, *addr_p, shared->type, udata);
#endif
done:
@@ -269,7 +269,7 @@ done:
} /* end if */
if(bt)
/* Destroy B-tree node */
- if(H5B_node_dest(bt) < 0)
+ if(H5B__node_dest(bt) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
} /* end if */
@@ -375,7 +375,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5B_split
+ * Function: H5B__split
*
* Purpose: Split a single node into two nodes. The old node will
* contain the left children and the new node will contain the
@@ -397,7 +397,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
+H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
void *udata, H5B_ins_ud_t *split_bt_ud/*out*/)
{
H5P_genplist_t *dx_plist; /* Data transfer property list */
@@ -407,7 +407,7 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
double split_ratios[3]; /* B-tree split ratios */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/*
* Check arguments.
@@ -446,7 +446,7 @@ H5B_split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
side = "LEFT";
else
side = "MIDDLE";
- fprintf(H5DEBUG(B), "H5B_split: %3u {%5.3f,%5.3f,%5.3f} %6s",
+ fprintf(H5DEBUG(B), "H5B__split: %3u {%5.3f,%5.3f,%5.3f} %6s",
shared->two_k, split_ratios[0], split_ratios[1], split_ratios[2], side);
}
#endif
@@ -539,7 +539,7 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_split() */
+} /* end H5B__split() */
/*-------------------------------------------------------------------------
@@ -601,7 +601,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to locate root of B-tree")
/* Insert the object */
- if((int)(my_ins = H5B_insert_helper(f, dxpl_id, &bt_ud, type, lt_key,
+ if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &bt_ud, type, lt_key,
&lt_key_changed, md_key, udata, rt_key, &rt_key_changed,
&split_bt_ud/*out*/)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to insert key")
@@ -638,7 +638,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
*/
/* Make a copy of the old root information */
- if(NULL == (new_root_bt = H5B_copy(bt_ud.bt)))
+ if(NULL == (new_root_bt = H5B__copy(bt_ud.bt)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to copy old root")
/* Unprotect the old root so we can move it. Also force it to be marked
@@ -677,7 +677,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
done:
if(ret_value < 0)
- if(new_root_bt && H5B_node_dest(new_root_bt) < 0)
+ if(new_root_bt && H5B__node_dest(new_root_bt) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, FAIL, "unable to free B-tree root node");
if(bt_ud.bt)
@@ -690,7 +690,7 @@ done:
#ifdef H5B_DEBUG
if(ret_value >= 0)
- H5B_assert(f, dxpl_id, addr, type, udata);
+ H5B__assert(f, dxpl_id, addr, type, udata);
#endif
FUNC_LEAVE_NOAPI(ret_value)
@@ -698,7 +698,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5B_insert_child
+ * Function: H5B__insert_child
*
* Purpose: Insert a child to the left or right of child[IDX] depending
* on whether ANCHOR is H5B_INS_LEFT or H5B_INS_RIGHT. The BT
@@ -713,13 +713,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
+H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
haddr_t child, H5B_ins_t anchor, const void *md_key)
{
H5B_shared_t *shared; /* Pointer to shared B-tree info */
uint8_t *base; /* Base offset for move */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
HDassert(bt);
HDassert(bt_flags);
@@ -771,7 +771,7 @@ H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
/*-------------------------------------------------------------------------
- * Function: H5B_insert_helper
+ * Function: H5B__insert_helper
*
* Purpose: Inserts the item UDATA into the tree rooted at ADDR and having
* the specified type.
@@ -802,7 +802,7 @@ H5B_insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx,
*-------------------------------------------------------------------------
*/
static H5B_ins_t
-H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
+H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
const H5B_class_t *type,
uint8_t *lt_key, hbool_t *lt_key_changed,
uint8_t *md_key, void *udata,
@@ -820,7 +820,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
H5B_ins_t my_ins = H5B_INS_ERROR;
H5B_ins_t ret_value = H5B_INS_ERROR; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/*
* Check arguments
@@ -904,7 +904,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
- if((int)(my_ins = H5B_insert_helper(f, dxpl_id, &child_bt_ud, type,
+ if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
H5B_NKEY(bt,shared,idx), lt_key_changed, md_key,
udata, H5B_NKEY(bt, shared, idx + 1), rt_key_changed,
&new_child_bt_ud/*out*/)) < 0)
@@ -950,7 +950,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
- if((int)(my_ins = H5B_insert_helper(f, dxpl_id, &child_bt_ud, type,
+ if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
H5B_NKEY(bt, shared, idx), lt_key_changed, md_key, udata,
H5B_NKEY(bt, shared, idx + 1), rt_key_changed,
&new_child_bt_ud/*out*/)) < 0)
@@ -1005,7 +1005,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
- if((int)(my_ins = H5B_insert_helper(f, dxpl_id, &child_bt_ud, type,
+ if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
H5B_NKEY(bt, shared, idx), lt_key_changed, md_key, udata,
H5B_NKEY(bt, shared, idx + 1), rt_key_changed, &new_child_bt_ud/*out*/)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, "can't insert subtree")
@@ -1064,7 +1064,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
* If this node is full then split it before inserting the new child.
*/
if(bt->nchildren == shared->two_k) {
- if(H5B_split(f, dxpl_id, bt_ud, idx, udata, split_bt_ud/*out*/) < 0)
+ if(H5B__split(f, dxpl_id, bt_ud, idx, udata, split_bt_ud/*out*/) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, H5B_INS_ERROR, "unable to split node")
if(idx < bt->nchildren) {
tmp_bt = bt;
@@ -1081,7 +1081,7 @@ H5B_insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
} /* end else */
/* Insert the child */
- if(H5B_insert_child(tmp_bt, tmp_bt_flags_ptr, idx, new_child_bt_ud.addr, my_ins, md_key) < 0)
+ if(H5B__insert_child(tmp_bt, tmp_bt_flags_ptr, idx, new_child_bt_ud.addr, my_ins, md_key) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, "can't insert child")
} /* end else-if */
@@ -1119,7 +1119,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5B_iterate_helper
+ * Function: H5B__iterate_helper
*
* Purpose: Calls the list callback for each leaf node of the
* B-tree, passing it the caller's UDATA structure.
@@ -1133,7 +1133,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+H5B__iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
H5B_operator_t op, void *udata)
{
H5B_t *bt = NULL; /* Pointer to current B-tree node */
@@ -1143,7 +1143,7 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add
unsigned u; /* Local index variable */
herr_t ret_value = H5_ITER_CONT; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/*
* Check arguments.
@@ -1170,7 +1170,7 @@ H5B_iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t add
/* Iterate over node's children */
for(u = 0; u < bt->nchildren && ret_value == H5_ITER_CONT; u++) {
if(bt->level > 0)
- ret_value = H5B_iterate_helper(f, dxpl_id, type, bt->child[u], op, udata);
+ ret_value = H5B__iterate_helper(f, dxpl_id, type, bt->child[u], op, udata);
else
ret_value = (*op)(f, dxpl_id, H5B_NKEY(bt, shared, u), bt->child[u], H5B_NKEY(bt, shared, u + 1), udata);
if(ret_value < 0)
@@ -1182,7 +1182,7 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to release B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_iterate_helper() */
+} /* end H5B__iterate_helper() */
/*-------------------------------------------------------------------------
@@ -1217,7 +1217,7 @@ H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
HDassert(udata);
/* Iterate over the B-tree records */
- if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
+ if((ret_value = H5B__iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed");
FUNC_LEAVE_NOAPI(ret_value)
@@ -1225,7 +1225,7 @@ H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
/*-------------------------------------------------------------------------
- * Function: H5B_remove_helper
+ * Function: H5B__remove_helper
*
* Purpose: The recursive part of removing an item from a B-tree. The
* sub B-tree that is being considered is located at ADDR and
@@ -1249,7 +1249,7 @@ H5B_iterate(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
*-------------------------------------------------------------------------
*/
static H5B_ins_t
-H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type,
+H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type,
int level, uint8_t *lt_key/*out*/,
hbool_t *lt_key_changed/*out*/, void *udata,
uint8_t *rt_key/*out*/, hbool_t *rt_key_changed/*out*/)
@@ -1263,7 +1263,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
int cmp = 1; /* Key comparison value */
H5B_ins_t ret_value = H5B_INS_ERROR;
- FUNC_ENTER_NOAPI(H5B_INS_ERROR)
+ FUNC_ENTER_STATIC
HDassert(f);
HDassert(H5F_addr_defined(addr));
@@ -1308,7 +1308,7 @@ H5B_remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type
HDassert(idx < bt->nchildren);
if(bt->level > 0) {
/* We're at an internal node -- call recursively */
- if((int)(ret_value = H5B_remove_helper(f, dxpl_id,
+ if((int)(ret_value = H5B__remove_helper(f, dxpl_id,
bt->child[idx], type, level + 1, H5B_NKEY(bt, shared, idx)/*out*/,
lt_key_changed/*out*/, udata, H5B_NKEY(bt, shared, idx + 1)/*out*/,
rt_key_changed/*out*/)) < 0)
@@ -1555,7 +1555,7 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to release node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_remove_helper() */
+} /* end H5B__remove_helper() */
/*-------------------------------------------------------------------------
@@ -1594,12 +1594,12 @@ H5B_remove(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
HDassert(H5F_addr_defined(addr));
/* The actual removal */
- if(H5B_remove_helper(f, dxpl_id, addr, type, 0, lt_key, &lt_key_changed,
+ if(H5B__remove_helper(f, dxpl_id, addr, type, 0, lt_key, &lt_key_changed,
udata, rt_key, &rt_key_changed) == H5B_INS_ERROR)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to remove entry from B-tree")
#ifdef H5B_DEBUG
- H5B_assert(f, dxpl_id, addr, type, udata);
+ H5B__assert(f, dxpl_id, addr, type, udata);
#endif
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1789,7 +1789,7 @@ H5B_shared_free(void *_shared)
/*-------------------------------------------------------------------------
- * Function: H5B_copy
+ * Function: H5B__copy
*
* Purpose: Deep copies an existing H5B_t node.
*
@@ -1804,13 +1804,13 @@ H5B_shared_free(void *_shared)
*-------------------------------------------------------------------------
*/
static H5B_t *
-H5B_copy(const H5B_t *old_bt)
+H5B__copy(const H5B_t *old_bt)
{
H5B_t *new_node = NULL;
H5B_shared_t *shared; /* Pointer to shared B-tree info */
H5B_t *ret_value;
- FUNC_ENTER_NOAPI(NULL)
+ FUNC_ENTER_STATIC
/*
* Check arguments.
@@ -1853,11 +1853,11 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_copy() */
+} /* end H5B__copy() */
/*-------------------------------------------------------------------------
- * Function: H5B_get_info_helper
+ * Function: H5B__get_info_helper
*
* Purpose: Walks the B-tree nodes, getting information for all of them.
*
@@ -1870,7 +1870,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+H5B__get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
const H5B_info_ud_t *info_udata)
{
H5B_t *bt = NULL; /* Pointer to current B-tree node */
@@ -1883,7 +1883,7 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
haddr_t left_child; /* Address of left-most child in node */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/*
* Check arguments.
@@ -1951,7 +1951,7 @@ H5B_get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
/* Check for another "row" of B-tree nodes to iterate over */
if(level > 0) {
/* Keep following the left-most child until we reach a leaf node. */
- if(H5B_get_info_helper(f, dxpl_id, type, left_child, info_udata) < 0)
+ if(H5B__get_info_helper(f, dxpl_id, type, left_child, info_udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "unable to list B-tree node")
} /* end if */
@@ -1960,7 +1960,7 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_get_info_helper() */
+} /* end H5B__get_info_helper() */
/*-------------------------------------------------------------------------
@@ -2001,13 +2001,13 @@ H5B_get_info(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
info_udata.udata = udata;
/* Iterate over the B-tree nodes */
- if(H5B_get_info_helper(f, dxpl_id, type, addr, &info_udata) < 0)
+ if(H5B__get_info_helper(f, dxpl_id, type, addr, &info_udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_BADITER, FAIL, "B-tree iteration failed")
/* Iterate over the B-tree records, making any "leaf" callbacks */
/* (Only if operator defined) */
if(op)
- if((ret_value = H5B_iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
+ if((ret_value = H5B__iterate_helper(f, dxpl_id, type, addr, op, udata)) < 0)
HERROR(H5E_BTREE, H5E_BADITER, "B-tree iteration failed");
done:
@@ -2072,7 +2072,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5B_node_dest
+ * Function: H5B__node_dest
*
* Purpose: Destroy/release a B-tree node
*
@@ -2086,9 +2086,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B_node_dest(H5B_t *bt)
+H5B__node_dest(H5B_t *bt)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_PACKAGE_NOERR
/* check arguments */
HDassert(bt);
@@ -2100,5 +2100,5 @@ H5B_node_dest(H5B_t *bt)
bt = H5FL_FREE(H5B_t, bt);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5B_node_dest() */
+} /* end H5B__node_dest() */
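The H5B.c hunks above are almost entirely mechanical: module-private helpers move from the H5B_ prefix to the H5B__ package prefix and their entry macros change from FUNC_ENTER_NOAPI* to FUNC_ENTER_STATIC / FUNC_ENTER_PACKAGE, while the insertion logic itself is untouched. For orientation, the pattern behind H5B__insert_child is "shift the keys and child addresses above the slot, then drop the new key/child pair in". The following is a minimal stand-alone sketch of that right-of-anchor insert in plain C; all names and types here are toys, not HDF5 code.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAX_CHILD 8

/* Toy node layout: nchildren child "addresses" and nchildren + 1 keys,
 * mirroring the native-key layout a B-tree node uses.  Not HDF5 code. */
struct toy_node {
    unsigned nchildren;
    int      key[TOY_MAX_CHILD + 1];
    long     child[TOY_MAX_CHILD];
};

/* Insert CHILD to the right of slot IDX with separating key MD_KEY
 * (the "insert right of anchor" case): children above idx and keys
 * above idx move up by one slot to make room. */
static void toy_insert_child_right(struct toy_node *bt, unsigned idx,
                                   long child, int md_key)
{
    assert(bt->nchildren < TOY_MAX_CHILD && idx < bt->nchildren);

    memmove(&bt->child[idx + 2], &bt->child[idx + 1],
            (bt->nchildren - (idx + 1)) * sizeof(bt->child[0]));
    memmove(&bt->key[idx + 2], &bt->key[idx + 1],
            (bt->nchildren - idx) * sizeof(bt->key[0]));
    bt->child[idx + 1] = child;
    bt->key[idx + 1] = md_key;
    bt->nchildren++;
}

int main(void)
{
    struct toy_node bt = {3, {10, 20, 30, 40}, {100, 200, 300}};
    unsigned u;

    /* A split of child 200 produced a new right sibling at "address" 250
     * holding records >= 25, so 25 becomes the separating key. */
    toy_insert_child_right(&bt, 1, 250, 25);
    for (u = 0; u < bt.nchildren; u++)
        printf("key[%u]=%d child[%u]=%ld\n", u, bt.key[u], u, bt.child[u]);
    printf("key[%u]=%d\n", bt.nchildren, bt.key[bt.nchildren]);
    return 0;
}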
diff --git a/src/H5B2.c b/src/H5B2.c
index 7d7345e..0c0f24f 100644
--- a/src/H5B2.c
+++ b/src/H5B2.c
@@ -146,7 +146,7 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
HDcompile_assert(H5B2_NUM_BTREE_ID == NELMTS(H5B2_client_class_g));
/* Create shared v2 B-tree header */
- if(HADDR_UNDEF == (hdr_addr = H5B2_hdr_create(f, dxpl_id, cparam, ctx_udata)))
+ if(HADDR_UNDEF == (hdr_addr = H5B2__hdr_create(f, dxpl_id, cparam, ctx_udata)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't create v2 B-tree header")
/* Create v2 B-tree wrapper */
@@ -161,11 +161,11 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
    /* Point v2 B-tree wrapper at header and bump its ref count */
bt2->hdr = hdr;
- if(H5B2_hdr_incr(bt2->hdr) < 0)
+ if(H5B2__hdr_incr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment reference count on shared v2 B-tree header")
/* Increment # of files using this v2 B-tree header */
- if(H5B2_hdr_fuse_incr(bt2->hdr) < 0)
+ if(H5B2__hdr_fuse_incr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment file reference count on shared v2 B-tree header")
/* Set file pointer for this v2 B-tree open context */
@@ -229,11 +229,11 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata)
/* Point v2 B-tree wrapper at header */
bt2->hdr = hdr;
- if(H5B2_hdr_incr(bt2->hdr) < 0)
+ if(H5B2__hdr_incr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment reference count on shared v2 B-tree header")
/* Increment # of files using this v2 B-tree header */
- if(H5B2_hdr_fuse_incr(bt2->hdr) < 0)
+ if(H5B2__hdr_fuse_incr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment file reference count on shared v2 B-tree header")
/* Set file pointer for this v2 B-tree open context */
@@ -287,28 +287,28 @@ H5B2_insert(H5B2_t *bt2, hid_t dxpl_id, void *udata)
/* Check if the root node is allocated yet */
if(!H5F_addr_defined(hdr->root.addr)) {
/* Create root node as leaf node in B-tree */
- if(H5B2_create_leaf(hdr, dxpl_id, &(hdr->root)) < 0)
+ if(H5B2__create_leaf(hdr, dxpl_id, &(hdr->root)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create root node")
} /* end if */
/* Check if we need to split the root node (equiv. to a 1->2 node split) */
else if(hdr->root.node_nrec == hdr->node_info[hdr->depth].split_nrec) {
/* Split root node */
- if(H5B2_split_root(hdr, dxpl_id) < 0)
+ if(H5B2__split_root(hdr, dxpl_id) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split root node")
} /* end if */
/* Attempt to insert record into B-tree */
if(hdr->depth > 0) {
- if(H5B2_insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, udata) < 0)
+ if(H5B2__insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node")
} /* end if */
else {
- if(H5B2_insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata) < 0)
+ if(H5B2__insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node")
} /* end else */
/* Mark B-tree header as dirty */
- if(H5B2_hdr_dirty(hdr) < 0)
+ if(H5B2__hdr_dirty(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTMARKDIRTY, FAIL, "unable to mark B-tree header dirty")
done:
@@ -385,7 +385,7 @@ H5B2_iterate(H5B2_t *bt2, hid_t dxpl_id, H5B2_operator_t op, void *op_data)
/* Iterate through records */
if(hdr->root.node_nrec > 0) {
/* Iterate through nodes */
- if((ret_value = H5B2_iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
@@ -422,7 +422,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
- unsigned depth; /* Current depth of the tree */
+ uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
unsigned idx; /* Location of record which matches key */
H5B2_nodepos_t curr_pos; /* Position of the current node */
@@ -479,11 +479,11 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
idx++;
@@ -542,11 +542,11 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
- cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
if(cmp != 0) {
/* Unlock leaf node */
@@ -622,7 +622,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
- unsigned depth; /* Current depth of the tree */
+ uint16_t depth; /* Current depth of the tree */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -662,7 +662,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
unsigned u; /* Local index variable */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Search for record with correct index */
@@ -734,7 +734,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check index */
@@ -798,7 +798,7 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op,
if(hdr->depth > 0) {
hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */
- if(H5B2_remove_internal(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
+ if(H5B2__remove_internal(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
&(hdr->cache_info), NULL, H5B2_POS_ROOT, &hdr->root, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
@@ -817,7 +817,7 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op,
} /* end for */
} /* end if */
else {
- if(H5B2_remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata, op, op_data) < 0)
+ if(H5B2__remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -825,7 +825,7 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op,
hdr->root.all_nrec--;
/* Mark B-tree header as dirty */
- if(H5B2_hdr_dirty(hdr) < 0)
+ if(H5B2__hdr_dirty(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTMARKDIRTY, FAIL, "unable to mark B-tree header dirty")
done:
@@ -880,7 +880,7 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
if(hdr->depth > 0) {
hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */
- if(H5B2_remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
+ if(H5B2__remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
&(hdr->cache_info), NULL, &hdr->root, H5B2_POS_ROOT, idx, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
@@ -899,7 +899,7 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
} /* end for */
} /* end if */
else {
- if(H5B2_remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, (unsigned)idx, op, op_data) < 0)
+ if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, (unsigned)idx, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -907,7 +907,7 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
hdr->root.all_nrec--;
/* Mark B-tree header as dirty */
- if(H5B2_hdr_dirty(hdr) < 0)
+ if(H5B2__hdr_dirty(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTMARKDIRTY, FAIL, "unable to mark B-tree header dirty")
done:
@@ -994,11 +994,11 @@ H5B2_neighbor(H5B2_t *bt2, hid_t dxpl_id, H5B2_compare_t range, void *udata,
/* Attempt to find neighbor record in B-tree */
if(hdr->depth > 0) {
- if(H5B2_neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, udata, op, op_data) < 0)
+ if(H5B2__neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node")
} /* end if */
else {
- if(H5B2_neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, udata, op, op_data) < 0)
+ if(H5B2__neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node")
} /* end else */
@@ -1034,7 +1034,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
H5B2_nodepos_t curr_pos; /* Position of current node */
- unsigned depth; /* Current depth of the tree */
+ uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
unsigned idx; /* Location of record which matches key */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1070,11 +1070,11 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
idx++;
@@ -1142,11 +1142,11 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
- cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
if(cmp != 0) {
/* Unlock leaf node */
@@ -1238,7 +1238,7 @@ H5B2_close(H5B2_t *bt2, hid_t dxpl_id)
HDassert(bt2->f);
/* Decrement file reference & check if this is the last open v2 B-tree using the shared B-tree header */
- if(0 == H5B2_hdr_fuse_decr(bt2->hdr)) {
+ if(0 == H5B2__hdr_fuse_decr(bt2->hdr)) {
/* Set the shared v2 B-tree header's file context for this operation */
bt2->hdr->f = bt2->f;
@@ -1283,22 +1283,22 @@ H5B2_close(H5B2_t *bt2, hid_t dxpl_id)
hdr->f = bt2->f;
/* Decrement the reference count on the B-tree header */
- /* (don't put in H5B2_hdr_fuse_decr() as the B-tree header may be evicted
+ /* (don't put in H5B2__hdr_fuse_decr() as the B-tree header may be evicted
* immediately -QAK)
*/
- if(H5B2_hdr_decr(bt2->hdr) < 0)
+ if(H5B2__hdr_decr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement reference count on shared v2 B-tree header")
/* Delete v2 B-tree, starting with header (unprotects header) */
- if(H5B2_hdr_delete(hdr, dxpl_id) < 0)
+ if(H5B2__hdr_delete(hdr, dxpl_id) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree")
} /* end if */
else {
/* Decrement the reference count on the B-tree header */
- /* (don't put in H5B2_hdr_fuse_decr() as the B-tree header may be evicted
+ /* (don't put in H5B2__hdr_fuse_decr() as the B-tree header may be evicted
* immediately -QAK)
*/
- if(H5B2_hdr_decr(bt2->hdr) < 0)
+ if(H5B2__hdr_decr(bt2->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement reference count on shared v2 B-tree header")
} /* end else */
@@ -1368,7 +1368,7 @@ HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
hdr->f = f;
/* Delete v2 B-tree now, starting with header (unprotects header) */
- if(H5B2_hdr_delete(hdr, dxpl_id) < 0)
+ if(H5B2__hdr_delete(hdr, dxpl_id) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree")
hdr = NULL;
} /* end if */
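In H5B2.c the search and modify paths keep their shape while the helpers they call pick up the double-underscore package prefix and the depth variable narrows to uint16_t: each internal node is binary-searched with H5B2__locate_record, a positive comparison bumps the index so the descent follows the child to the right of the probed record, and a leaf-level search finishes the lookup. Below is a hedged stand-alone sketch of that locate step over a plain sorted int array; it is toy code in the spirit of the routine, not the HDF5 implementation.

#include <stdio.h>

/* Binary search in the spirit of H5B2__locate_record: returns the result of
 * the last comparison and stores the probe index in *idx.  Records are plain
 * ints here instead of encoded native records. */
static int toy_locate_record(const int *rec, unsigned nrec, int key, unsigned *idx)
{
    unsigned lo = 0, hi = nrec, my_idx = 0;
    int cmp = -1;

    while (lo < hi && cmp) {
        my_idx = (lo + hi) / 2;
        if ((cmp = key - rec[my_idx]) < 0)
            hi = my_idx;
        else
            lo = my_idx + 1;
    }
    *idx = my_idx;
    return cmp;
}

int main(void)
{
    const int recs[] = {10, 20, 30, 40, 50};
    unsigned idx;
    int cmp = toy_locate_record(recs, 5, 35, &idx);

    /* As in the H5B2_find() descent: a positive comparison means the key
     * sorts after rec[idx], so the child pointer to follow is idx + 1. */
    if (cmp > 0)
        idx++;
    printf("descend into child %u (cmp = %d)\n", idx, cmp);
    return 0;
}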
diff --git a/src/H5B2cache.c b/src/H5B2cache.c
index 1e2a6a3..9e01c03 100644
--- a/src/H5B2cache.c
+++ b/src/H5B2cache.c
@@ -173,7 +173,7 @@ H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata);
/* Allocate new B-tree header and reset cache info */
- if(NULL == (hdr = H5B2_hdr_alloc(udata->f)))
+ if(NULL == (hdr = H5B2__hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "allocation failed for B-tree header")
/* Wrap the local buffer for serialized header info */
@@ -238,7 +238,7 @@ H5B2__cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Initialize B-tree header info */
cparam.cls = H5B2_client_class_g[id];
- if(H5B2_hdr_init(hdr, &cparam, udata->ctx_udata, depth) < 0)
+ if(H5B2__hdr_init(hdr, &cparam, udata->ctx_udata, depth) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't initialize B-tree header info")
/* Set the B-tree header's address */
@@ -252,7 +252,7 @@ done:
if(wb && H5WB_unwrap(wb) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && hdr)
- if(H5B2_hdr_free(hdr) < 0)
+ if(H5B2__hdr_free(hdr) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, NULL, "can't release v2 B-tree header")
FUNC_LEAVE_NOAPI(ret_value)
@@ -282,7 +282,7 @@ H5B2__cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
FUNC_ENTER_STATIC
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(hdr);
@@ -399,7 +399,7 @@ H5B2__cache_hdr_dest(H5F_t *f, H5B2_hdr_t *hdr)
} /* end if */
/* Release B-tree header info */
- if(H5B2_hdr_free(hdr) < 0)
+ if(H5B2__hdr_free(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree header info")
done:
@@ -464,7 +464,7 @@ H5B2__cache_hdr_size(const H5F_t UNUSED *f, const H5B2_hdr_t *hdr, size_t *size_
{
FUNC_ENTER_STATIC_NOERR
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(hdr);
HDassert(size_ptr);
@@ -519,7 +519,7 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
udata->hdr->f = f;
/* Increment ref. count on B-tree header */
- if(H5B2_hdr_incr(udata->hdr) < 0)
+ if(H5B2__hdr_incr(udata->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment ref. count on B-tree header")
/* Share B-tree information */
@@ -601,7 +601,7 @@ H5B2__cache_internal_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
done:
if(!ret_value && internal)
- if(H5B2_internal_free(internal) < 0)
+ if(H5B2__internal_free(internal) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree internal node")
FUNC_LEAVE_NOAPI(ret_value)
@@ -628,7 +628,7 @@ H5B2__cache_internal_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t add
FUNC_ENTER_STATIC
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(internal);
@@ -742,7 +742,7 @@ H5B2__cache_internal_dest(H5F_t *f, H5B2_internal_t *internal)
} /* end if */
/* Release v2 b-tree internal node */
- if(H5B2_internal_free(internal) < 0)
+ if(H5B2__internal_free(internal) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node")
done:
@@ -807,7 +807,7 @@ H5B2__cache_internal_size(const H5F_t UNUSED *f, const H5B2_internal_t *internal
{
FUNC_ENTER_STATIC_NOERR
- /* check arguments */
+ /* Check arguments */
HDassert(internal);
HDassert(internal->hdr);
HDassert(size_ptr);
@@ -854,14 +854,14 @@ H5B2__cache_leaf_load(H5F_t UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata
/* Allocate new leaf node and reset cache info */
if(NULL == (leaf = H5FL_MALLOC(H5B2_leaf_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "memory allocation failed")
HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t));
/* Set the B-tree header's file context for this operation */
udata->hdr->f = udata->f;
/* Increment ref. count on B-tree header */
- if(H5B2_hdr_incr(udata->hdr) < 0)
+ if(H5B2__hdr_incr(udata->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, NULL, "can't increment ref. count on B-tree header")
/* Share B-tree header information */
@@ -875,12 +875,12 @@ H5B2__cache_leaf_load(H5F_t UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata
/* Magic number */
if(HDmemcmp(p, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "wrong B-tree leaf node signature")
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree leaf node signature")
p += H5_SIZEOF_MAGIC;
/* Version */
if(*p++ != H5B2_LEAF_VERSION)
- HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "wrong B-tree leaf node version")
+ HGOTO_ERROR(H5E_BTREE, H5E_BADRANGE, NULL, "wrong B-tree leaf node version")
/* B-tree type */
if(*p++ != (uint8_t)udata->hdr->cls->id)
@@ -888,7 +888,7 @@ H5B2__cache_leaf_load(H5F_t UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata
/* Allocate space for the native keys in memory */
if(NULL == (leaf->leaf_native = (uint8_t *)H5FL_FAC_MALLOC(udata->hdr->node_info[0].nat_rec_fac)))
- HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "memory allocation failed for B-tree leaf native keys")
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "memory allocation failed for B-tree leaf native keys")
/* Set the number of records in the leaf */
leaf->nrec = udata->nrec;
@@ -905,7 +905,7 @@ H5B2__cache_leaf_load(H5F_t UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata
native += udata->hdr->cls->nrec_size;
} /* end for */
- /* Compute checksum on internal node */
+ /* Compute checksum on leaf node */
computed_chksum = H5_checksum_metadata(udata->hdr->page, (size_t)(p - (const uint8_t *)udata->hdr->page), 0);
/* Metadata checksum */
@@ -923,7 +923,7 @@ H5B2__cache_leaf_load(H5F_t UNUSED *f, hid_t dxpl_id, haddr_t addr, void *_udata
done:
if(!ret_value && leaf)
- if(H5B2_leaf_free(leaf) < 0)
+ if(H5B2__leaf_free(leaf) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree leaf node")
FUNC_LEAVE_NOAPI(ret_value)
@@ -950,7 +950,7 @@ H5B2__cache_leaf_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H
FUNC_ENTER_STATIC
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(leaf);
@@ -1050,7 +1050,7 @@ H5B2__cache_leaf_dest(H5F_t *f, H5B2_leaf_t *leaf)
} /* end if */
/* Destroy v2 b-tree leaf node */
- if(H5B2_leaf_free(leaf) < 0)
+ if(H5B2__leaf_free(leaf) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
done:
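The H5B2cache.c changes are naming and error-class cleanups (for example H5E_CANTALLOC in place of H5E_NOSPACE for allocation failures); the deserialization itself still follows the usual shape of an HDF5 metadata load callback: check the signature bytes, the version, and the client class, decode the native records, then verify the checksum stored at the end of the on-disk image. The small self-contained sketch below shows that decode-and-verify shape over a toy buffer; the XOR checksum is only a stand-in for H5_checksum_metadata and none of the names are HDF5's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_MAGIC   "BTLF"          /* 4-byte signature, like a node magic */
#define TOY_VERSION 0

/* Stand-in checksum; the real code uses H5_checksum_metadata(). */
static uint32_t toy_checksum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    while (len--)
        sum = (sum << 1) ^ *buf++;
    return sum;
}

/* Decode a toy node image: magic, version, nrec one-byte records, then a
 * trailing 4-byte little-endian checksum over everything before it. */
static int toy_decode(const uint8_t *image, size_t len, uint8_t *rec_out, size_t nrec)
{
    const uint8_t *p = image;
    uint32_t stored, computed;

    if (len < 4 + 1 + nrec + 4)
        return -1;                               /* image too small */
    if (memcmp(p, TOY_MAGIC, 4) != 0)
        return -1;                               /* wrong signature */
    p += 4;
    if (*p++ != TOY_VERSION)
        return -1;                               /* wrong version */
    memcpy(rec_out, p, nrec);                    /* "native" record decode */
    p += nrec;

    computed = toy_checksum(image, (size_t)(p - image));
    stored = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
             ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    return (computed == stored) ? 0 : -1;        /* checksum mismatch */
}

int main(void)
{
    uint8_t img[4 + 1 + 3 + 4] = "BTLF";
    uint8_t rec[3];
    uint32_t sum;

    img[4] = TOY_VERSION;
    img[5] = 7; img[6] = 8; img[7] = 9;          /* three toy records */
    sum = toy_checksum(img, 8);
    img[8]  = (uint8_t)sum;         img[9]  = (uint8_t)(sum >> 8);
    img[10] = (uint8_t)(sum >> 16); img[11] = (uint8_t)(sum >> 24);

    printf("decode: %s\n", toy_decode(img, sizeof(img), rec, 3) == 0 ? "ok" : "failed");
    return 0;
}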
diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c
index 3e5d55a..50283f8 100644
--- a/src/H5B2dbg.c
+++ b/src/H5B2dbg.c
@@ -74,7 +74,7 @@
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_debug
+ * Function: H5B2__hdr_debug
*
* Purpose: Prints debugging info about a B-tree header.
*
@@ -87,7 +87,7 @@
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
+H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
const H5B2_class_t *type, haddr_t obj_addr)
{
H5B2_hdr_t *hdr = NULL; /* B-tree header info */
@@ -97,7 +97,7 @@ H5B2_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -185,11 +185,11 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_debug() */
+} /* end H5B2__hdr_debug() */
/*-------------------------------------------------------------------------
- * Function: H5B2_int_debug
+ * Function: H5B2__int_debug
*
* Purpose: Prints debugging info about a B-tree internal node
*
@@ -202,7 +202,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
+H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
const H5B2_class_t *type, haddr_t hdr_addr, unsigned nrec, unsigned depth, haddr_t obj_addr)
{
H5B2_hdr_t *hdr = NULL; /* B-tree header */
@@ -213,7 +213,7 @@ H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -251,7 +251,9 @@ H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
/*
* Load the B-tree internal node
*/
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, addr, nrec, depth, H5AC_READ)))
+ H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
+ H5_CHECK_OVERFLOW(depth, unsigned, uint16_t);
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node")
/* Print opening message */
@@ -317,11 +319,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree internal node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_int_debug() */
+} /* end H5B2__int_debug() */
/*-------------------------------------------------------------------------
- * Function: H5B2_leaf_debug
+ * Function: H5B2__leaf_debug
*
* Purpose: Prints debugging info about a B-tree leaf node
*
@@ -334,7 +336,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
+H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth,
const H5B2_class_t *type, haddr_t hdr_addr, unsigned nrec, haddr_t obj_addr)
{
H5B2_hdr_t *hdr = NULL; /* B-tree header */
@@ -345,7 +347,7 @@ H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
char temp_str[128]; /* Temporary string, for formatting */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -383,7 +385,8 @@ H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
/*
* Load the B-tree leaf node
*/
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, addr, nrec, H5AC_READ)))
+ H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Print opening message */
@@ -430,5 +433,5 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_PROTECT, FAIL, "unable to release B-tree leaf node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_leaf_debug() */
+} /* end H5B2__leaf_debug() */
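Besides moving the debug dumpers to package scope, the H5B2dbg.c hunks add H5_CHECK_OVERFLOW checks before narrowing the unsigned nrec and depth arguments to the uint16_t now taken by H5B2__protect_internal and H5B2__protect_leaf. The idea is simply "verify the value fits, then cast"; the sketch below shows such a checked narrowing with a plain assert, which is only an approximation of the HDF5 macro.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a checked narrowing; not the H5_CHECK_OVERFLOW macro. */
#define TOY_CHECK_U16(var) assert((var) <= UINT16_MAX)

/* Hypothetical callee that only accepts the narrower type. */
static void toy_protect_leaf(uint16_t nrec)
{
    printf("protecting leaf with %u records\n", (unsigned)nrec);
}

int main(void)
{
    unsigned nrec = 123;            /* caller-side value, wider type */

    TOY_CHECK_U16(nrec);            /* fails loudly if the value would not fit */
    toy_protect_leaf((uint16_t)nrec);
    return 0;
}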
diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c
index 452a35d..97b979e 100644
--- a/src/H5B2hdr.c
+++ b/src/H5B2hdr.c
@@ -94,7 +94,7 @@ H5FL_SEQ_DEFINE(H5B2_node_info_t);
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_init
+ * Function: H5B2__hdr_init
*
* Purpose: Allocate & initialize B-tree header info
*
@@ -107,7 +107,7 @@ H5FL_SEQ_DEFINE(H5B2_node_info_t);
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
+H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
uint16_t depth)
{
size_t sz_max_nrec; /* Temporary variable for range checking */
@@ -115,7 +115,7 @@ H5B2_hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -160,7 +160,7 @@ HDmemset(hdr->page, 0, hdr->node_size);
/* Initialize leaf node info */
sz_max_nrec = H5B2_NUM_LEAF_REC(hdr->node_size, hdr->rrec_size);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[0].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+ H5_CHECKED_ASSIGN(hdr->node_info[0].max_nrec, unsigned, sz_max_nrec, size_t)
hdr->node_info[0].split_nrec = (hdr->node_info[0].max_nrec * hdr->split_percent) / 100;
hdr->node_info[0].merge_nrec = (hdr->node_info[0].max_nrec * hdr->merge_percent) / 100;
hdr->node_info[0].cum_max_nrec = hdr->node_info[0].max_nrec;
@@ -182,14 +182,14 @@ HDmemset(hdr->page, 0, hdr->node_size);
/* Compute size to store # of records in each node */
    /* (uses leaf # of records because it's the largest) */
u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[0].max_nrec);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+ H5_CHECKED_ASSIGN(hdr->max_nrec_size, uint8_t, u_max_nrec_size, unsigned)
HDassert(hdr->max_nrec_size <= H5B2_SIZEOF_RECORDS_PER_NODE);
/* Initialize internal node info */
if(depth > 0) {
for(u = 1; u < (unsigned)(depth + 1); u++) {
sz_max_nrec = H5B2_NUM_INT_REC(hdr, u);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[u].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+ H5_CHECKED_ASSIGN(hdr->node_info[u].max_nrec, unsigned, sz_max_nrec, size_t)
HDassert(hdr->node_info[u].max_nrec <= hdr->node_info[u - 1].max_nrec);
hdr->node_info[u].split_nrec = (hdr->node_info[u].max_nrec * hdr->split_percent) / 100;
@@ -198,7 +198,7 @@ HDmemset(hdr->page, 0, hdr->node_size);
hdr->node_info[u].cum_max_nrec = ((hdr->node_info[u].max_nrec + 1) *
hdr->node_info[u - 1].cum_max_nrec) + hdr->node_info[u].max_nrec;
u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[u].cum_max_nrec);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[u].cum_max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+ H5_CHECKED_ASSIGN(hdr->node_info[u].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned)
if(NULL == (hdr->node_info[u].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[u].max_nrec)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
@@ -215,15 +215,15 @@ HDmemset(hdr->page, 0, hdr->node_size);
done:
if(ret_value < 0)
- if(H5B2_hdr_free(hdr) < 0)
+ if(H5B2__hdr_free(hdr) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free shared v2 B-tree info")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_init() */
+} /* end H5B2__hdr_init() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_alloc
+ * Function: H5B2__hdr_alloc
*
* Purpose: Allocate B-tree header
*
@@ -236,12 +236,12 @@ done:
*-------------------------------------------------------------------------
*/
H5B2_hdr_t *
-H5B2_hdr_alloc(H5F_t *f)
+H5B2__hdr_alloc(H5F_t *f)
{
H5B2_hdr_t *hdr = NULL; /* v2 B-tree header */
H5B2_hdr_t *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -265,11 +265,11 @@ H5B2_hdr_alloc(H5F_t *f)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_alloc() */
+} /* end H5B2__hdr_alloc() */
/*-------------------------------------------------------------------------
- * Function: H5HF_hdr_create
+ * Function: H5B2__hdr_create
*
 * Purpose:	Create new v2 B-tree header
*
@@ -282,13 +282,13 @@ done:
*-------------------------------------------------------------------------
*/
haddr_t
-H5B2_hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
+H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
void *ctx_udata)
{
H5B2_hdr_t *hdr = NULL; /* The new v2 B-tree header information */
haddr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -297,11 +297,11 @@ H5B2_hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
HDassert(cparam);
/* Allocate v2 B-tree header */
- if(NULL == (hdr = H5B2_hdr_alloc(f)))
+ if(NULL == (hdr = H5B2__hdr_alloc(f)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed for B-tree header")
/* Initialize shared B-tree info */
- if(H5B2_hdr_init(hdr, cparam, ctx_udata, (uint16_t)0) < 0)
+ if(H5B2__hdr_init(hdr, cparam, ctx_udata, (uint16_t)0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, HADDR_UNDEF, "can't create shared B-tree info")
/* Allocate space for the header on disk */
@@ -317,15 +317,15 @@ H5B2_hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
done:
if(!H5F_addr_defined(ret_value) && hdr)
- if(H5B2_hdr_free(hdr) < 0)
+ if(H5B2__hdr_free(hdr) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, HADDR_UNDEF, "unable to release v2 B-tree header")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_create() */
+} /* end H5B2__hdr_create() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_incr
+ * Function: H5B2__hdr_incr
*
* Purpose: Increment reference count on B-tree header
*
@@ -338,11 +338,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_incr(H5B2_hdr_t *hdr)
+H5B2__hdr_incr(H5B2_hdr_t *hdr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity checks */
HDassert(hdr);
@@ -357,11 +357,11 @@ H5B2_hdr_incr(H5B2_hdr_t *hdr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_incr_hdr() */
+} /* end H5B2__hdr_incr() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_decr
+ * Function: H5B2__hdr_decr
*
* Purpose: Decrement reference count on B-tree header
*
@@ -374,11 +374,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_decr(H5B2_hdr_t *hdr)
+H5B2__hdr_decr(H5B2_hdr_t *hdr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(hdr);
@@ -394,11 +394,11 @@ H5B2_hdr_decr(H5B2_hdr_t *hdr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_decr() */
+} /* end H5B2__hdr_decr() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_fuse_incr
+ * Function: H5B2__hdr_fuse_incr
*
* Purpose: Increment file reference count on shared v2 B-tree header
*
@@ -411,7 +411,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_fuse_incr(H5B2_hdr_t *hdr)
+H5B2__hdr_fuse_incr(H5B2_hdr_t *hdr)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -422,11 +422,11 @@ H5B2_hdr_fuse_incr(H5B2_hdr_t *hdr)
hdr->file_rc++;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5B2_hdr_fuse_incr() */
+} /* end H5B2__hdr_fuse_incr() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_fuse_decr
+ * Function: H5B2__hdr_fuse_decr
*
* Purpose: Decrement file reference count on shared v2 B-tree header
*
@@ -439,7 +439,7 @@ H5B2_hdr_fuse_incr(H5B2_hdr_t *hdr)
*-------------------------------------------------------------------------
*/
size_t
-H5B2_hdr_fuse_decr(H5B2_hdr_t *hdr)
+H5B2__hdr_fuse_decr(H5B2_hdr_t *hdr)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -451,11 +451,11 @@ H5B2_hdr_fuse_decr(H5B2_hdr_t *hdr)
hdr->file_rc--;
FUNC_LEAVE_NOAPI(hdr->file_rc)
-} /* end H5B2_hdr_fuse_decr() */
+} /* end H5B2__hdr_fuse_decr() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_dirty
+ * Function: H5B2__hdr_dirty
*
* Purpose: Mark B-tree header as dirty
*
@@ -468,11 +468,11 @@ H5B2_hdr_fuse_decr(H5B2_hdr_t *hdr)
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_dirty(H5B2_hdr_t *hdr)
+H5B2__hdr_dirty(H5B2_hdr_t *hdr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(hdr);
@@ -483,11 +483,11 @@ H5B2_hdr_dirty(H5B2_hdr_t *hdr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_dirty() */
+} /* end H5B2__hdr_dirty() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_free
+ * Function: H5B2__hdr_free
*
* Purpose: Free B-tree header info
*
@@ -500,11 +500,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_free(H5B2_hdr_t *hdr)
+H5B2__hdr_free(H5B2_hdr_t *hdr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(hdr);
@@ -557,11 +557,11 @@ H5B2_hdr_free(H5B2_hdr_t *hdr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_free() */
+} /* end H5B2__hdr_free() */
/*-------------------------------------------------------------------------
- * Function: H5B2_hdr_delete
+ * Function: H5B2__hdr_delete
*
* Purpose: Delete a v2 B-tree, starting with the header
*
@@ -574,12 +574,12 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id)
+H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id)
{
unsigned cache_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting v2 B-tree header */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(hdr);
@@ -600,7 +600,7 @@ H5B2_hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id)
/* Delete all nodes in B-tree */
if(H5F_addr_defined(hdr->root.addr))
- if(H5B2_delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr->remove_op, hdr->remove_op_data) < 0)
+ if(H5B2__delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr->remove_op, hdr->remove_op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete B-tree nodes")
    /* Indicate that the B-tree header should be deleted & file space freed */
@@ -612,5 +612,5 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree header")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_hdr_delete() */
+} /* end H5B2__hdr_delete() */
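The H5B2hdr.c hunks replace H5_ASSIGN_OVERFLOW with H5_CHECKED_ASSIGN (note the reordered argument list: destination, destination type, source, source type) and move the header routines to package scope; the sizing arithmetic in H5B2__hdr_init is unchanged. Level 0 gets its maximum record count from the node and record sizes, and each higher level u uses the recurrence cum_max_nrec[u] = (max_nrec[u] + 1) * cum_max_nrec[u-1] + max_nrec[u], whose encoded width then sets cum_max_nrec_size. The stand-alone sketch below runs that recurrence with made-up node parameters; toy_limit_enc_size is only in the spirit of H5VM_limit_enc_size and nothing here is HDF5 code.

#include <stdint.h>
#include <stdio.h>

/* Bytes needed to encode VALUE, in the spirit of H5VM_limit_enc_size(). */
static unsigned toy_limit_enc_size(uint64_t value)
{
    unsigned size = 1;
    while (value >>= 8)
        size++;
    return size;
}

int main(void)
{
    /* Made-up node geometry: a leaf holds 32 records, each internal level
     * holds 16 records (and therefore 17 child pointers). */
    const unsigned leaf_max_nrec = 32, int_max_nrec = 16, depth = 3;
    uint64_t cum_max_nrec = leaf_max_nrec;       /* level 0 = leaf */
    unsigned u;

    printf("level 0: max %u, cum max %llu, enc size %u\n",
           leaf_max_nrec, (unsigned long long)cum_max_nrec,
           toy_limit_enc_size(cum_max_nrec));

    for (u = 1; u <= depth; u++) {
        /* Same recurrence as H5B2__hdr_init(): each of the (max + 1) children
         * can hold cum_max_nrec records, plus max records in this node. */
        cum_max_nrec = (uint64_t)(int_max_nrec + 1) * cum_max_nrec + int_max_nrec;
        printf("level %u: max %u, cum max %llu, enc size %u\n",
               u, int_max_nrec, (unsigned long long)cum_max_nrec,
               toy_limit_enc_size(cum_max_nrec));
    }
    return 0;
}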
diff --git a/src/H5B2int.c b/src/H5B2int.c
index ef83e93..96636d5 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -61,29 +61,29 @@
/********************/
/* Helper functions */
-static herr_t H5B2_create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *node_ptr, unsigned depth);
-static herr_t H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx);
-static herr_t H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned idx);
-static herr_t H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx);
-static herr_t H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx);
-static herr_t H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx);
-static herr_t H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+static herr_t H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned *internal_flags_ptr,
unsigned idx, void *swap_loc);
+static herr_t H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ H5B2_node_ptr_t *node_ptr, uint16_t depth);
#ifdef H5B2_DEBUG
-static herr_t H5B2_assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf);
-static herr_t H5B2_assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t *leaf2);
-static herr_t H5B2_assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal);
-static herr_t H5B2_assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2);
+static herr_t H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf);
+static herr_t H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t *leaf2);
+static herr_t H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal);
+static herr_t H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2);
#endif /* H5B2_DEBUG */
/*********************/
@@ -112,7 +112,7 @@ H5FL_SEQ_EXTERN(H5B2_node_info_t);
/*-------------------------------------------------------------------------
- * Function: H5B2_locate_record
+ * Function: H5B2__locate_record
*
* Purpose: Performs a binary search to locate a record in a sorted
* array of records.
@@ -133,14 +133,14 @@ H5FL_SEQ_EXTERN(H5B2_node_info_t);
*-------------------------------------------------------------------------
*/
int
-H5B2_locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off,
+H5B2__locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off,
const uint8_t *native, const void *udata, unsigned *idx)
{
unsigned lo = 0, hi; /* Low & high index values */
unsigned my_idx = 0; /* Final index value */
int cmp = -1; /* Key comparison value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_PACKAGE_NOERR
hi = nrec;
while(lo < hi && cmp) {
@@ -154,11 +154,11 @@ H5B2_locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off,
*idx = my_idx;
FUNC_LEAVE_NOAPI(cmp)
-} /* end H5B2_locate_record */
+} /* end H5B2__locate_record */
/*-------------------------------------------------------------------------
- * Function: H5B2_split1
+ * Function: H5B2__split1
*
* Purpose: Perform a 1->2 node split
*
@@ -172,7 +172,7 @@ H5B2_locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off,
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx)
{
@@ -187,7 +187,7 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -206,7 +206,7 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Create new internal node */
internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0;
- if(H5B2_create_internal(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), (depth - 1)) < 0)
+ if(H5B2__create_internal(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), (uint16_t)(depth - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Setup information for unlocking child nodes */
@@ -215,9 +215,9 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_int = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_int = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@@ -235,7 +235,7 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Create new leaf node */
internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0;
- if(H5B2_create_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1])) < 0)
+ if(H5B2__create_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1])) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new leaf node")
/* Setup information for unlocking child nodes */
@@ -244,9 +244,9 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -322,14 +322,14 @@ H5B2_split1(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
*parent_cache_info_flags_ptr |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
- H5B2_assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
- H5B2_assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
} /* end if */
else {
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)right_child);
} /* end else */
#endif /* H5B2_DEBUG */
@@ -341,11 +341,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_split1() */
+} /* end H5B2__split1() */
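The 1->2 split above creates an empty sibling to the right of the full child, moves the upper half of the child's records into it, and promotes the middle record into the parent. A toy sketch of just the record movement (arrays standing in for nodes; the real routine also updates node pointers, all_nrec counts and cache dirty flags):

#include <stdio.h>
#include <string.h>

#define MAX_NREC 8

/* Toy leaf: a fixed-size array of int records */
typedef struct { int rec[MAX_NREC]; unsigned nrec; } leaf_t;

/* Split a full left leaf into left + new right leaf, promoting the middle record */
static void split1(leaf_t *left, leaf_t *right, int *promoted)
{
    unsigned mid = left->nrec / 2;

    /* Middle record moves up to the parent */
    *promoted = left->rec[mid];

    /* Records above the middle move to the new right sibling */
    right->nrec = left->nrec - (mid + 1);
    memcpy(right->rec, &left->rec[mid + 1], right->nrec * sizeof(int));

    /* Left keeps the lower half */
    left->nrec = mid;
}

int main(void)
{
    leaf_t left = { {1, 2, 3, 4, 5, 6, 7, 0}, 7 }, right = { {0}, 0 };
    int up;
    split1(&left, &right, &up);
    printf("promoted=%d left_nrec=%u right_nrec=%u\n", up, left.nrec, right.nrec);
    return 0;
}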
/*-------------------------------------------------------------------------
- * Function: H5B2_split_root
+ * Function: H5B2__split_root
*
* Purpose: Split the root node
*
@@ -359,7 +359,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
+H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
{
H5B2_internal_t *new_root = NULL; /* Pointer to new root node */
unsigned new_root_flags = H5AC__NO_FLAGS_SET; /* Cache flags for new root node */
@@ -368,7 +368,7 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
unsigned u_max_nrec_size; /* Temporary variable for range checking */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -382,13 +382,13 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
/* Update node info for new depth of tree */
sz_max_nrec = H5B2_NUM_INT_REC(hdr, hdr->depth);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[hdr->depth].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+ H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].max_nrec, unsigned, sz_max_nrec, size_t)
hdr->node_info[hdr->depth].split_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->split_percent) / 100;
hdr->node_info[hdr->depth].merge_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->merge_percent) / 100;
hdr->node_info[hdr->depth].cum_max_nrec = ((hdr->node_info[hdr->depth].max_nrec + 1) *
hdr->node_info[hdr->depth - 1].cum_max_nrec) + hdr->node_info[hdr->depth].max_nrec;
u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[hdr->depth].cum_max_nrec);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[hdr->depth].cum_max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+ H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned)
if(NULL == (hdr->node_info[hdr->depth].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[hdr->depth].max_nrec)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
if(NULL == (hdr->node_info[hdr->depth].node_ptr_fac = H5FL_fac_init(sizeof(H5B2_node_ptr_t) * (hdr->node_info[hdr->depth].max_nrec + 1))))
@@ -399,18 +399,18 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
/* Create new internal node to use as root */
hdr->root.node_nrec = 0;
- if(H5B2_create_internal(hdr, dxpl_id, &(hdr->root), hdr->depth) < 0)
+ if(H5B2__create_internal(hdr, dxpl_id, &(hdr->root), hdr->depth) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Protect new root node */
- if(NULL == (new_root = H5B2_protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC_WRITE)))
+ if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set first node pointer in root node to old root node pointer info */
new_root->node_ptrs[0] = old_root_ptr;
/* Split original root node */
- if(H5B2_split1(hdr, dxpl_id, hdr->depth, &(hdr->root), NULL, new_root, &new_root_flags, 0) < 0)
+ if(H5B2__split1(hdr, dxpl_id, hdr->depth, &(hdr->root), NULL, new_root, &new_root_flags, 0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split old root node")
done:
@@ -419,11 +419,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree internal node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_split_root() */
+} /* end H5B2__split_root() */
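These hunks also replace H5_ASSIGN_OVERFLOW with H5_CHECKED_ASSIGN, which names the destination and its type first. Assuming the usual checked-narrowing behaviour (verify the value survives the cast, then assign), a stand-alone approximation is below; the real macro lives in H5private.h and its exact checks may differ:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a checked narrowing assignment:
 * assert the source value survives the round-trip cast, then assign.
 * (Only an approximation of the HDF5 macro, not its definition.) */
#define CHECKED_ASSIGN(dst, dsttype, src, srctype)          \
    do {                                                    \
        assert((srctype)(dsttype)(src) == (srctype)(src));  \
        (dst) = (dsttype)(src);                             \
    } while (0)

int main(void)
{
    size_t   sz_max_nrec = 4000;   /* e.g. records that fit in an internal node */
    unsigned max_nrec;
    unsigned enc_size = 3;
    uint8_t  nrec_size;

    CHECKED_ASSIGN(max_nrec, unsigned, sz_max_nrec, size_t);
    CHECKED_ASSIGN(nrec_size, uint8_t, enc_size, unsigned);
    printf("max_nrec=%u nrec_size=%u\n", max_nrec, (unsigned)nrec_size);
    return 0;
}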
/*-------------------------------------------------------------------------
- * Function: H5B2_redistribute2
+ * Function: H5B2__redistribute2
*
* Purpose: Redistribute records between two nodes
*
@@ -437,7 +437,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned idx)
{
const H5AC_class_t *child_class; /* Pointer to child node's class info */
@@ -450,7 +450,7 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -467,9 +467,9 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@@ -492,9 +492,9 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -507,14 +507,14 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
} /* end else */
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
- H5B2_assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
- H5B2_assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
} /* end if */
else {
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)right_child);
} /* end else */
#endif /* H5B2_DEBUG */
@@ -546,7 +546,7 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Count the number of records being moved */
for(u = 0; u < move_nrec; u++)
moved_nrec += right_node_ptrs[u].all_nrec;
- H5_ASSIGN_OVERFLOW(/* To: */ left_moved_nrec, /* From: */ moved_nrec, /* From: */ hsize_t, /* To: */ hssize_t)
+ H5_CHECKED_ASSIGN(left_moved_nrec, hssize_t, moved_nrec, hsize_t)
right_moved_nrec -= (hssize_t)moved_nrec;
/* Copy node pointers from right node to left */
@@ -600,7 +600,7 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
for(u = 0; u < move_nrec; u++)
moved_nrec += right_node_ptrs[u].all_nrec;
left_moved_nrec -= (hssize_t)moved_nrec;
- H5_ASSIGN_OVERFLOW(/* To: */ right_moved_nrec, /* From: */ moved_nrec, /* From: */ hsize_t, /* To: */ hssize_t)
+ H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t)
} /* end if */
/* Update number of records in child nodes */
@@ -627,14 +627,14 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
} /* end else */
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
- H5B2_assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
- H5B2_assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)right_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)left_child);
} /* end if */
else {
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)right_child);
} /* end else */
#endif /* H5B2_DEBUG */
@@ -646,11 +646,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_redistribute2() */
+} /* end H5B2__redistribute2() */
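Two-node redistribution levels the record counts of adjacent siblings by rotating records through the parent: the separator record drops into the receiving child and a record from the donor child replaces it. A simplified, self-contained sketch with toy leaves (when the children are internal nodes the real code also moves node pointers and adjusts all_nrec totals):

#include <stdio.h>
#include <string.h>

#define MAX_NREC 16

typedef struct { int rec[MAX_NREC]; unsigned nrec; } leaf_t;

/* Move records from the fuller sibling to the emptier one, rotating through 'sep' */
static void redistribute2(leaf_t *left, leaf_t *right, int *sep)
{
    unsigned total = left->nrec + right->nrec + 1;   /* include the separator */
    unsigned new_left = total / 2;

    if (left->nrec < new_left) {
        /* Move records left: separator drops into left, right donates records */
        unsigned move = new_left - left->nrec;
        left->rec[left->nrec] = *sep;
        memcpy(&left->rec[left->nrec + 1], right->rec, (move - 1) * sizeof(int));
        *sep = right->rec[move - 1];
        memmove(right->rec, &right->rec[move], (right->nrec - move) * sizeof(int));
        left->nrec += move;
        right->nrec -= move;
    }
    else if (left->nrec > new_left) {
        /* Move records right: separator drops into right, left donates records */
        unsigned move = left->nrec - new_left;
        memmove(&right->rec[move], right->rec, right->nrec * sizeof(int));
        right->rec[move - 1] = *sep;
        memcpy(right->rec, &left->rec[new_left + 1], (move - 1) * sizeof(int));
        *sep = left->rec[new_left];
        left->nrec -= move;
        right->nrec += move;
    }
}

int main(void)
{
    leaf_t left = { {1, 2, 3, 4, 5, 6}, 6 }, right = { {9}, 1 };
    int sep = 7;
    redistribute2(&left, &right, &sep);
    printf("sep=%d left_nrec=%u right_nrec=%u\n", sep, left.nrec, right.nrec);
    return 0;
}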
/*-------------------------------------------------------------------------
- * Function: H5B2_redistribute3
+ * Function: H5B2__redistribute3
*
* Purpose: Redistribute records between three nodes
*
@@ -664,7 +664,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx)
{
H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL; /* Pointers to childs' node pointer info */
@@ -684,7 +684,7 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -704,11 +704,11 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2_protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@@ -737,11 +737,11 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2_protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@@ -1009,17 +1009,17 @@ H5B2_redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
}
#endif /* QAK */
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
- H5B2_assert_internal2(internal->node_ptrs[idx - 1].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)middle_child);
- H5B2_assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child, (H5B2_internal_t *)left_child);
- H5B2_assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child, (H5B2_internal_t *)right_child);
- H5B2_assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)middle_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx - 1].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)middle_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child, (H5B2_internal_t *)left_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child, (H5B2_internal_t *)right_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx + 1].all_nrec, hdr, (H5B2_internal_t *)right_child, (H5B2_internal_t *)middle_child);
} /* end if */
else {
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)middle_child);
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)middle_child, (H5B2_leaf_t *)right_child);
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)middle_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)middle_child, (H5B2_leaf_t *)right_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)right_child);
} /* end else */
#endif /* H5B2_DEBUG */
@@ -1033,11 +1033,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_redistribute3() */
+} /* end H5B2__redistribute3() */
/*-------------------------------------------------------------------------
- * Function: H5B2_merge2
+ * Function: H5B2__merge2
*
* Purpose: Perform a 2->1 node merge
*
@@ -1052,7 +1052,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx)
{
@@ -1065,7 +1065,7 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -1084,9 +1084,9 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1109,9 +1109,9 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1169,11 +1169,11 @@ H5B2_merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
*parent_cache_info_flags_ptr |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1)
- H5B2_assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child);
+ H5B2__assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)left_child);
else
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)left_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)left_child);
#endif /* H5B2_DEBUG */
done:
@@ -1186,11 +1186,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_merge2() */
+} /* end H5B2__merge2() */
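A 2->1 merge is the inverse of the split: the parent's separator and all of the right child's records are appended to the left child, and the right child is then released. Toy sketch of the record movement only (the HDF5 code additionally frees the right node on disk and dirties the parent):

#include <stdio.h>
#include <string.h>

#define MAX_NREC 16

typedef struct { int rec[MAX_NREC]; unsigned nrec; } leaf_t;

/* Merge 'right' into 'left', pulling the parent's separator down between them */
static void merge2(leaf_t *left, int sep, leaf_t *right)
{
    left->rec[left->nrec++] = sep;                     /* separator comes down */
    memcpy(&left->rec[left->nrec], right->rec,         /* right's records follow */
           right->nrec * sizeof(int));
    left->nrec += right->nrec;
    right->nrec = 0;                                   /* right node is now empty */
}

int main(void)
{
    leaf_t left = { {1, 2, 3}, 3 }, right = { {7, 8}, 2 };
    merge2(&left, 5, &right);
    for (unsigned u = 0; u < left.nrec; u++)
        printf("%d ", left.rec[u]);                    /* 1 2 3 5 7 8 */
    printf("\n");
    return 0;
}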
/*-------------------------------------------------------------------------
- * Function: H5B2_merge3
+ * Function: H5B2__merge3
*
* Purpose: Perform a 3->2 node merge
*
@@ -1205,7 +1205,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx)
{
@@ -1225,7 +1225,7 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -1246,11 +1246,11 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2_protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2_protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2_protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1279,11 +1279,11 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2_protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2_protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2_protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1391,14 +1391,14 @@ H5B2_merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
*parent_cache_info_flags_ptr |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
- H5B2_assert_internal2(internal->node_ptrs[idx - 1].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)middle_child);
- H5B2_assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child);
+ H5B2__assert_internal2(internal->node_ptrs[idx - 1].all_nrec, hdr, (H5B2_internal_t *)left_child, (H5B2_internal_t *)middle_child);
+ H5B2__assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)middle_child);
} /* end if */
else {
- H5B2_assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)middle_child);
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)middle_child);
+ H5B2__assert_leaf2(hdr, (H5B2_leaf_t *)left_child, (H5B2_leaf_t *)middle_child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)middle_child);
} /* end else */
#endif /* H5B2_DEBUG */
@@ -1414,11 +1414,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_merge3() */
+} /* end H5B2__merge3() */
/*-------------------------------------------------------------------------
- * Function: H5B2_swap_leaf
+ * Function: H5B2__swap_leaf
*
* Purpose: Swap a record in a node with a record in a leaf node
*
@@ -1433,7 +1433,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned *internal_flags_ptr,
unsigned idx, void *swap_loc)
{
@@ -1443,7 +1443,7 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
uint8_t *child_native; /* Pointer to child's native records */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -1460,7 +1460,7 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child nodes */
- if(NULL == (child_internal = H5B2_protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (depth - 1), H5AC_WRITE)))
+ if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1475,7 +1475,7 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child node */
- if(NULL == (child_leaf = H5B2_protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
+ if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1492,11 +1492,11 @@ H5B2_swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
*internal_flags_ptr |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((hsize_t)0, hdr, internal);
+ H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1)
- H5B2_assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)child);
+ H5B2__assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)child);
else
- H5B2_assert_leaf(hdr, (H5B2_leaf_t *)child);
+ H5B2__assert_leaf(hdr, (H5B2_leaf_t *)child);
#endif /* H5B2_DEBUG */
done:
@@ -1505,11 +1505,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_swap_leaf() */
+} /* end H5B2__swap_leaf() */
/*-------------------------------------------------------------------------
- * Function: H5B2_insert_leaf
+ * Function: H5B2__insert_leaf
*
* Purpose: Adds a new record to a B-tree leaf node.
*
@@ -1522,7 +1522,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
+H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
H5B2_nodepos_t curr_pos, void *udata)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
@@ -1530,7 +1530,7 @@ H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
unsigned idx; /* Location of record which matches key */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -1538,7 +1538,7 @@ H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Must have a leaf node with enough space to insert a record now */
@@ -1553,7 +1553,7 @@ H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
idx = 0;
else {
/* Find correct location to insert this record */
- if((cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx)) == 0)
+ if((cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx)) == 0)
HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree")
if(cmp > 0)
idx++;
@@ -1601,11 +1601,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_insert_leaf() */
+} /* H5B2__insert_leaf() */
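Leaf insertion reduces to locating the slot with the compare callback, sliding the higher records up by one record width, and writing the new record into the gap, failing if an equal record already exists. A reduced sketch with int records (linear search here; the real code uses H5B2__locate_record):

#include <stdio.h>
#include <string.h>

#define MAX_NREC 8

typedef struct { int rec[MAX_NREC]; unsigned nrec; } leaf_t;

/* Insert 'val' into a sorted leaf; returns -1 if the record already exists */
static int insert_leaf(leaf_t *leaf, int val)
{
    unsigned idx = 0;

    /* Find the insertion point */
    while (idx < leaf->nrec && leaf->rec[idx] < val)
        idx++;
    if (idx < leaf->nrec && leaf->rec[idx] == val)
        return -1;                                  /* "record is already in B-tree" */

    /* Slide the higher records up one slot and drop the new record in */
    memmove(&leaf->rec[idx + 1], &leaf->rec[idx],
            (leaf->nrec - idx) * sizeof(int));
    leaf->rec[idx] = val;
    leaf->nrec++;
    return 0;
}

int main(void)
{
    leaf_t leaf = { {2, 4, 9}, 3 };
    insert_leaf(&leaf, 7);
    for (unsigned u = 0; u < leaf.nrec; u++)
        printf("%d ", leaf.rec[u]);                 /* 2 4 7 9 */
    printf("\n");
    return 0;
}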
/*-------------------------------------------------------------------------
- * Function: H5B2_insert_internal
+ * Function: H5B2__insert_internal
*
* Purpose: Adds a new record to a B-tree node.
*
@@ -1618,7 +1618,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr,
H5B2_nodepos_t curr_pos, void *udata)
{
@@ -1628,7 +1628,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of node */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -1637,7 +1637,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Split or redistribute child node pointers, if necessary */
@@ -1647,7 +1647,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
size_t split_nrec; /* Number of records to split node at */
/* Locate node pointer for child */
- if((cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx)) == 0)
+ if((cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx)) == 0)
HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree")
if(cmp > 0)
idx++;
@@ -1668,22 +1668,22 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Attempt to redistribute records among children */
if(idx == 0) { /* Left-most child */
if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec < split_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_split1(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node")
} /* end else */
} /* end if */
else if(idx == internal->nrec) { /* Right-most child */
if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec < split_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_split1(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node")
} /* end else */
@@ -1691,11 +1691,11 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
else { /* Middle child */
if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec < split_nrec) ||
(internal->node_ptrs[idx - 1].node_nrec < split_nrec))) {
- if(H5B2_redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
+ if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_split1(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node")
} /* end else */
@@ -1703,7 +1703,7 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Locate node pointer for child (after split/redistribute) */
/* Actually, this can be easily updated (for 2-node redistrib.) and shouldn't require re-searching */
- if((cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx)) == 0)
+ if((cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx)) == 0)
HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree")
if(cmp > 0)
idx++;
@@ -1727,11 +1727,11 @@ H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Attempt to insert node */
if(depth > 1) {
- if(H5B2_insert_internal(hdr, dxpl_id, (depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, udata) < 0)
+ if(H5B2__insert_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node")
} /* end if */
else {
- if(H5B2_insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, udata) < 0)
+ if(H5B2__insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node")
} /* end else */
@@ -1747,11 +1747,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_insert_internal() */
+} /* H5B2__insert_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_create_leaf
+ * Function: H5B2__create_leaf
*
* Purpose: Creates an empty leaf node of a B-tree and updates the node pointer
* to point to it.
@@ -1765,12 +1765,12 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr)
+H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr)
{
H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -1784,7 +1784,7 @@ H5B2_create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr)
HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t));
/* Increment ref. count on B-tree header */
- if(H5B2_hdr_incr(hdr) < 0)
+ if(H5B2__hdr_incr(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. count on B-tree header")
/* Share B-tree header information */
@@ -1811,16 +1811,16 @@ HDmemset(leaf->leaf_native, 0, hdr->cls->nrec_size * hdr->node_info[0].max_nrec)
done:
if(ret_value < 0) {
if(leaf)
- if(H5B2_leaf_free(leaf) < 0)
+ if(H5B2__leaf_free(leaf) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree leaf node")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_create_leaf() */
+} /* H5B2__create_leaf() */
/*-------------------------------------------------------------------------
- * Function: H5B2_protect_leaf
+ * Function: H5B2__protect_leaf
*
* Purpose: "Protect" a leaf node in the metadata cache
*
@@ -1833,13 +1833,13 @@ done:
*-------------------------------------------------------------------------
*/
H5B2_leaf_t *
-H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec,
+H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
H5AC_protect_t rw)
{
H5B2_leaf_cache_ud_t udata; /* User-data for callback */
H5B2_leaf_t *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -1848,7 +1848,7 @@ H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec,
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
- H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t)
+ H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)
/* Protect the leaf node */
if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, rw)))
@@ -1856,11 +1856,11 @@ H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_protect_leaf() */
+} /* H5B2__protect_leaf() */
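H5B2__protect_leaf only fills in the cache user data and asks the metadata cache for the node; callers later release the node with H5AC_unprotect, passing a dirtied flag if they modified it. The sketch below is a deliberately simplified stand-in for that pin/unpin pattern, not the H5AC implementation:

#include <stdio.h>

/* Toy cache entry: the real metadata cache tracks address, dirtiness and a pin count */
typedef struct { int pinned; int dirty; int payload; } entry_t;

/* "Protect": pin the entry so it cannot be evicted while we work on it */
static entry_t *protect(entry_t *e)
{
    e->pinned++;
    return e;
}

/* "Unprotect": release the pin, remembering whether the caller dirtied it */
static void unprotect(entry_t *e, int dirtied)
{
    e->dirty |= dirtied;
    e->pinned--;
}

int main(void)
{
    entry_t leaf = {0, 0, 42};
    entry_t *p = protect(&leaf);    /* analogous to H5B2__protect_leaf(...) */
    p->payload += 1;                /* mutate the node while it is pinned */
    unprotect(p, 1);                /* analogous to unprotecting with a dirtied flag */
    printf("pinned=%d dirty=%d payload=%d\n", leaf.pinned, leaf.dirty, leaf.payload);
    return 0;
}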
/*-------------------------------------------------------------------------
- * Function: H5B2_create_internal
+ * Function: H5B2__create_internal
*
* Purpose: Creates an empty internal node of a B-tree and updates the node pointer
* to point to it.
@@ -1874,13 +1874,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr,
- unsigned depth)
+H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr,
+ uint16_t depth)
{
H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments. */
HDassert(hdr);
@@ -1895,7 +1895,7 @@ H5B2_create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr,
HDmemset(&internal->cache_info, 0, sizeof(H5AC_info_t));
/* Increment ref. count on B-tree header */
- if(H5B2_hdr_incr(hdr) < 0)
+ if(H5B2__hdr_incr(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. count on B-tree header")
/* Share B-tree header information */
@@ -1917,7 +1917,7 @@ HDmemset(internal->node_ptrs, 0, sizeof(H5B2_node_ptr_t) * (hdr->node_info[depth
/* Set number of records & depth of the node */
internal->nrec = 0;
- internal->depth = (uint16_t)depth;
+ internal->depth = depth;
/* Allocate space on disk for the internal node */
if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size)))
@@ -1930,16 +1930,16 @@ HDmemset(internal->node_ptrs, 0, sizeof(H5B2_node_ptr_t) * (hdr->node_info[depth
done:
if(ret_value < 0) {
if(internal)
- if(H5B2_internal_free(internal) < 0)
+ if(H5B2__internal_free(internal) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_create_internal() */
+} /* H5B2__create_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_protect_internal
+ * Function: H5B2__protect_internal
*
* Purpose: "Protect" an internal node in the metadata cache
*
@@ -1952,13 +1952,13 @@ done:
*-------------------------------------------------------------------------
*/
H5B2_internal_t *
-H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- unsigned nrec, unsigned depth, H5AC_protect_t rw)
+H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
+ uint16_t nrec, uint16_t depth, H5AC_protect_t rw)
{
H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */
H5B2_internal_t *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -1968,8 +1968,8 @@ H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
- H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t)
- H5_ASSIGN_OVERFLOW(/* To: */ udata.depth, /* From: */ depth, /* From: */ unsigned, /* To: */ uint16_t)
+ udata.nrec = nrec;
+ udata.depth = depth;
/* Protect the internal node */
if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, rw)))
@@ -1977,11 +1977,11 @@ H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_protect_internal() */
+} /* H5B2__protect_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_iterate_node
+ * Function: H5B2__iterate_node
*
* Purpose: Iterate over all the records from a B-tree node, in "in-order"
* order, making a callback for each record.
@@ -1998,7 +1998,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data)
{
const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */
@@ -2009,7 +2009,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned u; /* Local index */
herr_t ret_value = H5_ITER_CONT; /* Iterator return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2021,7 +2021,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
H5B2_internal_t *internal; /* Pointer to internal node */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -2040,7 +2040,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -2065,7 +2065,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
for(u = 0; u < curr_node->node_nrec && !ret_value; u++) {
/* Descend into child node, if current node is an internal node */
if(depth > 0) {
- if((ret_value = H5B2_iterate_node(hdr, dxpl_id, (depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
@@ -2078,7 +2078,7 @@ H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Descend into last child node, if current node is an internal node */
if(!ret_value && depth > 0) {
- if((ret_value = H5B2_iterate_node(hdr, dxpl_id, (depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
@@ -2090,11 +2090,11 @@ done:
native = (uint8_t *)H5FL_FAC_FREE(hdr->node_info[depth].nat_rec_fac, native);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_iterate_node() */
+} /* H5B2__iterate_node() */
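In-order iteration visits child[0], record[0], child[1], record[1], and so on, ending with the last child. A compact recursive sketch over a toy node type (the HDF5 version copies the node's records and node pointers and releases the node from the cache before recursing):

#include <stdio.h>

#define MAX_NREC 4

typedef struct node_t {
    int            rec[MAX_NREC];
    struct node_t *child[MAX_NREC + 1];   /* NULL children mean this is a leaf */
    unsigned       nrec;
} node_t;

typedef int (*op_t)(int rec, void *op_data);

/* Visit children and records in "in-order" order, invoking op on each record */
static int iterate_node(const node_t *node, op_t op, void *op_data)
{
    for (unsigned u = 0; u < node->nrec; u++) {
        if (node->child[u] && iterate_node(node->child[u], op, op_data) < 0)
            return -1;
        if (op(node->rec[u], op_data) < 0)
            return -1;
    }
    /* Descend into the last child, which holds records above rec[nrec - 1] */
    if (node->child[node->nrec])
        return iterate_node(node->child[node->nrec], op, op_data);
    return 0;
}

static int print_rec(int rec, void *op_data)
{
    (void)op_data;
    printf("%d ", rec);
    return 0;
}

int main(void)
{
    node_t l1 = { {1, 2}, {NULL}, 2 }, l2 = { {7, 9}, {NULL}, 2 };
    node_t root = { {5}, {&l1, &l2}, 1 };
    iterate_node(&root, print_rec, NULL);   /* prints: 1 2 5 7 9 */
    printf("\n");
    return 0;
}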
/*-------------------------------------------------------------------------
- * Function: H5B2_remove_leaf
+ * Function: H5B2__remove_leaf
*
* Purpose: Removes a record from a B-tree leaf node.
*
@@ -2107,7 +2107,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
+H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
H5B2_nodepos_t curr_pos, void *udata, H5B2_remove_t op, void *op_data)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
@@ -2116,7 +2116,7 @@ H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
unsigned idx; /* Location of record which matches key */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2125,7 +2125,7 @@ H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
/* Lock current B-tree node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2133,7 +2133,7 @@ H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
HDassert(leaf->nrec == curr_node_ptr->node_nrec);
/* Find correct location to remove this record */
- if(H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx) != 0)
+ if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx) != 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "record is not in B-tree")
/* Check for invalidating the min/max record for the tree */
@@ -2190,11 +2190,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_remove_leaf() */
+} /* H5B2__remove_leaf() */
/*-------------------------------------------------------------------------
- * Function: H5B2_remove_internal
+ * Function: H5B2__remove_internal
*
* Purpose: Removes a record from a B-tree node.
*
@@ -2207,8 +2207,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
- void *swap_loc, unsigned depth, H5AC_info_t *parent_cache_info,
+H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
+ void *swap_loc, uint16_t depth, H5AC_info_t *parent_cache_info,
unsigned *parent_cache_info_flags_ptr, H5B2_nodepos_t curr_pos,
H5B2_node_ptr_t *curr_node_ptr, void *udata, H5B2_remove_t op, void *op_data)
{
@@ -2223,7 +2223,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2234,7 +2234,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Determine the correct number of records to merge at */
@@ -2246,7 +2246,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
((internal->node_ptrs[0].node_nrec + internal->node_ptrs[1].node_nrec) <= ((merge_nrec * 2) + 1))) {
/* Merge children of root node */
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
@@ -2281,7 +2281,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
if(swap_loc)
idx = 0;
else {
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp >= 0)
idx++;
} /* end else */
@@ -2300,27 +2300,27 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* (NOTE: These 2-node redistributions should actually get the
* record to promote from the node with more records. - QAK)
*/
- /* (NOTE: This code is the same in both H5B2_remove_internal() and
- * H5B2_remove_internal_by_idx(), fix bugs in both places! - QAK)
+ /* (NOTE: This code is the same in both H5B2__remove_internal() and
+ * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK)
*/
if(idx == 0) { /* Left-most child */
if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
} /* end if */
else if(idx == internal->nrec) { /* Right-most child */
if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
@@ -2328,11 +2328,11 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
else { /* Middle child */
if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) ||
(internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) {
- if(H5B2_redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
+ if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge3(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
@@ -2343,7 +2343,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
idx = 0;
else {
/* Actually, this can be easily updated (for 2-node redistrib.) and shouldn't require re-searching */
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp >= 0)
idx++;
} /* end else */
@@ -2358,7 +2358,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Swap record to delete with record from leaf, if we are the last internal node */
if(swap_loc && depth == 1)
- if(H5B2_swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0)
+ if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "Can't swap records in B-tree")
/* Set pointers for advancing to child node */
@@ -2381,12 +2381,12 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Attempt to remove record from child node */
if(depth > 1) {
- if(H5B2_remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, depth - 1,
+ if(H5B2__remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1),
new_cache_info, new_cache_info_flags_ptr, next_pos, new_node_ptr, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
} /* end if */
else {
- if(H5B2_remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, udata, op, op_data) < 0)
+ if(H5B2__remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -2398,7 +2398,7 @@ H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
internal_flags |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
+ H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
#endif /* H5B2_DEBUG */
done:
@@ -2407,11 +2407,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_remove_internal() */
+} /* H5B2__remove_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_remove_leaf_by_idx
+ * Function: H5B2__remove_leaf_by_idx
*
* Purpose: Removes a record from a B-tree leaf node, according to the
* offset in the B-tree records.
@@ -2425,7 +2425,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
unsigned idx, H5B2_remove_t op, void *op_data)
{
@@ -2434,7 +2434,7 @@ H5B2_remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2443,7 +2443,7 @@ H5B2_remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock B-tree leaf node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2505,11 +2505,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_remove_leaf_by_idx() */
+} /* H5B2__remove_leaf_by_idx() */
/*-------------------------------------------------------------------------
- * Function: H5B2_remove_internal_by_idx
+ * Function: H5B2__remove_internal_by_idx
*
* Purpose: Removes a record from a B-tree node, according to the offset
* in the B-tree records
@@ -2523,8 +2523,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, unsigned depth,
+H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
H5B2_remove_t op, void *op_data)
@@ -2540,7 +2540,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2551,7 +2551,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
HDassert(internal->nrec == curr_node_ptr->node_nrec);
HDassert(depth == hdr->depth || internal->nrec > 1);
@@ -2566,7 +2566,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
HDassert(depth == hdr->depth);
/* Merge children of root node */
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
@@ -2643,27 +2643,27 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* (NOTE: These 2-node redistributions should actually get the
* record to promote from the node with more records. - QAK)
*/
- /* (NOTE: This code is the same in both H5B2_remove_internal() and
- * H5B2_remove_internal_by_idx(), fix bugs in both places! - QAK)
+ /* (NOTE: This code is the same in both H5B2__remove_internal() and
+ * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK)
*/
if(idx == 0) { /* Left-most child */
if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
} /* end if */
else if(idx == internal->nrec) { /* Right-most child */
if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) {
- if(H5B2_redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
+ if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge2(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
@@ -2671,11 +2671,11 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
else { /* Middle child */
if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) ||
(internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) {
- if(H5B2_redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
+ if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records")
} /* end if */
else {
- if(H5B2_merge3(hdr, dxpl_id, depth, curr_node_ptr,
+ if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr,
parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
} /* end else */
@@ -2730,7 +2730,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Swap record to delete with record from leaf, if we are the last internal node */
if(swap_loc && depth == 1)
- if(H5B2_swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0)
+ if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "can't swap records in B-tree")
/* Set pointers for advancing to child node */
@@ -2753,12 +2753,12 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Attempt to remove record from child node */
if(depth > 1) {
- if(H5B2_remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, depth - 1,
+ if(H5B2__remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1),
new_cache_info, new_cache_info_flags_ptr, new_node_ptr, next_pos, n, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
} /* end if */
else {
- if(H5B2_remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, (unsigned)n, op, op_data) < 0)
+ if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, (unsigned)n, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -2770,7 +2770,7 @@ H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
internal_flags |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
- H5B2_assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
+ H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
#endif /* H5B2_DEBUG */
done:
@@ -2779,11 +2779,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_remove_internal_by_idx() */
+} /* H5B2__remove_internal_by_idx() */
/*-------------------------------------------------------------------------
- * Function: H5B2_neighbor_leaf
+ * Function: H5B2__neighbor_leaf
*
* Purpose: Locate a record relative to the specified information in a
* B-tree leaf node and return that information by filling in
@@ -2809,7 +2809,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
+H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
void *neighbor_loc, H5B2_compare_t comp, void *udata, H5B2_found_t op,
void *op_data)
{
@@ -2818,7 +2818,7 @@ H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_pt
int cmp = 0; /* Comparison value of records */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2827,11 +2827,11 @@ H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_pt
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate node pointer for child */
- cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
if(cmp > 0)
idx++;
else
@@ -2865,11 +2865,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_neighbor_leaf() */
+} /* H5B2__neighbor_leaf() */
/*-------------------------------------------------------------------------
- * Function: H5B2_neighbor_internal
+ * Function: H5B2__neighbor_internal
*
* Purpose: Locate a record relative to the specified information in a
* B-tree internal node and return that information by filling in
@@ -2895,7 +2895,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp,
void *udata, H5B2_found_t op, void *op_data)
{
@@ -2904,7 +2904,7 @@ H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
int cmp = 0; /* Comparison value of records */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2914,11 +2914,11 @@ H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Locate node pointer for child */
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
idx++;
@@ -2936,11 +2936,11 @@ H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Attempt to find neighboring record */
if(depth > 1) {
- if(H5B2_neighbor_internal(hdr, dxpl_id, depth - 1, &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
+ if(H5B2__neighbor_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node")
} /* end if */
else {
- if(H5B2_neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
+ if(H5B2__neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node")
} /* end else */
@@ -2950,11 +2950,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_neighbor_internal() */
+} /* H5B2__neighbor_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_delete_node
+ * Function: H5B2__delete_node
*
* Purpose: Iterate over all the nodes in a B-tree node deleting them
* after they no longer have any children
@@ -2968,7 +2968,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data)
{
const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */
@@ -2976,7 +2976,7 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
uint8_t *native; /* Pointers to node's native records */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -2987,7 +2987,7 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
unsigned u; /* Local index */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_WRITE)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -2997,14 +2997,14 @@ H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Descend into children */
for(u = 0; u < internal->nrec + (unsigned)1; u++)
- if(H5B2_delete_node(hdr, dxpl_id, (depth - 1), &(internal->node_ptrs[u]), op, op_data) < 0)
+ if(H5B2__delete_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node descent failed")
} /* end if */
else {
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_WRITE)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_WRITE)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -3031,11 +3031,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_delete_node() */
+} /* H5B2__delete_node() */
/*-------------------------------------------------------------------------
- * Function: H5B2_node_size
+ * Function: H5B2__node_size
*
* Purpose: Iterate over all the records from a B-tree node, collecting
* btree storage info.
@@ -3048,13 +3048,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5B2_node_ptr_t *curr_node, hsize_t *btree_size)
{
H5B2_internal_t *internal = NULL; /* Pointer to internal node */
herr_t ret_value = SUCCEED; /* Iterator return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(hdr);
@@ -3063,7 +3063,7 @@ H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
HDassert(depth > 0);
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */
@@ -3072,7 +3072,7 @@ H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
/* Descend into children */
for(u = 0; u < internal->nrec + (unsigned)1; u++)
- if(H5B2_node_size(hdr, dxpl_id, (depth - 1), &(internal->node_ptrs[u]), btree_size) < 0)
+ if(H5B2__node_size(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), btree_size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed")
} /* end if */
else /* depth is 1: count all the leaf nodes from this node */
@@ -3086,11 +3086,11 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_node_size() */
+} /* H5B2__node_size() */
/*-------------------------------------------------------------------------
- * Function: H5B2_internal_free
+ * Function: H5B2__internal_free
*
* Purpose: Destroys a B-tree internal node in memory.
*
@@ -3103,11 +3103,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_internal_free(H5B2_internal_t *internal)
+H5B2__internal_free(H5B2_internal_t *internal)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -3123,7 +3123,7 @@ H5B2_internal_free(H5B2_internal_t *internal)
internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].node_ptr_fac, internal->node_ptrs);
/* Decrement ref. count on B-tree header */
- if(H5B2_hdr_decr(internal->hdr) < 0)
+ if(H5B2__hdr_decr(internal->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header")
/* Free B-tree internal node info */
@@ -3131,11 +3131,11 @@ H5B2_internal_free(H5B2_internal_t *internal)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_internal_free() */
+} /* end H5B2__internal_free() */
/*-------------------------------------------------------------------------
- * Function: H5B2_leaf_free
+ * Function: H5B2__leaf_free
*
* Purpose: Destroys a B-tree leaf node in memory.
*
@@ -3148,11 +3148,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_leaf_free(H5B2_leaf_t *leaf)
+H5B2__leaf_free(H5B2_leaf_t *leaf)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -3164,7 +3164,7 @@ H5B2_leaf_free(H5B2_leaf_t *leaf)
leaf->leaf_native = (uint8_t *)H5FL_FAC_FREE(leaf->hdr->node_info[0].nat_rec_fac, leaf->leaf_native);
/* Decrement ref. count on B-tree header */
- if(H5B2_hdr_decr(leaf->hdr) < 0)
+ if(H5B2__hdr_decr(leaf->hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header")
/* Free B-tree leaf node info */
@@ -3172,12 +3172,12 @@ H5B2_leaf_free(H5B2_leaf_t *leaf)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B2_leaf_free() */
+} /* end H5B2__leaf_free() */
#ifdef H5B2_DEBUG
/*-------------------------------------------------------------------------
- * Function: H5B2_assert_leaf
+ * Function: H5B2__assert_leaf
*
* Purpose: Verify that a leaf node is mostly sane
*
@@ -3190,17 +3190,17 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf)
+H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf)
{
/* General sanity checking on node */
HDassert(leaf->nrec <= hdr->node_info->split_nrec);
return(0);
-} /* end H5B2_assert_leaf() */
+} /* end H5B2__assert_leaf() */
/*-------------------------------------------------------------------------
- * Function: H5B2_assert_leaf2
+ * Function: H5B2__assert_leaf2
*
* Purpose: Verify that a leaf node is mostly sane
*
@@ -3213,17 +3213,17 @@ H5B2_assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t UNUSED *leaf2)
+H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t UNUSED *leaf2)
{
/* General sanity checking on node */
HDassert(leaf->nrec <= hdr->node_info->split_nrec);
return(0);
-} /* end H5B2_assert_leaf2() */
+} /* end H5B2__assert_leaf2() */
/*-------------------------------------------------------------------------
- * Function: H5B2_assert_internal
+ * Function: H5B2__assert_internal
*
* Purpose: Verify that an internal node is mostly sane
*
@@ -3236,7 +3236,7 @@ H5B2_assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_lea
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal)
+H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal)
{
hsize_t tot_all_nrec; /* Total number of records at or below this node */
uint16_t u, v; /* Local index variables */
@@ -3260,11 +3260,11 @@ H5B2_assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_
HDassert(tot_all_nrec == parent_all_nrec);
return(0);
-} /* end H5B2_assert_internal() */
+} /* end H5B2__assert_internal() */
/*-------------------------------------------------------------------------
- * Function: H5B2_assert_internal2
+ * Function: H5B2__assert_internal2
*
* Purpose: Verify that internal nodes are mostly sane
*
@@ -3277,7 +3277,7 @@ H5B2_assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2)
+H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2)
{
hsize_t tot_all_nrec; /* Total number of records at or below this node */
uint16_t u, v; /* Local index variables */
@@ -3303,6 +3303,6 @@ H5B2_assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2
HDassert(tot_all_nrec == parent_all_nrec);
return(0);
-} /* end H5B2_assert_internal2() */
+} /* end H5B2__assert_internal2() */
#endif /* H5B2_DEBUG */
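(Editorial aside on the H5B2int.c hunks above: the debug-only assert helpers all check one structural invariant, namely that the record count a parent advertises for an internal node equals the node's own records plus everything stored under its child pointers. Below is a minimal sketch of that check using only fields that appear in the hunks; the helper name is hypothetical and not part of the patch.)

static void
H5B2__check_internal_invariant(hsize_t parent_all_nrec, const H5B2_internal_t *internal)
{
    hsize_t tot_all_nrec = internal->nrec;      /* records stored directly in this node */
    unsigned u;                                 /* local index */

    /* Add the records reachable through each of the nrec+1 child pointers */
    for(u = 0; u < (unsigned)(internal->nrec + 1); u++)
        tot_all_nrec += internal->node_ptrs[u].all_nrec;

    /* The parent's bookkeeping must agree with the subtree total */
    HDassert(tot_all_nrec == parent_all_nrec);
}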
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index 7a538bd..72476eb 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -228,15 +228,15 @@ typedef struct H5B2_hdr_cache_ud_t {
typedef struct H5B2_internal_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
H5B2_hdr_t *hdr; /* v2 B-tree header */
- unsigned nrec; /* Number of records in node to load */
- unsigned depth; /* Depth of node to load */
+ uint16_t nrec; /* Number of records in node to load */
+ uint16_t depth; /* Depth of node to load */
} H5B2_internal_cache_ud_t;
/* Callback info for loading a free space leaf node into the cache */
typedef struct H5B2_leaf_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
H5B2_hdr_t *hdr; /* v2 B-tree header */
- unsigned nrec; /* Number of records in node to load */
+ uint16_t nrec; /* Number of records in node to load */
} H5B2_leaf_cache_ud_t;
#ifdef H5B2_TESTING
@@ -281,88 +281,88 @@ extern const H5B2_class_t *const H5B2_client_class_g[H5B2_NUM_BTREE_ID];
/******************************/
/* Routines for managing B-tree header info */
-H5_DLL H5B2_hdr_t *H5B2_hdr_alloc(H5F_t *f);
-H5_DLL haddr_t H5B2_hdr_create(H5F_t *f, hid_t dxpl_id,
+H5_DLL H5B2_hdr_t *H5B2__hdr_alloc(H5F_t *f);
+H5_DLL haddr_t H5B2__hdr_create(H5F_t *f, hid_t dxpl_id,
const H5B2_create_t *cparam, void *ctx_udata);
-H5_DLL herr_t H5B2_hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
+H5_DLL herr_t H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
void *ctx_udata, uint16_t depth);
-H5_DLL herr_t H5B2_hdr_incr(H5B2_hdr_t *hdr);
-H5_DLL herr_t H5B2_hdr_decr(H5B2_hdr_t *hdr);
-H5_DLL herr_t H5B2_hdr_fuse_incr(H5B2_hdr_t *hdr);
-H5_DLL size_t H5B2_hdr_fuse_decr(H5B2_hdr_t *hdr);
-H5_DLL herr_t H5B2_hdr_dirty(H5B2_hdr_t *hdr);
-H5_DLL herr_t H5B2_hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id);
+H5_DLL herr_t H5B2__hdr_incr(H5B2_hdr_t *hdr);
+H5_DLL herr_t H5B2__hdr_decr(H5B2_hdr_t *hdr);
+H5_DLL herr_t H5B2__hdr_fuse_incr(H5B2_hdr_t *hdr);
+H5_DLL size_t H5B2__hdr_fuse_decr(H5B2_hdr_t *hdr);
+H5_DLL herr_t H5B2__hdr_dirty(H5B2_hdr_t *hdr);
+H5_DLL herr_t H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id);
/* Routines for operating on leaf nodes */
-H5B2_leaf_t *H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- unsigned nrec, H5AC_protect_t rw);
+H5B2_leaf_t *H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
+ uint16_t nrec, H5AC_protect_t rw);
/* Routines for operating on internal nodes */
-H5_DLL H5B2_internal_t *H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- haddr_t addr, unsigned nrec, unsigned depth, H5AC_protect_t rw);
+H5_DLL H5B2_internal_t *H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ haddr_t addr, uint16_t nrec, uint16_t depth, H5AC_protect_t rw);
/* Routines for allocating nodes */
-H5_DLL herr_t H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id);
-H5_DLL herr_t H5B2_create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id);
+H5_DLL herr_t H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *node_ptr);
/* Routines for releasing structures */
-H5_DLL herr_t H5B2_hdr_free(H5B2_hdr_t *hdr);
-H5_DLL herr_t H5B2_leaf_free(H5B2_leaf_t *l);
-H5_DLL herr_t H5B2_internal_free(H5B2_internal_t *i);
+H5_DLL herr_t H5B2__hdr_free(H5B2_hdr_t *hdr);
+H5_DLL herr_t H5B2__leaf_free(H5B2_leaf_t *l);
+H5_DLL herr_t H5B2__internal_free(H5B2_internal_t *i);
/* Routines for inserting records */
-H5_DLL herr_t H5B2_insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- unsigned depth, unsigned *parent_cache_info_flags_ptr,
+H5_DLL herr_t H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ uint16_t depth, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata);
-H5_DLL herr_t H5B2_insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata);
/* Routines for iterating over nodes/records */
-H5_DLL herr_t H5B2_iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5_DLL herr_t H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data);
-H5_DLL herr_t H5B2_node_size(H5B2_hdr_t *hdr, hid_t dxpl_id,
- unsigned depth, const H5B2_node_ptr_t *curr_node, hsize_t *op_data);
+H5_DLL herr_t H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ uint16_t depth, const H5B2_node_ptr_t *curr_node, hsize_t *op_data);
/* Routines for locating records */
-H5_DLL int H5B2_locate_record(const H5B2_class_t *type, unsigned nrec,
+H5_DLL int H5B2__locate_record(const H5B2_class_t *type, unsigned nrec,
size_t *rec_off, const uint8_t *native, const void *udata, unsigned *idx);
-H5_DLL herr_t H5B2_neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- unsigned depth, H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc,
+H5_DLL herr_t H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc,
H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data);
-H5_DLL herr_t H5B2_neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc,
H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data);
/* Routines for removing records */
-H5_DLL herr_t H5B2_remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, unsigned depth,
- H5AC_info_t *parent_cache_info, hbool_t * parent_cache_info_dirtied_ptr,
+H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
+ H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
H5B2_nodepos_t curr_pos, H5B2_node_ptr_t *curr_node_ptr, void *udata,
H5B2_remove_t op, void *op_data);
-H5_DLL herr_t H5B2_remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
void *udata, H5B2_remove_t op, void *op_data);
-H5_DLL herr_t H5B2_remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, unsigned depth,
- H5AC_info_t *parent_cache_info, hbool_t * parent_cache_info_dirtied_ptr,
+H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
+ H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t idx,
H5B2_remove_t op, void *op_data);
-H5_DLL herr_t H5B2_remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
unsigned idx, H5B2_remove_t op, void *op_data);
/* Routines for deleting nodes */
-H5_DLL herr_t H5B2_delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
+H5_DLL herr_t H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data);
/* Debugging routines for dumping file structures */
-H5_DLL herr_t H5B2_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+H5_DLL herr_t H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
FILE *stream, int indent, int fwidth, const H5B2_class_t *type, haddr_t obj_addr);
-H5_DLL herr_t H5B2_int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+H5_DLL herr_t H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
FILE *stream, int indent, int fwidth, const H5B2_class_t *type,
haddr_t hdr_addr, unsigned nrec, unsigned depth, haddr_t obj_addr);
-H5_DLL herr_t H5B2_leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+H5_DLL herr_t H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
FILE *stream, int indent, int fwidth, const H5B2_class_t *type,
haddr_t hdr_addr, unsigned nrec, haddr_t obj_addr);
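(Editorial aside: these prototype renames follow the convention applied throughout the H5B2int.c hunks -- a routine visible only within the H5B2 package gets a double-underscore name and the package-scope FUNC_ENTER macro in place of FUNC_ENTER_NOAPI_NOINIT. A skeleton of that pattern is sketched below; the routine name is hypothetical, while the calls and macros are ones that appear in this diff.)

herr_t
H5B2__example_release(H5B2_leaf_t *leaf)             /* hypothetical package-scoped routine */
{
    herr_t ret_value = SUCCEED;                      /* Return value */

    FUNC_ENTER_PACKAGE                               /* replaces FUNC_ENTER_NOAPI_NOINIT */

    /* Check arguments */
    HDassert(leaf);

    /* Decrement ref. count on B-tree header, via the renamed package routine */
    if(H5B2__hdr_decr(leaf->hdr) < 0)
        HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__example_release() */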
diff --git a/src/H5B2stat.c b/src/H5B2stat.c
index 5d159ed..bdb4a1f 100644
--- a/src/H5B2stat.c
+++ b/src/H5B2stat.c
@@ -139,7 +139,7 @@ H5B2_size(H5B2_t *bt2, hid_t dxpl_id, hsize_t *btree_size)
*btree_size += hdr->node_size;
else
/* Iterate through nodes */
- if(H5B2_node_size(hdr, dxpl_id, hdr->depth, &hdr->root, btree_size) < 0)
+ if(H5B2__node_size(hdr, dxpl_id, hdr->depth, &hdr->root, btree_size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed")
} /* end if */
diff --git a/src/H5B2test.c b/src/H5B2test.c
index 8507d6e..d3149a7 100644
--- a/src/H5B2test.c
+++ b/src/H5B2test.c
@@ -61,15 +61,15 @@ typedef struct H5B2_test_ctx_t {
/* Local Prototypes */
/********************/
-static void *H5B2_test_crt_context(void *udata);
-static herr_t H5B2_test_dst_context(void *ctx);
-static herr_t H5B2_test_store(void *nrecord, const void *udata);
-static herr_t H5B2_test_compare(const void *rec1, const void *rec2);
-static herr_t H5B2_test_encode(uint8_t *raw, const void *nrecord, void *ctx);
-static herr_t H5B2_test_decode(const uint8_t *raw, void *nrecord, void *ctx);
-static herr_t H5B2_test_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+static void *H5B2__test_crt_context(void *udata);
+static herr_t H5B2__test_dst_context(void *ctx);
+static herr_t H5B2__test_store(void *nrecord, const void *udata);
+static herr_t H5B2__test_compare(const void *rec1, const void *rec2);
+static herr_t H5B2__test_encode(uint8_t *raw, const void *nrecord, void *ctx);
+static herr_t H5B2__test_decode(const uint8_t *raw, void *nrecord, void *ctx);
+static herr_t H5B2__test_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
int indent, int fwidth, const void *record, const void *_udata);
-static void *H5B2_test_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t addr);
+static void *H5B2__test_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t addr);
/*********************/
@@ -80,15 +80,15 @@ const H5B2_class_t H5B2_TEST[1]={{ /* B-tree class information */
H5B2_TEST_ID, /* Type of B-tree */
"H5B2_TEST_ID", /* Name of B-tree class */
sizeof(hsize_t), /* Size of native record */
- H5B2_test_crt_context, /* Create client callback context */
- H5B2_test_dst_context, /* Destroy client callback context */
- H5B2_test_store, /* Record storage callback */
- H5B2_test_compare, /* Record comparison callback */
- H5B2_test_encode, /* Record encoding callback */
- H5B2_test_decode, /* Record decoding callback */
- H5B2_test_debug, /* Record debugging callback */
- H5B2_test_crt_dbg_context, /* Create debugging context */
- H5B2_test_dst_context /* Destroy debugging context */
+ H5B2__test_crt_context, /* Create client callback context */
+ H5B2__test_dst_context, /* Destroy client callback context */
+ H5B2__test_store, /* Record storage callback */
+ H5B2__test_compare, /* Record comparison callback */
+ H5B2__test_encode, /* Record encoding callback */
+ H5B2__test_decode, /* Record decoding callback */
+ H5B2__test_debug, /* Record debugging callback */
+ H5B2__test_crt_dbg_context, /* Create debugging context */
+ H5B2__test_dst_context /* Destroy debugging context */
}};
@@ -107,7 +107,7 @@ H5FL_DEFINE_STATIC(H5B2_test_ctx_t);
/*-------------------------------------------------------------------------
- * Function: H5B2_test_crt_context
+ * Function: H5B2__test_crt_context
*
* Purpose: Create client callback context
*
@@ -120,13 +120,13 @@ H5FL_DEFINE_STATIC(H5B2_test_ctx_t);
*-------------------------------------------------------------------------
*/
static void *
-H5B2_test_crt_context(void *_f)
+H5B2__test_crt_context(void *_f)
{
H5F_t *f = (H5F_t *)_f; /* User data for building callback context */
H5B2_test_ctx_t *ctx; /* Callback context structure */
void *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Sanity check */
HDassert(f);
@@ -143,11 +143,11 @@ H5B2_test_crt_context(void *_f)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_test_crt_context() */
+} /* H5B2__test_crt_context() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_dst_context
+ * Function: H5B2__test_dst_context
*
* Purpose: Destroy client callback context
*
@@ -160,11 +160,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_dst_context(void *_ctx)
+H5B2__test_dst_context(void *_ctx)
{
H5B2_test_ctx_t *ctx = (H5B2_test_ctx_t *)_ctx; /* Callback context structure */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Sanity check */
HDassert(ctx);
@@ -173,11 +173,11 @@ H5B2_test_dst_context(void *_ctx)
ctx = H5FL_FREE(H5B2_test_ctx_t, ctx);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2_test_dst_context() */
+} /* H5B2__test_dst_context() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_store
+ * Function: H5B2__test_store
*
* Purpose: Store native information into record for B-tree
*
@@ -190,18 +190,18 @@ H5B2_test_dst_context(void *_ctx)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_store(void *nrecord, const void *udata)
+H5B2__test_store(void *nrecord, const void *udata)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
*(hsize_t *)nrecord = *(const hsize_t *)udata;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2_test_store() */
+} /* H5B2__test_store() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_compare
+ * Function: H5B2__test_compare
*
* Purpose: Compare two native information records, according to some key
*
@@ -215,16 +215,16 @@ H5B2_test_store(void *nrecord, const void *udata)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_compare(const void *rec1, const void *rec2)
+H5B2__test_compare(const void *rec1, const void *rec2)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
FUNC_LEAVE_NOAPI((herr_t)(*(const hssize_t *)rec1 - *(const hssize_t *)rec2))
-} /* H5B2_test_compare() */
+} /* H5B2__test_compare() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_encode
+ * Function: H5B2__test_encode
*
* Purpose: Encode native information into raw form for storing on disk
*
@@ -237,11 +237,11 @@ H5B2_test_compare(const void *rec1, const void *rec2)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_encode(uint8_t *raw, const void *nrecord, void *_ctx)
+H5B2__test_encode(uint8_t *raw, const void *nrecord, void *_ctx)
{
H5B2_test_ctx_t *ctx = (H5B2_test_ctx_t *)_ctx; /* Callback context structure */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Sanity check */
HDassert(ctx);
@@ -249,11 +249,11 @@ H5B2_test_encode(uint8_t *raw, const void *nrecord, void *_ctx)
H5F_ENCODE_LENGTH_LEN(raw, *(const hsize_t *)nrecord, ctx->sizeof_size);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2_test_encode() */
+} /* H5B2__test_encode() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_decode
+ * Function: H5B2__test_decode
*
* Purpose: Decode raw disk form of record into native form
*
@@ -266,11 +266,11 @@ H5B2_test_encode(uint8_t *raw, const void *nrecord, void *_ctx)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_decode(const uint8_t *raw, void *nrecord, void *_ctx)
+H5B2__test_decode(const uint8_t *raw, void *nrecord, void *_ctx)
{
H5B2_test_ctx_t *ctx = (H5B2_test_ctx_t *)_ctx; /* Callback context structure */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Sanity check */
HDassert(ctx);
@@ -278,11 +278,11 @@ H5B2_test_decode(const uint8_t *raw, void *nrecord, void *_ctx)
H5F_DECODE_LENGTH_LEN(raw, *(hsize_t *)nrecord, ctx->sizeof_size);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2_test_decode() */
+} /* H5B2__test_decode() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_debug
+ * Function: H5B2__test_debug
*
* Purpose: Debug native form of record
*
@@ -295,11 +295,11 @@ H5B2_test_decode(const uint8_t *raw, void *nrecord, void *_ctx)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2_test_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+H5B2__test_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
int indent, int fwidth, const void *record,
const void UNUSED *_udata)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
HDassert (record);
@@ -307,11 +307,11 @@ H5B2_test_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
*(const hsize_t *)record);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5B2_test_debug() */
+} /* H5B2__test_debug() */
/*-------------------------------------------------------------------------
- * Function: H5B2_test_crt_dbg_context
+ * Function: H5B2__test_crt_dbg_context
*
* Purpose: Create context for debugging callback
*
@@ -324,12 +324,12 @@ H5B2_test_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
*-------------------------------------------------------------------------
*/
static void *
-H5B2_test_crt_dbg_context(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t UNUSED addr)
+H5B2__test_crt_dbg_context(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t UNUSED addr)
{
H5B2_test_ctx_t *ctx; /* Callback context structure */
void *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Sanity check */
HDassert(f);
@@ -346,7 +346,7 @@ H5B2_test_crt_dbg_context(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t UNUSED addr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5B2_test_crt_dbg_context() */
+} /* H5B2__test_crt_dbg_context() */
/*-------------------------------------------------------------------------
@@ -397,7 +397,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
- unsigned depth; /* Current depth of the tree */
+ uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
unsigned idx; /* Location of record which matches key */
herr_t ret_value = SUCCEED; /* Return value */
@@ -430,11 +430,11 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2_protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
- cmp = H5B2_locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
idx++;
@@ -470,11 +470,11 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2_protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
- cmp = H5B2_locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
+ cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
/* Unlock current node */
if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, leaf, H5AC__NO_FLAGS_SET) < 0)
@@ -499,7 +499,7 @@ done:
*
* Purpose: Determine the depth of a node holding a record in the B-tree
*
- * Note: Just a simple wrapper around the H5B2_get_node_info_test() routine
+ * Note: Just a simple wrapper around the H5B2__get_node_info_test() routine
*
* Return: Success: non-negative depth of the node where the record
* was found
diff --git a/src/H5Bcache.c b/src/H5Bcache.c
index 2992986..07b594d 100644
--- a/src/H5Bcache.c
+++ b/src/H5Bcache.c
@@ -115,6 +115,7 @@ H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(H5F_addr_defined(addr));
HDassert(udata);
+ /* Allocate the B-tree node in memory */
if(NULL == (bt = H5FL_MALLOC(H5B_t)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't allocate B-tree struct")
HDmemset(&bt->cache_info, 0, sizeof(H5AC_info_t));
@@ -141,7 +142,7 @@ H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* magic number */
if(HDmemcmp(p, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "wrong B-tree signature")
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree signature")
p += 4;
/* node type and level */
@@ -185,7 +186,7 @@ H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
done:
if(!ret_value && bt)
- if(H5B_node_dest(bt) < 0)
+ if(H5B__node_dest(bt) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
@@ -328,7 +329,7 @@ H5B__dest(H5F_t *f, H5B_t *bt)
} /* end if */
/* Destroy B-tree node */
- if(H5B_node_dest(bt) < 0)
+ if(H5B__node_dest(bt) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
done:
diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c
index 3c81c26..526a647 100644
--- a/src/H5Bdbg.c
+++ b/src/H5Bdbg.c
@@ -156,7 +156,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5B_assert
+ * Function: H5B__assert
*
* Purpose: Verifies that the tree is structured correctly.
*
@@ -171,7 +171,7 @@ done:
*/
#ifdef H5B_DEBUG
herr_t
-H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void *udata)
+H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void *udata)
{
H5B_t *bt = NULL;
H5UC_t *rc_shared; /* Ref-counted shared info */
@@ -189,7 +189,7 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
struct child_t *next;
} *head = NULL, *tail = NULL, *prev = NULL, *cur = NULL, *tmp = NULL;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
if(0 == ncalls++) {
if(H5DEBUG(B))
@@ -285,6 +285,6 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5B_assert() */
+} /* end H5B__assert() */
#endif /* H5B_DEBUG */
diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h
index 598b122..374fcb5 100644
--- a/src/H5Bpkg.h
+++ b/src/H5Bpkg.h
@@ -88,9 +88,9 @@ H5FL_EXTERN(H5B_t);
/******************************/
/* Package Private Prototypes */
/******************************/
-H5_DLL herr_t H5B_node_dest(H5B_t *bt);
+H5_DLL herr_t H5B__node_dest(H5B_t *bt);
#ifdef H5B_DEBUG
-herr_t H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type,
+herr_t H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type,
void *udata);
#endif
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 9e95c15..02fb82c 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -100,6 +100,7 @@ typedef struct H5B_shared_t {
size_t sizeof_len; /* Size of file lengths (in bytes) */
uint8_t *page; /* Disk page */
size_t *nkey; /* Offsets of each native key in native key buffer */
+ void *udata; /* 'Local' info for a B-tree */
} H5B_shared_t;
/*
diff --git a/src/H5C.c b/src/H5C.c
index a28364a..3f11493 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -229,6 +229,8 @@ const H5C_class_t epoch_marker_class =
/* size = */ &H5C_epoch_marker_size
};
+
+
/***************************************************************************
* Class functions for H5C__EPOCH_MARKER_TYPE:
*
@@ -255,6 +257,8 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
}
+
+
static herr_t
H5C_epoch_marker_flush(H5F_t UNUSED *f,
hid_t UNUSED dxpl_id,
@@ -274,6 +278,8 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
}
+
+
static herr_t
H5C_epoch_marker_dest(H5F_t UNUSED * f,
void UNUSED * thing)
@@ -784,7 +790,7 @@ H5C_apply_candidate_list(H5F_t * f,
entries_flushed++;
#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
+ HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
(long long)flush_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
@@ -1604,48 +1610,38 @@ H5C_expunge_entry(H5F_t * f,
FUNC_ENTER_NOAPI(FAIL)
- HDassert( f );
- HDassert( f->shared );
+ HDassert(f);
+ HDassert(f->shared);
cache_ptr = f->shared->cache;
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( type );
- HDassert( type->clear );
- HDassert( type->dest );
- HDassert( H5F_addr_defined(addr) );
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(type);
+ HDassert(type->clear);
+ HDassert(type->dest);
+ HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "LRU extreme sanity check failed on entry.\n");
- }
+ if(H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "LRU extreme sanity check failed on entry.\n");
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+ /* Look for entry in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
-
- if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) {
-
+ if((entry_ptr == NULL) || (entry_ptr->type != type))
/* the target doesn't exist in the cache, so we are done. */
HGOTO_DONE(SUCCEED)
- }
-
- HDassert( entry_ptr->addr == addr );
- HDassert( entry_ptr->type == type );
-
- if ( entry_ptr->is_protected ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
- "Target entry is protected.")
- }
- if ( entry_ptr->is_pinned ) {
+ HDassert(entry_ptr->addr == addr);
+ HDassert(entry_ptr->type == type);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \
- "Target entry is pinned.")
- }
+ /* Check for entry being pinned or protected */
+ if(entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected.")
+ if(entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned.")
/* Pass along 'free file space' flag to cache client */
entry_ptr->free_file_space_on_destroy = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 );
@@ -1670,17 +1666,13 @@ H5C_expunge_entry(H5F_t * f,
}
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "LRU extreme sanity check failed on exit.\n");
- }
+ if(H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "LRU extreme sanity check failed on exit.\n");
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_expunge_entry() */
@@ -1965,8 +1957,8 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
FALSE);
if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast
- * so just scream and die.
+ /* This shouldn't happen -- if it does,
+ * we are toast so just scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"dirty pinned entry flush failed.")
@@ -1983,7 +1975,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
* aren't trying to do a destroy here, so that
* is not an issue.
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
flushed_entries_count++;
flushed_entries_size += entry_ptr->size;
@@ -1998,8 +1990,8 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
FALSE);
if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are
- * toast so just scream and die.
+ /* This shouldn't happen -- if it does,
+ * we are toast so just scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"Can't flush entry.")
@@ -2111,7 +2103,7 @@ H5C_flush_to_min_clean(H5F_t * f,
hid_t primary_dxpl_id,
hid_t secondary_dxpl_id)
{
- H5C_t * cache_ptr;
+ H5C_t * cache_ptr;
herr_t result;
hbool_t first_flush = TRUE;
hbool_t write_permitted;
@@ -2396,41 +2388,28 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_cache_hit_rate(H5C_t * cache_ptr,
- double * hit_rate_ptr)
-
+H5C_get_cache_hit_rate(H5C_t * cache_ptr, double * hit_rate_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
-
+ if((cache_ptr == NULL ) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
- }
-
- if ( hit_rate_ptr == NULL ) {
-
+ if(hit_rate_ptr == NULL)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad hit_rate_ptr on entry.")
- }
- HDassert( cache_ptr->cache_hits >= 0 );
- HDassert( cache_ptr->cache_accesses >= cache_ptr->cache_hits );
-
- if ( cache_ptr->cache_accesses > 0 ) {
+ HDassert(cache_ptr->cache_hits >= 0);
+ HDassert(cache_ptr->cache_accesses >= cache_ptr->cache_hits);
+ if(cache_ptr->cache_accesses > 0)
*hit_rate_ptr = ((double)(cache_ptr->cache_hits)) /
((double)(cache_ptr->cache_accesses));
-
- } else {
-
+ else
*hit_rate_ptr = 0.0f;
- }
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_get_cache_hit_rate() */
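(Editorial aside: the tidied-up body above reduces to a guarded division. As a worked example with illustrative numbers not taken from the patch, 750 hits out of 1000 accesses yields a hit rate of 0.75, and a cache with no accesses yet reports 0.0 rather than dividing by zero.)

    if(cache_ptr->cache_accesses > 0)
        *hit_rate_ptr = (double)cache_ptr->cache_hits / (double)cache_ptr->cache_accesses;  /* e.g. 750.0 / 1000.0 = 0.75 */
    else
        *hit_rate_ptr = 0.0f;                                                               /* no accesses recorded yet */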
@@ -2686,10 +2665,10 @@ H5C_insert_entry(H5F_t * f,
herr_t result;
hbool_t first_flush = TRUE;
hbool_t insert_pinned;
- hbool_t flush_last;
+ hbool_t flush_last;
#ifdef H5_HAVE_PARALLEL
- hbool_t flush_collectively;
-#endif
+ hbool_t flush_collectively;
+#endif /* H5_HAVE_PARALLEL */
hbool_t set_flush_marker;
hbool_t write_permitted = TRUE;
size_t empty_space;
@@ -2731,7 +2710,7 @@ H5C_insert_entry(H5F_t * f,
flush_last = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
#ifdef H5_HAVE_PARALLEL
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
-#endif
+#endif /* H5_HAVE_PARALLEL */
entry_ptr = (H5C_cache_entry_t *)thing;
@@ -2741,19 +2720,12 @@ H5C_insert_entry(H5F_t * f,
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
- if ( test_entry_ptr != NULL ) {
-
- if ( test_entry_ptr == entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "entry already in cache.")
-
- } else {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "duplicate entry in cache.")
- }
- }
+ if(test_entry_ptr != NULL) {
+ if(test_entry_ptr == entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache.")
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache.")
+ } /* end if */
#ifndef NDEBUG
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
@@ -2829,28 +2801,17 @@ H5C_insert_entry(H5F_t * f,
}
}
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
- if ( ( cache_ptr->evictions_enabled )
- &&
+ if ( ( cache_ptr->evictions_enabled ) &&
( ( (cache_ptr->index_size + entry_ptr->size) >
- cache_ptr->max_cache_size
- )
+ cache_ptr->max_cache_size)
||
- (
- ( ( empty_space + cache_ptr->clean_index_size ) <
- cache_ptr->min_clean_size )
- )
- )
- ) {
+ ( ( ( empty_space + cache_ptr->clean_index_size ) <
+ cache_ptr->min_clean_size ) ) ) ) {
size_t space_needed;
@@ -3364,11 +3325,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * JRM -- 11/5/08
- * On review this function looks like no change is needed to
- * support the new clean_index_size and dirty_index_size
- * fields of H5C_t.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3377,11 +3333,11 @@ H5C_move_entry(H5C_t * cache_ptr,
haddr_t old_addr,
haddr_t new_addr)
{
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * test_entry_ptr = NULL;
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
hbool_t was_dirty;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * test_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
hbool_t removed_entry_from_slist = FALSE;
#endif /* H5C_DO_SANITY_CHECKS */
@@ -3881,10 +3837,7 @@ H5C_protect(H5F_t * f,
}
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- if ( (flags & H5C__READ_ONLY_FLAG) != 0 )
- {
- read_only = TRUE;
- }
+ read_only = ( (flags & H5C__READ_ONLY_FLAG) != 0 );
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
@@ -3895,7 +3848,7 @@ H5C_protect(H5F_t * f,
if(entry_ptr->type != type)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
- #if H5C_DO_TAGGING_SANITY_CHECKS
+#if H5C_DO_TAGGING_SANITY_CHECKS
/* The entry is already in the cache, but make sure that the tag value
being passed in via dxpl is still legal. This will ensure that had
the entry NOT been in the cache, tagging was still set up correctly
@@ -3918,8 +3871,8 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed");
} /* end if */
- #endif
-
+#endif
+
hit = TRUE;
thing = (void *)entry_ptr;
@@ -3958,21 +3911,15 @@ H5C_protect(H5F_t * f,
}
}
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
/* try to free up space if necessary and if evictions are permitted. Note
* that if evictions are enabled, we will call H5C_make_space_in_cache()
* regardless if the min_free_space requirement is not met.
*/
-
if ( ( cache_ptr->evictions_enabled ) &&
( ( (cache_ptr->index_size + entry_ptr->size) >
cache_ptr->max_cache_size)
@@ -3984,12 +3931,9 @@ H5C_protect(H5F_t * f,
size_t space_needed;
- if ( empty_space <= entry_ptr->size ) {
-
+ if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- }
-
if ( cache_ptr->check_write_permitted != NULL ) {
result = (cache_ptr->check_write_permitted)(f,
@@ -4195,25 +4139,18 @@ H5C_protect(H5F_t * f,
* into compliance.
*/
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
if ( ( cache_ptr->index_size > cache_ptr->max_cache_size )
||
( ( empty_space + cache_ptr->clean_index_size ) <
cache_ptr->min_clean_size) ) {
- if ( cache_ptr->index_size > cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- }
result = H5C_make_space_in_cache(f, primary_dxpl_id,
secondary_dxpl_id,
@@ -4651,9 +4588,7 @@ H5C_set_prefix(H5C_t * cache_ptr, char * prefix)
cache_ptr->prefix[H5C__PREFIX_LEN - 1] = '\0';
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_set_prefix() */
@@ -5564,7 +5499,7 @@ H5C_unprotect(H5F_t * f,
void * thing,
unsigned int flags)
{
- H5C_t * cache_ptr;
+ H5C_t * cache_ptr;
hbool_t deleted;
hbool_t dirtied;
hbool_t set_flush_marker;
@@ -5582,13 +5517,13 @@ H5C_unprotect(H5F_t * f,
FUNC_ENTER_NOAPI(FAIL)
- deleted = ( (flags & H5C__DELETED_FLAG) != 0 );
- dirtied = ( (flags & H5C__DIRTIED_FLAG) != 0 );
- set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 );
- pin_entry = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 );
- unpin_entry = ( (flags & H5C__UNPIN_ENTRY_FLAG) != 0 );
- free_file_space = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 );
- take_ownership = ( (flags & H5C__TAKE_OWNERSHIP_FLAG) != 0 );
+ deleted = ((flags & H5C__DELETED_FLAG) != 0);
+ dirtied = ((flags & H5C__DIRTIED_FLAG) != 0);
+ set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
+ pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+ unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
HDassert( f );
HDassert( f->shared );
@@ -5632,49 +5567,39 @@ H5C_unprotect(H5F_t * f,
* the ro_ref_counter. Don't actually unprotect until the ref count
* drops to zero.
*/
- if ( entry_ptr->ro_ref_count > 1 ) {
-
- HDassert( entry_ptr->is_protected );
- HDassert( entry_ptr->is_read_only );
-
- if ( dirtied ) {
+ if(entry_ptr->ro_ref_count > 1) {
+ /* Sanity check */
+ HDassert(entry_ptr->is_protected);
+ HDassert(entry_ptr->is_read_only);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "Read only entry modified(1)??")
- }
+ if(dirtied)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+ /* Reduce the RO ref count */
(entry_ptr->ro_ref_count)--;
/* Pin or unpin the entry as requested. */
- if ( pin_entry ) {
-
+ if(pin_entry) {
/* Pin the entry from a client */
if(H5C_pin_entry_from_client(cache_ptr, entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
-
- } else if ( unpin_entry ) {
-
+ } else if(unpin_entry) {
/* Unpin the entry from a client */
if(H5C_unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
-
- }
+ } /* end if */
} else {
+ if(entry_ptr->is_read_only) {
+ /* Sanity check */
+ HDassert(entry_ptr->ro_ref_count == 1);
- if ( entry_ptr->is_read_only ) {
-
- HDassert( entry_ptr->ro_ref_count == 1 );
-
- if ( dirtied ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "Read only entry modified(2)??")
- }
+ if(dirtied)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
entry_ptr->is_read_only = FALSE;
entry_ptr->ro_ref_count = 0;
- }
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
/* When the H5C code is used to implement the metadata cache in the
@@ -5692,47 +5617,36 @@ H5C_unprotect(H5F_t * f,
* are contiguous, with only one dirty flag, we have to let the supplied
     * functions deal with resetting the is_dirty flag.
*/
- if ( entry_ptr->clear_on_unprotect ) {
-
- HDassert( entry_ptr->is_dirty );
+ if(entry_ptr->clear_on_unprotect) {
+ /* Sanity check */
+ HDassert(entry_ptr->is_dirty);
entry_ptr->clear_on_unprotect = FALSE;
-
- if ( ! dirtied ) {
-
+ if(!dirtied)
clear_entry = TRUE;
- }
- }
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
- if ( ! (entry_ptr->is_protected) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "Entry already unprotected??")
- }
-
- /* mark the entry as dirty if appropriate */
- entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied );
+ if(!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")
- if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
+ /* Mark the entry as dirty if appropriate */
+ entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
+ /* Update index for newly dirtied entry */
+ if(was_clean && entry_ptr->is_dirty)
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
- }
/* Pin or unpin the entry as requested. */
- if ( pin_entry ) {
-
+ if(pin_entry) {
/* Pin the entry from a client */
if(H5C_pin_entry_from_client(cache_ptr, entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
-
- } else if ( unpin_entry ) {
-
+ } else if(unpin_entry) {
/* Unpin the entry from a client */
if(H5C_unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
-
- }
+ } /* end if */
/* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
* the pinned entry list if entry_ptr->is_pinned is TRUE.
@@ -5744,16 +5658,11 @@ H5C_unprotect(H5F_t * f,
/* if the entry is dirty, 'or' its flush_marker with the set flush flag,
* and then add it to the skip list if it isn't there already.
*/
-
- if ( entry_ptr->is_dirty ) {
-
+ if(entry_ptr->is_dirty) {
entry_ptr->flush_marker |= set_flush_marker;
-
- if ( ! (entry_ptr->in_slist) ) {
-
+ if(!entry_ptr->in_slist)
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- }
- }
+ } /* end if */
/* this implementation of the "deleted" option is a bit inefficient, as
* we re-insert the entry to be deleted into the replacement policy
@@ -5798,10 +5707,8 @@ H5C_unprotect(H5F_t * f,
entry_ptr->free_file_space_on_destroy = free_file_space;
/* Set the "take ownership" flag for the flush, if needed */
- if ( take_ownership) {
-
+ if(take_ownership)
flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
- }
if ( H5C_flush_single_entry(f,
primary_dxpl_id,
@@ -7854,7 +7761,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
     * may be created by the flush callbacks. Thus it is possible
* that the slist will not be empty after we finish the scan.
*/
-
if ( cache_ptr->slist_len == 0 ) {
node_ptr = NULL;
@@ -7875,7 +7781,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
HDassert( next_entry_ptr->in_slist );
}
-
#if H5C_DO_SANITY_CHECKS
/* Depending on circumstances, H5C_flush_single_entry() will
* remove dirty entries from the slist as it flushes them.
@@ -7886,10 +7791,12 @@ H5C_flush_invalidate_cache(H5F_t * f,
initial_slist_size = cache_ptr->slist_size;
/* There is also the possibility that entries will be
- * dirtied, resized, and/or moved as the result of
- * calls to the flush callbacks. We use the slist_len_increase
- * and slist_size_increase increase fields in struct H5C_t
- * to track these changes for purpose of sanity checking.
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase
+ * fields in struct H5C_t to track these changes for
+ * purposes of sanity checking.
+ *
* To this end, we must zero these fields before we start
* the pass through the slist.
*/
@@ -7988,7 +7895,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
( cache_ptr->num_last_entries >=
cache_ptr->slist_len ) ) ) {
- #if H5C_DO_SANITY_CHECKS
+#if H5C_DO_SANITY_CHECKS
/* update actual_slist_len & actual_slist_size before
* the flush. Note that the entry will be removed
* from the slist after the flush, and thus may be
@@ -8001,7 +7908,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
*/
actual_slist_len++;
actual_slist_size += entry_ptr->size;
- #endif /* H5C_DO_SANITY_CHECKS */
+#endif /* H5C_DO_SANITY_CHECKS */
if ( entry_ptr->is_protected ) {
@@ -8028,8 +7935,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
FALSE);
if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast
- * so just scream and die.
+ /* This shouldn't happen -- if it does, we
+ * are toast so just scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
@@ -8054,8 +7961,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
TRUE);
if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast so
- * just scream and die.
+ /* This shouldn't happen -- if it does, we
+ * are toast so just scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
@@ -8123,8 +8030,9 @@ H5C_flush_invalidate_cache(H5F_t * f,
if ( entry_ptr->is_protected ) {
- /* we have major problems -- but lets flush and destroy
- * everything we can before we flag an error.
+ /* we have major problems -- but let's flush and
+ * destroy everything we can before we flag an
+ * error.
*/
protected_entries++;
@@ -8148,8 +8056,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
TRUE);
if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast so
- * just scream and die.
+ /* This shouldn't happen -- if it does,
+ * we are toast so just scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
@@ -8304,10 +8212,10 @@ done:
 * primary_dxpl_id, and secondary_dxpl_id are all irrelevant,
* and the call can't be part of a sequence of flushes.
*
- * If the caller knows the address of the TBBT node at
+ * If the caller knows the address of the skip list node at
* which the target entry resides, it can avoid a lookup
* by supplying that address in the tgt_node_ptr parameter.
- * If this parameter is NULL, the function will do a TBBT
+ * If this parameter is NULL, the function will do a skip list
* search for the entry instead.
*
* The function does nothing silently if there is no entry
@@ -8332,9 +8240,9 @@ H5C_flush_single_entry(H5F_t * f,
hbool_t del_entry_from_slist_on_destroy)
{
H5C_t * cache_ptr = f->shared->cache;
- hbool_t destroy;
- hbool_t clear_only;
- hbool_t take_ownership;
+ hbool_t destroy; /* external flag */
+ hbool_t clear_only; /* external flag */
+ hbool_t take_ownership; /* external flag */
hbool_t was_dirty;
hbool_t destroy_entry;
herr_t status;
@@ -8351,12 +8259,13 @@ H5C_flush_single_entry(H5F_t * f,
HDassert( H5F_addr_defined(addr) );
HDassert( first_flush_ptr );
- destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
- clear_only = ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
- take_ownership = ( (flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ /* setup external flags from the flags parameter */
+ destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+ clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
/* Set the flag for destroying the entry, based on the 'take ownership'
- * and 'destroy' flags
+ * and 'destroy' flags
*/
if(take_ownership)
destroy_entry = FALSE;
@@ -8366,6 +8275,7 @@ H5C_flush_single_entry(H5F_t * f,
/* attempt to find the target entry in the hash table */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ /* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
if ( entry_ptr != NULL ) {
@@ -8526,11 +8436,10 @@ H5C_flush_single_entry(H5F_t * f,
#if H5C_DO_SANITY_CHECKS
if ( ( entry_ptr->is_dirty ) &&
( cache_ptr->check_write_permitted == NULL ) &&
- ( ! (cache_ptr->write_permitted) ) ) {
+ ( ! (cache_ptr->write_permitted) ) )
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Write when writes are always forbidden!?!?!")
- }
#endif /* H5C_DO_SANITY_CHECKS */
if ( destroy ) {
@@ -8598,12 +8507,11 @@ H5C_flush_single_entry(H5F_t * f,
* If that ceases to be the case, further
* tests will be necessary.
*/
- if ( cache_ptr->aux_ptr != NULL ) {
+ if ( cache_ptr->aux_ptr != NULL )
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"resize/move in serialize occured in parallel case.")
- }
}
#endif /* H5_HAVE_PARALLEL */
}
@@ -8701,6 +8609,7 @@ H5C_flush_single_entry(H5F_t * f,
}
+ /* reset the flush_in progress flag */
entry_ptr->flush_in_progress = FALSE;
}
@@ -8740,10 +8649,6 @@ done:
*
* Programmer: John Mainzer, 5/18/04
*
- * QAK -- 1/31/08
- * Added initialization for the new free_file_space_on_destroy
- * field.
- *
*-------------------------------------------------------------------------
*/
static void *
@@ -8941,7 +8846,6 @@ H5C_make_space_in_cache(H5F_t * f,
if ( write_permitted ) {
initial_list_len = cache_ptr->LRU_list_len;
-
entry_ptr = cache_ptr->LRU_tail_ptr;
if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
@@ -8997,6 +8901,7 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->entries_scanned_to_make_space++;
}
#endif /* H5C_COLLECT_CACHE_STATS */
+
result = H5C_flush_single_entry(f,
primary_dxpl_id,
secondary_dxpl_id,
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 0fdaa79..ae6bdad 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -177,8 +177,8 @@
* entry is flushed to disk.
*
*
- * In cases where memory is plentiful, and performance is an issue, it
- * is useful to disable all cache evictions, and thereby postpone metadata
+ * In cases where memory is plentiful, and performance is an issue, it may
+ * be useful to disable all cache evictions, and thereby postpone metadata
* writes. The following field is used to implement this.
*
* evictions_enabled: Boolean flag that is initialized to TRUE. When
@@ -283,14 +283,14 @@
* some optimizations when I get to it.
*
* num_last_entries: The number of entries in the cache that can only be
- * flushed after all other entries in the cache have
- * been flushed. At this time, this will only ever be
- * one entry (the superblock), and the code has been
- * protected with HDasserts to enforce this. This restraint
- * can certainly be relaxed in the future if the need for
- * multiple entries being flushed last arises, though
- * explicit tests for that case should be added when said
- * HDasserts are removed.
+ * flushed after all other entries in the cache have
+ * been flushed. At this time, this will only ever be
+ * one entry (the superblock), and the code has been
+ * protected with HDasserts to enforce this. This restraint
+ * can certainly be relaxed in the future if the need for
+ * multiple entries being flushed last arises, though
+ * explicit tests for that case should be added when said
+ * HDasserts are removed.
*
* With the addition of the fractal heap, the cache must now deal with
* the case in which entries may be dirtied, moved, or have their sizes
@@ -354,7 +354,8 @@
* flush.
*
* Since pinned entries cannot be evicted, they must be kept on a pinned
- * entry list, instead of being entrusted to the replacement policy code.
+ * entry list (pel), instead of being entrusted to the replacement policy
+ * code.
*
* Maintaining the pinned entry list requires the following fields:
*
@@ -382,7 +383,8 @@
*
* While there has been interest in several replacement policies for
* this cache, the initial development schedule is tight. Thus I have
- * elected to support only a modified LRU policy for the first cut.
+ * elected to support only a modified LRU (least recently used) policy
+ * for the first cut.
*
* To further simplify matters, I have simply included the fields needed
* by the modified LRU in this structure. When and if we add support for
@@ -679,7 +681,7 @@
* equal to the array index has been evicted from the cache in
* the current epoch.
*
- * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
* are used to record the number of times an entry with type
* id equal to the array index has been moved in the current
* epoch.
@@ -714,7 +716,7 @@
* with type id equal to the array index has been flushed while
* pinned in the current epoch.
*
- * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
+ * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
* cells are used to record the number of times an entry
* with type id equal to the array index has been cleared while
* pinned in the current epoch.
@@ -2350,7 +2352,7 @@ if ( (cache_ptr)->index_size != \
HDassert( (new_size) <= (cache_ptr)->slist_size ); \
HDassert( ( (cache_ptr)->slist_len > 1 ) || \
( (cache_ptr)->slist_size == (new_size) ) ); \
-} /* H5C__REMOVE_ENTRY_FROM_SLIST */
+} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#else /* H5C_DO_SANITY_CHECKS */
@@ -2371,7 +2373,7 @@ if ( (cache_ptr)->index_size != \
HDassert( (new_size) <= (cache_ptr)->slist_size ); \
HDassert( ( (cache_ptr)->slist_len > 1 ) || \
( (cache_ptr)->slist_size == (new_size) ) ); \
-} /* H5C__REMOVE_ENTRY_FROM_SLIST */
+} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#endif /* H5C_DO_SANITY_CHECKS */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 38b9469..c9679f4 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -292,16 +292,15 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* dirtied while protected.
*
* This field is set to FALSE in the protect call, and may
- * be set to TRUE by the
- * H5C_mark_entry_dirty()
- * call at an time prior to the unprotect call.
+ * be set to TRUE by the H5C_mark_entry_dirty() call at any
+ * time prior to the unprotect call.
*
- * The H5C_mark_entry_dirty() call exists
- * as a convenience function for the fractal heap code which
- * may not know if an entry is protected or pinned, but knows
- * that is either protected or pinned. The dirtied field was
- * added as in the parallel case, it is necessary to know
- * whether a protected entry was dirty prior to the protect call.
+ * The H5C_mark_entry_dirty() call exists as a convenience
+ * function for the fractal heap code which may not know if
+ * an entry is protected or pinned, but knows that it is
+ * either protected or pinned. The dirtied field was added
+ * because, in the parallel case, it is necessary to know whether a
+ * protected entry is dirty prior to the protect call.
*
* is_protected: Boolean flag indicating whether this entry is protected
* (or locked, to use more conventional terms). When it is
@@ -372,21 +371,22 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
* the entry is flushed for whatever reason.
*
* flush_me_last: Boolean flag indicating that this entry should not be
- * flushed from the cache until all other entries without
- * the flush_me_last flag set have been flushed.
+ * flushed from the cache until all other entries without
+ * the flush_me_last flag set have been flushed.
*
* flush_me_collectively: Boolean flag indicating that this entry needs
- * to be flushed collectively when in a parallel
- * situation.
+ * to be flushed collectively when in a parallel situation.
*
- * Note: At this time, the flush_me_last and flush_me_collectively
- * flags will only be applied to one entry, the superblock,
- * and the code utilizing these flags is protected with HDasserts
- * to enforce this. This restraint can certainly be relaxed in
- * the future if the the need for multiple entries getting flushed
- * last or collectively arises, though the code allowing for that
- * will need to be expanded and tested appropriately if that
- * functionality is desired.
+ * Note:
+ *
+ * At this time, the flush_me_last and flush_me_collectively
+ * flags will only be applied to one entry, the superblock,
+ * and the code utilizing these flags is protected with HDasserts
+ * to enforce this. This restraint can certainly be relaxed in
+ * the future if the need for multiple entries getting flushed
+ * last or collectively arises, though the code allowing for that
+ * will need to be expanded and tested appropriately if that
+ * functionality is desired.
*
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
 * to implement the metadata cache in the parallel case, only
@@ -608,7 +608,7 @@ typedef struct H5C_cache_entry_t
#ifdef H5_HAVE_PARALLEL
hbool_t flush_me_collectively;
hbool_t clear_on_unprotect;
- hbool_t flush_immediately;
+ hbool_t flush_immediately;
#endif /* H5_HAVE_PARALLEL */
hbool_t flush_in_progress;
hbool_t destroy_in_progress;
@@ -617,10 +617,10 @@ typedef struct H5C_cache_entry_t
/* fields supporting the 'flush dependency' feature: */
struct H5C_cache_entry_t * flush_dep_parent;
- uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
- unsigned flush_dep_height;
- hbool_t pinned_from_client;
- hbool_t pinned_from_cache;
+ uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
+ unsigned flush_dep_height;
+ hbool_t pinned_from_client;
+ hbool_t pinned_from_cache;
/* fields supporting the hash table: */
diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c
index 75080f7..2933e5e 100644
--- a/src/H5Dbtree.c
+++ b/src/H5Dbtree.c
@@ -49,11 +49,6 @@
/* Local Macros */
/****************/
-/*
- * Given a B-tree node return the dimensionality of the chunks pointed to by
- * that node.
- */
-#define H5D_BTREE_NDIMS(X) (((X)->sizeof_rkey-8)/8)
/******************/
/* Local Typedefs */
@@ -74,8 +69,8 @@
* The chunk's file address is part of the B-tree and not part of the key.
*/
typedef struct H5D_btree_key_t {
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
uint32_t nbytes; /*size of stored data */
- hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
unsigned filter_mask; /*excluded filters */
} H5D_btree_key_t;
@@ -97,8 +92,9 @@ typedef struct H5D_btree_dbg_t {
/* Local Prototypes */
/********************/
+static herr_t H5D__btree_shared_free(void *_shared);
static herr_t H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store,
- unsigned ndims);
+ const H5O_layout_chunk_t *layout);
/* B-tree iterator callbacks */
static int H5D__btree_idx_iterate_cb(H5F_t *f, hid_t dxpl_id, const void *left_key,
@@ -203,6 +199,10 @@ H5B_class_t H5B_BTREE[1] = {{
/* Local Variables */
/*******************/
+/* Declare a free list to manage H5O_layout_chunk_t objects */
+H5FL_DEFINE_STATIC(H5O_layout_chunk_t);
+
+
/*-------------------------------------------------------------------------
* Function: H5D__btree_get_shared
@@ -255,7 +255,7 @@ H5D__btree_get_shared(const H5F_t UNUSED *f, const void *_udata)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
+H5D__btree_new_node(H5F_t *f, hid_t UNUSED dxpl_id, H5B_ins_t op,
void *_lt_key, void *_udata, void *_rt_key,
haddr_t *addr_p/*out*/)
{
@@ -265,7 +265,7 @@ H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
unsigned u;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC_NOERR
/* check args */
HDassert(f);
@@ -275,21 +275,19 @@ H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
HDassert(udata->common.layout->ndims > 0 && udata->common.layout->ndims < H5O_LAYOUT_NDIMS);
HDassert(addr_p);
- /* Allocate new storage */
- HDassert(udata->nbytes > 0);
- H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
- if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
- HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "couldn't allocate new file storage")
- udata->addr = *addr_p;
+ /* Set address */
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
+ HDassert(udata->chunk_block.length > 0);
+ *addr_p = udata->chunk_block.offset;
/*
* The left key describes the storage of the UDATA chunk being
* inserted into the tree.
*/
- lt_key->nbytes = udata->nbytes;
+ H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
lt_key->filter_mask = udata->filter_mask;
for(u = 0; u < udata->common.layout->ndims; u++)
- lt_key->offset[u] = udata->common.offset[u];
+ lt_key->scaled[u] = udata->common.scaled[u];
/*
* The right key might already be present. If not, then add a zero-width
@@ -299,13 +297,11 @@ H5D__btree_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
for(u = 0; u < udata->common.layout->ndims; u++) {
- HDassert(udata->common.offset[u] + udata->common.layout->dim[u] >
- udata->common.offset[u]);
- rt_key->offset[u] = udata->common.offset[u] + udata->common.layout->dim[u];
+ HDassert(udata->common.scaled[u] + 1 > udata->common.scaled[u]);
+ rt_key->scaled[u] = udata->common.scaled[u] + 1;
} /* end if */
} /* end if */
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__btree_new_node() */
@@ -345,7 +341,7 @@ H5D__btree_cmp2(void *_lt_key, void *_udata, void *_rt_key)
HDassert(udata->layout->ndims > 0 && udata->layout->ndims <= H5O_LAYOUT_NDIMS);
/* Compare the offsets but ignore the other fields */
- ret_value = H5VM_vector_cmp_u(udata->layout->ndims, lt_key->offset, rt_key->offset);
+ ret_value = H5VM_vector_cmp_u(udata->layout->ndims, lt_key->scaled, rt_key->scaled);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__btree_cmp2() */
@@ -400,18 +396,18 @@ H5D__btree_cmp3(void *_lt_key, void *_udata, void *_rt_key)
/* indexed storage B-tree... */
/* (Dump the B-tree with h5debug to look at it) -QAK */
if(udata->layout->ndims == 2) {
- if(udata->offset[0] > rt_key->offset[0])
+ if(udata->scaled[0] > rt_key->scaled[0])
ret_value = 1;
- else if(udata->offset[0] == rt_key->offset[0] &&
- udata->offset[1] >= rt_key->offset[1])
+ else if(udata->scaled[0] == rt_key->scaled[0] &&
+ udata->scaled[1] >= rt_key->scaled[1])
ret_value = 1;
- else if(udata->offset[0] < lt_key->offset[0])
+ else if(udata->scaled[0] < lt_key->scaled[0])
ret_value = (-1);
} /* end if */
else {
- if(H5VM_vector_ge_u(udata->layout->ndims, udata->offset, rt_key->offset))
+ if(H5VM_vector_ge_u(udata->layout->ndims, udata->scaled, rt_key->scaled))
ret_value = 1;
- else if(H5VM_vector_lt_u(udata->layout->ndims, udata->offset, lt_key->offset))
+ else if(H5VM_vector_lt_u(udata->layout->ndims, udata->scaled, lt_key->scaled))
ret_value = (-1);
} /* end else */
@@ -463,13 +459,13 @@ H5D__btree_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
/* Is this *really* the requested chunk? */
for(u = 0; u < udata->common.layout->ndims; u++)
- if(udata->common.offset[u] >= lt_key->offset[u] + udata->common.layout->dim[u])
+ if(udata->common.scaled[u] >= (lt_key->scaled[u] + 1))
HGOTO_DONE(FALSE)
/* Initialize return values */
HDassert(lt_key->nbytes > 0);
- udata->addr = addr;
- udata->nbytes = lt_key->nbytes;
+ udata->chunk_block.offset = addr;
+ udata->chunk_block.length = lt_key->nbytes;
udata->filter_mask = lt_key->filter_mask;
done:
@@ -478,6 +474,44 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_disjoint
+ *
+ * Purpose: Determines if two chunks are disjoint.
+ *
+ * Return: Success: FALSE if they are not disjoint.
+ * TRUE if they are disjoint.
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, May 6, 2015
+ *
+ * Note: Assumes that the chunk offsets are scaled coordinates
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__chunk_disjoint(unsigned n, const hsize_t *scaled1, const hsize_t *scaled2)
+{
+ unsigned u; /* Local index variable */
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(n);
+ HDassert(scaled1);
+ HDassert(scaled2);
+
+ /* Loop over two chunks, detecting disjointness and getting out quickly */
+ for(u = 0; u < n; u++)
+ if((scaled1[u] + 1) <= scaled2[u] || (scaled2[u] + 1) <= scaled1[u])
+ HGOTO_DONE(TRUE)
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_disjoint() */
+
+
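For readers skimming the patch: the new H5D__chunk_disjoint() routine above works entirely in scaled (chunk-grid) coordinates, where every chunk occupies exactly one grid cell, so two chunks are disjoint as soon as their coordinates differ in any dimension. A minimal standalone sketch of that test (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Same predicate as H5D__chunk_disjoint(): in scaled space each chunk is one
 * cell wide, so any per-dimension separation makes the chunks disjoint. */
static bool chunks_disjoint(unsigned n, const unsigned long *s1, const unsigned long *s2)
{
    for(unsigned u = 0; u < n; u++)
        if((s1[u] + 1) <= s2[u] || (s2[u] + 1) <= s1[u])
            return true;        /* separated along dimension u */
    return false;               /* same grid cell in every dimension */
}

int main(void)
{
    unsigned long a[2] = {2, 3};
    unsigned long b[2] = {2, 4};

    /* Prints "disjoint": the chunks differ in the second dimension */
    printf("%s\n", chunks_disjoint(2, a, b) ? "disjoint" : "overlapping");
    return 0;
}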
+/*-------------------------------------------------------------------------
* Function: H5D__btree_insert
*
* Purpose: This function is called when the B-tree insert engine finds
@@ -507,7 +541,7 @@ done:
*/
/* ARGSUSED */
static H5B_ins_t
-H5D__btree_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
+H5D__btree_insert(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t addr, void *_lt_key,
hbool_t *lt_key_changed,
void *_md_key, void *_udata, void *_rt_key,
hbool_t UNUSED *rt_key_changed,
@@ -541,68 +575,40 @@ H5D__btree_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error")
} else if(H5VM_vector_eq_u(udata->common.layout->ndims,
- udata->common.offset, lt_key->offset) &&
- lt_key->nbytes > 0) {
+ udata->common.scaled, lt_key->scaled) && lt_key->nbytes > 0) {
/*
* Already exists. If the new size is not the same as the old size
* then we should reallocate storage.
*/
- if(lt_key->nbytes != udata->nbytes) {
-/* Currently, the old chunk data is "thrown away" after the space is reallocated,
- * so avoid data copy in H5MF_realloc() call by just free'ing the space and
- * allocating new space.
- *
- * This should keep the file smaller also, by freeing the space and then
- * allocating new space, instead of vice versa (in H5MF_realloc).
- *
- * QAK - 11/19/2002
- */
-#ifdef OLD_WAY
- if(HADDR_UNDEF == (*new_node_p = H5MF_realloc(f, H5FD_MEM_DRAW, addr,
- (hsize_t)lt_key->nbytes, (hsize_t)udata->nbytes)))
- HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk storage")
-#else /* OLD_WAY */
- H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
- if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
- HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
- H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
- if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
- HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk")
-#endif /* OLD_WAY */
- lt_key->nbytes = udata->nbytes;
+ if(lt_key->nbytes != udata->chunk_block.length) {
+ /* Set node's address (already re-allocated by main chunk routines) */
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
+ *new_node_p = udata->chunk_block.offset;
+ H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
lt_key->filter_mask = udata->filter_mask;
*lt_key_changed = TRUE;
- udata->addr = *new_node_p;
ret_value = H5B_INS_CHANGE;
} else {
- udata->addr = addr;
+ /* Already have address in udata, from main chunk routines */
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
ret_value = H5B_INS_NOOP;
}
- } else if (H5VM_hyper_disjointp(udata->common.layout->ndims,
- lt_key->offset, udata->common.layout->dim,
- udata->common.offset, udata->common.layout->dim)) {
- HDassert(H5VM_hyper_disjointp(udata->common.layout->ndims,
- rt_key->offset, udata->common.layout->dim,
- udata->common.offset, udata->common.layout->dim));
+ } else if (H5D__chunk_disjoint(udata->common.layout->ndims,
+ lt_key->scaled, udata->common.scaled)) {
+ HDassert(H5D__chunk_disjoint(udata->common.layout->ndims,
+ rt_key->scaled, udata->common.scaled));
/*
 * Split this node, inserting the new node to the right of the
* current node. The MD_KEY is where the split occurs.
*/
- md_key->nbytes = udata->nbytes;
+ H5_CHECKED_ASSIGN(md_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
md_key->filter_mask = udata->filter_mask;
- for(u = 0; u < udata->common.layout->ndims; u++) {
- HDassert(0 == udata->common.offset[u] % udata->common.layout->dim[u]);
- md_key->offset[u] = udata->common.offset[u];
- } /* end for */
+ for(u = 0; u < udata->common.layout->ndims; u++)
+ md_key->scaled[u] = udata->common.scaled[u];
- /*
- * Allocate storage for the new chunk
- */
- H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
- if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
- HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "file allocation failed")
- udata->addr = *new_node_p;
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
+ *new_node_p = udata->chunk_block.offset;
ret_value = H5B_INS_RIGHT;
} else {
@@ -669,9 +675,10 @@ done:
static herr_t
H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key)
{
- H5D_btree_key_t *key = (H5D_btree_key_t *) _key;
- size_t ndims;
- unsigned u;
+ const H5O_layout_chunk_t *layout; /* Chunk layout description */
+ H5D_btree_key_t *key = (H5D_btree_key_t *) _key; /* Pointer to decoded key */
+ hsize_t tmp_offset; /* Temporary coordinate offset, from file */
+ unsigned u; /* Local index variable */
FUNC_ENTER_STATIC_NOERR
@@ -679,14 +686,21 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key
HDassert(shared);
HDassert(raw);
HDassert(key);
- ndims = H5D_BTREE_NDIMS(shared);
- HDassert(ndims <= H5O_LAYOUT_NDIMS);
+ layout = (const H5O_layout_chunk_t *)shared->udata;
+ HDassert(layout);
+ HDassert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
/* decode */
UINT32DECODE(raw, key->nbytes);
UINT32DECODE(raw, key->filter_mask);
- for(u = 0; u < ndims; u++)
- UINT64DECODE(raw, key->offset[u]);
+ for(u = 0; u < layout->ndims; u++) {
+ /* Retrieve coordinate offset */
+ UINT64DECODE(raw, tmp_offset);
+ HDassert(0 == (tmp_offset % layout->dim[u]));
+
+ /* Convert to a scaled offset */
+ key->scaled[u] = tmp_offset / layout->dim[u];
+ } /* end for */
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5D__btree_decode_key() */
@@ -707,9 +721,10 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key
static herr_t
H5D__btree_encode_key(const H5B_shared_t *shared, uint8_t *raw, const void *_key)
{
+ const H5O_layout_chunk_t *layout; /* Chunk layout description */
const H5D_btree_key_t *key = (const H5D_btree_key_t *)_key;
- size_t ndims;
- unsigned u;
+ hsize_t tmp_offset; /* Temporary coordinate offset, from file */
+ unsigned u; /* Local index variable */
FUNC_ENTER_STATIC_NOERR
@@ -717,14 +732,18 @@ H5D__btree_encode_key(const H5B_shared_t *shared, uint8_t *raw, const void *_key
HDassert(shared);
HDassert(raw);
HDassert(key);
- ndims = H5D_BTREE_NDIMS(shared);
- HDassert(ndims <= H5O_LAYOUT_NDIMS);
+ layout = (const H5O_layout_chunk_t *)shared->udata;
+ HDassert(layout);
+ HDassert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
/* encode */
UINT32ENCODE(raw, key->nbytes);
UINT32ENCODE(raw, key->filter_mask);
- for(u = 0; u < ndims; u++)
- UINT64ENCODE(raw, key->offset[u]);
+ for(u = 0; u < layout->ndims; u++) {
+ /* Compute coordinate offset from scaled offset */
+ tmp_offset = key->scaled[u] * layout->dim[u];
+ UINT64ENCODE(raw, tmp_offset);
+ } /* end for */
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5D__btree_encode_key() */
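Aside (not part of the patch): with this change the raw B-tree key on disk still stores logical element offsets, while the in-memory key now holds scaled offsets, so decode divides by the chunk dimension and encode multiplies back. A minimal sketch of the conversion, assuming (as the HDassert in the decode routine does) that chunk offsets always lie on the chunk grid:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dim = 25;          /* chunk size in this dimension, in elements */
    uint64_t logical = 150;     /* logical offset read from the raw key */

    /* decode: logical offset on disk -> scaled offset in memory */
    assert(logical % dim == 0); /* chunk starts must be aligned to the chunk grid */
    uint64_t scaled = logical / dim;        /* 6 */

    /* encode: scaled offset in memory -> logical offset for the raw key */
    uint64_t re_encoded = scaled * dim;     /* 150 again */

    printf("scaled = %llu, re-encoded = %llu\n",
           (unsigned long long)scaled, (unsigned long long)re_encoded);
    return 0;
}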
@@ -759,7 +778,7 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", key->filter_mask);
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
for(u = 0; u < udata->ndims; u++)
- HDfprintf(stream, "%s%Hd", u?", ":"", key->offset[u]);
+ HDfprintf(stream, "%s%Hd", u?", ":"", (key->scaled[u] * udata->common.layout->dim[u]));
HDfputs("}\n", stream);
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -767,6 +786,38 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
/*-------------------------------------------------------------------------
+ * Function: H5D__btree_shared_free
+ *
+ * Purpose: Free "local" B-tree shared info
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, May 7, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__btree_shared_free(void *_shared)
+{
+ H5B_shared_t *shared = (H5B_shared_t *)_shared;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Free the chunk layout information */
+ shared->udata = H5FL_FREE(H5O_layout_chunk_t, shared->udata);
+
+ /* Chain up to the generic B-tree shared info free routine */
+ if(H5B_shared_free(shared) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't free shared B-tree info")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__btree_shared_free() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__btree_shared_create
*
* Purpose: Create & initialize B-tree shared info
@@ -779,9 +830,11 @@ H5D__btree_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store, unsigned ndims)
+H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store,
+ const H5O_layout_chunk_t *layout)
{
H5B_shared_t *shared; /* Shared B-tree node info */
+ H5O_layout_chunk_t *my_layout = NULL; /* Pointer to copy of layout info */
size_t sizeof_rkey; /* Size of raw (disk) key */
herr_t ret_value = SUCCEED; /* Return value */
@@ -790,20 +843,27 @@ H5D__btree_shared_create(const H5F_t *f, H5O_storage_chunk_t *store, unsigned nd
/* Set the raw key size */
sizeof_rkey = 4 + /*storage size */
4 + /*filter mask */
- ndims * 8; /*dimension indices */
+ layout->ndims * 8; /*dimension indices */
/* Allocate & initialize global info for the shared structure */
if(NULL == (shared = H5B_shared_new(f, H5B_BTREE, sizeof_rkey)))
- HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
+ HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
/* Set up the "local" information for this dataset's chunks */
- /* <none> */
+ if(NULL == (my_layout = H5FL_MALLOC(H5O_layout_chunk_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate chunk layout")
+ HDmemcpy(my_layout, layout, sizeof(H5O_layout_chunk_t));
+ shared->udata = my_layout;
/* Make shared B-tree info reference counted */
- if(NULL == (store->u.btree.shared = H5UC_create(shared, H5B_shared_free)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
+ if(NULL == (store->u.btree.shared = H5UC_create(shared, H5D__btree_shared_free)))
+ HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
done:
+ if(ret_value < 0)
+ if(my_layout)
+ my_layout = H5FL_FREE(H5O_layout_chunk_t, my_layout);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__btree_shared_create() */
@@ -839,7 +899,7 @@ H5D__btree_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t UNUSED *spac
idx_info->storage->u.btree.dset_ohdr_addr = dset_ohdr_addr;
/* Allocate the shared structure */
- if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout->ndims) < 0)
+ if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
done:
@@ -1032,8 +1092,8 @@ H5D__btree_idx_iterate_cb(H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
/* Sanity check for memcpy() */
HDcompile_assert(offsetof(H5D_chunk_rec_t, nbytes) == offsetof(H5D_btree_key_t, nbytes));
HDcompile_assert(sizeof(chunk_rec.nbytes) == sizeof(lt_key->nbytes));
- HDcompile_assert(offsetof(H5D_chunk_rec_t, offset) == offsetof(H5D_btree_key_t, offset));
- HDcompile_assert(sizeof(chunk_rec.offset) == sizeof(lt_key->offset));
+ HDcompile_assert(offsetof(H5D_chunk_rec_t, scaled) == offsetof(H5D_btree_key_t, scaled));
+ HDcompile_assert(sizeof(chunk_rec.scaled) == sizeof(lt_key->scaled));
HDcompile_assert(offsetof(H5D_chunk_rec_t, filter_mask) == offsetof(H5D_btree_key_t, filter_mask));
HDcompile_assert(sizeof(chunk_rec.filter_mask) == sizeof(lt_key->filter_mask));
@@ -1170,7 +1230,7 @@ H5D__btree_idx_delete(const H5D_chk_idx_info_t *idx_info)
tmp_storage = *idx_info->storage;
/* Set up the shared structure */
- if(H5D__btree_shared_create(idx_info->f, &tmp_storage, idx_info->layout->ndims) < 0)
+ if(H5D__btree_shared_create(idx_info->f, &tmp_storage, idx_info->layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
/* Set up B-tree user data */
@@ -1227,9 +1287,9 @@ H5D__btree_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
/* Create shared B-tree info for each file */
- if(H5D__btree_shared_create(idx_info_src->f, idx_info_src->storage, idx_info_src->layout->ndims) < 0)
+ if(H5D__btree_shared_create(idx_info_src->f, idx_info_src->storage, idx_info_src->layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for source shared B-tree info")
- if(H5D__btree_shared_create(idx_info_dst->f, idx_info_dst->storage, idx_info_dst->layout->ndims) < 0)
+ if(H5D__btree_shared_create(idx_info_dst->f, idx_info_dst->storage, idx_info_dst->layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for destination shared B-tree info")
/* Create the root of the B-tree that describes chunked storage in the dest. file */
@@ -1309,7 +1369,7 @@ H5D__btree_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
HDassert(index_size);
/* Initialize the shared info for the B-tree traversal */
- if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout->ndims) < 0)
+ if(H5D__btree_shared_create(idx_info->f, idx_info->storage, idx_info->layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
shared_init = TRUE;
@@ -1441,11 +1501,13 @@ done:
*/
herr_t
H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
- int fwidth, unsigned ndims)
+ int fwidth, unsigned ndims, const uint32_t *dim)
{
H5D_btree_dbg_t udata; /* User data for B-tree callback */
H5O_storage_chunk_t storage; /* Storage information for B-tree callback */
+ H5O_layout_chunk_t layout; /* Layout information for B-tree callback */
hbool_t shared_init = FALSE; /* Whether B-tree shared info is initialized */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1454,15 +1516,21 @@ H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent
HDmemset(&storage, 0, sizeof(storage));
storage.idx_type = H5D_CHUNK_IDX_BTREE;
+ /* Reset "fake" layout info */
+ HDmemset(&layout, 0, sizeof(layout));
+ layout.ndims = ndims;
+ for(u = 0; u < ndims; u++)
+ layout.dim[u] = dim[u];
+
/* Allocate the shared structure */
- if(H5D__btree_shared_create(f, &storage, ndims) < 0)
+ if(H5D__btree_shared_create(f, &storage, &layout) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
shared_init = TRUE;
/* Set up user data for callback */
- udata.common.layout = NULL;
+ udata.common.layout = &layout;
udata.common.storage = &storage;
- udata.common.offset = NULL;
+ udata.common.scaled = NULL;
udata.ndims = ndims;
/* Dump the records for the B-tree */
@@ -1472,10 +1540,10 @@ done:
if(shared_init) {
/* Free the raw B-tree node buffer */
if(NULL == storage.u.btree.shared)
- HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted page nil")
+ HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "ref-counted shared info nil")
else
if(H5UC_DEC(storage.u.btree.shared) < 0)
- HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page")
+ HDONE_ERROR(H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted shared info")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index a284cee..5d8ea4a 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -59,6 +59,7 @@
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
+#include "H5MFprivate.h" /* File memory management */
#include "H5VMprivate.h" /* Vector and array functions */
@@ -159,6 +160,7 @@ typedef struct H5D_chunk_it_ud4_t {
FILE *stream; /* Output stream */
hbool_t header_displayed; /* Node's header is displayed? */
unsigned ndims; /* Number of dimensions for chunk/dataset */
+ uint32_t *chunk_dim; /* Chunk dimensions */
} H5D_chunk_it_ud4_t;
/* Callback info for nonexistent readvv operation */
@@ -211,9 +213,9 @@ H5D__nonexistent_readvv(const H5D_io_info_t *io_info,
/* Helper routines */
static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims,
const hsize_t *curr_dims);
-static void *H5D__chunk_alloc(size_t size, const H5O_pline_t *pline);
-static void *H5D__chunk_xfree(void *chk, const H5O_pline_t *pline);
-static void *H5D__chunk_realloc(void *chk, size_t size,
+static void *H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline);
+static void *H5D__chunk_mem_xfree(void *chk, const H5O_pline_t *pline);
+static void *H5D__chunk_mem_realloc(void *chk, size_t size,
const H5O_pline_t *pline);
static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last);
static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last,
@@ -230,6 +232,7 @@ static herr_t H5D__chunk_file_cb(void *elem, hid_t type_id, unsigned ndims,
const hsize_t *coords, void *fm);
static herr_t H5D__chunk_mem_cb(void *elem, hid_t type_id, unsigned ndims,
const hsize_t *coords, void *fm);
+static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled);
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
@@ -237,6 +240,8 @@ static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, size_t size);
static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata);
+static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info,
+ const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert);
#ifdef H5_HAVE_PARALLEL
static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf);
@@ -314,83 +319,94 @@ H5FL_BLK_DEFINE_STATIC(chunk);
*-------------------------------------------------------------------------
*/
herr_t
-H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsize_t *offset,
- uint32_t data_size, const void *buf)
+H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
+ hsize_t *offset, uint32_t data_size, const void *buf)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
- hsize_t chunk_idx;
- H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
- H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
- const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ H5F_block_t old_chunk; /* Offset/length of old chunk */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
/* Allocate dataspace and initialize it if it hasn't been. */
- if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
+ if(!(*layout->ops->is_space_alloc)(&layout->storage))
/* Allocate storage */
if(H5D__alloc_storage(dset, dxpl_id, H5D_ALLOC_WRITE, FALSE, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage")
- /* Retrieve the dataset dimensions */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
-
/* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, offset,
- layout->u.chunk.dim, layout->u.chunk.down_chunks, &chunk_idx) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
+ H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
+ scaled[dset->shared->ndims] = 0;
/* Find out the file address of the chunk (if any) */
- if(H5D__chunk_lookup(dset, dxpl_id, offset, chunk_idx, &udata) < 0)
+ if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- udata.filter_mask = filters;
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Set the file block information for the old chunk */
+ /* (Which is only defined when overwriting an existing chunk) */
+ old_chunk.offset = udata.chunk_block.offset;
+ old_chunk.length = udata.chunk_block.length;
- /* Check if the chunk needs to be 'inserted' (could exist already and
- * the 'insert' operation could resize it)
+ /* Check if the chunk needs to be inserted (it also could exist already
+ * and the chunk allocate operation could resize it)
*/
- {
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
- /* Compose chunked index info struct */
- idx_info.f = dset->oloc.file;
- idx_info.dxpl_id = dxpl_id;
- idx_info.pline = &(dset->shared->dcpl_cache.pline);
- idx_info.layout = &(dset->shared->layout.u.chunk);
- idx_info.storage = &(dset->shared->layout.storage.u.chunk);
-
- /* Set up the size of chunk for user data */
- udata.nbytes = data_size;
+ /* Compose chunked index info struct */
+ idx_info.f = dset->oloc.file;
+ idx_info.dxpl_id = dxpl_id;
+ idx_info.pline = &(dset->shared->dcpl_cache.pline);
+ idx_info.layout = &(dset->shared->layout.u.chunk);
+ idx_info.storage = &(dset->shared->layout.storage.u.chunk);
- /* Create the chunk it if it doesn't exist, or reallocate the chunk
- * if its size changed.
- */
- if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk")
+ /* Set up the size of chunk for user data */
+ udata.chunk_block.length = data_size;
- /* Make sure the address of the chunk is returned. */
- if(!H5F_addr_defined(udata.addr))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
- } /* end if */
+ /* Create the chunk if it doesn't exist, or reallocate the chunk
+ * if its size changed.
+ */
+ if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
- /* Fill the DXPL cache values for later use */
- if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Make sure the address of the chunk is returned. */
+ if(!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk address isn't defined")
/* Evict the (old) entry from the cache if present, but do not flush
* it to disk */
- if(UINT_MAX != udata.idx_hint)
+ if(UINT_MAX != udata.idx_hint) {
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
+
+ /* Fill the DXPL cache values for later use */
+ if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+ } /* end if */
/* Write the data to the file */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, data_size, dxpl_id, buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, data_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
+ /* Insert the chunk record into the index */
+ if(need_insert && layout->storage.u.chunk.ops->insert) {
+ /* Set the chunk's filter mask to the new settings */
+ udata.filter_mask = filters;
+
+ if((layout->storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
+ } /* end if */
+
done:
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_direct_write() */
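Aside (illustrative only): the comments above describe the allocate-or-reuse decision the new H5D__chunk_file_alloc() helper makes for the unfiltered case -- create the chunk if it doesn't exist, reallocate it if its size changed, otherwise reuse the existing block -- with need_insert telling the caller whether the index has to be updated. The helper itself is not shown in this hunk, so the sketch below is only a plausible reading of that contract; alloc_space()/free_space() are hypothetical stand-ins for the file-space manager:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t offset; uint64_t length; } block_t;
#define UNDEF_ADDR UINT64_MAX

/* Hypothetical stand-ins for the library's file-space manager */
static uint64_t next_free = 4096;
static uint64_t alloc_space(uint64_t len) { uint64_t a = next_free; next_free += len; return a; }
static void     free_space(uint64_t o, uint64_t l) { (void)o; (void)l; }

static void chunk_file_alloc(const block_t *old_blk, block_t *new_blk, bool *need_insert)
{
    *need_insert = false;
    if(old_blk->offset == UNDEF_ADDR) {             /* no chunk yet: allocate and insert */
        new_blk->offset = alloc_space(new_blk->length);
        *need_insert = true;
    } else if(old_blk->length != new_blk->length) { /* size changed: reallocate and insert */
        free_space(old_blk->offset, old_blk->length);
        new_blk->offset = alloc_space(new_blk->length);
        *need_insert = true;
    } else                                          /* same size: reuse, no index update */
        new_blk->offset = old_blk->offset;
}

int main(void)
{
    block_t old_blk = { UNDEF_ADDR, 0 };            /* chunk not yet allocated */
    block_t new_blk = { UNDEF_ADDR, 512 };          /* incoming chunk of 512 bytes */
    bool need_insert;

    chunk_file_alloc(&old_blk, &new_blk, &need_insert);
    printf("offset=%llu need_insert=%d\n", (unsigned long long)new_blk.offset, need_insert);
    return 0;
}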
@@ -454,23 +470,15 @@ done:
herr_t
H5D__chunk_set_info(const H5D_t *dset)
{
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
- int sndims; /* Rank of dataspace */
- unsigned ndims; /* Rank of dataspace */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity checks */
HDassert(dset);
- /* Get the dim info for dataset */
- if((sndims = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
- H5_ASSIGN_OVERFLOW(ndims, sndims, int, unsigned);
-
/* Set the base layout information */
- if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, ndims, curr_dims) < 0)
+ if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, dset->shared->ndims, dset->shared->curr_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info")
/* Call the index's "resize" callback */
@@ -498,10 +506,7 @@ static herr_t
H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
{
const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */
- hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- hsize_t dims[H5O_LAYOUT_NDIMS]; /* Dimension size of data in elements */
uint64_t chunk_size; /* Size of chunk in bytes */
- int ndims; /* Rank of dataspace */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -516,9 +521,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "no chunk information set?")
/* Set up layout information */
- if((ndims = H5S_GET_EXTENT_NDIMS(dset->shared->space)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get rank")
- if(dset->shared->layout.u.chunk.ndims != (unsigned)ndims)
+ if(dset->shared->layout.u.chunk.ndims != dset->shared->ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the dataspace")
/* Increment # of chunk dimensions, to account for datatype size as last element */
@@ -532,10 +535,6 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
/* Set the last dimension of the chunk size to the size of the datatype */
dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(type);
- /* Get local copy of dataset dimensions (for sanity checking) */
- if(H5S_get_simple_extent_dims(dset->shared->space, dims, max_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to query maximum dimensions")
-
/* Sanity check dimensions */
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
/* Don't allow zero-sized chunk dimensions */
@@ -547,7 +546,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
* the maximum dimension size. If any dimension size is zero, there
* will be no such restriction.
*/
- if(dims[u] && max_dims[u] != H5S_UNLIMITED && max_dims[u] < dset->shared->layout.u.chunk.dim[u])
+ if(dset->shared->curr_dims[u] && dset->shared->max_dims[u] != H5S_UNLIMITED && dset->shared->max_dims[u] < dset->shared->layout.u.chunk.dim[u])
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")
} /* end for */
@@ -562,7 +561,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB")
/* Retain computed chunk size */
- H5_ASSIGN_OVERFLOW(dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t);
+ H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t);
/* Reset address and pointer of the array struct for the chunked storage index */
if(H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, TRUE) < 0)
@@ -631,6 +630,22 @@ H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id)
H5D__chunk_cinfo_cache_reset(&(rdcc->last));
} /* end else */
+ /* Compute scaled dimension info, if dataset dims > 1 */
+ if(dset->shared->ndims > 1) {
+ unsigned u; /* Local index value */
+
+ for(u = 0; u < dset->shared->ndims; u++) {
+ /* Initial scaled dimension sizes */
+ rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u];
+
+ /* Initial 'power2up' values for scaled dimensions */
+ rdcc->scaled_power2up[u] = H5VM_power2up(rdcc->scaled_dims[u]);
+
+ /* Number of bits required to encode scaled dimension size */
+ rdcc->scaled_encode_bits[u] = H5VM_log2_gen(rdcc->scaled_power2up[u]);
+ } /* end for */
+ } /* end if */
+
/* Compose chunked index info struct */
idx_info.f = f;
idx_info.dxpl_id = dxpl_id;
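Aside (not part of the patch): the scaled-dimension bookkeeping added to H5D__chunk_init() above divides each current dataspace extent by the chunk dimension, rounds that count up to a power of two, and records how many bits the rounded value needs. A worked example of the arithmetic, with next_pow2()/bits_for_pow2() as simple stand-ins for H5VM_power2up()/H5VM_log2_gen():

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next power of two (stand-in for H5VM_power2up) */
static uint64_t next_pow2(uint64_t v)
{
    uint64_t p = 1;
    while(p < v)
        p <<= 1;
    return p;
}

/* Bits needed to represent a power of two (stand-in for H5VM_log2_gen) */
static unsigned bits_for_pow2(uint64_t v)
{
    unsigned n = 0;
    while(v > 1) {
        v >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    uint64_t curr_dim = 1000;   /* current dataspace extent in this dimension */
    uint64_t chunk_dim = 30;    /* chunk size in this dimension */

    uint64_t scaled_dim  = curr_dim / chunk_dim;        /* 33 */
    uint64_t scaled_p2up = next_pow2(scaled_dim);       /* 64 */
    unsigned encode_bits = bits_for_pow2(scaled_p2up);  /* 6 */

    printf("scaled=%llu pow2up=%llu bits=%u\n",
           (unsigned long long)scaled_dim,
           (unsigned long long)scaled_p2up, encode_bits);
    return 0;
}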
@@ -722,12 +737,10 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number")
/* Set the number of dimensions for the memory dataspace */
- H5_ASSIGN_OVERFLOW(fm->m_ndims, sm_ndims, int, unsigned);
+ H5_CHECKED_ASSIGN(fm->m_ndims, unsigned, sm_ndims, int);
- /* Get dim number and dimensionality for each dataspace */
+ /* Get rank for file dataspace */
fm->f_ndims = f_ndims = dataset->shared->layout.u.chunk.ndims - 1;
- if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
/* Normalize hyperslab selections by adjusting them by the offset */
/* (It might be worthwhile to normalize both the file and memory dataspaces
@@ -974,7 +987,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D__chunk_alloc
+ * Function: H5D__chunk_mem_alloc
*
* Purpose: Allocate space for a chunk in memory. This routine allocates
* memory space for non-filtered chunks from a block free list
@@ -988,7 +1001,7 @@ done:
*-------------------------------------------------------------------------
*/
static void *
-H5D__chunk_alloc(size_t size, const H5O_pline_t *pline)
+H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline)
{
void *ret_value = NULL; /* Return value */
@@ -1003,11 +1016,11 @@ H5D__chunk_alloc(size_t size, const H5O_pline_t *pline)
ret_value = H5FL_BLK_MALLOC(chunk, size);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5D__chunk_alloc() */
+} /* H5D__chunk_mem_alloc() */
/*-------------------------------------------------------------------------
- * Function: H5D__chunk_xfree
+ * Function: H5D__chunk_mem_xfree
*
* Purpose: Free space for a chunk in memory. This routine releases
* memory space for non-filtered chunks back to a block free list
@@ -1021,7 +1034,7 @@ H5D__chunk_alloc(size_t size, const H5O_pline_t *pline)
*-------------------------------------------------------------------------
*/
static void *
-H5D__chunk_xfree(void *chk, const H5O_pline_t *pline)
+H5D__chunk_mem_xfree(void *chk, const H5O_pline_t *pline)
{
FUNC_ENTER_STATIC_NOERR
@@ -1035,11 +1048,11 @@ H5D__chunk_xfree(void *chk, const H5O_pline_t *pline)
} /* end if */
FUNC_LEAVE_NOAPI(NULL)
-} /* H5D__chunk_xfree() */
+} /* H5D__chunk_mem_xfree() */
/*-------------------------------------------------------------------------
- * Function: H5D__chunk_realloc
+ * Function: H5D__chunk_mem_realloc
*
* Purpose: Reallocate space for a chunk in memory. This routine allocates
* memory space for non-filtered chunks from a block free list
@@ -1053,7 +1066,7 @@ H5D__chunk_xfree(void *chk, const H5O_pline_t *pline)
*-------------------------------------------------------------------------
*/
static void *
-H5D__chunk_realloc(void *chk, size_t size, const H5O_pline_t *pline)
+H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline)
{
void *ret_value = NULL; /* Return value */
@@ -1068,7 +1081,7 @@ H5D__chunk_realloc(void *chk, size_t size, const H5O_pline_t *pline)
ret_value = H5FL_BLK_REALLOC(chunk, chk, size);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5D__chunk_realloc() */
+} /* H5D__chunk_mem_realloc() */
/*--------------------------------------------------------------------------
@@ -1134,6 +1147,7 @@ H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t
*io_info)
{
H5D_chunk_info_t *chunk_info; /* Chunk information to insert into skip list */
+ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk */
hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */
hsize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */
unsigned u; /* Local index variable */
@@ -1155,20 +1169,20 @@ H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Set chunk location & hyperslab size */
for(u = 0; u < fm->f_ndims; u++) {
HDassert(sel_start[u] == sel_end[u]);
- chunk_info->coords[u] = (sel_start[u] / fm->layout->u.chunk.dim[u]) * fm->layout->u.chunk.dim[u];
+ chunk_info->scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u];
+ coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u];
} /* end for */
- chunk_info->coords[fm->f_ndims] = 0;
+ chunk_info->scaled[fm->f_ndims] = 0;
/* Calculate the index of this chunk */
- if(H5VM_chunk_index(fm->f_ndims, chunk_info->coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, &chunk_info->index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+ chunk_info->index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, chunk_info->scaled);
/* Copy selection for file's dataspace into chunk dataspace */
if(H5S_select_copy(fm->single_space, fm->file_space, FALSE) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy file selection")
/* Move selection back to have correct offset in chunk */
- if(H5S_SELECT_ADJUST_U(fm->single_space, chunk_info->coords) < 0)
+ if(H5S_SELECT_ADJUST_U(fm->single_space, coords) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection")
#ifdef H5_HAVE_PARALLEL
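
For reference, a small sketch (not part of this patch) of the relationship between an element coordinate, the new scaled chunk coordinate, and the element's offset within its chunk, using made-up values for one dimension:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t coord     = 257;   /* made-up element coordinate in one dimension */
        uint32_t chunk_dim = 100;   /* made-up chunk size in that dimension */

        uint64_t scaled          = coord / chunk_dim;    /* scaled (chunk) coordinate: 2 */
        uint64_t chunk_start     = scaled * chunk_dim;   /* chunk's first element coordinate: 200 */
        uint64_t offset_in_chunk = coord - chunk_start;  /* element offset within the chunk: 57 */

        printf("scaled=%llu chunk_start=%llu offset_in_chunk=%llu\n",
               (unsigned long long)scaled,
               (unsigned long long)chunk_start,
               (unsigned long long)offset_in_chunk);
        return 0;
    }
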
@@ -1219,8 +1233,10 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
hsize_t sel_points; /* Number of elements in file selection */
hsize_t start_coords[H5O_LAYOUT_NDIMS]; /* Starting coordinates of selection */
hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */
- hsize_t end[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */
+ hsize_t end[H5O_LAYOUT_NDIMS]; /* Final coordinates of chunk */
hsize_t chunk_index; /* Index of chunk */
+ hsize_t start_scaled[H5S_MAX_RANK]; /* Starting scaled coordinates of selection */
+ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
int curr_dim; /* Current dimension to increment */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1239,14 +1255,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Set initial chunk location & hyperslab size */
for(u = 0; u < fm->f_ndims; u++) {
- start_coords[u] = (sel_start[u] / fm->layout->u.chunk.dim[u]) * fm->layout->u.chunk.dim[u];
- coords[u] = start_coords[u];
+ scaled[u] = start_scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u];
+ coords[u] = start_coords[u] = scaled[u] * fm->layout->u.chunk.dim[u];
end[u] = (coords[u] + fm->chunk_dim[u]) - 1;
} /* end for */
/* Calculate the index of this chunk */
- if(H5VM_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, &chunk_index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+ chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled);
/* Iterate through each chunk in the dataset */
while(sel_points) {
@@ -1312,9 +1327,9 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
new_chunk_info->mspace=NULL;
new_chunk_info->mspace_shared = FALSE;
- /* Copy the chunk's coordinates */
- HDmemcpy(new_chunk_info->coords, coords, sizeof(hsize_t) * fm->f_ndims);
- new_chunk_info->coords[fm->f_ndims] = 0;
+ /* Copy the chunk's scaled coordinates */
+ HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
+ new_chunk_info->scaled[fm->f_ndims] = 0;
/* Insert the new chunk into the skip list */
if(H5SL_insert(fm->sel_chunks, new_chunk_info, &new_chunk_info->index) < 0) {
@@ -1325,7 +1340,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Get number of elements selected in chunk */
if((schunk_points = H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements")
- H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points, schunk_points, hssize_t, uint32_t);
+ H5_CHECKED_ASSIGN(new_chunk_info->chunk_points, uint32_t, schunk_points, hssize_t);
/* Decrement # of points left in file selection */
sel_points -= (hsize_t)schunk_points;
@@ -1345,11 +1360,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t);
coords[curr_dim]+=fm->chunk_dim[curr_dim];
end[curr_dim]+=fm->chunk_dim[curr_dim];
+ scaled[curr_dim]++;
/* Bring chunk location back into bounds, if necessary */
if(coords[curr_dim] > sel_end[curr_dim]) {
do {
/* Reset current dimension's location to 0 */
+ scaled[curr_dim] = start_scaled[curr_dim];
coords[curr_dim] = start_coords[curr_dim]; /*lint !e771 The start_coords will always be initialized */
end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1;
@@ -1357,13 +1374,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
curr_dim--;
/* Increment chunk location in current dimension */
+ scaled[curr_dim]++;
coords[curr_dim] += fm->chunk_dim[curr_dim];
end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1;
} while(coords[curr_dim] > sel_end[curr_dim]);
/* Re-calculate the index of this chunk */
- if(H5VM_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, &chunk_index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+ chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled);
} /* end if */
} /* end while */
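
A standalone sketch (not part of this patch) of the linear chunk index that the H5VM_array_offset_pre() calls above compute from scaled coordinates and the precomputed "down_chunks" products, with made-up extents:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up 3-D example: 4 x 5 x 6 chunks in each dimension */
        uint64_t nchunks[3]    = {4, 5, 6};
        uint64_t scaled[3]     = {2, 3, 1};   /* scaled (chunk) coordinates */
        uint64_t down_chunks[3];              /* # of chunks in the faster-changing dims */
        uint64_t index = 0;
        unsigned u;

        down_chunks[2] = 1;
        down_chunks[1] = nchunks[2];
        down_chunks[0] = nchunks[1] * nchunks[2];

        /* Row-major linearization of the scaled coordinates */
        for(u = 0; u < 3; u++)
            index += scaled[u] * down_chunks[u];

        printf("chunk index = %llu\n", (unsigned long long)index);  /* 2*30 + 3*6 + 1 = 79 */
        return 0;
    }
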
@@ -1464,10 +1481,16 @@ H5D__create_chunk_mem_map_hyper(const H5D_chunk_map_t *fm)
if(H5S_select_copy(chunk_info->mspace,chunk_info->fspace,FALSE) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection")
- /* Compensate for the chunk offset */
- for(u=0; u<fm->f_ndims; u++) {
- H5_CHECK_OVERFLOW(chunk_info->coords[u],hsize_t,hssize_t);
- chunk_adjust[u]=adjust[u]-(hssize_t)chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */
+ /* Compute the adjustment for this chunk */
+ for(u = 0; u < fm->f_ndims; u++) {
+ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */
+
+ /* Compute the chunk coordinates from the scaled coordinates */
+ coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u];
+
+ /* Compensate for the chunk offset */
+ H5_CHECK_OVERFLOW(coords[u], hsize_t, hssize_t);
+ chunk_adjust[u] = adjust[u] - (hssize_t)coords[u]; /*lint !e771 The adjust array will always be initialized */
} /* end for */
/* Adjust the selection */
@@ -1505,14 +1528,14 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons
H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */
hsize_t coords_in_chunk[H5O_LAYOUT_NDIMS]; /* Coordinates of element in chunk */
hsize_t chunk_index; /* Chunk index */
+ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Calculate the index of this chunk */
- if(H5VM_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, &chunk_index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+ chunk_index = H5VM_chunk_index_scaled(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, scaled);
/* Find correct chunk in file & memory skip list */
if(chunk_index==fm->last_index) {
@@ -1562,12 +1585,9 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons
/* Set the number of selected elements in chunk to zero */
chunk_info->chunk_points = 0;
- /* Compute the chunk's coordinates */
- for(u = 0; u < fm->f_ndims; u++) {
- H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u], hsize_t, hssize_t);
- chunk_info->coords[u] = (coords[u] / (hssize_t)fm->layout->u.chunk.dim[u]) * (hssize_t)fm->layout->u.chunk.dim[u];
- } /* end for */
- chunk_info->coords[fm->f_ndims] = 0;
+ /* Set the chunk's scaled coordinates */
+ HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
+ chunk_info->scaled[fm->f_ndims] = 0;
/* Insert the new chunk into the skip list */
if(H5SL_insert(fm->sel_chunks,chunk_info,&chunk_info->index) < 0) {
@@ -1589,7 +1609,7 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons
/* Get the offset of the element within the chunk */
for(u = 0; u < fm->f_ndims; u++)
- coords_in_chunk[u] = coords[u] - chunk_info->coords[u];
+ coords_in_chunk[u] = coords[u] - (scaled[u] * fm->layout->u.chunk.dim[u]);
/* Add point to file selection for chunk */
if(H5S_select_elements(chunk_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0)
@@ -1629,8 +1649,7 @@ H5D__chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
FUNC_ENTER_STATIC
/* Calculate the index of this chunk */
- if(H5VM_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, &chunk_index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
+ chunk_index = H5VM_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks);
/* Find correct chunk in file & memory skip list */
if(chunk_index == fm->last_index) {
@@ -1803,7 +1822,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
/* Initialize temporary contiguous storage info */
- H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);
+ H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t);
/* Set up compact I/O info object */
HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
@@ -1835,33 +1854,39 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
while(chunk_node) {
H5D_chunk_info_t *chunk_info; /* Chunk information */
- H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
- void *chunk; /* Pointer to locked chunk buffer */
- H5D_chunk_ud_t udata; /* B-tree pass-through */
- htri_t cacheable; /* Whether the chunk is cacheable */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */
/* Get the actual chunk information from the skip list node */
chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
/* Get the info for the chunk in the file */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
/* Check for non-existent chunk & skip it if appropriate */
- if(H5F_addr_defined(udata.addr) || UINT_MAX != udata.idx_hint
+ if(H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint
|| !skip_missing_chunks) {
- /* Load the chunk into cache and lock it. */
- if((cacheable = H5D__chunk_cacheable(io_info, udata.addr, FALSE)) < 0)
+ H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
+ void *chunk = NULL; /* Pointer to locked chunk buffer */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+
+ /* Determine if we should use the chunk cache */
+ if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
if(cacheable) {
- /* Pass in chunk's coordinates in a union. */
- io_info->store->chunk.offset = chunk_info->coords;
- io_info->store->chunk.index = chunk_info->index;
+ /* Load the chunk into cache and lock it. */
/* Compute # of bytes accessed in chunk */
H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
/* Lock the chunk into the cache */
if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
@@ -1872,20 +1897,14 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
/* Point I/O info at contiguous I/O info for this chunk */
chk_io_info = &cpt_io_info;
} /* end if */
- else if(H5F_addr_defined(udata.addr)) {
+ else if(H5F_addr_defined(udata.chunk_block.offset)) {
/* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.addr;
-
- /* No chunk cached */
- chunk = NULL;
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
/* Point I/O info at temporary I/O info for this chunk */
chk_io_info = &ctg_io_info;
} /* end else if */
else {
- /* No chunk cached */
- chunk = NULL;
-
/* Point I/O info at "nonexistent" I/O info for this chunk */
chk_io_info = &nonexistent_io_info;
} /* end else */
@@ -1949,7 +1968,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
/* Initialize temporary contiguous storage info */
- H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);
+ H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t);
/* Set up compact I/O info object */
HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
@@ -1963,27 +1982,32 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
while(chunk_node) {
H5D_chunk_info_t *chunk_info; /* Chunk information */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
void *chunk; /* Pointer to locked chunk buffer */
H5D_chunk_ud_t udata; /* Index pass-through */
htri_t cacheable; /* Whether the chunk is cacheable */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
/* Get the actual chunk information from the skip list node */
chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
- /* Load the chunk into cache. But if the whole chunk is written,
- * simply allocate space instead of load the chunk. */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0)
+ /* Look up the chunk */
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- if((cacheable = H5D__chunk_cacheable(io_info, udata.addr, TRUE)) < 0)
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Determine if we should use the chunk cache */
+ if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
if(cacheable) {
+ /* Load the chunk into cache. But if the whole chunk is written,
+ * simply allocate space instead of load the chunk. */
hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
- /* Pass in chunk's coordinates in a union. */
- io_info->store->chunk.offset = chunk_info->coords;
- io_info->store->chunk.index = chunk_info->index;
-
/* Compute # of bytes accessed in chunk */
H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
@@ -1993,6 +2017,9 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
(chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size)
entire_chunk = FALSE;
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
/* Lock the chunk into the cache */
if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
@@ -2005,9 +2032,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
} /* end if */
else {
/* If the chunk hasn't been allocated on disk, do so now. */
- if(!H5F_addr_defined(udata.addr)) {
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
-
+ if(!H5F_addr_defined(udata.chunk_block.offset)) {
/* Compose chunked index info struct */
idx_info.f = io_info->dset->oloc.file;
idx_info.dxpl_id = io_info->dxpl_id;
@@ -2016,14 +2041,14 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
/* Set up the size of chunk for user data */
- udata.nbytes = io_info->dset->shared->layout.u.chunk.size;
+ udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
- /* Create the chunk */
- if((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk")
+ /* Allocate the chunk */
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Make sure the address of the chunk is returned. */
- if(!H5F_addr_defined(udata.addr))
+ if(!H5F_addr_defined(udata.chunk_block.offset))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
/* Cache the new chunk information */
@@ -2031,7 +2056,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
} /* end if */
/* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.addr;
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
/* No chunk cached */
chunk = NULL;
@@ -2045,9 +2070,16 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
(hsize_t)chunk_info->chunk_points, chunk_info->fspace, chunk_info->mspace) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
- /* Release the cache lock on the chunk. */
- if(chunk && H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ /* Release the cache lock on the chunk, or insert chunk into index. */
+ if(chunk) {
+ if(H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+ else {
+ if(need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
+ } /* end else */
/* Advance to next chunk in list */
chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
@@ -2239,13 +2271,12 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud
HDassert(last);
HDassert(udata);
HDassert(udata->common.layout);
- HDassert(udata->common.storage);
- HDassert(udata->common.offset);
+ HDassert(udata->common.scaled);
/* Store the information in the cache */
- HDmemcpy(last->offset, udata->common.offset, sizeof(hsize_t) * udata->common.layout->ndims);
- last->addr = udata->addr;
- last->nbytes = udata->nbytes;
+ HDmemcpy(last->scaled, udata->common.scaled, sizeof(hsize_t) * udata->common.layout->ndims);
+ last->addr = udata->chunk_block.offset;
+ H5_CHECKED_ASSIGN(last->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
last->filter_mask = udata->filter_mask;
/* Indicate that the cached info is valid */
@@ -2278,21 +2309,20 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda
HDassert(last);
HDassert(udata);
HDassert(udata->common.layout);
- HDassert(udata->common.storage);
- HDassert(udata->common.offset);
+ HDassert(udata->common.scaled);
/* Check if the cached information is what is desired */
if(last->valid) {
unsigned u; /* Local index variable */
- /* Check that the offset is the same */
+ /* Check that the scaled offset is the same */
for(u = 0; u < udata->common.layout->ndims; u++)
- if(last->offset[u] != udata->common.offset[u])
+ if(last->scaled[u] != udata->common.scaled[u])
HGOTO_DONE(FALSE)
/* Retrieve the information from the cache */
- udata->addr = last->addr;
- udata->nbytes = last->nbytes;
+ udata->chunk_block.offset = last->addr;
+ udata->chunk_block.length = last->nbytes;
udata->filter_mask = last->filter_mask;
/* Indicate that the data was found */
@@ -2357,6 +2387,53 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_hash_val
+ *
+ * Purpose: Calculate a hash value index based on the chunk's scaled
+ * coordinates and the sizes of the faster-changing dimensions.
+ *
+ * Return: Hash value index
+ *
+ * Programmer: Vailin Choi; Nov 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled)
+{
+ hsize_t val; /* Intermediate value */
+ unsigned ndims = shared->ndims; /* Rank of dataset */
+ unsigned ret; /* Value to return */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(shared);
+ HDassert(scaled);
+
+ /* If the fastest changing dimension doesn't have enough entropy, use
+ * other dimensions too
+ */
+ if(ndims > 1 && shared->cache.chunk.scaled_dims[ndims - 1] <= shared->cache.chunk.nslots) {
+ unsigned u; /* Local index variable */
+
+ val = scaled[0];
+ for(u = 1; u < ndims; u++) {
+ val <<= shared->cache.chunk.scaled_encode_bits[u];
+ val ^= scaled[u];
+ } /* end for */
+ } /* end if */
+ else
+ val = scaled[ndims - 1];
+
+ /* Modulo value against the number of array slots */
+ ret = (unsigned)(val % shared->cache.chunk.nslots);
+
+ FUNC_LEAVE_NOAPI(ret)
+} /* H5D__chunk_hash_val() */
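
A standalone sketch (not part of this patch) of the shift/XOR mixing used above, with made-up scaled coordinates, encode-bit counts and slot count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t scaled[3]      = {5, 9, 2};   /* made-up scaled chunk coordinates */
        unsigned encode_bits[3] = {0, 4, 3};   /* made-up bits to encode dims 1..2 */
        uint64_t nslots         = 521;         /* made-up # of hash table slots */
        uint64_t val = scaled[0];
        unsigned u;

        for(u = 1; u < 3; u++) {
            val <<= encode_bits[u];   /* make room for the next coordinate */
            val ^= scaled[u];         /* fold it in */
        }
        printf("slot = %u\n", (unsigned)(val % nslots));
        return 0;
    }
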
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_lookup
*
* Purpose: Looks up a chunk in cache and on disk, and retrieves
@@ -2370,8 +2447,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
- hsize_t chunk_idx, H5D_chunk_ud_t *udata)
+H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
+ H5D_chunk_ud_t *udata)
{
H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
hbool_t found = FALSE; /* In cache? */
@@ -2382,36 +2459,37 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
HDassert(dset);
HDassert(dset->shared->layout.u.chunk.ndims > 0);
- HDassert(chunk_offset);
+ HDassert(scaled);
HDassert(udata);
/* Initialize the query information about the chunk we are looking for */
udata->common.layout = &(dset->shared->layout.u.chunk);
udata->common.storage = &(dset->shared->layout.storage.u.chunk);
- udata->common.offset = chunk_offset;
- udata->common.rdcc = &(dset->shared->cache.chunk);
+ udata->common.scaled = scaled;
/* Reset information about the chunk we are looking for */
- udata->addr = HADDR_UNDEF;
- udata->nbytes = 0;
+ udata->chunk_block.offset = HADDR_UNDEF;
+ udata->chunk_block.length = 0;
udata->filter_mask = 0;
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
- udata->idx_hint = H5D_CHUNK_HASH(dset->shared, chunk_idx);
+ udata->idx_hint = H5D__chunk_hash_val(dset->shared, scaled);
ent = dset->shared->cache.chunk.slot[udata->idx_hint];
if(ent)
- for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims - 1; u++)
- if(chunk_offset[u] != ent->offset[u]) {
+ for(u = 0, found = TRUE; u < dset->shared->ndims; u++)
+ if(scaled[u] != ent->scaled[u]) {
found = FALSE;
break;
} /* end if */
} /* end if */
/* Find chunk addr */
- if(found)
- udata->addr = ent->chunk_addr;
+ if(found) {
+ udata->chunk_block.offset = ent->chunk_block.offset;
+ udata->chunk_block.length = ent->chunk_block.length;
+ } /* end if */
else {
/* Invalidate idx_hint, to signal that the chunk is not in cache */
udata->idx_hint = UINT_MAX;
@@ -2473,22 +2551,23 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
HDassert(!ent->locked);
buf = ent->chunk;
- if(ent->dirty && !ent->deleted) {
+ if(ent->dirty) {
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_chunk_ud_t udata; /* pass through B-tree */
- hbool_t must_insert = FALSE; /* Whether the chunk must go through the "insert" method */
+ hbool_t must_alloc = FALSE; /* Whether the chunk must be allocated */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
/* Set up user data for index callbacks */
udata.common.layout = &dset->shared->layout.u.chunk;
udata.common.storage = &dset->shared->layout.storage.u.chunk;
- udata.common.offset = ent->offset;
- udata.common.rdcc = &(dset->shared->cache.chunk);
+ udata.common.scaled = ent->scaled;
+ udata.chunk_block.offset = ent->chunk_block.offset;
+ udata.chunk_block.length = dset->shared->layout.u.chunk.size;
udata.filter_mask = 0;
- udata.nbytes = dset->shared->layout.u.chunk.size;
- udata.addr = ent->chunk_addr;
/* Should the chunk be filtered before writing it to disk? */
if(dset->shared->dcpl_cache.pline.nused) {
- size_t alloc = udata.nbytes; /* Bytes allocated for BUF */
+ size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */
size_t nbytes; /* Chunk size (in bytes) */
if(!reset) {
@@ -2497,10 +2576,9 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
* the pipeline because we'll want to save the original buffer
* for later.
*/
- H5_ASSIGN_OVERFLOW(alloc, udata.nbytes, uint32_t, size_t);
if(NULL == (buf = H5MM_malloc(alloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline")
- HDmemcpy(buf, ent->chunk, udata.nbytes);
+ HDmemcpy(buf, ent->chunk, alloc);
} /* end if */
else {
/*
@@ -2513,7 +2591,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
point_of_no_return = TRUE;
ent->chunk = NULL;
} /* end else */
- H5_ASSIGN_OVERFLOW(nbytes, udata.nbytes, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(nbytes, size_t, udata.chunk_block.length, hsize_t);
if(H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), dxpl_cache->err_detect,
dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
@@ -2522,21 +2600,19 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
if(nbytes > ((size_t)0xffffffff))
HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
#endif /* H5_SIZEOF_SIZE_T > 4 */
- H5_ASSIGN_OVERFLOW(udata.nbytes, nbytes, size_t, uint32_t);
+ H5_CHECKED_ASSIGN(udata.chunk_block.length, hsize_t, nbytes, size_t);
- /* Indicate that the chunk must go through 'insert' method */
- must_insert = TRUE;
+ /* Indicate that the chunk must be allocated */
+ must_alloc = TRUE;
} /* end if */
- else if(!H5F_addr_defined(udata.addr))
- /* Indicate that the chunk must go through 'insert' method */
- must_insert = TRUE;
+ else if(!H5F_addr_defined(udata.chunk_block.offset))
+ /* Indicate that the chunk must be allocated */
+ must_alloc = TRUE;
- /* Check if the chunk needs to be 'inserted' (could exist already and
- * the 'insert' operation could resize it)
+ /* Check if the chunk needs to be allocated (it also could exist already
+ * and the chunk alloc operation could resize it)
*/
- if(must_insert) {
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
-
+ if(must_alloc) {
/* Compose chunked index info struct */
idx_info.f = dset->oloc.file;
idx_info.dxpl_id = dxpl_id;
@@ -2547,18 +2623,25 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Create the chunk if it doesn't exist, or reallocate the chunk
* if its size changed.
*/
- if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk")
+ if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
- /* Update the chunk entry's address, in case it was allocated or relocated */
- ent->chunk_addr = udata.addr;
+ /* Update the chunk entry's info, in case it was allocated or relocated */
+ ent->chunk_block.offset = udata.chunk_block.offset;
+ ent->chunk_block.length = udata.chunk_block.length;
} /* end if */
/* Write the data to the file */
- HDassert(H5F_addr_defined(udata.addr));
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, dxpl_id, buf) < 0)
+ HDassert(H5F_addr_defined(udata.chunk_block.offset));
+ H5_CHECK_OVERFLOW(udata.chunk_block.length, hsize_t, size_t);
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, (size_t)udata.chunk_block.length, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
+ /* Insert the chunk record into the index */
+ if(need_insert && dset->shared->layout.storage.u.chunk.ops->insert)
+ if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
+
/* Cache the chunk's info, in case it's accessed again shortly */
H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, &udata);
@@ -2575,7 +2658,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
if(buf == ent->chunk)
buf = NULL;
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
} /* end if */
done:
@@ -2591,7 +2674,7 @@ done:
*/
if(ret_value < 0 && point_of_no_return)
if(ent->chunk)
- ent->chunk = (uint8_t *)H5D__chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_flush_entry() */
@@ -2633,7 +2716,7 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
} /* end else */
/* Unlink from list */
@@ -2647,8 +2730,12 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
rdcc->tail = ent->prev;
ent->prev = ent->next = NULL;
+ /* Only clear hash table slot if chunk was not marked as deleted already */
+ if(!ent->deleted)
+ rdcc->slot[ent->idx] = NULL;
+
/* Remove from cache */
- rdcc->slot[ent->idx] = NULL;
+ HDassert(rdcc->slot[ent->idx] != ent);
ent->idx = UINT_MAX;
rdcc->nbytes_used -= dset->shared->layout.u.chunk.size;
--rdcc->nused;
@@ -2800,14 +2887,13 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
hbool_t relax)
{
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
- const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_alloc */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
- H5D_rdcc_ent_t *ent = NULL; /*cache entry */
- haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
+ H5D_rdcc_ent_t *ent; /*cache entry */
size_t chunk_size; /*size of a chunk */
void *chunk = NULL; /*the file chunk */
void *ret_value; /*return value */
@@ -2823,7 +2909,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
/* Get the chunk's size */
HDassert(layout->u.chunk.size > 0);
- H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t);
/* Check if the chunk is in the cache */
if(UINT_MAX != udata->idx_hint) {
@@ -2840,7 +2926,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
/* Make sure this is the right chunk */
for(u = 0; u < layout->u.chunk.ndims; u++)
- HDassert(io_info->store->chunk.offset[u] == ent->offset[u]);
+ HDassert(io_info->store->chunk.scaled[u] == ent->scaled[u]);
}
#endif /* NDEBUG */
@@ -2848,110 +2934,12 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
* Already in the cache. Count a hit.
*/
rdcc->stats.nhits++;
- } /* end if */
- else if(relax) {
- /*
- * Not in the cache, but we're about to overwrite the whole thing
- * anyway, so just allocate a buffer for it but don't initialize that
- * buffer with the file contents. Count this as a hit instead of a
- * miss because we saved ourselves lots of work.
- */
- rdcc->stats.nhits++;
-
- /* Still save the chunk address so the cache stays consistent */
- chunk_addr = udata->addr;
- if(NULL == (chunk = H5D__chunk_alloc(chunk_size, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
-
- /* In the case that some dataset functions look through this data,
- * clear it to all 0s. */
- HDmemset(chunk, 0, chunk_size);
- } /* end if */
- else {
/*
- * Not in the cache. Count this as a miss if it's in the file
- * or an init if it isn't.
- */
-
- /* Save the chunk address */
- chunk_addr = udata->addr;
-
- /* Check if the chunk exists on disk */
- if(H5F_addr_defined(chunk_addr)) {
- size_t chunk_alloc = 0; /*allocated chunk size */
-
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
- H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
- if(NULL == (chunk = H5D__chunk_alloc(chunk_alloc, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
- if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
-
- if(pline->nused) {
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
- H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
- } /* end if */
-
- /* Increment # of cache misses */
- rdcc->stats.nmisses++;
- } /* end if */
- else {
- H5D_fill_value_t fill_status;
-
- /* Sanity check */
- HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY);
-
- /* Chunk size on disk isn't [likely] the same size as the final chunk
- * size in memory, so allocate memory big enough. */
- if(NULL == (chunk = H5D__chunk_alloc(chunk_size, pline)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
-
- if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
-
- if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
- (fill->fill_time == H5D_FILL_TIME_IFSET &&
- (fill_status == H5D_FILL_VALUE_USER_DEFINED ||
- fill_status == H5D_FILL_VALUE_DEFAULT))) {
- /*
- * The chunk doesn't exist in the file. Replicate the fill
- * value throughout the chunk, if the fill value is defined.
- */
-
- /* Initialize the fill value buffer */
- /* (use the compact dataset storage buffer as the fill value buffer) */
- if(H5D__fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
- &dset->shared->dcpl_cache.fill, dset->shared->type,
- dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
- fb_info_init = TRUE;
-
- /* Check for VL datatype & non-default fill value */
- if(fb_info.has_vlen_fill_type)
- /* Fill the buffer with VL datatype fill values */
- if(H5D__fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
- } /* end if */
- else
- HDmemset(chunk, 0, chunk_size);
-
- /* Increment # of creations */
- rdcc->stats.ninits++;
- } /* end else */
- } /* end else */
- HDassert(chunk_size > 0);
-
- if(ent) {
- /*
- * The chunk is not at the beginning of the cache; move it backward
+ * If the chunk is not at the beginning of the cache, move it backward
* by one slot. This is how we implement the LRU preemption
* algorithm.
*/
- HDassert(ent);
if(ent->next) {
if(ent->next->next)
ent->next->next->prev = ent;
@@ -2967,67 +2955,166 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
ent->prev->next = ent;
} /* end if */
} /* end if */
- else if(rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max) {
- /* Calculate the index */
- udata->idx_hint = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index);
+ else {
+ haddr_t chunk_addr; /* Address of chunk on disk */
+ hsize_t chunk_alloc; /* Length of chunk on disk */
+
+ /* Save the chunk info so the cache stays consistent */
+ chunk_addr = udata->chunk_block.offset;
+ chunk_alloc = udata->chunk_block.length;
+
+ if(relax) {
+ /*
+ * Not in the cache, but we're about to overwrite the whole thing
+ * anyway, so just allocate a buffer for it but don't initialize that
+ * buffer with the file contents. Count this as a hit instead of a
+ * miss because we saved ourselves lots of work.
+ */
+ rdcc->stats.nhits++;
- /* Add the chunk to the cache only if the slot is not already locked */
- ent = rdcc->slot[udata->idx_hint];
- if(!ent || !ent->locked) {
- /* Preempt enough things from the cache to make room */
- if(ent) {
- if(H5D__chunk_cache_evict(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache")
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+
+ /* In the case that some dataset functions look through this data,
+ * clear it to all 0s. */
+ HDmemset(chunk, 0, chunk_size);
+ } /* end if */
+ else {
+ /*
+ * Not in the cache. Count this as a miss if it's in the file
+ * or an init if it isn't.
+ */
+
+ /* Check if the chunk exists on disk */
+ if(H5F_addr_defined(chunk_addr)) {
+ size_t my_chunk_alloc = chunk_alloc; /* Allocated buffer size */
+ size_t buf_alloc = chunk_alloc; /* [Re-]allocated buffer size */
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->dxpl_id, chunk) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
+
+ if(pline->nused)
+ if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+
+ /* Increment # of cache misses */
+ rdcc->stats.nmisses++;
} /* end if */
- if(H5D__chunk_cache_prune(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, chunk_size) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache")
-
- /* Create a new entry */
- if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
-
- /* Initialize the new entry */
- ent->chunk_addr = chunk_addr;
- HDmemcpy(ent->offset, io_info->store->chunk.offset, sizeof(hsize_t) * layout->u.chunk.ndims);
- H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
- H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t);
- ent->chunk = (uint8_t *)chunk;
-
- /* Add it to the cache */
- HDassert(NULL == rdcc->slot[udata->idx_hint]);
- rdcc->slot[udata->idx_hint] = ent;
- ent->idx = udata->idx_hint;
- rdcc->nbytes_used += chunk_size;
- rdcc->nused++;
-
- /* Add it to the linked list */
- if(rdcc->tail) {
- rdcc->tail->next = ent;
- ent->prev = rdcc->tail;
- rdcc->tail = ent;
+ else {
+ H5D_fill_value_t fill_status;
+
+ /* Sanity check */
+ HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY);
+
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+
+ if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined")
+
+ if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
+ (fill->fill_time == H5D_FILL_TIME_IFSET &&
+ (fill_status == H5D_FILL_VALUE_USER_DEFINED ||
+ fill_status == H5D_FILL_VALUE_DEFAULT))) {
+ /*
+ * The chunk doesn't exist in the file. Replicate the fill
+ * value throughout the chunk, if the fill value is defined.
+ */
+
+ /* Initialize the fill value buffer */
+ /* (use the compact dataset storage buffer as the fill value buffer) */
+ if(H5D__fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
+ &dset->shared->dcpl_cache.fill, dset->shared->type,
+ dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
+ fb_info_init = TRUE;
+
+ /* Check for VL datatype & non-default fill value */
+ if(fb_info.has_vlen_fill_type)
+ /* Fill the buffer with VL datatype fill values */
+ if(H5D__fill_refill_vl(&fb_info, fb_info.elmts_per_buf, io_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "can't refill fill value buffer")
+ } /* end if */
+ else
+ HDmemset(chunk, 0, chunk_size);
+
+ /* Increment # of creations */
+ rdcc->stats.ninits++;
+ } /* end else */
+ } /* end else */
+
+ /* See if the chunk can be cached */
+ if(rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max) {
+ /* Calculate the index */
+ udata->idx_hint = H5D__chunk_hash_val(io_info->dset->shared, udata->common.scaled);
+
+ /* Add the chunk to the cache only if the slot is not already locked */
+ ent = rdcc->slot[udata->idx_hint];
+ if(!ent || !ent->locked) {
+ /* Preempt enough things from the cache to make room */
+ if(ent) {
+ if(H5D__chunk_cache_evict(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache")
+ } /* end if */
+ if(H5D__chunk_cache_prune(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, chunk_size) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache")
+
+ /* Create a new entry */
+ if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
+
+ /* Initialize the new entry */
+ ent->chunk_block.offset = chunk_addr;
+ ent->chunk_block.length = chunk_alloc;
+ HDmemcpy(ent->scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
+ H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
+ H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
+ ent->chunk = (uint8_t *)chunk;
+
+ /* Add it to the cache */
+ HDassert(NULL == rdcc->slot[udata->idx_hint]);
+ rdcc->slot[udata->idx_hint] = ent;
+ ent->idx = udata->idx_hint;
+ rdcc->nbytes_used += chunk_size;
+ rdcc->nused++;
+
+ /* Add it to the linked list */
+ if(rdcc->tail) {
+ rdcc->tail->next = ent;
+ ent->prev = rdcc->tail;
+ rdcc->tail = ent;
+ } /* end if */
+ else
+ rdcc->head = rdcc->tail = ent;
} /* end if */
else
- rdcc->head = rdcc->tail = ent;
- } /* end if */
- else
- /* We did not add the chunk to cache */
+ /* We did not add the chunk to cache */
+ ent = NULL;
+ } /* end else */
+ else /* No cache set up, or chunk is too large: chunk is uncacheable */
ent = NULL;
} /* end else */
- if(!ent)
- /*
- * The chunk cannot be placed in cache so we don't cache it. This is the
- * reason all those arguments have to be repeated for the unlock
- * function.
- */
- udata->idx_hint = UINT_MAX;
-
/* Lock the chunk into the cache */
if(ent) {
HDassert(!ent->locked);
ent->locked = TRUE;
chunk = ent->chunk;
} /* end if */
+ else
+ /*
+ * The chunk cannot be placed in cache so we don't cache it. This is the
+ * reason all those arguments have to be repeated for the unlock
+ * function.
+ */
+ udata->idx_hint = UINT_MAX;
/* Set return value */
ret_value = chunk;
@@ -3040,7 +3127,7 @@ done:
/* Release the chunk allocated, on error */
if(!ret_value)
if(chunk)
- chunk = H5D__chunk_xfree(chunk, pline);
+ chunk = H5D__chunk_mem_xfree(chunk, pline);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_lock() */
@@ -3093,9 +3180,10 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
HDmemset(&fake_ent, 0, sizeof(fake_ent));
fake_ent.dirty = TRUE;
- HDmemcpy(fake_ent.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(fake_ent.offset[0]));
+ HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
HDassert(layout->u.chunk.size > 0);
- fake_ent.chunk_addr = udata->addr;
+ fake_ent.chunk_block.offset = udata->chunk_block.offset;
+ fake_ent.chunk_block.length = udata->chunk_block.length;
fake_ent.chunk = (uint8_t *)chunk;
if(H5D__chunk_flush_entry(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, &fake_ent, TRUE) < 0)
@@ -3103,7 +3191,7 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
} /* end if */
else {
if(chunk)
- chunk = H5D__chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+ chunk = H5D__chunk_mem_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
} /* end else */
} /* end if */
else {
@@ -3240,9 +3328,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
const H5D_chunk_ops_t *ops = dset->shared->layout.storage.u.chunk.ops; /* Chunk operations */
- hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated */
- hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated */
- hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ hsize_t min_unalloc[H5O_LAYOUT_NDIMS]; /* First chunk in each dimension that is unallocated (in scaled coordinates) */
+ hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated (in scaled coordinates) */
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Offset of current chunk (in scaled coordinates) */
size_t orig_chunk_size; /* Original size of chunk in bytes */
size_t chunk_size; /* Actual size of chunk in bytes, possibly filtered */
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
@@ -3259,8 +3347,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
H5D_chunk_coll_info_t chunk_info; /* chunk address information for doing I/O */
#endif /* H5_HAVE_PARALLEL */
hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Dataset's dataspace dimensions */
const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
unsigned op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
@@ -3275,12 +3363,11 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
/* Retrieve the dataset dimensions */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
- space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
- /* The last dimension in chunk_offset is always 0 */
- chunk_offset[space_ndims] = (hsize_t)0;
+ /* The last dimension in scaled chunk coordinates is always 0 */
+ scaled[space_ndims] = (hsize_t)0;
/* Check if any space dimensions are 0, if so we do not have to do anything
*/
@@ -3308,7 +3395,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Get original chunk size */
- H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t);
/* Check the dataset's fill-value status */
if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
@@ -3331,8 +3418,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Initialize the fill value buffer */
/* (delay allocating fill buffer for VL datatypes until refilling) */
/* (casting away const OK - QAK) */
- if(H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_alloc,
- (void *)pline, (H5MM_free_t)H5D__chunk_xfree, (void *)pline,
+ if(H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc,
+ (void *)pline, (H5MM_free_t)H5D__chunk_mem_xfree, (void *)pline,
&dset->shared->dcpl_cache.fill, dset->shared->type,
dset->shared->type_id, (size_t)0, orig_chunk_size, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
@@ -3365,12 +3452,10 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Calculate the minimum and maximum chunk offsets in each dimension. Note
* that we assume here that all elements of space_dim are > 0. This is
- * checked at the top of this function */
- for(op_dim=0; op_dim<space_ndims; op_dim++) {
- min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
- / chunk_dim[op_dim]) * chunk_dim[op_dim];
- max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
- * chunk_dim[op_dim];
+ * checked at the top of this function. */
+ for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
+ min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
+ max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
} /* end for */
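
A small sketch (not part of this patch) of the scaled allocation bounds computed above, for a single made-up dimension after a dataset extend:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t old_dim   = 250;    /* made-up previous dimension size (elements) */
        uint64_t space_dim = 1000;   /* made-up new dimension size (elements) */
        uint32_t chunk_dim = 100;    /* made-up chunk size in this dimension */

        /* First and last chunk (scaled index) that may need allocation */
        uint64_t min_unalloc = (old_dim + chunk_dim - 1) / chunk_dim;   /* 3 */
        uint64_t max_unalloc = (space_dim - 1) / chunk_dim;             /* 9 */

        printf("allocate scaled chunks %llu..%llu\n",
               (unsigned long long)min_unalloc, (unsigned long long)max_unalloc);
        return 0;
    }
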
/* Loop over all chunks */
@@ -3390,8 +3475,11 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* Every time the algorithm finishes allocating chunks allocated beyond a
* certain dimension, max_unalloc is updated in order to avoid allocating
* those chunks again.
+ *
+ * Note that min_unalloc & max_unalloc are in scaled coordinates.
+ *
*/
- for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
+ for(op_dim = 0; op_dim < space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
int i; /* Local index variable */
@@ -3400,31 +3488,26 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
continue;
else {
/* Reset the chunk offset indices */
- HDmemset(chunk_offset, 0, ((unsigned)space_ndims * sizeof(chunk_offset[0])));
- chunk_offset[op_dim] = min_unalloc[op_dim];
+ HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0])));
+ scaled[op_dim] = min_unalloc[op_dim];
carry = FALSE;
} /* end else */
while(!carry) {
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
+
/* Reset size of chunk in bytes, in case filtered size changes */
chunk_size = orig_chunk_size;
#ifndef NDEBUG
/* None of the chunks should be allocated */
{
- hsize_t chunk_idx;
-
- /* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, chunk_offset,
- layout->u.chunk.dim, layout->u.chunk.down_chunks,
- &chunk_idx) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
-
- if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx, &udata) < 0)
+ /* Look up this chunk */
+ if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- HDassert(!H5F_addr_defined(udata.addr));
+ HDassert(!H5F_addr_defined(udata.chunk_block.offset));
} /* end block */
/* Make sure the chunk is really in the dataset and outside the
@@ -3433,9 +3516,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
unsigned u; /* Local index variable */
hbool_t outside_orig = FALSE;
- for(u = 0; u < (unsigned)space_ndims; u++) {
- HDassert(chunk_offset[u] < space_dim[u]);
- if(chunk_offset[u] >= old_dim[u])
+ for(u = 0; u < space_ndims; u++) {
+ HDassert((scaled[u] * chunk_dim[u]) < space_dim[u]);
+ if((scaled[u] * chunk_dim[u]) >= old_dim[u])
outside_orig = TRUE;
} /* end for */
HDassert(outside_orig);
@@ -3454,7 +3537,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* possible (though ill-advised) for the filter to shrink the
* buffer. */
if(fb_info.fill_buf_size < orig_chunk_size) {
- if(NULL == (fb_info.fill_buf = H5D__chunk_realloc(fb_info.fill_buf, orig_chunk_size, pline)))
+ if(NULL == (fb_info.fill_buf = H5D__chunk_mem_realloc(fb_info.fill_buf, orig_chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for raw data chunk")
fb_info.fill_buf_size = orig_chunk_size;
} /* end if */
@@ -3485,22 +3568,21 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Initialize the chunk information */
udata.common.layout = &layout->u.chunk;
udata.common.storage = &layout->storage.u.chunk;
- udata.common.offset = chunk_offset;
- udata.common.rdcc = NULL;
- udata.addr = HADDR_UNDEF;
- H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t);
+ udata.common.scaled = scaled;
+ udata.chunk_block.offset = HADDR_UNDEF;
+ H5_CHECKED_ASSIGN(udata.chunk_block.length, uint32_t, chunk_size, size_t);
udata.filter_mask = filter_mask;
/* Allocate the chunk (with all processes) */
- if((ops->insert)(&idx_info, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert record into chunk index")
- HDassert(H5F_addr_defined(udata.addr));
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
+ HDassert(H5F_addr_defined(udata.chunk_block.offset));
/* Check if fill values should be written to chunks */
if(should_fill) {
/* Sanity check */
HDassert(fb_info_init);
- HDassert(udata.nbytes == chunk_size);
+ HDassert(udata.chunk_block.length == chunk_size);
#ifdef H5_HAVE_PARALLEL
/* Check if this file is accessed with an MPI-capable file driver */
@@ -3513,7 +3595,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses")
/* Store the chunk's address for later */
- chunk_info.addr[chunk_info.num_io] = udata.addr;
+ chunk_info.addr[chunk_info.num_io] = udata.chunk_block.offset;
chunk_info.num_io++;
/* Indicate that blocks will be written */
@@ -3521,22 +3603,30 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, dxpl_id, fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
#endif /* H5_HAVE_PARALLEL */
} /* end if */
- /* Increment indices */
+ /* Insert the chunk record into the index */
+            /* (Note that, unlike serial operation, this isn't safe from a
+             * SWMR perspective. -QAK)
+ */
+ if(need_insert && ops->insert)
+ if((ops->insert)(&idx_info, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
+
+ /* Increment indices and adjust the edge chunk state */
carry = TRUE;
for(i = ((int)space_ndims - 1); i >= 0; --i) {
- chunk_offset[i] += chunk_dim[i];
- if(chunk_offset[i] > max_unalloc[i]) {
+ scaled[i]++;
+ if(scaled[i] > max_unalloc[i]) {
if((unsigned)i == op_dim)
- chunk_offset[i] = min_unalloc[i];
+ scaled[i] = min_unalloc[i];
else
- chunk_offset[i] = 0;
+ scaled[i] = 0;
} /* end if */
else {
carry = FALSE;
@@ -3551,7 +3641,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(min_unalloc[op_dim] == 0)
break;
else
- max_unalloc[op_dim] = min_unalloc[op_dim] - chunk_dim[op_dim];
+ max_unalloc[op_dim] = min_unalloc[op_dim] - 1;
} /* end for(op_dim=0...) */
#ifdef H5_HAVE_PARALLEL
@@ -3638,9 +3728,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
leftover_blocks = chunk_info->num_io % mpi_size;
/* Cast values to types needed by MPI */
- H5_ASSIGN_OVERFLOW(blocks, num_blocks, size_t, int);
- H5_ASSIGN_OVERFLOW(leftover, leftover_blocks, size_t, int);
- H5_ASSIGN_OVERFLOW(block_len, chunk_size, size_t, int);
+ H5_CHECKED_ASSIGN(blocks, int, num_blocks, size_t);
+ H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t);
+ H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t);
/* Allocate buffers */
/* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
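
These conversions swap H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) for H5_CHECKED_ASSIGN(dst, dsttype, src, srctype); note that the destination type now comes second. A rough stand-in for what such a checked narrowing assignment does (the real macro lives in H5private.h and differs in detail):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the renamed macro: assign src to dst and assert, in debug
 * builds, that the value survives the narrowing conversion.  Argument
 * order mirrors the new form: (dst, dsttype, src, srctype). */
#define CHECKED_ASSIGN(dst, dsttype, src, srctype) \
    do {                                           \
        assert((srctype)(dsttype)(src) == (src));  \
        (dst) = (dsttype)(src);                    \
    } while(0)

int main(void)
{
    size_t   nbytes = 4096;
    uint32_t len;

    CHECKED_ASSIGN(len, uint32_t, nbytes, size_t);  /* 4096 fits in 32 bits */
    printf("len = %u\n", (unsigned)len);
    return 0;
}
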
@@ -3753,7 +3843,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
unsigned rank = udata->common.layout->ndims - 1; /* Dataset rank */
- const hsize_t *chunk_offset = io_info->store->chunk.offset; /* Chunk offset */
+ const hsize_t *scaled = udata->common.scaled; /* Scaled chunk offset */
H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */
hssize_t sel_nelmts; /* Number of elements in selection */
hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */
@@ -3769,14 +3859,14 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
/* Get the chunk's size */
HDassert(layout->u.chunk.size > 0);
- H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t);
/* Get the info for the chunk in the file */
- if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset, io_info->store->chunk.index, &chk_udata) < 0)
+ if(H5D__chunk_lookup(dset, io_info->dxpl_id, scaled, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
/* If this chunk does not exist in cache or on disk, no need to do anything */
- if(!H5F_addr_defined(chk_udata.addr) && UINT_MAX == chk_udata.idx_hint)
+ if(!H5F_addr_defined(chk_udata.chunk_block.offset) && UINT_MAX == chk_udata.idx_hint)
HGOTO_DONE(SUCCEED)
/* Initialize the fill value buffer, if necessary */
@@ -3792,7 +3882,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
/* Compute the # of elements to leave with existing value, in each dimension */
for(u = 0; u < rank; u++) {
- count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u] - chunk_offset[u]));
+ count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u] - (scaled[u] * layout->u.chunk.dim[u])));
HDassert(count[u] > 0);
} /* end for */
@@ -3955,13 +4045,10 @@ done:
herr_t
H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
{
- hsize_t min_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first chunk to modify in each dimension */
- hsize_t max_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk to modify in each dimension */
- hssize_t max_fill_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk that might be filled in each dimension */
+ hsize_t min_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of first chunk to modify in each dimension */
+ hsize_t max_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk to modify in each dimension */
+ hssize_t max_fill_chunk_off[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk that might be filled in each dimension */
hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
- hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */
- int ndims_outside_fill = 0; /* Number of dimensions in chunk offset outside fill dimensions */
- hbool_t has_fill = FALSE; /* Whether there are chunks that must be filled */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@@ -3969,21 +4056,18 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
- H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
- int space_ndims; /* Dataset's space rank */
- hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Current dataspace dimensions */
unsigned op_dim; /* Current operating dimension */
hbool_t shrunk_dim[H5O_LAYOUT_NDIMS]; /* Dimensions which have shrunk */
H5D_chunk_it_ud1_t udata; /* Chunk index iterator user data */
hbool_t udata_init = FALSE; /* Whether the chunk index iterator user data has been initialized */
H5D_chunk_common_ud_t idx_udata; /* User data for index removal routine */
- H5D_chunk_ud_t chk_udata; /* User data for getting chunk info */
H5S_t *chunk_space = NULL; /* Dataspace for a chunk */
hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
- hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
- hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3999,13 +4083,11 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Go get the rank & dimensions (including the element size) */
- if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim,
- NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
- space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
- /* The last dimension in chunk_offset is always 0 */
- chunk_offset[space_ndims] = (hsize_t)0;
+ /* The last dimension in scaled is always 0 */
+ scaled[space_ndims] = (hsize_t)0;
/* Check if any old dimensions are 0, if so we do not have to do anything */
for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++)
@@ -4021,14 +4103,14 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */
/* (also compute the dimensions which have been shrunk) */
elmts_per_chunk = 1;
- for(u = 0; u < (unsigned)space_ndims; u++) {
+ for(u = 0; u < space_ndims; u++) {
elmts_per_chunk *= layout->u.chunk.dim[u];
chunk_dim[u] = layout->u.chunk.dim[u];
- shrunk_dim[u] = space_dim[u] < old_dim[u];
+ shrunk_dim[u] = (space_dim[u] < old_dim[u]);
} /* end for */
/* Create a dataspace for a chunk & set the extent */
- if(NULL == (chunk_space = H5S_create_simple((unsigned)space_ndims, chunk_dim, NULL)))
+ if(NULL == (chunk_space = H5S_create_simple(space_ndims, chunk_dim, NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
/* Reset hyperslab start array */
@@ -4036,9 +4118,9 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDmemset(hyper_start, 0, sizeof(hyper_start));
/* Set up chunked I/O info object, for operations on chunks (in callback)
- * Note that we only need to set chunk_offset once, as the array's address
+ * Note that we only need to set scaled once, as the array's address
* will never change. */
- chk_store.chunk.offset = chunk_offset;
+ chk_store.chunk.scaled = scaled;
H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL);
/* Compose chunked index info struct */
@@ -4052,7 +4134,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HDmemset(&udata, 0, sizeof udata);
udata.common.layout = &layout->u.chunk;
udata.common.storage = &layout->storage.u.chunk;
- udata.common.rdcc = rdcc;
+ udata.common.scaled = scaled;
udata.io_info = &chk_io_info;
udata.idx_info = &idx_info;
udata.space_dim = space_dim;
@@ -4074,16 +4156,14 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
/* Calculate the largest offset of chunks that might need to be
* modified in this dimension */
- max_mod_chunk_off[op_dim] = chunk_dim[op_dim] * ((old_dim[op_dim] - 1)
- / chunk_dim[op_dim]);
+ max_mod_chunk_off[op_dim] = (old_dim[op_dim] - 1) / chunk_dim[op_dim];
/* Calculate the largest offset of chunks that might need to be
* filled in this dimension */
if(0 == space_dim[op_dim])
max_fill_chunk_off[op_dim] = -1;
else
- max_fill_chunk_off[op_dim] = (hssize_t)(chunk_dim[op_dim]
- * ((MIN(space_dim[op_dim], old_dim[op_dim]) - 1)
+ max_fill_chunk_off[op_dim] = (hssize_t)(((MIN(space_dim[op_dim], old_dim[op_dim]) - 1)
/ chunk_dim[op_dim]));
if(shrunk_dim[op_dim]) {
@@ -4091,14 +4171,11 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
* modified in this dimension. Note that this array contains
* garbage for all dimensions which are not shrunk. These locations
* must not be read from! */
- min_mod_chunk_off[op_dim] = chunk_dim[op_dim] * (space_dim[op_dim]
- / chunk_dim[op_dim]);
+ min_mod_chunk_off[op_dim] = space_dim[op_dim] / chunk_dim[op_dim];
/* Determine if we need to fill chunks in this dimension */
- if((hssize_t)min_mod_chunk_off[op_dim] == max_fill_chunk_off[op_dim]) {
+ if((hssize_t)min_mod_chunk_off[op_dim] == max_fill_chunk_off[op_dim])
fill_dim[op_dim] = TRUE;
- has_fill = TRUE;
- } /* end if */
else
fill_dim[op_dim] = FALSE;
} /* end if */
@@ -4106,69 +4183,55 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
fill_dim[op_dim] = FALSE;
} /* end for */
- /* Check the cache for any entries that are outside the bounds. Mark these
- * entries as deleted so they are not flushed to disk accidentally. This is
- * only necessary if there are chunks that need to be filled. */
- if(has_fill)
- for(ent = rdcc->head; ent; ent = ent->next)
- /* Check for chunk offset outside of new dimensions */
- for(u = 0; u < (unsigned)space_ndims; u++)
- if((hsize_t)ent->offset[u] >= space_dim[u]) {
- /* Mark the entry as "deleted" */
- ent->deleted = TRUE;
- break;
- } /* end if */
-
/* Main loop: fill or remove chunks */
for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
+ hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */
+ int ndims_outside_fill; /* Number of dimensions in chunk offset outside fill dimensions */
+        hbool_t carry; /* Flag to indicate that chunk increment carries to higher dimension (sorta) */
+
/* Check if modification along this dimension is really necessary */
if(!shrunk_dim[op_dim])
continue;
else {
- HDassert((hsize_t) max_mod_chunk_off[op_dim] >= min_mod_chunk_off[op_dim]);
+ HDassert(max_mod_chunk_off[op_dim] >= min_mod_chunk_off[op_dim]);
/* Reset the chunk offset indices */
- HDmemset(chunk_offset, 0, ((unsigned)space_ndims * sizeof(chunk_offset[0])));
- chunk_offset[op_dim] = min_mod_chunk_off[op_dim];
+ HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0])));
+ scaled[op_dim] = min_mod_chunk_off[op_dim];
/* Initialize "dims_outside_fill" array */
ndims_outside_fill = 0;
- for(u = 0; u < (unsigned)space_ndims; u++)
- if((hssize_t)chunk_offset[u] > max_fill_chunk_off[u]) {
+ for(u = 0; u < space_ndims; u++)
+ if((hssize_t)scaled[u] > max_fill_chunk_off[u]) {
dims_outside_fill[u] = TRUE;
ndims_outside_fill++;
} /* end if */
else
dims_outside_fill[u] = FALSE;
-
- carry = FALSE;
} /* end if */
+ carry = FALSE;
while(!carry) {
int i; /* Local index variable */
- /* Calculate the index of this chunk */
- if(H5VM_chunk_index((unsigned)space_ndims, chunk_offset,
- layout->u.chunk.dim, layout->u.chunk.down_chunks,
- &(chk_io_info.store->chunk.index)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
-
if(0 == ndims_outside_fill) {
HDassert(fill_dim[op_dim]);
- HDassert(chunk_offset[op_dim] == min_mod_chunk_off[op_dim]);
+ HDassert(scaled[op_dim] == min_mod_chunk_off[op_dim]);
/* Fill the unused parts of the chunk */
if(H5D__chunk_prune_fill(&udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
+ H5D_chunk_ud_t chk_udata; /* User data for getting chunk info */
+
#ifndef NDEBUG
/* Make sure this chunk is really outside the new dimensions */
{
hbool_t outside_dim = FALSE;
- for(u = 0; u < (unsigned)space_ndims; u++)
- if(chunk_offset[u] >= space_dim[u]) {
+ for(u = 0; u < space_ndims; u++)
+ if((scaled[u] * chunk_dim[u]) >= space_dim[u]) {
outside_dim = TRUE;
break;
} /* end if */
@@ -4177,7 +4240,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
#endif /* NDEBUG */
/* Check if the chunk exists in cache or on disk */
- if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chk_io_info.store->chunk.index, &chk_udata) < 0)
+ if(H5D__chunk_lookup(dset, dxpl_id, scaled, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk")
/* Evict the entry from the cache if present, but do not flush
@@ -4187,9 +4250,9 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
/* Remove the chunk from disk, if present */
- if(H5F_addr_defined(chk_udata.addr)) {
+ if(H5F_addr_defined(chk_udata.chunk_block.offset)) {
/* Update the offset in idx_udata */
- idx_udata.offset = chunk_offset;
+ idx_udata.scaled = scaled;
/* Remove the chunk from disk */
if((layout->storage.u.chunk.ops->remove)(&idx_info, &idx_udata) < 0)
@@ -4200,19 +4263,19 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* Increment indices */
carry = TRUE;
for(i = (int)(space_ndims - 1); i >= 0; --i) {
- chunk_offset[i] += chunk_dim[i];
- if(chunk_offset[i] > (hsize_t) max_mod_chunk_off[i]) {
+ scaled[i]++;
+ if(scaled[i] > max_mod_chunk_off[i]) {
/* Left maximum dimensions, "wrap around" and check if this
* dimension is no longer outside the fill dimension */
if((unsigned)i == op_dim) {
- chunk_offset[i] = min_mod_chunk_off[i];
+ scaled[i] = min_mod_chunk_off[i];
if(dims_outside_fill[i] && fill_dim[i]) {
dims_outside_fill[i] = FALSE;
ndims_outside_fill--;
} /* end if */
} /* end if */
else {
- chunk_offset[i] = 0;
+ scaled[i] = 0;
if(dims_outside_fill[i] && max_fill_chunk_off[i] >= 0) {
dims_outside_fill[i] = FALSE;
ndims_outside_fill--;
@@ -4221,7 +4284,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
} /* end if */
else {
/* Check if we just went outside the fill dimension */
- if(!dims_outside_fill[i] && (hssize_t)chunk_offset[i] > max_fill_chunk_off[i]) {
+ if(!dims_outside_fill[i] && (hssize_t)scaled[i] > max_fill_chunk_off[i]) {
dims_outside_fill[i] = TRUE;
ndims_outside_fill++;
} /* end if */
@@ -4239,7 +4302,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
if(min_mod_chunk_off[op_dim] == 0)
break;
else
- max_mod_chunk_off[op_dim] = min_mod_chunk_off[op_dim] - chunk_dim[op_dim];
+ max_mod_chunk_off[op_dim] = min_mod_chunk_off[op_dim] - 1;
} /* end for(op_dim=0...) */
/* Reset any cached chunk info for this dataset */
@@ -4282,8 +4345,7 @@ H5D__chunk_addrmap_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
FUNC_ENTER_STATIC
/* Compute the index for this chunk */
- if(H5VM_chunk_index(rank, chunk_rec->offset, udata->common.layout->dim, udata->common.layout->down_chunks, &chunk_index) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, H5_ITER_ERROR, "can't get chunk index")
+ chunk_index = H5VM_array_offset_pre(rank, udata->common.layout->down_chunks, chunk_rec->scaled);
/* Set it in the userdata to return */
udata->chunk_addr[chunk_index] = chunk_rec->chunk_addr;
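
With chunk offsets replaced by scaled coordinates, the chunk index reduces to a row-major linearization against the precomputed down_chunks products, which is roughly what H5VM_array_offset_pre() computes above. A small self-contained sketch with made-up dimensions and a hypothetical helper name:

#include <stdio.h>

/* Hypothetical helper: row-major linearization of scaled chunk coordinates,
 * where down[u] is the number of chunks spanned by one step in dimension u
 * (a precomputed suffix product, like layout->u.chunk.down_chunks). */
static unsigned long long
chunk_linear_index(unsigned rank, const unsigned long long *down,
                   const unsigned long long *scaled)
{
    unsigned long long idx = 0;
    unsigned u;

    for(u = 0; u < rank; u++)
        idx += scaled[u] * down[u];

    return idx;
}

int main(void)
{
    /* 3 x 4 chunks in a 2-D dataset: down = {4, 1} */
    unsigned long long down[2]   = {4, 1};
    unsigned long long scaled[2] = {2, 3};          /* third row, fourth column */

    printf("chunk index = %llu\n", chunk_linear_index(2, down, scaled));  /* 11 */
    return 0;
}
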
@@ -4324,7 +4386,6 @@ H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[])
HDmemset(&udata, 0, sizeof(udata));
udata.common.layout = &dset->shared->layout.u.chunk;
udata.common.storage = &dset->shared->layout.storage.u.chunk;
- udata.common.rdcc = &(dset->shared->cache.chunk);
udata.chunk_addr = chunk_addr;
/* Compose chunked index info struct */
@@ -4440,10 +4501,8 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
{
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent, *next; /*cache entry */
- H5D_rdcc_ent_t *old_ent; /* Old cache entry */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
- unsigned rank; /* Current # of dimensions */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -4452,13 +4511,8 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
HDassert(dset && H5D_CHUNKED == dset->shared->layout.type);
HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- /* Get the rank */
- rank = dset->shared->layout.u.chunk.ndims-1;
- HDassert(rank > 0);
-
- /* 1-D dataset's chunks can't have their index change */
- if(rank == 1)
- HGOTO_DONE(SUCCEED)
+ /* Check the rank */
+ HDassert((dset->shared->layout.u.chunk.ndims - 1) > 1);
/* Fill the DXPL cache values for later use */
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
@@ -4466,43 +4520,54 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
/* Recompute the index for each cached chunk that is in a dataset */
for(ent = rdcc->head; ent; ent = next) {
- hsize_t idx; /* Chunk index */
unsigned old_idx; /* Previous index number */
/* Get the pointer to the next cache entry */
next = ent->next;
- /* Calculate the index of this chunk */
- if(H5VM_chunk_index(rank, ent->offset, dset->shared->layout.u.chunk.dim, dset->shared->layout.u.chunk.down_chunks, &idx) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
-
/* Compute the index for the chunk entry */
old_idx = ent->idx; /* Save for later */
- ent->idx = H5D_CHUNK_HASH(dset->shared, idx);
+ ent->idx = H5D__chunk_hash_val(dset->shared, ent->scaled);
if(old_idx != ent->idx) {
+ H5D_rdcc_ent_t *old_ent; /* Old cache entry */
+
/* Check if there is already a chunk at this chunk's new location */
old_ent = rdcc->slot[ent->idx];
if(old_ent != NULL) {
- HDassert(old_ent->locked == 0);
-
- /* Check if we are removing the entry we would walk to next */
- if(old_ent == next)
- next = old_ent->next;
+ HDassert(old_ent->locked == FALSE);
+ HDassert(old_ent->deleted == FALSE);
- /* Remove the old entry from the cache */
- if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, old_ent, TRUE) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
+ /* Mark the old entry as deleted, but do not evict (yet).
+ * Make sure we do not make any calls to the index
+ * until all chunks have updated indices! */
+ old_ent->deleted = TRUE;
} /* end if */
/* Insert this chunk into correct location in hash table */
rdcc->slot[ent->idx] = ent;
- /* Null out previous location */
- rdcc->slot[old_idx] = NULL;
+ /* If this chunk was previously marked as deleted and therefore
+ * not in the hash table, reset the deleted flag.
+ * Otherwise clear the old hash table slot. */
+ if(ent->deleted)
+ ent->deleted = FALSE;
+ else
+ rdcc->slot[old_idx] = NULL;
} /* end if */
} /* end for */
+ /* Evict chunks that are still marked as deleted */
+ for(ent = rdcc->head; ent; ent = next) {
+ /* Get the pointer to the next cache entry */
+ next = ent->next;
+
+ /* Remove the old entry from the cache */
+ if(ent->deleted)
+ if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
+ } /* end for */
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_update_cache() */
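
The rewritten H5D__chunk_update_cache() defers evictions to a second pass: an entry displaced from its new hash slot is only marked deleted, so the slot array stays consistent until every cached chunk has its updated index. A toy, self-contained model of that two-pass rehash (stand-in types and hash function, not the library's):

#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 4

struct ent { struct ent *next; unsigned idx; bool deleted; unsigned key; };

/* Toy hash: in the library this depends on the scaled dataset dimensions,
 * which is why extending the dataset can move entries between slots. */
static unsigned hash_of(unsigned key) { return key % NSLOTS; }

int main(void)
{
    struct ent b = { NULL, 3, false, 9 };           /* hashed under the old parameters */
    struct ent a = { &b,   1, false, 5 };
    struct ent *slot[NSLOTS] = { NULL, &a, NULL, &b };
    struct ent *e, *next;

    /* Pass 1: recompute every entry's slot, deferring evictions */
    for(e = &a; e; e = e->next) {
        unsigned old_idx = e->idx;

        e->idx = hash_of(e->key);
        if(old_idx != e->idx) {
            if(slot[e->idx] != NULL)
                slot[e->idx]->deleted = true;       /* displaced: evict in pass 2 */
            slot[e->idx] = e;
            if(e->deleted)
                e->deleted = false;                 /* was displaced earlier, not in table */
            else
                slot[old_idx] = NULL;
        } /* end if */
    } /* end for */

    /* Pass 2: "evict" whatever is still marked deleted */
    for(e = &a; e; e = next) {
        next = e->next;
        if(e->deleted)
            printf("evicting cache entry with key %u\n", e->key);
    } /* end for */

    return 0;
}
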
@@ -4528,6 +4593,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
H5D_chunk_ud_t udata_dst; /* User data about new destination chunk */
hbool_t is_vlen = FALSE; /* Whether datatype is variable-length */
hbool_t fix_ref = FALSE; /* Whether to fix up references in the dest. file */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
/* General information about chunk copy */
void *bkg = udata->bkg; /* Background buffer for datatype conversion */
@@ -4545,7 +4611,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
FUNC_ENTER_STATIC
/* Get 'size_t' local value for number of bytes in chunk */
- H5_ASSIGN_OVERFLOW(nbytes, chunk_rec->nbytes, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t);
/* Check parameter for type conversion */
if(udata->do_convert) {
@@ -4648,10 +4714,9 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
/* Set up destination chunk callback information for insertion */
udata_dst.common.layout = udata->idx_info_dst->layout;
udata_dst.common.storage = udata->idx_info_dst->storage;
- udata_dst.common.offset = chunk_rec->offset;
- udata_dst.common.rdcc = NULL;
- udata_dst.addr = HADDR_UNDEF;
- udata_dst.nbytes = chunk_rec->nbytes;
+ udata_dst.common.scaled = chunk_rec->scaled;
+ udata_dst.chunk_block.offset = HADDR_UNDEF;
+ udata_dst.chunk_block.length = chunk_rec->nbytes;
udata_dst.filter_mask = chunk_rec->filter_mask;
/* Need to compress variable-length & reference data elements before writing to file */
@@ -4663,27 +4728,31 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
if(nbytes > ((size_t)0xffffffff))
HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length")
#endif /* H5_SIZEOF_SIZE_T > 4 */
- H5_ASSIGN_OVERFLOW(udata_dst.nbytes, nbytes, size_t, uint32_t);
+ H5_CHECKED_ASSIGN(udata_dst.chunk_block.length, uint32_t, nbytes, size_t);
udata->buf = buf;
udata->buf_size = buf_size;
} /* end if */
+ /* Allocate chunk in the file */
+ if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
+
+ /* Write chunk data to destination file */
+ HDassert(H5F_addr_defined(udata_dst.chunk_block.offset));
+ if(H5F_block_write(udata->idx_info_dst->f, H5FD_MEM_DRAW, udata_dst.chunk_block.offset, nbytes, udata->idx_info_dst->dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write raw data to file")
+
/* Set metadata tag in dxpl_id */
H5_BEGIN_TAG(udata->idx_info_dst->dxpl_id, H5AC__COPIED_TAG, H5_ITER_ERROR);
- /* Insert chunk into the destination index */
- if(udata->idx_info_dst->storage->ops->insert)
+ /* Insert chunk record into index */
+ if(need_insert && udata->idx_info_dst->storage->ops->insert)
if((udata->idx_info_dst->storage->ops->insert)(udata->idx_info_dst, &udata_dst) < 0)
HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index")
/* Reset metadata tag in dxpl_id */
H5_END_TAG(H5_ITER_ERROR);
- /* Write chunk data to destination file */
- HDassert(H5F_addr_defined(udata_dst.addr));
- if(H5F_block_write(udata->idx_info_dst->f, H5FD_MEM_DRAW, udata_dst.addr, nbytes, udata->idx_info_dst->dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write raw data to file")
-
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_copy_cb() */
@@ -4761,7 +4830,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
/* Get the dim info for dataset */
if((sndims = H5S_extent_get_dims(ds_extent_src, curr_dims, NULL)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
- H5_ASSIGN_OVERFLOW(ndims, sndims, int, unsigned);
+ H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int);
/* Set the source layout chunk information */
if(H5D__chunk_set_info_real(layout_src, ndims, curr_dims) < 0)
@@ -4869,7 +4938,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
do_convert = TRUE;
} /* end if */
- H5_ASSIGN_OVERFLOW(buf_size, layout_src->size, uint32_t, size_t);
+ H5_CHECKED_ASSIGN(buf_size, size_t, layout_src->size, uint32_t);
reclaim_buf_size = 0;
} /* end else */
@@ -4894,7 +4963,6 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
HDmemset(&udata, 0, sizeof udata);
udata.common.layout = layout_src;
udata.common.storage = storage_src;
- udata.common.rdcc = NULL;
udata.file_src = f_src;
udata.idx_info_dst = &idx_info_dst;
udata.buf = buf;
@@ -5029,7 +5097,7 @@ H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
/* Print information about this chunk */
HDfprintf(udata->stream, " 0x%08x %8Zu %10a [", chunk_rec->filter_mask, chunk_rec->nbytes, chunk_rec->chunk_addr);
for(u = 0; u < udata->ndims; u++)
- HDfprintf(udata->stream, "%s%Hd", (u ? ", " : ""), chunk_rec->offset[u]);
+ HDfprintf(udata->stream, "%s%Hu", (u ? ", " : ""), (chunk_rec->scaled[u] * udata->chunk_dim[u]));
HDfputs("]\n", udata->stream);
} /* end if */
@@ -5081,6 +5149,7 @@ H5D__chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream)
udata.stream = stream;
udata.header_displayed = FALSE;
udata.ndims = dset->shared->layout.u.chunk.ndims;
+ udata.chunk_dim = dset->shared->layout.u.chunk.dim;
/* Iterate over index and dump chunk info */
if((dset->shared->layout.storage.u.chunk.ops->iterate)(&idx_info, H5D__chunk_dump_index_cb, &udata) < 0)
@@ -5325,3 +5394,114 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__nonexistent_readvv() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_file_alloc()
+ *
+ * Purpose: Chunk allocation:
+ * Create the chunk if it doesn't exist, or reallocate the
+ * chunk if its size changed.
+ *              The code was moved and adapted from the individual chunk
+ *              index implementations.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
+ H5F_block_t *new_chunk, hbool_t *need_insert)
+{
+ hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(new_chunk);
+ HDassert(need_insert);
+
+ /* Check for filters on chunks */
+ if(idx_info->pline->nused > 0) {
+ /* Sanity/error checking block */
+ {
+ unsigned allow_chunk_size_len; /* Allowed size of encoded chunk size */
+ unsigned new_chunk_size_len; /* Size of encoded chunk size */
+
+ /* Compute the size required for encoding the size of a chunk, allowing
+ * for an extra byte, in case the filter makes the chunk larger.
+ */
+ allow_chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)(idx_info->layout->size)) + 8) / 8);
+ if(allow_chunk_size_len > 8)
+ allow_chunk_size_len = 8;
+
+ /* Compute encoded size of chunk */
+ new_chunk_size_len = (H5VM_log2_gen((uint64_t)(new_chunk->length)) + 8) / 8;
+ if(new_chunk_size_len > 8)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "encoded chunk size is more than 8 bytes?!?")
+
+ /* Check if the chunk became too large to be encoded */
+ if(new_chunk_size_len > allow_chunk_size_len)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk size can't be encoded")
+ } /* end block */
+
+ if(old_chunk && H5F_addr_defined(old_chunk->offset)) {
+ /* Sanity check */
+ HDassert(!H5F_addr_defined(new_chunk->offset) || H5F_addr_eq(new_chunk->offset, old_chunk->offset));
+
+ /* Check for chunk being same size */
+ if(new_chunk->length != old_chunk->length) {
+ /* Release previous chunk */
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, old_chunk->offset, old_chunk->length) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ alloc_chunk = TRUE;
+ } /* end if */
+ else {
+ /* Don't need to reallocate chunk, but send its address back up */
+ if(!H5F_addr_defined(new_chunk->offset))
+ new_chunk->offset = old_chunk->offset;
+ } /* end else */
+ } /* end if */
+ else {
+ HDassert(!H5F_addr_defined(new_chunk->offset));
+ alloc_chunk = TRUE;
+ } /* end else */
+ } /* end if */
+ else {
+ HDassert(!H5F_addr_defined(new_chunk->offset));
+ HDassert(new_chunk->length == idx_info->layout->size);
+ alloc_chunk = TRUE;
+ } /* end else */
+
+ /* Actually allocate space for the chunk in the file */
+ if(alloc_chunk) {
+ switch(idx_info->storage->idx_type) {
+ case H5D_CHUNK_IDX_BTREE:
+ HDassert(new_chunk->length > 0);
+ H5_CHECK_OVERFLOW(new_chunk->length, /*From: */uint32_t, /*To: */hsize_t);
+ new_chunk->offset = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, (hsize_t)new_chunk->length);
+ if(!H5F_addr_defined(new_chunk->offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed")
+ *need_insert = TRUE;
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ default:
+ HDassert(0 && "This should never be executed!");
+ break;
+ } /* end switch */
+ } /* end if */
+
+ HDassert(H5F_addr_defined(new_chunk->offset));
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__chunk_file_alloc() */
+
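
The new helper centralizes the allocate-or-reallocate decision that each chunk index previously made on its own; callers then insert into the index only when need_insert comes back true. A condensed, runnable model of that decision flow, with a toy allocator standing in for H5MF_alloc()/H5MF_xfree():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_UNDEF UINT64_MAX

struct block { uint64_t offset; uint32_t length; };

static uint64_t next_addr = 8192;                   /* toy allocator state */

/* Condensed model of the helper's decision: reuse the old allocation when
 * the (filtered) chunk size is unchanged, otherwise allocate fresh space
 * (the real helper also frees the old block), and report whether the
 * chunk index must be told about a new address. */
static bool
chunk_file_alloc(bool filtered, const struct block *old_chunk, struct block *new_chunk)
{
    bool need_insert = false;
    bool alloc = false;

    if(filtered && old_chunk && old_chunk->offset != ADDR_UNDEF) {
        if(new_chunk->length != old_chunk->length)
            alloc = true;                           /* size changed: reallocate */
        else
            new_chunk->offset = old_chunk->offset;  /* same size: reuse old space */
    } /* end if */
    else
        alloc = true;                               /* unfiltered or brand-new chunk */

    if(alloc) {
        new_chunk->offset = next_addr;              /* toy H5MF_alloc() */
        next_addr += new_chunk->length;
        need_insert = true;                         /* index must record the new address */
    } /* end if */

    return need_insert;
}

int main(void)
{
    struct block old   = { 4096, 512 };
    struct block grown = { ADDR_UNDEF, 700 };       /* filter made the chunk bigger */

    if(chunk_file_alloc(true, &old, &grown))
        printf("chunk reallocated to offset %llu, reinsert into index\n",
               (unsigned long long)grown.offset);
    return 0;
}
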
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 789beab..af70c07 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -174,11 +174,8 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
hssize_t stmp_size; /* Temporary holder for raw data size */
hsize_t tmp_size; /* Temporary holder for raw data size */
hsize_t max_comp_data_size; /* Max. allowed size of compact data */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -187,11 +184,9 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
HDassert(dset);
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
- for(i = 0; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible compact dataset")
+ for(u = 0; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible compact dataset not allowed")
/*
* Compact dataset is stored in dataset object header message of
@@ -202,7 +197,7 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
tmp_size = H5T_get_size(dset->shared->type);
HDassert(tmp_size > 0);
tmp_size = tmp_size * (hsize_t)stmp_size;
- H5_ASSIGN_OVERFLOW(dset->shared->layout.storage.u.compact.size, tmp_size, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(dset->shared->layout.storage.u.compact.size, size_t, tmp_size, hssize_t);
/* Verify data size is smaller than maximum header message size
* (64KB) minus other layout message fields.
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index dc09768..898b46d 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -259,7 +259,7 @@ H5D__contig_fill(const H5D_t *dset, hid_t dxpl_id)
/* Get the number of elements in the dataset's dataspace */
if((snpoints = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "dataset has negative number of elements")
- H5_ASSIGN_OVERFLOW(npoints, snpoints, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(npoints, size_t, snpoints, hssize_t);
/* Initialize the fill value buffer */
if(H5D__fill_init(&fb_info, NULL, NULL, NULL, NULL, NULL,
@@ -396,10 +396,7 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset)
size_t dt_size; /* Size of datatype */
hsize_t tmp_size; /* Temporary holder for raw data size */
size_t tmp_sieve_buf_size; /* Temporary holder for sieve buffer size */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -415,11 +412,9 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset)
*/
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
- for(i = 0; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible contiguous non-external dataset")
+ for(u = 0; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible contiguous non-external dataset not allowed")
/* Retrieve the number of elements in the dataspace */
if((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
@@ -649,6 +644,7 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
size_t sieve_size = (size_t)-1; /* Size of sieve buffer */
haddr_t rel_eoa; /* Relative end of file address */
hsize_t max_data; /* Actual maximum size of data to cache */
+ hsize_t min; /* temporary minimum value (avoids some ugly macro nesting) */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -689,7 +685,8 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
max_data = store_contig->dset_size - dst_off;
/* Compute the size of the sieve buffer */
- H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Read the new sieve buffer */
if(H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, udata->dxpl_id, dset_contig->sieve_buf) < 0)
@@ -762,9 +759,13 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
/* Only need this when resizing sieve buffer */
max_data = store_contig->dset_size - dst_off;
- /* Compute the size of the sieve buffer */
- /* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
- H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+ /* Compute the size of the sieve buffer.
+ * Don't read off the end of the file, don't read past
+ * the end of the data element, and don't read more than
+ * the buffer size.
+ */
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Update local copies of sieve information */
sieve_start = dset_contig->sieve_loc;
@@ -920,6 +921,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
size_t sieve_size = (size_t)-1; /* size of sieve buffer */
haddr_t rel_eoa; /* Relative end of file address */
hsize_t max_data; /* Actual maximum size of data to cache */
+ hsize_t min; /* temporary minimum value (avoids some ugly macro nesting) */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -965,7 +967,8 @@ if(dset_contig->sieve_size > len)
max_data = store_contig->dset_size - dst_off;
/* Compute the size of the sieve buffer */
- H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Check if there is any point in reading the data from the file */
if(dset_contig->sieve_size > len) {
@@ -1080,9 +1083,13 @@ if(dset_contig->sieve_size > len)
/* Only need this when resizing sieve buffer */
max_data = store_contig->dset_size - dst_off;
- /* Compute the size of the sieve buffer */
- /* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
- H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+ /* Compute the size of the sieve buffer.
+ * Don't read off the end of the file, don't read past
+ * the end of the data element, and don't read more than
+ * the buffer size.
+ */
+ min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+ H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);
/* Update local copies of sieve information */
sieve_start = dset_contig->sieve_loc;
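
The sieve-buffer hunks above pull the MIN3() result into a temporary before the checked assignment rather than nesting the macros. A tiny standalone example of the clamping computation, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

#define MIN(a,b)     ((a) < (b) ? (a) : (b))
#define MIN3(a,b,c)  MIN(MIN(a,b),(c))

int main(void)
{
    /* Hypothetical numbers: clamp the sieve buffer so it never reads past
     * EOF, past the end of the data element, or past the buffer itself. */
    uint64_t rel_eoa   = 1000000;    /* end of allocated file space */
    uint64_t sieve_loc = 999000;     /* where the sieve starts reading */
    uint64_t max_data  = 4096;       /* bytes left in the data element */
    uint64_t buf_size  = 65536;      /* configured sieve buffer size */
    uint64_t min;
    size_t   sieve_size;

    min = MIN3(rel_eoa - sieve_loc, max_data, buf_size);
    sieve_size = (size_t)min;        /* checked narrowing in the real code */

    printf("sieve reads %zu bytes\n", sieve_size);   /* 1000 */
    return 0;
}
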
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index f4bde4b..cd2bc84 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -313,9 +313,7 @@ static herr_t
H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
{
htri_t changed; /* Flag to indicate that the dataspace was successfully extended */
- H5S_t *space; /* Dataset's dataspace */
- int rank; /* Dataspace # of dimensions */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
+ hsize_t old_dims[H5S_MAX_RANK]; /* Current (i.e. old, if changed) dimension sizes */
H5O_fill_t *fill; /* Dataset's fill value */
herr_t ret_value = SUCCEED; /* Return value */
@@ -336,29 +334,82 @@ H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
*/
/* Retrieve the current dimensions */
- space = dataset->shared->space;
- if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+ HDcompile_assert(sizeof(old_dims) == sizeof(dataset->shared->curr_dims));
+ HDmemcpy(old_dims, dataset->shared->curr_dims, H5S_MAX_RANK * sizeof(old_dims[0]));
/* Increase the size of the data space */
- if((changed = H5S_extend(space, size)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of data space")
+ if((changed = H5S_extend(dataset->shared->space, size)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to increase size of dataspace")
/* Updated the dataset's info if the dataspace was successfully extended */
if(changed) {
+ /* Get the extended dimension sizes */
+ /* (Need to retrieve this here, since the 'size' dimensions could
+ * extend one dimension but be smaller in a different dimension,
+ * and the dataspace's extent is the larger of the current and
+ * 'size' dimension values. - QAK)
+ */
+ if(H5S_get_simple_extent_dims(dataset->shared->space, dataset->shared->curr_dims, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dataset->shared->layout.type) {
+ hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */
+
+ /* Check if we need to track & update scaled dimension information */
+ if(dataset->shared->ndims > 1) {
+            unsigned u; /* Local index variable */
+
+ /* Update scaled chunk information */
+ for(u = 0; u < dataset->shared->ndims; u++) {
+ hsize_t scaled; /* Scaled value */
+
+ /* Compute the scaled dimension size value */
+ scaled = size[u] / dataset->shared->layout.u.chunk.dim[u];
+
+ /* Check if scaled dimension size changed */
+ if(scaled != dataset->shared->cache.chunk.scaled_dims[u]) {
+ hsize_t scaled_power2up; /* New size value, rounded to next power of 2 */
+
+ /* Update the scaled dimension size value for the current dimension */
+ dataset->shared->cache.chunk.scaled_dims[u] = scaled;
+
+ /* Check if algorithm for computing hash values will change */
+ if((scaled > dataset->shared->cache.chunk.nslots &&
+ dataset->shared->cache.chunk.scaled_dims[u] <= dataset->shared->cache.chunk.nslots)
+ || (scaled <= dataset->shared->cache.chunk.nslots &&
+ dataset->shared->cache.chunk.scaled_dims[u] > dataset->shared->cache.chunk.nslots))
+ update_chunks = TRUE;
+
+ /* Check if the number of bits required to encode the scaled size value changed */
+ if(dataset->shared->cache.chunk.scaled_power2up[u] != (scaled_power2up = H5VM_power2up(scaled))) {
+ /* Update the 'power2up' & 'encode_bits' values for the current dimension */
+ dataset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up;
+ dataset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up);
+
+ /* Indicate that the chunk cache indices should be updated */
+ update_chunks = TRUE;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ } /* end if */
+
+ /* Update general information for chunks */
if(H5D__chunk_set_info(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
- if(H5D__chunk_update_cache(dataset, dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+
+ /* Check for updating chunk cache indices */
+ if(update_chunks) {
+ /* Update the chunk cache indices */
+ if(H5D__chunk_update_cache(dataset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+ } /* end if */
} /* end if */
/* Allocate space for the new parts of the dataset, if appropriate */
fill = &dataset->shared->dcpl_cache.fill;
if(fill->alloc_time == H5D_ALLOC_TIME_EARLY)
- if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE,
- curr_dims) < 0)
+ if(H5D__alloc_storage(dataset, dxpl_id, H5D_ALLOC_EXTEND, FALSE, old_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
/* Mark the dataspace as dirty, for later writing to the file */
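
The H5D__extend() changes above (and the matching H5D__set_extent() changes later in the patch) recompute, per dimension, the scaled size, its next power of two, and the bits needed to encode it, and rebuild the cached chunk indices only when one of those values changes. A standalone sketch of those computations, with local stand-ins for H5VM_power2up()/H5VM_log2_gen() and made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for H5VM_power2up(): smallest power of two >= n. */
static uint64_t power2up(uint64_t n)
{
    uint64_t p = 1;

    while(p < n)
        p <<= 1;
    return p;
}

/* Local stand-in for H5VM_log2_gen(): position of the highest set bit. */
static unsigned log2_gen(uint64_t n)
{
    unsigned r = 0;

    while(n >>= 1)
        r++;
    return r;
}

int main(void)
{
    /* Hypothetical extend: one dimension grows from 100 to 250 elements with
     * a chunk edge of 30, so its scaled size goes from 3 to 8 chunks. */
    uint64_t scaled = 250 / 30;                     /* = 8 */
    uint64_t p2     = power2up(scaled);             /* = 8 */
    unsigned bits   = log2_gen(p2);                 /* = 3 */

    printf("scaled=%llu power2up=%llu encode_bits=%u\n",
           (unsigned long long)scaled, (unsigned long long)p2, bits);
    return 0;
}
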
diff --git a/src/H5Defl.c b/src/H5Defl.c
index 38c8ccd..1ae7a23 100644
--- a/src/H5Defl.c
+++ b/src/H5Defl.c
@@ -126,14 +126,11 @@ static herr_t
H5D__efl_construct(H5F_t *f, H5D_t *dset)
{
size_t dt_size; /* Size of datatype */
- hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
- hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
hssize_t stmp_size; /* Temporary holder for raw data size */
hsize_t tmp_size; /* Temporary holder for raw data size */
hsize_t max_points; /* Maximum elements */
hsize_t max_storage; /* Maximum storage size */
- int ndims; /* Rank of dataspace */
- int i; /* Local index variable */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -149,11 +146,9 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset)
*/
/* Check for invalid dataset dimensions */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
- for(i = 1; i < ndims; i++)
- if(max_dim[i] > dim[i])
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "only the first dimension can be extendible")
+ for(u = 1; u < dset->shared->ndims; u++)
+ if(dset->shared->max_dims[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "only the first dimension can be extendible")
/* Retrieve the size of the dataset's datatype */
if(0 == (dt_size = H5T_get_size(dset->shared->type)))
@@ -175,7 +170,7 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset)
stmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space);
HDassert(stmp_size >= 0);
tmp_size = (hsize_t)stmp_size * dt_size;
- H5_ASSIGN_OVERFLOW(dset->shared->layout.storage.u.contig.size, tmp_size, hssize_t, hsize_t);
+ H5_CHECKED_ASSIGN(dset->shared->layout.storage.u.contig.size, hsize_t, tmp_size, hssize_t);
/* Get the sieve buffer size for this dataset */
dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(f);
@@ -293,7 +288,7 @@ H5D__efl_read(const H5O_efl_t *efl, haddr_t addr, size_t size, uint8_t *buf)
HGOTO_ERROR(H5E_EFL, H5E_OVERFLOW, FAIL, "external file address overflowed")
if((fd = HDopen(efl->slot[u].name, O_RDONLY, 0)) < 0)
HGOTO_ERROR(H5E_EFL, H5E_CANTOPENFILE, FAIL, "unable to open external raw data file")
- if(HDlseek(fd, (off_t)(efl->slot[u].offset + skip), SEEK_SET) < 0)
+ if(HDlseek(fd, (HDoff_t)(efl->slot[u].offset + skip), SEEK_SET) < 0)
HGOTO_ERROR(H5E_EFL, H5E_SEEKERROR, FAIL, "unable to seek in external raw data file")
#ifndef NDEBUG
tempto_read = MIN(efl->slot[u].size-skip, (hsize_t)size);
@@ -383,7 +378,7 @@ H5D__efl_write(const H5O_efl_t *efl, haddr_t addr, size_t size, const uint8_t *b
else
HGOTO_ERROR(H5E_EFL, H5E_CANTOPENFILE, FAIL, "unable to open external raw data file")
} /* end if */
- if(HDlseek(fd, (off_t)(efl->slot[u].offset + skip), SEEK_SET) < 0)
+ if(HDlseek(fd, (HDoff_t)(efl->slot[u].offset + skip), SEEK_SET) < 0)
HGOTO_ERROR(H5E_EFL, H5E_SEEKERROR, FAIL, "unable to seek in external raw data file")
#ifndef NDEBUG
tempto_write = MIN(efl->slot[u].size - skip, (hsize_t)size);
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 39b3eaf..c1e3ad0 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -62,6 +62,7 @@ static H5D_shared_t *H5D__new(hid_t dcpl_id, hbool_t creating,
hbool_t vl_type);
static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id,
const H5T_t *type);
+static herr_t H5D__cache_dataspace_info(const H5D_t *dset);
static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space);
static herr_t H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset,
hid_t dapl_id);
@@ -665,6 +666,40 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__cache_dataspace_info
+ *
+ * Purpose: Cache dataspace info for a dataset
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, November 19, 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__cache_dataspace_info(const H5D_t *dset)
+{
+ int sndims; /* Signed number of dimensions of dataspace rank */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checking */
+ HDassert(dset);
+
+ /* Cache info for dataset's dataspace */
+ if((sndims = H5S_get_simple_extent_dims(dset->shared->space, dset->shared->curr_dims, dset->shared->max_dims)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions")
+ dset->shared->ndims = (unsigned)sndims;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__cache_dataspace_info() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__init_space
*
* Purpose: Copy a dataspace for a dataset's use, performing all the
@@ -698,6 +733,10 @@ H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dataspace")
+ /* Cache the dataset's dataspace info */
+ if(H5D__cache_dataspace_info(dset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
+
/* Set the latest format, if requested */
if(use_latest_format)
if(H5S_set_latest_version(dset->shared->space) < 0)
@@ -1314,6 +1353,10 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id)
if(NULL == (dataset->shared->space = H5S_read(&(dataset->oloc), dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load dataspace info from dataset header")
+ /* Cache the dataset's dataspace info */
+ if(H5D__cache_dataspace_info(dataset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
+
/* Get a datatype ID for the dataset's datatype */
if((dataset->shared->type_id = H5I_register(H5I_DATATYPE, dataset->shared->type, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")
@@ -2260,9 +2303,7 @@ done:
herr_t
H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
{
- H5S_t *space; /* Dataset's dataspace */
- int rank; /* Dataspace # of dimensions */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS];/* Current dimension sizes */
+ hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */
htri_t changed; /* Whether the dataspace changed size */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2286,29 +2327,64 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
if(H5D__check_filters(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't apply filters")
- /* Get the data space */
- space = dset->shared->space;
-
- /* Check if we are shrinking or expanding any of the dimensions */
- if((rank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+ /* Keep the current dataspace dimensions for later */
+ HDcompile_assert(sizeof(curr_dims) == sizeof(dset->shared->curr_dims));
+ HDmemcpy(curr_dims, dset->shared->curr_dims, H5S_MAX_RANK * sizeof(curr_dims[0]));
- /* Modify the size of the data space */
- if((changed = H5S_set_extent(space, size)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of data space")
+ /* Modify the size of the dataspace */
+ if((changed = H5S_set_extent(dset->shared->space, size)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of dataspace")
/* Don't bother updating things, unless they've changed */
if(changed) {
- hbool_t shrink = FALSE; /* Flag to indicate a dimension has shrank */
- hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
- unsigned u; /* Local index variable */
+        hbool_t shrink = FALSE;         /* Flag to indicate a dimension has shrunk */
+ hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
+ hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */
+ unsigned u; /* Local index variable */
/* Determine if we are shrinking and/or expanding any dimensions */
- for(u = 0; u < (unsigned)rank; u++) {
+ for(u = 0; u < dset->shared->ndims; u++) {
+ /* Check for various status changes */
if(size[u] < curr_dims[u])
shrink = TRUE;
if(size[u] > curr_dims[u])
expand = TRUE;
+
+ /* Chunked storage specific checks */
+ if(H5D_CHUNKED == dset->shared->layout.type && dset->shared->ndims > 1) {
+ hsize_t scaled; /* Scaled value */
+
+ /* Compute the scaled dimension size value */
+ scaled = size[u] / dset->shared->layout.u.chunk.dim[u];
+
+ /* Check if scaled dimension size changed */
+ if(scaled != dset->shared->cache.chunk.scaled_dims[u]) {
+ hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
+
+ /* Update the scaled dimension size value for the current dimension */
+ dset->shared->cache.chunk.scaled_dims[u] = scaled;
+
+ /* Check if algorithm for computing hash values will change */
+ if((scaled > dset->shared->cache.chunk.nslots &&
+ dset->shared->cache.chunk.scaled_dims[u] <= dset->shared->cache.chunk.nslots)
+ || (scaled <= dset->shared->cache.chunk.nslots &&
+ dset->shared->cache.chunk.scaled_dims[u] > dset->shared->cache.chunk.nslots))
+ update_chunks = TRUE;
+
+ /* Check if the number of bits required to encode the scaled size value changed */
+ if(dset->shared->cache.chunk.scaled_power2up[u] != (scaled_power2up = H5VM_power2up(scaled))) {
+ /* Update the 'power2up' & 'encode_bits' values for the current dimension */
+ dset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up;
+ dset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up);
+
+ /* Indicate that the cached chunk indices need to be updated */
+ update_chunks = TRUE;
+ } /* end if */
+ } /* end if */
+ } /* end if */
+
+ /* Update the cached copy of the dataset's dimensions */
+ dset->shared->curr_dims[u] = size[u];
} /* end for */
/*-------------------------------------------------------------------------
@@ -2317,11 +2393,15 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
*/
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dset->shared->layout.type) {
- /* Update the cached chunk info */
+ /* Set the cached chunk info */
if(H5D__chunk_set_info(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
- if(H5D__chunk_update_cache(dset, dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
+
+ /* Check if updating the chunk cache indices is necessary */
+ if(update_chunks)
+ /* Update the chunk cache indices */
+ if(H5D__chunk_update_cache(dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
} /* end if */
/* Allocate space for the new parts of the dataset, if appropriate */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index d21612a..3126b92 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -299,8 +299,6 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
uint32_t direct_filters;
hsize_t *direct_offset;
uint32_t direct_datasize;
- int ndims = 0;
- hsize_t dims[H5O_LAYOUT_NDIMS];
hsize_t internal_offset[H5O_LAYOUT_NDIMS];
unsigned u; /* Local index variable */
@@ -321,12 +319,9 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
/* The library's chunking code requires the offset terminates with a zero. So transfer the
* offset array to an internal offset array */
- if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dims, NULL)) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
-
- for(u = 0; u < (unsigned)ndims; u++) {
+ for(u = 0; u < dset->shared->ndims; u++) {
/* Make sure the offset doesn't exceed the dataset's dimensions */
- if(direct_offset[u] > dims[u])
+ if(direct_offset[u] > dset->shared->curr_dims[u])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
/* Make sure the offset fall right on a chunk's boundary */
@@ -337,7 +332,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
} /* end for */
/* Terminate the offset with a zero */
- internal_offset[ndims] = 0;
+ internal_offset[dset->shared->ndims] = 0;
/* write raw data */
if(H5D__chunk_direct_write(dset, dxpl_id, direct_filters, internal_offset, direct_datasize, buf) < 0)
@@ -411,7 +406,7 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
mem_space = file_space;
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dst dataspace has invalid selection")
- H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t);
+ H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);
/* Fill the DXPL cache values for later use */
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
@@ -681,7 +676,7 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
- H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
+ H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);
/* Make certain that the number of elements in each selection is the same */
if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(file_space))
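The H5_ASSIGN_OVERFLOW to H5_CHECKED_ASSIGN conversions in this and the following files also reorder the arguments to (dst, dsttype, src, srctype), as the hunks themselves show. A simplified stand-in macro, given only to illustrate the calling convention, not the real H5private.h definition:

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in: assert that the value survives the narrowing cast */
#define CHECKED_ASSIGN(dst, dsttype, src, srctype)                        \
    do {                                                                  \
        srctype _tmp = (src);                                             \
        assert(_tmp == (srctype)(dsttype)_tmp);  /* no overflow/loss */   \
        (dst) = (dsttype)_tmp;                                            \
    } while(0)

int main(void)
{
    size_t nchunks = 42;
    int    count;

    /* destination, destination type, source, source type */
    CHECKED_ASSIGN(count, int, nchunks, size_t);
    return (count == 42) ? 0 : 1;
}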
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 01d2288..63faa5d 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -438,7 +438,7 @@ H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
/* Get the number of chunks to perform I/O on */
num_chunkf = 0;
ori_num_chunkf = H5SL_count(fm->sel_chunks);
- H5_ASSIGN_OVERFLOW(num_chunkf, ori_num_chunkf, size_t, int);
+ H5_CHECKED_ASSIGN(num_chunkf, int, ori_num_chunkf, size_t);
/* Determine the summation of number of chunks for all processes */
if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&num_chunkf, sum_chunkf, 1, MPI_INT, MPI_SUM, io_info->comm)))
@@ -826,7 +826,7 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
} /* end if */
/* Retrieve total # of chunks in dataset */
- H5_ASSIGN_OVERFLOW(total_chunks, fm->layout->u.chunk.nchunks, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(total_chunks, size_t, fm->layout->u.chunk.nchunks, hsize_t);
/* Handle special case when dataspace dimensions only allow one chunk in
* the dataset. [This sometimes is used by developers who want the
@@ -860,10 +860,9 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
mspace = chunk_info->mspace;
/* Look up address of chunk */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords,
- chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk address")
- ctg_store.contig.dset_addr = udata.addr;
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
} /* end else */
/* Set up the base storage address for this chunk */
@@ -1149,7 +1148,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
#endif
/* Retrieve total # of chunks in dataset */
- H5_ASSIGN_OVERFLOW(total_chunk, fm->layout->u.chunk.nchunks, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(total_chunk, size_t, fm->layout->u.chunk.nchunks, hsize_t);
HDassert(total_chunk != 0);
/* Allocate memories */
@@ -1201,8 +1200,7 @@ if(H5DEBUG(D))
HDassert(chunk_info->index == u);
/* Pass in chunk's coordinates in a union. */
- store.chunk.offset = chunk_info->coords;
- store.chunk.index = chunk_info->index;
+ store.chunk.scaled = chunk_info->scaled;
} /* end if */
/* Collective IO for this chunk,
@@ -1589,10 +1587,9 @@ if(H5DEBUG(D))
H5D_chunk_ud_t udata; /* User data for querying chunk info */
/* Get address of chunk */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id,
- chunk_info->coords, chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
- chunk_addr = udata.addr;
+ chunk_addr = udata.chunk_block.offset;
} /* end if */
else
chunk_addr = total_chunk_addr_array[chunk_info->index];
@@ -1700,7 +1697,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
/* Setup parameters */
- H5_ASSIGN_OVERFLOW(total_chunks, fm->layout->u.chunk.nchunks, hsize_t, int);
+ H5_CHECKED_ASSIGN(total_chunks, int, fm->layout->u.chunk.nchunks, hsize_t);
percent_nproc_per_chunk = H5P_peek_unsigned(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME);
/* if ratio is 0, perform collective io */
if(0 == percent_nproc_per_chunk) {
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index d3cea4c..6af79e9 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -33,6 +33,7 @@
/* Other private headers needed by this file */
#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Fprivate.h" /* File access */
#include "H5Gprivate.h" /* Groups */
#include "H5SLprivate.h" /* Skip lists */
#include "H5Tprivate.h" /* Datatypes */
@@ -60,8 +61,6 @@
(io_info)->op_type = H5D_IO_OP_READ; \
(io_info)->u.rbuf = buf
-#define H5D_CHUNK_HASH(D, ADDR) H5F_addr_hash(ADDR, (D)->cache.chunk.nslots)
-
/* Flags for marking aspects of a dataset dirty */
#define H5D_MARK_SPACE 0x01
#define H5D_MARK_LAYOUT 0x02
@@ -82,7 +81,7 @@ typedef struct H5D_type_info_t {
/* Computed/derived values */
size_t src_type_size; /* Size of source type */
- size_t dst_type_size; /* Size of destination type*/
+ size_t dst_type_size; /* Size of destination type */
size_t max_type_size; /* Size of largest source/destination type */
hbool_t is_conv_noop; /* Whether the type conversion is a NOOP */
hbool_t is_xform_noop; /* Whether the data transform is a NOOP */
@@ -164,8 +163,7 @@ typedef struct {
} H5D_contig_storage_t;
typedef struct {
- hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */
- hsize_t *offset; /* Chunk's coordinates in elements */
+ hsize_t *scaled; /* Scaled coordinates for a chunk */
} H5D_chunk_storage_t;
typedef struct {
@@ -242,8 +240,8 @@ typedef struct H5D_chk_idx_info_t {
* The chunk's file address, filter mask and size on disk are not key values.
*/
typedef struct H5D_chunk_rec_t {
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Logical offset to start */
uint32_t nbytes; /* Size of stored data */
- hsize_t offset[H5O_LAYOUT_NDIMS]; /* Logical offset to start */
unsigned filter_mask; /* Excluded filters */
haddr_t chunk_addr; /* Address of chunk in file */
} H5D_chunk_rec_t;
@@ -257,10 +255,7 @@ typedef struct H5D_chunk_common_ud_t {
/* downward */
const H5O_layout_chunk_t *layout; /* Chunk layout description */
const H5O_storage_chunk_t *storage; /* Chunk storage description */
- const hsize_t *offset; /* Logical offset of chunk */
- const struct H5D_rdcc_t *rdcc; /* Chunk cache. Only necessary if the index may
- * be modified, and if any chunks in the dset
- * may be cached */
+ const hsize_t *scaled; /* Scaled coordinates for a chunk */
} H5D_chunk_common_ud_t;
/* B-tree callback info for various operations */
@@ -269,8 +264,7 @@ typedef struct H5D_chunk_ud_t {
/* Upward */
unsigned idx_hint; /*index of chunk in cache, if present */
- haddr_t addr; /*file address of chunk */
- uint32_t nbytes; /*size of stored data */
+ H5F_block_t chunk_block; /*offset/length of chunk in file */
unsigned filter_mask; /*excluded filters */
} H5D_chunk_ud_t;
@@ -327,11 +321,11 @@ typedef struct H5D_chunk_ops_t {
typedef struct H5D_chunk_info_t {
hsize_t index; /* "Index" of chunk in dataset */
uint32_t chunk_points; /* Number of elements selected in chunk */
- hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk in file dataset's dataspace */
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled coordinates of chunk (in file dataset's dataspace) */
H5S_t *fspace; /* Dataspace describing chunk & selection in it */
- unsigned fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */
+ hbool_t fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */
H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */
- unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */
+ hbool_t mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */
} H5D_chunk_info_t;
/* Main structure holding the mapping between file chunks and memory */
@@ -341,7 +335,6 @@ typedef struct H5D_chunk_map_t {
const H5S_t *file_space; /* Pointer to the file dataspace */
unsigned f_ndims; /* Number of dimensions for file dataspace */
- hsize_t f_dims[H5O_LAYOUT_NDIMS]; /* File dataspace dimensions */
const H5S_t *mem_space; /* Pointer to the memory dataspace */
H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */
@@ -368,7 +361,7 @@ typedef struct H5D_chunk_map_t {
/* Cached information about a particular chunk */
typedef struct H5D_chunk_cached_t {
hbool_t valid; /*whether cache info is valid*/
- hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled offset of chunk*/
haddr_t addr; /*file address of chunk */
uint32_t nbytes; /*size of stored data */
unsigned filter_mask; /*excluded filters */
@@ -393,7 +386,12 @@ typedef struct H5D_rdcc_t {
struct H5D_rdcc_ent_t **slot; /* Chunk slots, each points to a chunk*/
H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */
H5S_t *single_space; /* Dataspace for single element I/O on chunks */
- H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */
+ H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */
+
+ /* Cached information about scaled dataspace dimensions */
+ hsize_t scaled_dims[H5S_MAX_RANK]; /* The scaled dim sizes */
+ hsize_t scaled_power2up[H5S_MAX_RANK]; /* The scaled dim sizes, rounded up to next power of 2 */
+ unsigned scaled_encode_bits[H5S_MAX_RANK]; /* The number of bits needed to encode the scaled dim sizes */
} H5D_rdcc_t;
/* The raw data contiguous data cache */
@@ -423,6 +421,11 @@ typedef struct H5D_shared_t {
H5O_layout_t layout; /* Data layout */
hbool_t checked_filters;/* TRUE if dataset passes can_apply check */
+ /* Cached dataspace info */
+ unsigned ndims; /* The dataset's dataspace rank */
+ hsize_t curr_dims[H5S_MAX_RANK]; /* The curr. size of dataset dimensions */
+ hsize_t max_dims[H5S_MAX_RANK]; /* The max. size of dataset dimensions */
+
/* Buffered/cached information for types of raw data storage*/
struct {
H5D_rdcdc_t contig; /* Information about contiguous data */
@@ -496,11 +499,11 @@ typedef struct {
typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
- hbool_t deleted; /*chunk about to be deleted (do not flush) */
- hsize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
+ hbool_t deleted; /*chunk about to be deleted */
+ hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
- haddr_t chunk_addr; /*address of chunk in file */
+ H5F_block_t chunk_block; /*offset/length of chunk in file */
uint8_t *chunk; /*the unfiltered chunk data */
unsigned idx; /*index in hash table */
struct H5D_rdcc_ent_t *next;/*next item in doubly-linked list */
@@ -620,7 +623,7 @@ H5_DLL herr_t H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
hid_t dapl_id);
H5_DLL hbool_t H5D__chunk_is_space_alloc(const H5O_storage_t *storage);
H5_DLL herr_t H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
- const hsize_t *chunk_offset, hsize_t chunk_idx, H5D_chunk_ud_t *udata);
+ const hsize_t *scaled, H5D_chunk_ud_t *udata);
H5_DLL void *H5D__chunk_lock(const H5D_io_info_t *io_info,
H5D_chunk_ud_t *udata, hbool_t relax);
H5_DLL herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info,
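The H5Dpkg.h changes above replace the chunks' element coordinates (coords/offset) with scaled coordinates throughout the chunk records, cache entries and lookup API. The mapping between the two is a per-dimension division by the chunk dimensions, mirroring the dataspace-dimension scaling shown earlier; a small illustrative sketch with hypothetical values (not library code):

#include <stdint.h>
#include <stdio.h>

#define NDIMS 2

int main(void)
{
    uint64_t chunk_dim[NDIMS] = { 64, 32 };    /* chunk dimensions (example) */
    uint64_t coords[NDIMS]    = { 640, 96 };   /* element offset of a chunk  */
    uint64_t scaled[NDIMS];
    unsigned u;

    /* Scaled coordinate = element offset / chunk dimension, per dimension */
    for(u = 0; u < NDIMS; u++)
        scaled[u] = coords[u] / chunk_dim[u];

    printf("scaled = {%llu, %llu}\n",
           (unsigned long long)scaled[0], (unsigned long long)scaled[1]);  /* {10, 3} */
    return 0;
}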
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index 88a04ec..35927ff 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -183,7 +183,7 @@ H5_DLL herr_t H5D_virtual_update_min_dims(H5O_layout_t *layout, size_t idx);
/* Functions that operate on indexed storage */
H5_DLL herr_t H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
- int indent, int fwidth, unsigned ndims);
+ int indent, int fwidth, unsigned ndims, const uint32_t *dim);
#endif /* _H5Dprivate_H */
diff --git a/src/H5EA.c b/src/H5EA.c
index b790590..37682e7 100644
--- a/src/H5EA.c
+++ b/src/H5EA.c
@@ -148,7 +148,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array info")
/* Lock the array header into memory */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Point extensible array wrapper at header and bump its ref count */
@@ -168,7 +168,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
CATCH
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
if(!ret_value)
if(ea && H5EA_close(ea, dxpl_id) < 0)
@@ -209,7 +209,7 @@ H5EA_open(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for pending array deletion */
@@ -237,7 +237,7 @@ HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
CATCH
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
if(!ret_value)
if(ea && H5EA_close(ea, dxpl_id) < 0)
@@ -1048,7 +1048,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(ea->f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, NULL, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(ea->f, dxpl_id, ea_addr, NULL, H5AC_WRITE)))
H5E_THROW(H5E_CANTLOAD, "unable to load extensible array header")
/* Set the shared array header's file context for this operation */
@@ -1112,7 +1112,7 @@ H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for files using shared array header */
@@ -1131,7 +1131,7 @@ HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
CATCH
/* Unprotect the header, if an error occurred */
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PRIV) /* end H5EA_delete() */
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index 1d40283..57d69a4 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -43,7 +43,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5EApkg.h" /* Extensible Arrays */
#include "H5MFprivate.h" /* File memory management */
-#include "H5VMprivate.h" /* Vectors and arrays */
+#include "H5VMprivate.h" /* Vectors and arrays */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -577,6 +577,7 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
+ size_t u; /* Local index variable */
/* Sanity check */
HDassert(f);
@@ -638,8 +639,6 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
/* Decode data block addresses in index block */
if(iblock->ndblk_addrs > 0) {
- size_t u; /* Local index variable */
-
/* Decode addresses of data blocks in index block */
for(u = 0; u < iblock->ndblk_addrs; u++)
H5F_addr_decode(f, &p, &iblock->dblk_addrs[u]);
@@ -647,8 +646,6 @@ H5EA__cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
/* Decode super block addresses in index block */
if(iblock->nsblk_addrs > 0) {
- size_t u; /* Local index variable */
-
/* Decode addresses of super blocks in index block */
for(u = 0; u < iblock->nsblk_addrs; u++)
H5F_addr_decode(f, &p, &iblock->sblk_addrs[u]);
@@ -872,11 +869,7 @@ H5EA__cache_iblock_notify(H5AC_notify_action_t action, H5EA_iblock_t *iblock))
break;
default:
-#ifdef NDEBUG
H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
-#else /* NDEBUG */
- HDassert(0 && "Unknown action?!?");
-#endif /* NDEBUG */
} /* end switch */
CATCH
@@ -1300,11 +1293,7 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, H5EA_sblock_t *sblock))
break;
default:
-#ifdef NDEBUG
H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
-#else /* NDEBUG */
- HDassert(0 && "Unknown action?!?");
-#endif /* NDEBUG */
} /* end switch */
CATCH
@@ -1662,11 +1651,7 @@ H5EA__cache_dblock_notify(H5AC_notify_action_t action, H5EA_dblock_t *dblock))
break;
default:
-#ifdef NDEBUG
H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
-#else /* NDEBUG */
- HDassert(0 && "Unknown action?!?");
-#endif /* NDEBUG */
} /* end switch */
CATCH
@@ -1791,9 +1776,6 @@ H5EA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata && udata->hdr && udata->parent);
-#ifdef QAK
-HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
-#endif /* QAK */
/* Allocate the extensible array data block page */
if(NULL == (dblk_page = H5EA__dblk_page_alloc(udata->hdr, udata->parent)))
@@ -2014,11 +1996,7 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, H5EA_dblk_page_t *dblk
break;
default:
-#ifdef NDEBUG
H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
-#else /* NDEBUG */
- HDassert(0 && "Unknown action?!?");
-#endif /* NDEBUG */
} /* end switch */
CATCH
diff --git a/src/H5EAdbg.c b/src/H5EAdbg.c
index b9a5bad..60b9ecd 100644
--- a/src/H5EAdbg.c
+++ b/src/H5EAdbg.c
@@ -119,7 +119,7 @@ H5EA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Print opening message */
@@ -171,7 +171,7 @@ H5EA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
CATCH
if(dbg_ctx && cls->dst_dbg_ctx(dbg_ctx) < 0)
H5E_THROW(H5E_CANTRELEASE, "unable to release extensible array debugging context")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PKG) /* end H5EA__hdr_debug() */
@@ -196,9 +196,9 @@ H5EA__iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, FILE *stream, i
int fwidth, const H5EA_class_t *cls, haddr_t hdr_addr, haddr_t obj_addr))
/* Local variables */
- H5EA_hdr_t *hdr = NULL; /* Shared extensible array header */
- H5EA_iblock_t *iblock = NULL; /* Extensible array index block */
- void *dbg_ctx = NULL; /* Extensible array context */
+ H5EA_hdr_t *hdr = NULL; /* Shared extensible array header */
+ H5EA_iblock_t *iblock = NULL; /* Extensible array index block */
+ void *dbg_ctx = NULL; /* Extensible array context */
/* Check arguments */
HDassert(f);
@@ -218,7 +218,7 @@ H5EA__iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, FILE *stream, i
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Sanity check */
@@ -296,7 +296,7 @@ CATCH
H5E_THROW(H5E_CANTRELEASE, "unable to release extensible array debugging context")
if(iblock && H5EA__iblock_unprotect(iblock, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array index block")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PKG) /* end H5EA__iblock_debug() */
@@ -321,9 +321,9 @@ H5EA__sblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
int fwidth, const H5EA_class_t *cls, haddr_t hdr_addr, unsigned sblk_idx, haddr_t obj_addr))
/* Local variables */
- H5EA_hdr_t *hdr = NULL; /* Shared extensible array header */
- H5EA_sblock_t *sblock = NULL; /* Extensible array super block */
- void *dbg_ctx = NULL; /* Extensible array context */
+ H5EA_hdr_t *hdr = NULL; /* Shared extensible array header */
+ H5EA_sblock_t *sblock = NULL; /* Extensible array super block */
+ void *dbg_ctx = NULL; /* Extensible array context */
/* Check arguments */
HDassert(f);
@@ -343,7 +343,7 @@ H5EA__sblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect super block */
@@ -388,7 +388,7 @@ CATCH
H5E_THROW(H5E_CANTRELEASE, "unable to release extensible array debugging context")
if(sblock && H5EA__sblock_unprotect(sblock, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array super block")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PKG) /* end H5EA__sblock_debug() */
@@ -415,7 +415,7 @@ H5EA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
/* Local variables */
H5EA_hdr_t *hdr = NULL; /* Shared extensible array header */
H5EA_dblock_t *dblock = NULL; /* Extensible array data block */
- void *dbg_ctx = NULL; /* Extensible array context */
+ void *dbg_ctx = NULL; /* Extensible array context */
size_t u; /* Local index variable */
/* Check arguments */
@@ -437,7 +437,7 @@ H5EA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
- if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect data block */
@@ -471,7 +471,7 @@ CATCH
H5E_THROW(H5E_CANTRELEASE, "unable to release extensible array debugging context")
if(dblock && H5EA__dblock_unprotect(dblock, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array data block")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_EARRAY_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5EA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PKG) /* end H5EA__dblock_debug() */
diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c
index d5f3538..136cf1e 100644
--- a/src/H5EAhdr.c
+++ b/src/H5EAhdr.c
@@ -612,6 +612,70 @@ END_FUNC(PKG) /* end H5EA__hdr_modified() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__hdr_protect
+ *
+ * Purpose: Convenience wrapper around protecting extensible array header
+ *
+ * Return: Non-NULL pointer to extensible array header on success/NULL on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Jul 31 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+H5EA_hdr_t *, NULL, NULL,
+H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata,
+ H5AC_protect_t rw))
+
+ /* Local variables */
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(H5F_addr_defined(ea_addr));
+
+ /* Protect the header */
+ if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, rw)))
+ H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
+
+CATCH
+
+END_FUNC(PKG) /* end H5EA__hdr_protect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA__hdr_unprotect
+ *
+ * Purpose: Convenience wrapper around unprotecting extensible array header
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 1 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+herr_t, SUCCEED, FAIL,
+H5EA__hdr_unprotect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags))
+
+ /* Local variables */
+
+ /* Sanity check */
+ HDassert(hdr);
+
+ /* Unprotect the header */
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_HDR, hdr->addr, hdr, cache_flags) < 0)
+ H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect extensible array hdr, address = %llu", (unsigned long long)hdr->addr)
+
+CATCH
+
+END_FUNC(PKG) /* end H5EA__hdr_unprotect() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__hdr_delete
*
* Purpose: Delete an extensible array, starting with the header
@@ -665,7 +729,7 @@ HDfprintf(stderr, "%s: hdr->idx_blk_addr = %a\n", FUNC, hdr->idx_blk_addr);
CATCH
/* Unprotect the header, deleting it if an error hasn't occurred */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_HDR, hdr->addr, hdr, cache_flags) < 0)
+ if(H5EA__hdr_unprotect(hdr, dxpl_id, cache_flags) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array header")
END_FUNC(PKG) /* end H5EA__hdr_delete() */
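The new H5EA__hdr_protect()/H5EA__hdr_unprotect() pair (and the H5FA equivalents further below) centralize the cache type, header address and error text that every call site previously repeated around H5AC_protect()/H5AC_unprotect(). A generic, self-contained sketch of the idiom, with hypothetical names standing in for the real routines:

#include <stdio.h>

typedef struct hdr_t { unsigned long addr; int is_protected; } hdr_t;

static hdr_t the_hdr;   /* stand-in for a metadata cache entry */

static hdr_t *hdr_protect(unsigned long addr, int rw)   /* hypothetical wrapper */
{
    (void)rw;
    the_hdr.addr = addr;
    the_hdr.is_protected = 1;
    return &the_hdr;
}

static int hdr_unprotect(hdr_t *hdr, unsigned flags)    /* hypothetical wrapper */
{
    (void)flags;
    if(!hdr || !hdr->is_protected)
        return -1;
    hdr->is_protected = 0;
    return 0;
}

int main(void)
{
    hdr_t *hdr;

    /* Call sites now name only the header address and access mode... */
    if(NULL == (hdr = hdr_protect(0x1000UL, 1)))
        return 1;

    /* ... operate on the protected header ... */

    /* ... and release it on every exit path, including errors */
    if(hdr_unprotect(hdr, 0) < 0)
        return 1;
    return 0;
}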
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index fc15e72..d89a35e 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -302,11 +302,12 @@ struct H5EA_t {
/* Metadata cache callback user data types */
-/* Info needed for loading data block page */
-typedef struct H5EA_dblk_page_cache_ud_t {
+/* Info needed for loading super block */
+typedef struct H5EA_sblock_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
- H5EA_sblock_t *parent; /* Pointer to parent object for data block page (super block) */
-} H5EA_dblk_page_cache_ud_t;
+ H5EA_iblock_t *parent; /* Pointer to parent object for super block (index block) */
+ unsigned sblk_idx; /* Index of super block */
+} H5EA_sblock_cache_ud_t;
/* Info needed for loading data block */
typedef struct H5EA_dblock_cache_ud_t {
@@ -315,12 +316,11 @@ typedef struct H5EA_dblock_cache_ud_t {
size_t nelmts; /* Number of elements in data block */
} H5EA_dblock_cache_ud_t;
-/* Info needed for loading super block */
-typedef struct H5EA_sblock_cache_ud_t {
+/* Info needed for loading data block page */
+typedef struct H5EA_dblk_page_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
- H5EA_iblock_t *parent; /* Pointer to parent object for super block (index block) */
- unsigned sblk_idx; /* Index of super block */
-} H5EA_sblock_cache_ud_t;
+ H5EA_sblock_t *parent; /* Pointer to parent object for data block page (super block) */
+} H5EA_dblk_page_cache_ud_t;
#ifdef H5EA_TESTING
typedef struct H5EA__ctx_cb_t {
@@ -377,6 +377,9 @@ H5_DLL herr_t H5EA__hdr_decr(H5EA_hdr_t *hdr);
H5_DLL herr_t H5EA__hdr_fuse_incr(H5EA_hdr_t *hdr);
H5_DLL size_t H5EA__hdr_fuse_decr(H5EA_hdr_t *hdr);
H5_DLL herr_t H5EA__hdr_modified(H5EA_hdr_t *hdr);
+H5_DLL H5EA_hdr_t *H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr,
+ void *ctx_udata, H5AC_protect_t rw);
+H5_DLL herr_t H5EA__hdr_unprotect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5EA__hdr_delete(H5EA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5EA__hdr_dest(H5EA_hdr_t *hdr);
diff --git a/src/H5F.c b/src/H5F.c
index 357897e..3cdb604 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -484,15 +484,15 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
/* Check/fix arguments */
if(!filename || !*filename)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file name")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file name")
/* In this routine, we only accept the following flags:
- * H5F_ACC_EXCL, H5F_ACC_TRUNC and H5F_ACC_DEBUG
+ * H5F_ACC_EXCL and H5F_ACC_TRUNC
*/
- if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC | H5F_ACC_DEBUG))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags")
+ if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags")
/* The H5F_ACC_EXCL and H5F_ACC_TRUNC flags are mutually exclusive */
if((flags & H5F_ACC_EXCL) && (flags & H5F_ACC_TRUNC))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mutually exclusive flags for file creation")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mutually exclusive flags for file creation")
/* Check file creation property list */
if(H5P_DEFAULT == fcpl_id)
@@ -1380,7 +1380,7 @@ H5Fget_info2(hid_t obj_id, H5F_info2_t *finfo)
HDmemset(finfo, 0, sizeof(*finfo));
/* Get the size of the superblock and any superblock extensions */
- if(H5F_super_size(f, H5AC_ind_dxpl_id, &finfo->super.super_size, &finfo->super.super_ext_size) < 0)
+ if(H5F__super_size(f, H5AC_ind_dxpl_id, &finfo->super.super_size, &finfo->super.super_ext_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "Unable to retrieve superblock sizes")
/* Get the size of any persistent free space */
diff --git a/src/H5FA.c b/src/H5FA.c
index 128999a..1acb10b 100644
--- a/src/H5FA.c
+++ b/src/H5FA.c
@@ -141,7 +141,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array info")
/* Lock the array header into memory */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Point fixed array wrapper at header and bump its ref count */
@@ -161,7 +161,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
CATCH
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5FA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array header")
if(!ret_value)
if(fa && H5FA_close(fa, dxpl_id) < 0)
@@ -201,7 +201,7 @@ H5FA_open(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for pending array deletion */
@@ -229,7 +229,7 @@ HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
CATCH
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5FA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array header")
if(!ret_value)
if(fa && H5FA_close(fa, dxpl_id) < 0)
@@ -351,7 +351,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx);
#endif /* H5FA_DEBUG */
/* Create the data block */
- hdr->dblk_addr = H5FA__dblock_create(hdr, dxpl_id, &hdr_dirty, hdr->cparam.nelmts);
+ hdr->dblk_addr = H5FA__dblock_create(hdr, dxpl_id, &hdr_dirty);
if(!H5F_addr_defined(hdr->dblk_addr))
H5E_THROW(H5E_CANTCREATE, "unable to create fixed array data block")
} /* end if */
@@ -359,7 +359,7 @@ HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx
HDassert(idx < hdr->cparam.nelmts);
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, hdr->stats.nelmts, H5AC_WRITE)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paging data block */
@@ -467,7 +467,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
else {
/* Get the data block */
HDassert(H5F_addr_defined(hdr->dblk_addr));
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, hdr->stats.nelmts, H5AC_READ)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paged data block */
@@ -592,7 +592,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(fa->f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, NULL, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(fa->f, dxpl_id, fa_addr, NULL, H5AC_WRITE)))
H5E_THROW(H5E_CANTLOAD, "unable to load fixed array header")
/* Set the shared array header's file context for this operation */
@@ -655,7 +655,7 @@ H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, H5AC_WRITE)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for files using shared array header */
@@ -674,7 +674,7 @@ HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
CATCH
/* Unprotect the header, if an error occurred */
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5FA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array header")
END_FUNC(PRIV) /* end H5FA_delete() */
diff --git a/src/H5FAcache.c b/src/H5FAcache.c
index cb156db..2f07cd6 100644
--- a/src/H5FAcache.c
+++ b/src/H5FAcache.c
@@ -16,6 +16,8 @@
/*-------------------------------------------------------------------------
*
* Created: H5FAcache.c
+ * Jul 2 2009
+ * Quincey Koziol <koziol@hdfgroup.org>
*
* Purpose: Implement fixed array metadata cache methods.
*
@@ -41,7 +43,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FApkg.h" /* Fixed Arrays */
#include "H5MFprivate.h" /* File memory management */
-#include "H5VMprivate.h" /* Vectors and arrays */
+#include "H5VMprivate.h" /* Vectors and arrays */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -517,10 +519,10 @@ H5FA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
/* Sanity check */
HDassert(f);
HDassert(H5F_addr_defined(addr));
- HDassert(udata && udata->hdr && udata->nelmts > 0);
+ HDassert(udata && udata->hdr);
/* Allocate the fixed array data block */
- if(NULL == (dblock = H5FA__dblock_alloc(udata->hdr, udata->nelmts)))
+ if(NULL == (dblock = H5FA__dblock_alloc(udata->hdr)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block")
/* Set the fixed array data block's information */
@@ -575,9 +577,9 @@ H5FA__cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
if(!dblock->npages) {
/* Decode elements in data block */
/* Convert from raw elements on disk into native elements in memory */
- if((udata->hdr->cparam.cls->decode)(p, dblock->elmts, (size_t)udata->nelmts, udata->hdr->cb_ctx) < 0)
+ if((udata->hdr->cparam.cls->decode)(p, dblock->elmts, (size_t)udata->hdr->stats.nelmts, udata->hdr->cb_ctx) < 0)
H5E_THROW(H5E_CANTDECODE, "can't decode fixed array data elements")
- p += (udata->nelmts * udata->hdr->cparam.raw_elmt_size);
+ p += (udata->hdr->stats.nelmts * udata->hdr->cparam.raw_elmt_size);
} /* end if */
/* Sanity check */
@@ -868,9 +870,6 @@ H5FA__cache_dblk_page_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata))
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata && udata->hdr && udata->nelmts > 0);
-#ifdef QAK
-HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
-#endif /* QAK */
/* Allocate the fixed array data block page */
if(NULL == (dblk_page = H5FA__dblk_page_alloc(udata->hdr, udata->nelmts)))
diff --git a/src/H5FAdbg.c b/src/H5FAdbg.c
index eb5e32e..d5239d3 100644
--- a/src/H5FAdbg.c
+++ b/src/H5FAdbg.c
@@ -117,7 +117,7 @@ H5FA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the fixed array header */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Print opening message */
@@ -150,7 +150,7 @@ H5FA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
CATCH
if(dbg_ctx && cls->dst_dbg_ctx(dbg_ctx) < 0)
H5E_THROW(H5E_CANTRELEASE, "unable to release fixed array debugging context")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FARRAY_HDR, addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5FA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array header")
END_FUNC(PKG) /* end H5FA__hdr_debug() */
@@ -198,11 +198,11 @@ H5FA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the fixed array header */
- if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, hdr_addr, dbg_ctx, H5AC_READ)))
+ if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, hdr->cparam.nelmts, H5AC_READ)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, H5AC_READ)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)addr)
/* Print opening message */
@@ -280,7 +280,7 @@ CATCH
H5E_THROW(H5E_CANTRELEASE, "unable to release fixed array debugging context")
if(dblock && H5FA__dblock_unprotect(dblock, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array data block")
- if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FARRAY_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(hdr && H5FA__hdr_unprotect(hdr, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
H5E_THROW(H5E_CANTUNPROTECT, "unable to release fixed array header")
END_FUNC(PKG) /* end H5FA__dblock_debug() */
diff --git a/src/H5FAdblock.c b/src/H5FAdblock.c
index 80e2159..e6e0e74 100644
--- a/src/H5FAdblock.c
+++ b/src/H5FAdblock.c
@@ -102,14 +102,14 @@ H5FL_BLK_DEFINE(fa_page_init);
*/
BEGIN_FUNC(PKG, ERR,
H5FA_dblock_t *, NULL, NULL,
-H5FA__dblock_alloc(H5FA_hdr_t *hdr, hsize_t nelmts))
+H5FA__dblock_alloc(H5FA_hdr_t *hdr))
/* Local variables */
H5FA_dblock_t *dblock = NULL; /* fixed array data block */
/* Check arguments */
HDassert(hdr);
- HDassert(nelmts > 0);
+ HDassert(hdr->cparam.nelmts > 0);
/* Allocate memory for the data block */
if(NULL == (dblock = H5FL_CALLOC(H5FA_dblock_t)))
@@ -124,12 +124,12 @@ H5FA__dblock_alloc(H5FA_hdr_t *hdr, hsize_t nelmts))
dblock->dblk_page_nelmts = (size_t)1 << hdr->cparam.max_dblk_page_nelmts_bits;
/* Check if this data block should be paged */
- if(nelmts > dblock->dblk_page_nelmts) {
+ if(hdr->cparam.nelmts > dblock->dblk_page_nelmts) {
/* Compute number of pages */
- hsize_t npages = ((nelmts + dblock->dblk_page_nelmts) - 1) / dblock->dblk_page_nelmts;
+ hsize_t npages = ((hdr->cparam.nelmts + dblock->dblk_page_nelmts) - 1) / dblock->dblk_page_nelmts;
/* Safely assign the number of pages */
- H5_ASSIGN_OVERFLOW(/* To: */ dblock->npages, /* From: */ npages, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(dblock->npages, size_t, npages, hsize_t);
/* Sanity check that we have at least 1 page */
HDassert(dblock->npages > 0);
@@ -146,10 +146,10 @@ H5FA__dblock_alloc(H5FA_hdr_t *hdr, hsize_t nelmts))
dblock->dblk_page_size = (dblock->dblk_page_nelmts * hdr->cparam.raw_elmt_size) + H5FA_SIZEOF_CHKSUM;
/* Compute the # of elements on last page */
- if(0 == nelmts % dblock->dblk_page_nelmts)
+ if(0 == hdr->cparam.nelmts % dblock->dblk_page_nelmts)
dblock->last_page_nelmts = dblock->dblk_page_nelmts;
else
- dblock->last_page_nelmts = (size_t)(nelmts % dblock->dblk_page_nelmts);
+ dblock->last_page_nelmts = (size_t)(hdr->cparam.nelmts % dblock->dblk_page_nelmts);
} /* end if */
else {
hsize_t dblk_size = hdr->cparam.nelmts * hdr->cparam.cls->nat_elmt_size;
@@ -186,24 +186,22 @@ END_FUNC(PKG) /* end H5FA__dblock_alloc() */
*/
BEGIN_FUNC(PKG, ERR,
haddr_t, HADDR_UNDEF, HADDR_UNDEF,
-H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty,
- hsize_t nelmts))
+H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty))
/* Local variables */
H5FA_dblock_t *dblock = NULL; /* fixed array data block */
haddr_t dblock_addr; /* fixed array data block address */
#ifdef H5FA_DEBUG
-HDfprintf(stderr, "%s: Called, hdr->stats.nelmts = %Zu, nelmts = %Zu\n", FUNC, hdr->stats.nelmts, nelmts);
+HDfprintf(stderr, "%s: Called, hdr->stats.nelmts = %Zu, nelmts = %Zu\n", FUNC, hdr->stats.nelmts, hdr->cparam.nelmts);
#endif /* H5FA_DEBUG */
/* Sanity check */
HDassert(hdr);
HDassert(hdr_dirty);
- HDassert(nelmts > 0);
/* Allocate the data block */
- if(NULL == (dblock = H5FA__dblock_alloc(hdr, nelmts)))
+ if(NULL == (dblock = H5FA__dblock_alloc(hdr)))
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block")
/* Set size of data block on disk */
@@ -265,7 +263,7 @@ END_FUNC(PKG) /* end H5FA__dblock_create() */
BEGIN_FUNC(PKG, ERR,
H5FA_dblock_t *, NULL, NULL,
H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr,
- hsize_t dblk_nelmts, H5AC_protect_t rw))
+ H5AC_protect_t rw))
/* Local variables */
H5FA_dblock_cache_ud_t udata; /* Information needed for loading data block */
@@ -277,11 +275,9 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_addr));
- HDassert(dblk_nelmts);
/* Set up user data */
udata.hdr = hdr;
- udata.nelmts = dblk_nelmts;
/* Protect the data block */
if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, rw)))
@@ -340,8 +336,7 @@ END_FUNC(PKG) /* end H5FA__dblock_unprotect() */
*/
BEGIN_FUNC(PKG, ERR,
herr_t, SUCCEED, FAIL,
-H5FA__dblock_delete(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr,
- hsize_t dblk_nelmts))
+H5FA__dblock_delete(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr))
/* Local variables */
H5FA_dblock_t *dblock = NULL; /* Pointer to data block */
@@ -353,10 +348,9 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Sanity check */
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_addr));
- HDassert(dblk_nelmts > 0);
/* Protect data block */
- if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, dblk_nelmts, H5AC_WRITE)))
+ if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, H5AC_WRITE)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr)
/* Check if data block is paged */
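With the nelmts parameters removed, the data block sizing above is driven entirely by hdr->cparam.nelmts and the page-size bits. The paging arithmetic itself (ceiling division for the page count, remainder for the last page) in a standalone sketch with example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t nelmts        = 1000;  /* hdr->cparam.nelmts (example value)            */
    unsigned max_page_bits = 8;     /* hdr->cparam.max_dblk_page_nelmts_bits (example) */
    uint64_t page_nelmts   = (uint64_t)1 << max_page_bits;   /* 256 elements per page */

    if(nelmts > page_nelmts) {
        /* Ceiling division for the number of pages: (1000 + 255) / 256 = 4 */
        uint64_t npages = (nelmts + page_nelmts - 1) / page_nelmts;
        /* Elements on the last page: remainder, or a full page if it divides evenly */
        uint64_t last   = nelmts % page_nelmts ? nelmts % page_nelmts : page_nelmts;  /* 232 */

        printf("paged: %llu pages, %llu elements on the last page\n",
               (unsigned long long)npages, (unsigned long long)last);
    }
    else
        printf("unpaged data block\n");
    return 0;
}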
diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c
index 23a554a..5970ff0 100644
--- a/src/H5FAhdr.c
+++ b/src/H5FAhdr.c
@@ -393,6 +393,70 @@ END_FUNC(PKG) /* end H5FA__hdr_modified() */
/*-------------------------------------------------------------------------
+ * Function: H5FA__hdr_protect
+ *
+ * Purpose: Convenience wrapper around protecting fixed array header
+ *
+ * Return: Non-NULL pointer to fixed array header on success/NULL on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 12 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+H5FA_hdr_t *, NULL, NULL,
+H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata,
+ H5AC_protect_t rw))
+
+ /* Local variables */
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(H5F_addr_defined(fa_addr));
+
+ /* Protect the header */
+ if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, rw)))
+ H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
+
+CATCH
+
+END_FUNC(PKG) /* end H5FA__hdr_protect() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__hdr_unprotect
+ *
+ * Purpose: Convenience wrapper around unprotecting fixed array header
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 12 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__hdr_unprotect(H5FA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags))
+
+ /* Local variables */
+
+ /* Sanity check */
+ HDassert(hdr);
+
+ /* Unprotect the header */
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_FARRAY_HDR, hdr->addr, hdr, cache_flags) < 0)
+ H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect fixed array hdr, address = %llu", (unsigned long long)hdr->addr)
+
+CATCH
+
+END_FUNC(PKG) /* end H5FA__hdr_unprotect() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FA__hdr_delete
*
* Purpose: Delete a fixed array, starting with the header
@@ -436,7 +500,7 @@ HDfprintf(stderr, "%s: hdr->dblk_addr = %a\n", FUNC, hdr->dblk_addr);
#endif /* H5FA_DEBUG */
/* Delete Fixed Array Data block */
- if(H5FA__dblock_delete(hdr, dxpl_id, hdr->dblk_addr, hdr->cparam.nelmts) < 0)
+ if(H5FA__dblock_delete(hdr, dxpl_id, hdr->dblk_addr) < 0)
H5E_THROW(H5E_CANTDELETE, "unable to delete fixed array data block")
} /* end if */
diff --git a/src/H5FApkg.h b/src/H5FApkg.h
index 20d30c3..7101f0b 100644
--- a/src/H5FApkg.h
+++ b/src/H5FApkg.h
@@ -199,13 +199,12 @@ struct H5FA_t {
/* Info needed for loading data block */
typedef struct H5FA_dblock_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
- hsize_t nelmts; /* Number of elements in data block */
} H5FA_dblock_cache_ud_t;
/* Info needed for loading data block page */
typedef struct H5FA_dblk_page_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
- size_t nelmts; /* Number of elements in data block page */
+ size_t nelmts; /* Number of elements in data block page */
} H5FA_dblk_page_cache_ud_t;
@@ -244,19 +243,22 @@ H5_DLL herr_t H5FA__hdr_decr(H5FA_hdr_t *hdr);
H5_DLL herr_t H5FA__hdr_fuse_incr(H5FA_hdr_t *hdr);
H5_DLL size_t H5FA__hdr_fuse_decr(H5FA_hdr_t *hdr);
H5_DLL herr_t H5FA__hdr_modified(H5FA_hdr_t *hdr);
+H5_DLL H5FA_hdr_t *H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr,
+ void *ctx_udata, H5AC_protect_t rw);
+H5_DLL herr_t H5FA__hdr_unprotect(H5FA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5FA__hdr_delete(H5FA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5FA__hdr_dest(H5FA_hdr_t *hdr);
/* Data block routines */
-H5_DLL H5FA_dblock_t *H5FA__dblock_alloc(H5FA_hdr_t *hdr, hsize_t nelmts);
-H5_DLL haddr_t H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty, hsize_t nelmts);
+H5_DLL H5FA_dblock_t *H5FA__dblock_alloc(H5FA_hdr_t *hdr);
+H5_DLL haddr_t H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty);
H5_DLL unsigned H5FA__dblock_sblk_idx(const H5FA_hdr_t *hdr, hsize_t idx);
H5_DLL H5FA_dblock_t *H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id,
- haddr_t dblk_addr, hsize_t dblk_nelmts, H5AC_protect_t rw);
+ haddr_t dblk_addr, H5AC_protect_t rw);
H5_DLL herr_t H5FA__dblock_unprotect(H5FA_dblock_t *dblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5FA__dblock_delete(H5FA_hdr_t *hdr, hid_t dxpl_id,
- haddr_t dblk_addr, hsize_t dblk_nelmts);
+ haddr_t dblk_addr);
H5_DLL herr_t H5FA__dblock_dest(H5FA_dblock_t *dblock);
/* Data block page routines */
diff --git a/src/H5FD.c b/src/H5FD.c
index fc6937e..c4ee11f 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -540,11 +540,11 @@ H5FD_sb_encode(H5FD_t *file, char *name/*out*/, uint8_t *buf)
done:
FUNC_LEAVE_NOAPI(ret_value)
-}
+} /* end H5FD_sb_encode() */
/*-------------------------------------------------------------------------
- * Function: H5FD_sb_decode
+ * Function: H5FD__sb_decode
*
* Purpose: Decodes the driver information block.
*
@@ -556,20 +556,61 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5FD_sb_decode(H5FD_t *file, const char *name, const uint8_t *buf)
+static herr_t
+H5FD__sb_decode(H5FD_t *file, const char *name, const uint8_t *buf)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
HDassert(file && file->cls);
+
+ /* Decode driver information */
if(file->cls->sb_decode && (file->cls->sb_decode)(file, name, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver sb_decode request failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FD_sb_decode() */
+} /* end H5FD__sb_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_sb_load
+ *
+ * Purpose: Validate and decode the driver information block.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Friday, July 19, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_sb_load(H5FD_t *file, const char *name, const uint8_t *buf)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(file && file->cls);
+
+ /* Check if driver matches driver information saved. Unfortunately, we can't push this
+ * function to each specific driver because we're checking if the driver is correct.
+ */
+ if(!HDstrncmp(name, "NCSAfami", (size_t)8) && HDstrcmp(file->cls->name, "family"))
+ HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "family driver should be used")
+ if(!HDstrncmp(name, "NCSAmult", (size_t)8) && HDstrcmp(file->cls->name, "multi"))
+ HGOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "multi driver should be used")
+
+ /* Decode driver information */
+ if(H5FD__sb_decode(file, name, buf) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTDECODE, FAIL, "unable to decode driver information")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_sb_load() */
/*-------------------------------------------------------------------------
@@ -1840,12 +1881,12 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FDtruncate(H5FD_t *file, hid_t dxpl_id, unsigned closing)
+H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
- H5TRACE3("e", "*xiIu", file, dxpl_id, closing);
+ H5TRACE3("e", "*xib", file, dxpl_id, closing);
/* Check args */
if(!file || !file->cls)
@@ -1879,7 +1920,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_truncate(H5FD_t *file, hid_t dxpl_id, unsigned closing)
+H5FD_truncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing)
{
herr_t ret_value = SUCCEED; /* Return value */
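H5FD_sb_load() now performs the driver-signature validation before delegating to the static H5FD__sb_decode(). A standalone sketch of that check; the 8-character signatures come straight from the hunk above, while the helper name and values are purely illustrative:

#include <stdio.h>
#include <string.h>

static int check_driver(const char *sb_name, const char *opened_driver)
{
    /* The superblock stores an 8-character driver signature such as
     * "NCSAfami" (family) or "NCSAmult" (multi); the driver used to open
     * the file must match it before the driver info block is decoded. */
    if(!strncmp(sb_name, "NCSAfami", 8) && strcmp(opened_driver, "family"))
        return -1;  /* family driver should be used */
    if(!strncmp(sb_name, "NCSAmult", 8) && strcmp(opened_driver, "multi"))
        return -1;  /* multi driver should be used */
    return 0;
}

int main(void)
{
    printf("%d\n", check_driver("NCSAfami", "family"));  /*  0: match     */
    printf("%d\n", check_driver("NCSAmult", "sec2"));    /* -1: mismatch  */
    return 0;
}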
diff --git a/src/H5FDcore.c b/src/H5FDcore.c
index ba4270e..f9bda00 100644
--- a/src/H5FDcore.c
+++ b/src/H5FDcore.c
@@ -1300,7 +1300,7 @@ H5FD_core_write(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t UNUSED dxpl_id, had
size_t new_eof;
/* Determine new size of memory buffer */
- H5_ASSIGN_OVERFLOW(new_eof, file->increment * ((addr + size) / file->increment), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(new_eof, size_t, file->increment * ((addr + size) / file->increment), hsize_t);
if((addr + size) % file->increment)
new_eof += file->increment;
@@ -1469,7 +1469,7 @@ H5FD_core_truncate(H5FD_t *_file, hid_t UNUSED dxpl_id, hbool_t closing)
new_eof = file->eoa;
else { /* set eof to smallest multiple of increment that exceeds eoa */
/* Determine new size of memory buffer */
- H5_ASSIGN_OVERFLOW(new_eof, file->increment * (file->eoa / file->increment), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(new_eof, size_t, file->increment * (file->eoa / file->increment), hsize_t);
if(file->eoa % file->increment)
new_eof += file->increment;
} /* end else */
diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c
index 7507a2e..c2b0660 100644
--- a/src/H5FDdirect.c
+++ b/src/H5FDdirect.c
@@ -515,7 +515,7 @@ H5FD_direct_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxadd
HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, NULL, "bad VFL driver info")
file->fd = fd;
- H5_ASSIGN_OVERFLOW(file->eof,sb.st_size,h5_stat_size_t,haddr_t);
+ H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
file->pos = HADDR_UNDEF;
file->op = OP_UNKNOWN;
#ifdef H5_HAVE_WIN32_API
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index ad703ef..8660f1f 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -587,26 +587,25 @@ H5FD_family_sb_decode(H5FD_t *_file, const char UNUSED *name, const unsigned cha
* h5repart is being used to change member file size. h5repart will open
* files for read and write. When the files are closed, metadata will be
* flushed to the files and updated to this new size */
- if(file->mem_newsize) {
+ if(file->mem_newsize)
file->memb_size = file->pmem_size = file->mem_newsize;
- HGOTO_DONE(ret_value)
- } /* end if */
-
- /* Default - use the saved member size */
- if(file->pmem_size == H5F_FAMILY_DEFAULT)
- file->pmem_size = msize;
+ else {
+ /* Default - use the saved member size */
+ if(file->pmem_size == H5F_FAMILY_DEFAULT)
+ file->pmem_size = msize;
- /* Check if member size from file access property is correct */
- if(msize != file->pmem_size) {
- char err_msg[128];
+ /* Check if member size from file access property is correct */
+ if(msize != file->pmem_size) {
+ char err_msg[128];
- HDsnprintf(err_msg, sizeof(err_msg), "Family member size should be %lu. But the size from file access property is %lu", (unsigned long)msize, (unsigned long)file->pmem_size);
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, err_msg)
- } /* end if */
+ HDsnprintf(err_msg, sizeof(err_msg), "Family member size should be %lu. But the size from file access property is %lu", (unsigned long)msize, (unsigned long)file->pmem_size);
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, err_msg)
+ } /* end if */
- /* Update member file size to the size saved in the superblock.
- * That's the size intended to be. */
- file->memb_size = msize;
+ /* Update member file size to the size saved in the superblock.
+ * That's the size intended to be. */
+ file->memb_size = msize;
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -686,14 +685,11 @@ H5FD_family_open(const char *name, unsigned flags, hid_t fapl_id,
/* Check for new family file size. It's used by h5repart only. */
if(H5P_exist_plist(plist, H5F_ACS_FAMILY_NEWSIZE_NAME) > 0) {
- hsize_t fam_newsize = 0; /* New member size, when repartitioning */
-
/* Get the new family file size */
- if(H5P_get(plist, H5F_ACS_FAMILY_NEWSIZE_NAME, &fam_newsize) < 0)
+ if(H5P_get(plist, H5F_ACS_FAMILY_NEWSIZE_NAME, &file->mem_newsize) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get new family member size")
- /* Store information for later */
- file->mem_newsize = fam_newsize; /* New member size passed in through property */
+ /* Set flag for later */
file->repart_members = TRUE;
} /* end if */
@@ -1170,7 +1166,7 @@ H5FD_family_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, si
/* Read from each member */
while(size > 0) {
- H5_ASSIGN_OVERFLOW(u,addr /file->memb_size,hsize_t,unsigned);
+ H5_CHECKED_ASSIGN(u, unsigned, addr / file->memb_size, hsize_t);
sub = addr % file->memb_size;
@@ -1239,7 +1235,7 @@ H5FD_family_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, s
/* Write to each member */
while (size>0) {
- H5_ASSIGN_OVERFLOW(u,addr /file->memb_size,hsize_t,unsigned);
+ H5_CHECKED_ASSIGN(u, unsigned, addr / file->memb_size, hsize_t);
sub = addr % file->memb_size;
diff --git a/src/H5FDlog.c b/src/H5FDlog.c
index a2284dd..cc0caa4 100644
--- a/src/H5FDlog.c
+++ b/src/H5FDlog.c
@@ -571,7 +571,7 @@ H5FD_log_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate file struct")
file->fd = fd;
- H5_ASSIGN_OVERFLOW(file->eof, sb.st_size, h5_stat_size_t, haddr_t);
+ H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
file->pos = HADDR_UNDEF;
file->op = OP_UNKNOWN;
#ifdef H5_HAVE_WIN32_API
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index b1f094c..bc61374 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -30,18 +30,11 @@
/* Disable certain warnings in PC-Lint: */
/*lint --emacro( {534, 830}, H5P_DEFAULT, H5P_FILE_ACCESS, H5P_DATASET_XFER) */
-/*lint --emacro( {534, 830}, H5F_ACC_DEBUG, H5F_ACC_RDWR) */
/*lint --emacro( {534, 830}, H5FD_MULTI) */
/*lint -esym( 534, H5Eclear2, H5Epush2) */
#include "hdf5.h"
-/*
- * Define H5FD_MULTI_DEBUG if you want the ability to print debugging
- * messages to the standard error stream. Messages are only printed if the
- * file is opened with the H5F_ACC_DEBUG flag.
- */
-#define H5FD_MULTI_DEBUG
#ifndef FALSE
#define FALSE 0
@@ -796,19 +789,6 @@ H5FD_multi_sb_decode(H5FD_t *_file, const char *name, const unsigned char *buf)
* files at the end.
*/
if (map_changed) {
-#ifdef H5FD_MULTI_DEBUG
- if (file->flags & H5F_ACC_DEBUG) {
- fprintf(stderr, "H5FD_MULTI: member map override\n");
- fprintf(stderr, " old value: ");
- ALL_MEMBERS(mt) {
- fprintf(stderr, "%s%d", mt?", ":"", (int)(file->fa.memb_map[mt]));
- } END_MEMBERS;
- fprintf(stderr, "\n new value: ");
- ALL_MEMBERS(mt) {
- fprintf(stderr, "%s%d", mt?", ":"", (int)(map[mt]));
- } END_MEMBERS;
- }
-#endif
/* Commit map */
ALL_MEMBERS(mt) {
file->fa.memb_map[mt] = map[mt];
@@ -821,11 +801,6 @@ H5FD_multi_sb_decode(H5FD_t *_file, const char *name, const unsigned char *buf)
} END_MEMBERS;
ALL_MEMBERS(mt) {
if (!in_use[mt] && file->memb[mt]) {
-#ifdef H5FD_MULTI_DEBUG
- if (file->flags & H5F_ACC_DEBUG) {
- fprintf(stderr, "H5FD_MULTI: close member %d\n", (int)mt);
- }
-#endif
(void)H5FDclose(file->memb[mt]);
file->memb[mt] = NULL;
}
@@ -1109,20 +1084,10 @@ H5FD_multi_close(H5FD_t *_file)
/* Close as many members as possible */
ALL_MEMBERS(mt) {
if (file->memb[mt]) {
-#ifdef H5FD_MULTI_DEBUG
- if (file->flags & H5F_ACC_DEBUG) {
- fprintf(stderr, "H5FD_MULTI: closing member %d\n", (int)mt);
- }
-#endif
if (H5FDclose(file->memb[mt])<0) {
-#ifdef H5FD_MULTI_DEBUG
- if (file->flags & H5F_ACC_DEBUG) {
- fprintf(stderr, "H5FD_MULTI: close failed\n");
- }
-#endif
- nerrors++;
+ nerrors++;
} else {
- file->memb[mt] = NULL;
+ file->memb[mt] = NULL;
}
}
} END_MEMBERS;
@@ -1895,18 +1860,10 @@ open_members(H5FD_multi_t *file)
*/
sprintf(tmp, file->fa.memb_name[mt], file->name);
-#ifdef H5FD_MULTI_DEBUG
- if(file->flags & H5F_ACC_DEBUG)
- fprintf(stderr, "H5FD_MULTI: open member %d \"%s\"\n", (int)mt, tmp);
-#endif
H5E_BEGIN_TRY {
file->memb[mt] = H5FDopen(tmp, file->flags, file->fa.memb_fapl[mt], HADDR_UNDEF);
} H5E_END_TRY;
if(!file->memb[mt]) {
-#ifdef H5FD_MULTI_DEBUG
- if(file->flags & H5F_ACC_DEBUG)
- fprintf(stderr, "H5FD_MULTI: open failed for member %d\n", (int)mt);
-#endif
if(!file->fa.relax || (file->flags & H5F_ACC_RDWR))
nerrors++;
}
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index e98f0f4..0a7fe6c 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -109,7 +109,7 @@ H5_DLL herr_t H5FD_locate_signature(H5FD_t *file, const H5P_genplist_t *dxpl, ha
H5_DLL H5FD_class_t *H5FD_get_class(hid_t id);
H5_DLL hsize_t H5FD_sb_size(H5FD_t *file);
H5_DLL herr_t H5FD_sb_encode(H5FD_t *file, char *name/*out*/, uint8_t *buf);
-H5_DLL herr_t H5FD_sb_decode(H5FD_t *file, const char *name, const uint8_t *buf);
+H5_DLL herr_t H5FD_sb_load(H5FD_t *file, const char *name, const uint8_t *buf);
H5_DLL void *H5FD_fapl_get(H5FD_t *file);
H5_DLL herr_t H5FD_fapl_open(struct H5P_genplist_t *plist, hid_t driver_id, const void *driver_info);
H5_DLL herr_t H5FD_fapl_close(hid_t driver_id, void *fapl);
@@ -134,7 +134,7 @@ H5_DLL herr_t H5FD_read(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t typ
haddr_t addr, size_t size, void *buf/*out*/);
H5_DLL herr_t H5FD_write(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
-H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
+H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, unsigned closing);
H5_DLL herr_t H5FD_truncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FD_get_fileno(const H5FD_t *file, unsigned long *filenum);
H5_DLL herr_t H5FD_get_vfd_handle(H5FD_t *file, hid_t fapl, void** file_handle);
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index 1130789..b938c41 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -358,7 +358,7 @@ H5FD_sec2_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate file struct")
file->fd = fd;
- H5_ASSIGN_OVERFLOW(file->eof, sb.st_size, h5_stat_size_t, haddr_t);
+ H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
file->pos = HADDR_UNDEF;
file->op = OP_UNKNOWN;
#ifdef H5_HAVE_WIN32_API
diff --git a/src/H5FS.c b/src/H5FS.c
index 9dcf081..67ec838 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -123,7 +123,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
/*
* Allocate free space structure
*/
- if(NULL == (fspace = H5FS_new(f, nclasses, classes, cls_init_udata)))
+ if(NULL == (fspace = H5FS__new(f, nclasses, classes, cls_init_udata)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for free space free list")
/* Initialize creation information for free space manager */
@@ -161,7 +161,7 @@ HDfprintf(stderr, "%s: fspace = %p, fspace->addr = %a\n", FUNC, fspace, fspace->
done:
if(!ret_value && fspace)
- if(H5FS_hdr_dest(fspace) < 0)
+ if(H5FS__hdr_dest(fspace) < 0)
HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space header")
#ifdef H5FS_DEBUG
@@ -573,7 +573,7 @@ HDfprintf(stderr, "%s: Leaving, ret_value = %d, fspace->rc = %u\n", FUNC, ret_va
/*-------------------------------------------------------------------------
- * Function: H5FS_new
+ * Function: H5FS__new
*
* Purpose: Create new free space manager structure
*
@@ -586,14 +586,14 @@ HDfprintf(stderr, "%s: Leaving, ret_value = %d, fspace->rc = %u\n", FUNC, ret_va
*-------------------------------------------------------------------------
*/
H5FS_t *
-H5FS_new(const H5F_t *f, uint16_t nclasses, const H5FS_section_class_t *classes[],
+H5FS__new(const H5F_t *f, uint16_t nclasses, const H5FS_section_class_t *classes[],
void *cls_init_udata)
{
H5FS_t *fspace = NULL; /* Free space manager */
size_t u; /* Local index variable */
H5FS_t *ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Check arguments. */
HDassert(nclasses == 0 || (nclasses > 0 && classes));
@@ -631,7 +631,7 @@ H5FS_new(const H5F_t *f, uint16_t nclasses, const H5FS_section_class_t *classes[
/* Initialize non-zero information for new free space manager */
fspace->addr = HADDR_UNDEF;
- fspace->hdr_size = H5FS_HEADER_SIZE(f);
+ fspace->hdr_size = (size_t)H5FS_HEADER_SIZE(f);
fspace->sect_addr = HADDR_UNDEF;
/* Set return value */
@@ -649,7 +649,7 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5FS_new() */
+} /* H5FS__new() */
/*-------------------------------------------------------------------------
@@ -763,7 +763,7 @@ HDfprintf(stderr, "%s: Entering, fpace->addr = %a, fspace->rc = %u\n", FUNC, fsp
HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNPIN, FAIL, "unable to unpin free space header")
} /* end if */
else {
- if(H5FS_hdr_dest(fspace) < 0)
+ if(H5FS__hdr_dest(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTCLOSEOBJ, FAIL, "unable to destroy free space header")
} /* end else */
} /* end if */
@@ -1005,7 +1005,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FS_hdr_dest
+ * Function: H5FS__hdr_dest
*
* Purpose: Destroys a free space header in memory.
*
@@ -1018,12 +1018,12 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FS_hdr_dest(H5FS_t *fspace)
+H5FS__hdr_dest(H5FS_t *fspace)
{
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/*
* Check arguments.
@@ -1047,7 +1047,7 @@ H5FS_hdr_dest(H5FS_t *fspace)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_hdr_dest() */
+} /* end H5FS__hdr_dest() */
/*-------------------------------------------------------------------------
diff --git a/src/H5FScache.c b/src/H5FScache.c
index e03a0a4..5ab788e 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -38,7 +38,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FSpkg.h" /* File free space */
#include "H5MFprivate.h" /* File memory management */
-#include "H5VMprivate.h" /* Vectors and arrays */
+#include "H5VMprivate.h" /* Vectors and arrays */
#include "H5WBprivate.h" /* Wrapped Buffers */
/****************/
@@ -167,7 +167,7 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata);
/* Allocate a new free space manager */
- if(NULL == (fspace = H5FS_new(udata->f, udata->nclasses, udata->classes, udata->cls_init_udata)))
+ if(NULL == (fspace = H5FS__new(udata->f, udata->nclasses, udata->classes, udata->cls_init_udata)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Set free space manager's internal information */
@@ -260,7 +260,7 @@ done:
if(wb && H5WB_unwrap(wb) < 0)
HDONE_ERROR(H5E_FSPACE, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && fspace)
- if(H5FS_hdr_dest(fspace) < 0)
+ if(H5FS__hdr_dest(fspace) < 0)
HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space header")
FUNC_LEAVE_NOAPI(ret_value)
@@ -463,7 +463,7 @@ H5FS_cache_hdr_dest(H5F_t *f, H5FS_t *fspace)
} /* end if */
/* Destroy free space header */
- if(H5FS_hdr_dest(fspace) < 0)
+ if(H5FS__hdr_dest(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to destroy free space header")
done:
@@ -578,7 +578,7 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Allocate space for the buffer to serialize the sections into */
- H5_ASSIGN_OVERFLOW(/* To: */ old_sect_size, /* From: */ udata->fspace->sect_size, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(old_sect_size, size_t, udata->fspace->sect_size, hsize_t);
if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)udata->fspace->sect_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
@@ -865,14 +865,12 @@ H5FS_cache_sinfo_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H
udata.sect_cnt_size = H5VM_limit_enc_size((uint64_t)sinfo->fspace->serial_sect_count);
/* Iterate over all the bins */
- for(bin = 0; bin < sinfo->nbins; bin++) {
+ for(bin = 0; bin < sinfo->nbins; bin++)
/* Check if there are any sections in this bin */
- if(sinfo->bins[bin].bin_list) {
+ if(sinfo->bins[bin].bin_list)
/* Iterate over list of section size nodes for bin */
if(H5SL_iterate(sinfo->bins[bin].bin_list, H5FS_sinfo_serialize_node_cb, &udata) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over section size nodes")
- } /* end if */
- } /* end for */
/* Compute checksum */
metadata_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
@@ -1038,7 +1036,7 @@ H5FS_cache_sinfo_size(const H5F_t UNUSED *f, const H5FS_sinfo_t *sinfo, size_t *
HDassert(size_ptr);
/* Set size value */
- H5_ASSIGN_OVERFLOW(/* To: */ *size_ptr, /* From: */ sinfo->fspace->alloc_sect_size, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(*size_ptr, size_t, sinfo->fspace->alloc_sect_size, hsize_t);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5FS_cache_sinfo_size() */
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index b6c240e..78afde8 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -221,7 +221,7 @@ H5FL_EXTERN(H5FS_t);
/******************************/
/* Free space manager header routines */
-H5_DLL H5FS_t *H5FS_new(const H5F_t *f, uint16_t nclasses,
+H5_DLL H5FS_t *H5FS__new(const H5F_t *f, uint16_t nclasses,
const H5FS_section_class_t *classes[], void *cls_init_udata);
H5_DLL herr_t H5FS_incr(H5FS_t *fspace);
H5_DLL herr_t H5FS_decr(H5FS_t *fspace);
@@ -231,7 +231,7 @@ H5_DLL herr_t H5FS_dirty(H5FS_t *fspace);
H5_DLL H5FS_sinfo_t *H5FS_sinfo_new(H5F_t *f, H5FS_t *fspace);
/* Routines for destroying structures */
-H5_DLL herr_t H5FS_hdr_dest(H5FS_t *hdr);
+H5_DLL herr_t H5FS__hdr_dest(H5FS_t *hdr);
H5_DLL herr_t H5FS_sinfo_dest(H5FS_sinfo_t *sinfo);
/* Sanity check routines */
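
The renames in the H5FS hunks above (H5FS_new to H5FS__new, H5FS_hdr_dest to H5FS__hdr_dest) and the matching switch from FUNC_ENTER_NOAPI_NOINIT to FUNC_ENTER_PACKAGE follow the library convention that routines visible only within a package carry a double-underscore prefix and use the package-scope entry macro. A minimal sketch of that shape is below; the routine name and its check are made up for illustration, only the macros and types are taken from the surrounding code.

    /* Illustrative only: a hypothetical package-scoped H5FS routine written in
     * the shape the renamed routines above now use. */
    herr_t
    H5FS__do_something(H5FS_t *fspace)
    {
        herr_t ret_value = SUCCEED;         /* Return value */

        FUNC_ENTER_PACKAGE

        /* Check arguments (internal callers only, so an assert is typical) */
        HDassert(fspace);

        /* ... package-internal work on the free space manager ... */
        if(!H5F_addr_defined(fspace->addr))
            HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, "free space manager has no file address")

    done:
        FUNC_LEAVE_NOAPI(ret_value)
    } /* end H5FS__do_something() */
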
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
index 7eb75bd..7cd9abb 100644
--- a/src/H5Faccum.c
+++ b/src/H5Faccum.c
@@ -172,7 +172,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
/* Read the part before the metadata accumulator */
if(addr < accum->loc) {
/* Set the amount to read */
- H5_ASSIGN_OVERFLOW(amount_before, (accum->loc - addr), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(amount_before, size_t, (accum->loc - addr), hsize_t);
/* Make room for the metadata to read in */
HDmemmove(accum->buf + amount_before, accum->buf, accum->size);
@@ -193,7 +193,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
size_t amount_after; /* Amount to read at a time */
/* Set the amount to read */
- H5_ASSIGN_OVERFLOW(amount_after, ((addr + size) - (accum->loc + accum->size)), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(amount_after, size_t, ((addr + size) - (accum->loc + accum->size)), hsize_t);
/* Dispatch to driver */
if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
@@ -536,14 +536,14 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
size_t old_offset; /* Offset of old data within the accumulator buffer */
/* Calculate the amount we will need to add to the accumulator size, based on the amount of overlap */
- H5_ASSIGN_OVERFLOW(add_size, (accum->loc - addr), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(add_size, size_t, (accum->loc - addr), hsize_t);
/* Check if we need to adjust accumulator size */
if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, add_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Calculate the proper offset of the existing metadata */
- H5_ASSIGN_OVERFLOW(old_offset, (addr + size) - accum->loc, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(old_offset, size_t, (addr + size) - accum->loc, hsize_t);
/* Move the existing metadata to the proper location */
HDmemmove(accum->buf + size, accum->buf + old_offset, (accum->size - old_offset));
@@ -576,7 +576,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
size_t dirty_off; /* Offset of dirty region */
/* Calculate the amount we will need to add to the accumulator size, based on the amount of overlap */
- H5_ASSIGN_OVERFLOW(add_size, (addr + size) - (accum->loc + accum->size), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(add_size, size_t, (addr + size) - (accum->loc + accum->size), hsize_t);
/* Check if we need to adjust accumulator size */
if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, add_size) < 0)
@@ -885,7 +885,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t UNUSED type, haddr_t a
size_t new_accum_size; /* Size of new accumulator buffer */
/* Calculate the size of the overlap with the accumulator, etc. */
- H5_ASSIGN_OVERFLOW(overlap_size, (addr + size) - accum->loc, haddr_t, size_t);
+ H5_CHECKED_ASSIGN(overlap_size, size_t, (addr + size) - accum->loc, haddr_t);
new_accum_size = accum->size - overlap_size;
/* Move the accumulator buffer information to eliminate the freed block */
@@ -919,7 +919,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t UNUSED type, haddr_t a
haddr_t dirty_start = accum->loc + accum->dirty_off;
/* Calculate the size of the overlap with the accumulator */
- H5_ASSIGN_OVERFLOW(overlap_size, (accum->loc + accum->size) - addr, haddr_t, size_t);
+ H5_CHECKED_ASSIGN(overlap_size, size_t, (accum->loc + accum->size) - addr, haddr_t);
/* Check if block to free begins before end of dirty region */
if(accum->dirty && H5F_addr_lt(addr, dirty_end)) {
diff --git a/src/H5Fdeprec.c b/src/H5Fdeprec.c
index c5a500a..7e2ea17 100644
--- a/src/H5Fdeprec.c
+++ b/src/H5Fdeprec.c
@@ -181,7 +181,7 @@ H5Fget_info1(hid_t obj_id, H5F_info1_t *finfo)
HDmemset(finfo, 0, sizeof(*finfo));
/* Get the size of the superblock extension */
- if(H5F_super_size(f, H5AC_ind_dxpl_id, NULL, &finfo->super_ext_size) < 0)
+ if(H5F__super_size(f, H5AC_ind_dxpl_id, NULL, &finfo->super_ext_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "Unable to retrieve superblock extension size")
/* Check for SOHM info */
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 3600476..72f5103 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -1074,7 +1074,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
/* Initialize information about the superblock and allocate space for it */
/* (Writes superblock extension messages, if there are any) */
- if(H5F_super_init(file, dxpl_id) < 0)
+ if(H5F__super_init(file, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to allocate file superblock")
/* Create and open the root group */
@@ -1085,7 +1085,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create/open root group")
} else if (1 == shared->nrefs) {
/* Read the superblock if it hasn't been read before. */
- if(H5F_super_read(file, dxpl_id) < 0)
+ if(H5F__super_read(file, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock")
/* Open the root group */
diff --git a/src/H5Fio.c b/src/H5Fio.c
index 1d05cd0..763bd69 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -100,6 +100,9 @@ H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size,
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+#ifdef QAK
+HDfprintf(stderr, "%s: read from addr = %a, size = %Zu\n", FUNC, addr, size);
+#endif /* QAK */
HDassert(f);
HDassert(f->shared);
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 53fe0d9..a645fd3 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -306,10 +306,10 @@ H5_DLL int H5F_term_unmount_cb(void *obj_ptr, hid_t obj_id, void *key);
H5_DLL herr_t H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nopen_objs);
/* Superblock related routines */
-H5_DLL herr_t H5F_super_init(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5F_super_read(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5F_super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size);
-H5_DLL herr_t H5F_super_free(H5F_super_t *sblock);
+H5_DLL herr_t H5F__super_init(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size);
+H5_DLL herr_t H5F__super_free(H5F_super_t *sblock);
/* Superblock extension related routines */
H5_DLL herr_t H5F_super_ext_open(H5F_t *f, haddr_t ext_addr, H5O_loc_t *ext_ptr);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index ab07526..b04f289 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -244,7 +244,6 @@
#define H5F_addr_overflow(X,Z) (HADDR_UNDEF==(X) || \
HADDR_UNDEF==(X)+(haddr_t)(Z) || \
(X)+(haddr_t)(Z)<(X))
-#define H5F_addr_hash(X,M) ((unsigned)((X)%(M)))
#define H5F_addr_defined(X) ((X)!=HADDR_UNDEF)
/* The H5F_addr_eq() macro guarantees that Y is not HADDR_UNDEF by making
* certain that X is not HADDR_UNDEF and then checking that X equals Y
@@ -573,6 +572,12 @@ typedef struct H5F_io_info_t {
const struct H5P_genplist_t *dxpl; /* DXPL object */
} H5F_io_info_t;
+/* Concise info about a block of bytes in a file */
+typedef struct H5F_block_t {
+ haddr_t offset; /* Offset of the block in the file */
+ hsize_t length; /* Length of the block in the file */
+} H5F_block_t;
+
/*****************************/
/* Library-private Variables */
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index f32b3e0..aa6cc2a 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -41,12 +41,14 @@
* We're assuming that these constants are used rather early in the hdf5
* session.
*
+ * Note that H5F_ACC_DEBUG is deprecated (nonfunctional) but retained as a
+ * symbol for backward compatibility.
*/
#define H5F_ACC_RDONLY (H5CHECK 0x0000u) /*absence of rdwr => rd-only */
#define H5F_ACC_RDWR (H5CHECK 0x0001u) /*open for read and write */
#define H5F_ACC_TRUNC (H5CHECK 0x0002u) /*overwrite existing files */
#define H5F_ACC_EXCL (H5CHECK 0x0004u) /*fail if file already exists*/
-#define H5F_ACC_DEBUG (H5CHECK 0x0008u) /*print debug info */
+/* NOTE: 0x0008u was H5F_ACC_DEBUG, now deprecated */
#define H5F_ACC_CREAT (H5CHECK 0x0010u) /*create non-existing files */
/* Value passed to H5Pset_elink_acc_flags to cause flags to be taken from the
@@ -221,7 +223,7 @@ H5_DLL herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag);
#ifndef H5_NO_DEPRECATED_SYMBOLS
/* Macros */
-
+#define H5F_ACC_DEBUG (H5CHECK 0x0000u) /*print debug info (deprecated)*/
/* Typedefs */
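
The H5Fpublic.h hunk above removes H5F_ACC_DEBUG from the set of functional access flags (0x0008u is now reserved) and re-defines it in the deprecated-symbols section, guarded by #ifndef H5_NO_DEPRECATED_SYMBOLS, with the value 0x0000u, so existing callers still compile but the flag is a no-op. A minimal usage sketch, assuming a pre-existing file named example.h5, is:

    #include "hdf5.h"

    int
    main(void)
    {
        /* Illustrative only: OR-ing in H5F_ACC_DEBUG no longer has any effect,
         * since the deprecated symbol now expands to 0x0000u. */
        hid_t fid = H5Fopen("example.h5", H5F_ACC_RDWR | H5F_ACC_DEBUG, H5P_DEFAULT);

        if(fid < 0)
            return 1;

        H5Fclose(fid);
        return 0;
    }
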
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index dd80a46..6db631e 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -237,7 +237,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_super_read
+ * Function: H5F__super_read
*
* Purpose: Reads the superblock from the file or from the BUF. If
* ADDR is a valid address, then it reads it from the file.
@@ -254,17 +254,17 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_read(H5F_t *f, hid_t dxpl_id)
+H5F__super_read(H5F_t *f, hid_t dxpl_id)
{
H5P_genplist_t *dxpl; /* DXPL object */
- H5F_super_t * sblock = NULL; /* superblock structure */
+ H5F_super_t * sblock = NULL; /* Superblock structure */
unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */
haddr_t super_addr; /* Absolute address of superblock */
- H5AC_protect_t rw; /* read/write permissions for file */
+ H5AC_protect_t rw; /* Read/write permissions for file */
hbool_t dirtied = FALSE; /* Bool for sblock protect call */
- herr_t ret_value = SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
+ FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
/* Get the DXPL plist object for DXPL ID */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
@@ -291,7 +291,7 @@ H5F_super_read(H5F_t *f, hid_t dxpl_id)
/* Look up the superblock */
if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &dirtied, rw)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
+ HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
/* Mark the superblock dirty if it was modified during loading or VFD indicated to do so */
if((H5AC_WRITE == rw) && (dirtied || H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_SBLK_LOAD)))
@@ -299,7 +299,7 @@ H5F_super_read(H5F_t *f, hid_t dxpl_id)
/* Pin the superblock in the cache */
if(H5AC_pin_protected_entry(sblock) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTPIN, FAIL, "unable to pin superblock")
+ HGOTO_ERROR(H5E_FILE, H5E_CANTPIN, FAIL, "unable to pin superblock")
/* Set the pointer to the pinned superblock */
f->shared->sblock = sblock;
@@ -307,14 +307,14 @@ H5F_super_read(H5F_t *f, hid_t dxpl_id)
done:
/* Release the superblock */
if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
+ HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5F_super_read() */
+} /* end H5F__super_read() */
/*-------------------------------------------------------------------------
- * Function: H5F_super_init
+ * Function: H5F__super_init
*
* Purpose: Allocates the superblock for the file and initializes
* information about the superblock in memory. Writes extension
@@ -330,7 +330,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_init(H5F_t *f, hid_t dxpl_id)
+H5F__super_init(H5F_t *f, hid_t dxpl_id)
{
H5F_super_t *sblock = NULL; /* Superblock cache structure */
hbool_t sblock_in_cache = FALSE; /* Whether the superblock has been inserted into the metadata cache */
@@ -344,7 +344,7 @@ H5F_super_init(H5F_t *f, hid_t dxpl_id)
hbool_t ext_created = FALSE; /* Whether the extension has been created */
herr_t ret_value = SUCCEED; /* Return Value */
- FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
+ FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
/* Allocate space for the superblock */
if(NULL == (sblock = H5FL_CALLOC(H5F_super_t)))
@@ -434,7 +434,7 @@ H5F_super_init(H5F_t *f, hid_t dxpl_id)
superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(super_vers, f);
/* Compute the size of the driver information block */
- H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
if(driver_size > 0) {
driver_size += H5F_DRVINFOBLOCK_HDR_SIZE;
@@ -594,7 +594,7 @@ done:
} /* end if */
else
/* Free superblock */
- if(H5F_super_free(sblock) < 0)
+ if(H5F__super_free(sblock) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "unable to destroy superblock")
/* Reset variables in file structure */
@@ -603,7 +603,7 @@ done:
} /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5F_super_init() */
+} /* end H5F__super_init() */
/*-------------------------------------------------------------------------
@@ -641,7 +641,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_super_free
+ * Function: H5F__super_free
*
 * Purpose: Destroys the file's superblock
*
@@ -654,9 +654,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_free(H5F_super_t *sblock)
+H5F__super_free(H5F_super_t *sblock)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_PACKAGE_NOERR
/* Sanity check */
HDassert(sblock);
@@ -668,11 +668,11 @@ H5F_super_free(H5F_super_t *sblock)
sblock = (H5F_super_t *)H5FL_FREE(H5F_super_t, sblock);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5F_super_free() */
+} /* H5F__super_free() */
/*-------------------------------------------------------------------------
- * Function: H5F_super_size
+ * Function: H5F__super_size
*
* Purpose: Get storage size of the superblock and superblock extension
*
@@ -685,11 +685,11 @@ H5F_super_free(H5F_super_t *sblock)
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size)
+H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(f);
@@ -725,7 +725,7 @@ H5F_super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5F_super_size() */
+} /* H5F__super_size() */
/*-------------------------------------------------------------------------
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index f3931cb..46ebe6e 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -13,6 +13,17 @@
* access to either file, you may request a copy from help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Fsuper_cache.c
+ * Aug 15 2009
+ * Quincey Koziol <koziol@hdfgroup.org>
+ *
+ * Purpose: Implement file superblock & driver info metadata cache methods.
+ *
+ *-------------------------------------------------------------------------
+ */
+
/****************/
/* Module Setup */
/****************/
@@ -133,7 +144,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata)
FUNC_ENTER_NOAPI_NOINIT
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(H5F_addr_eq(addr, 0));
HDassert(dirtied);
@@ -371,14 +382,6 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata)
drv_name[8] = '\0';
p += 8; /* advance past name/version */
- /* Check if driver matches driver information saved. Unfortunately, we can't push this
- * function to each specific driver because we're checking if the driver is correct.
- */
- if(!HDstrncmp(drv_name, "NCSAfami", (size_t)8) && HDstrcmp(lf->cls->name, "family"))
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "family driver should be used")
- if(!HDstrncmp(drv_name, "NCSAmult", (size_t)8) && HDstrcmp(lf->cls->name, "multi"))
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "multi driver should be used")
-
/* Read in variable-sized portion of driver info block */
if(H5FD_set_eoa(lf, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drv_variable_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
@@ -386,7 +389,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read file driver information")
/* Decode driver information */
- if(H5FD_sb_decode(lf, drv_name, p) < 0)
+ if(H5FD_sb_load(lf, drv_name, p) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to decode driver information")
} /* end if */
} /* end if */
@@ -535,16 +538,8 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata)
if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "driver info message not present")
- /* Check if driver matches driver information saved. Unfortunately, we can't push this
- * function to each specific driver because we're checking if the driver is correct.
- */
- if(!HDstrncmp(drvinfo.name, "NCSAfami", (size_t)8) && HDstrcmp(lf->cls->name, "family"))
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "family driver should be used")
- if(!HDstrncmp(drvinfo.name, "NCSAmult", (size_t)8) && HDstrcmp(lf->cls->name, "multi"))
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "multi driver should be used")
-
/* Decode driver information */
- if(H5FD_sb_decode(lf, drvinfo.name, drvinfo.buf) < 0)
+ if(H5FD_sb_load(lf, drvinfo.name, drvinfo.buf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to decode driver information")
/* Reset driver info message */
@@ -618,7 +613,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata)
done:
/* Release the [possibly partially initialized] superblock on errors */
if(!ret_value && sblock)
- if(H5F_super_free(sblock) < 0)
+ if(H5F__super_free(sblock) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTFREE, NULL, "unable to destroy superblock data")
FUNC_LEAVE_NOAPI(ret_value)
@@ -723,7 +718,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
HGOTO_ERROR(H5E_FILE, H5E_CANTENCODE, FAIL, "can't encode root group symbol table entry")
/* Encode the driver information block. */
- H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
/* Checking whether driver block address is defined here is to handle backward
* compatibility. If the file was created with v1.6 library or earlier and no
@@ -779,7 +774,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
* ultimately match it. */
if ((rel_eof = H5FD_get_eoa(f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
- H5F_addr_encode(f, &p, rel_eof + sblock->base_addr);
+ H5F_addr_encode(f, &p, (rel_eof + sblock->base_addr));
/* Retrieve information for root group */
if(NULL == (root_oloc = H5G_oloc(f->shared->root_grp)))
@@ -799,7 +794,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
} /* end else */
/* Retrieve the total size of the superblock info */
- H5_ASSIGN_OVERFLOW(superblock_size, (p - buf), ptrdiff_t, size_t);
+ H5_CHECKED_ASSIGN(superblock_size, size_t, (p - buf), ptrdiff_t);
/* Double check we didn't overrun the block (unlikely) */
HDassert(superblock_size <= sizeof(buf));
@@ -824,7 +819,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
/* Check for ignoring the driver info for this file */
if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
/* Check for driver info message */
- H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
if(driver_size > 0) {
H5O_drvinfo_t drvinfo; /* Driver info */
uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
@@ -887,7 +882,7 @@ H5F_sblock_dest(H5F_t UNUSED *f, H5F_super_t* sblock)
HDassert(sblock);
/* Free superblock */
- if(H5F_super_free(sblock) < 0)
+ if(H5F__super_free(sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "unable to destroy superblock")
done:
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index 15dbf65..d1923a6 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -145,10 +145,10 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
/* Allocate symbol table data structures */
if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- sym->node_size = H5G_NODE_SIZE(f);
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ sym->node_size = (size_t)(H5G_NODE_SIZE(f));
if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Wrap the local buffer for serialized node info */
if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
@@ -167,12 +167,12 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
/* magic */
if(HDmemcmp(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node signature")
- p += 4;
+ HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node signature")
+ p += H5_SIZEOF_MAGIC;
/* version */
if(H5G_NODE_VERS != *p++)
- HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node version")
+ HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node version")
/* reserved */
p++;
@@ -182,7 +182,7 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
/* entries */
if(H5G__ent_decode_vec(f, &p, sym->entry, sym->nsyms) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "unable to decode symbol table entries")
+ HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "unable to decode symbol table entries")
/* Set return value */
ret_value = sym;
@@ -248,7 +248,7 @@ H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5G_node_
/* magic number */
HDmemcpy(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
- p += 4;
+ p += H5_SIZEOF_MAGIC;
/* version number */
*p++ = H5G_NODE_VERS;
diff --git a/src/H5Gdeprec.c b/src/H5Gdeprec.c
index ca9e7fd..8f2fe54 100644
--- a/src/H5Gdeprec.c
+++ b/src/H5Gdeprec.c
@@ -261,7 +261,7 @@ H5Gcreate1(hid_t loc_id, const char *name, size_t size_hint)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get group info")
/* Set the non-default local heap size hint */
- H5_ASSIGN_OVERFLOW(ginfo.lheap_size_hint, size_hint, size_t, uint32_t);
+ H5_CHECKED_ASSIGN(ginfo.lheap_size_hint, uint32_t, size_hint, size_t);
if(H5P_set(gc_plist, H5G_CRT_GROUP_INFO_NAME, &ginfo) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set group info")
} /* end if */
diff --git a/src/H5Glink.c b/src/H5Glink.c
index f934052..8b258b5 100644
--- a/src/H5Glink.c
+++ b/src/H5Glink.c
@@ -467,7 +467,7 @@ H5G__link_iterate_table(const H5G_link_table_t *ltable, hsize_t skip,
*last_lnk += skip;
/* Iterate over link messages */
- H5_ASSIGN_OVERFLOW(/* To: */ u, /* From: */ skip, /* From: */ hsize_t, /* To: */ size_t)
+ H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t)
for(; u < ltable->nlinks && !ret_value; u++) {
/* Make the callback */
ret_value = (op)(&(ltable->lnks[u]), op_data);
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 0f73c68..e5ea437 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -40,7 +40,7 @@
#include "H5HFpkg.h" /* Fractal heaps */
#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
-#include "H5VMprivate.h" /* Vectors and arrays */
+#include "H5VMprivate.h" /* Vectors and arrays */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -75,8 +75,8 @@
/********************/
/* Local encode/decode routines */
-static herr_t H5HF_dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable);
-static herr_t H5HF_dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable);
+static herr_t H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable);
+static herr_t H5HF__dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable);
/* Metadata cache (H5AC) callbacks */
static H5HF_hdr_t *H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
@@ -98,28 +98,16 @@ static herr_t H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t
static herr_t H5HF_cache_dblock_size(const H5F_t *f, const H5HF_direct_t *dblock, size_t *size_ptr);
-/*********************************/
/* Debugging Function Prototypes */
-/*********************************/
#ifndef NDEBUG
-static herr_t H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_hdr_t * hdr,
- hbool_t *clean_ptr);
-static herr_t H5HF_cache_verify_iblock_descendants_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_indirect_t * iblock,
- unsigned * iblock_status_ptr,
- hbool_t *clean_ptr);
-static herr_t H5HF_cache_verify_iblocks_dblocks_clean(H5F_t *f,
- H5HF_indirect_t * iblock,
- hbool_t *clean_ptr,
- hbool_t *has_dblocks_ptr);
-static herr_t H5HF_cache_verify_descendant_iblocks_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_indirect_t * iblock,
- hbool_t *clean_ptr,
- hbool_t *has_iblocks_ptr);
+static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_hdr_t *hdr, hbool_t *clean);
+static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean);
+static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
+ H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_dblocks);
+static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks);
#endif /* NDEBUG */
@@ -176,7 +164,7 @@ H5FL_BLK_DEFINE(direct_block);
/*-------------------------------------------------------------------------
- * Function: H5HF_dtable_decode
+ * Function: H5HF__dtable_decode
*
* Purpose: Decodes the metadata for a doubling table
*
@@ -191,9 +179,9 @@ H5FL_BLK_DEFINE(direct_block);
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable)
+H5HF__dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(f);
@@ -222,11 +210,11 @@ H5HF_dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable)
UINT16DECODE(*pp, dtable->curr_root_rows);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5HF_dtable_decode() */
+} /* end H5HF__dtable_decode() */
/*-------------------------------------------------------------------------
- * Function: H5HF_dtable_encode
+ * Function: H5HF__dtable_encode
*
* Purpose: Encodes the metadata for a doubling table
*
@@ -241,9 +229,9 @@ H5HF_dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
+H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(f);
@@ -272,7 +260,7 @@ H5HF_dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
UINT16ENCODE(*pp, dtable->curr_root_rows);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5HF_dtable_encode() */
+} /* end H5HF__dtable_encode() */
/*-------------------------------------------------------------------------
@@ -313,7 +301,7 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space for the fractal heap data structure */
if(NULL == (hdr = H5HF_hdr_alloc(udata->f)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Wrap the local buffer for serialized header info */
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
@@ -335,12 +323,12 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Magic number */
if(HDmemcmp(p, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap header signature")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap header signature")
p += H5_SIZEOF_MAGIC;
/* Version */
if(*p++ != H5HF_HDR_VERSION)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap header version")
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap header version")
/* General heap information */
UINT16DECODE(p, hdr->id_len); /* Heap ID length */
@@ -373,7 +361,7 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
H5F_DECODE_LENGTH(udata->f, p, hdr->tiny_nobjs);
/* Managed objects' doubling-table info */
- if(H5HF_dtable_decode(hdr->f, &p, &(hdr->man_dtable)) < 0)
+ if(H5HF__dtable_decode(hdr->f, &p, &(hdr->man_dtable)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, NULL, "unable to encode managed obj. doubling table info")
/* Sanity check */
@@ -443,11 +431,11 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Verify checksum */
if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap header")
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap header")
/* Finish initialization of heap header */
if(H5HF_hdr_finish_init(hdr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't finish initializing shared fractal heap header")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't finish initializing shared fractal heap header")
/* Set return value */
ret_value = hdr;
@@ -499,30 +487,27 @@ H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5H
uint32_t metadata_chksum; /* Computed metadata checksum value */
#ifndef NDEBUG
- /* verify that flush dependencies are working correctly. Do this
+{
+ /* Verify that flush dependencies are working correctly. Do this
* by verifying that either:
*
* 1) the header has a root iblock, and that the root iblock and all
- * of its children are clean, or
+ * of its children are clean, or
*
- * 2) The header has a root dblock, which is clean, or
+ * 2) The header has a root dblock, which is clean, or
*
* 3) The heap is empty, and thus the header has neither a root
- * iblock no a root dblock. In this case, the flush ordering
+ * iblock nor a root dblock. In this case, the flush ordering
* constraint is met by default.
*
- * Do this with a call to H5HF_cache_verify_hdr_descendants_clean().
+ * Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
*/
hbool_t descendants_clean = TRUE;
- if ( H5HF_cache_verify_hdr_descendants_clean(f, dxpl_id, hdr,
- &descendants_clean) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "can't verify hdr descendants clean.")
-
- HDassert( descendants_clean );
-
+ if(H5HF__cache_verify_hdr_descendants_clean(f, dxpl_id, hdr, &descendants_clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
+ HDassert(descendants_clean);
+}
#endif /* NDEBUG */
/* Set the shared heap header's file context for this operation */
@@ -557,8 +542,8 @@ H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5H
/* (bit 0: "huge" object IDs have wrapped) */
/* (bit 1: checksum direct blocks) */
heap_flags = 0;
- heap_flags |= (hdr->huge_ids_wrapped ? H5HF_HDR_FLAGS_HUGE_ID_WRAPPED : 0);
- heap_flags |= (hdr->checksum_dblocks ? H5HF_HDR_FLAGS_CHECKSUM_DBLOCKS : 0);
+ heap_flags = (uint8_t)(heap_flags | (hdr->huge_ids_wrapped ? H5HF_HDR_FLAGS_HUGE_ID_WRAPPED : 0));
+ heap_flags = (uint8_t)(heap_flags | (hdr->checksum_dblocks ? H5HF_HDR_FLAGS_CHECKSUM_DBLOCKS : 0));
*p++ = heap_flags;
/* "Huge" object information */
@@ -581,7 +566,7 @@ H5HF_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5H
H5F_ENCODE_LENGTH(f, p, hdr->tiny_nobjs);
/* Managed objects' doubling-table info */
- if(H5HF_dtable_encode(hdr->f, &p, &(hdr->man_dtable)) < 0)
+ if(H5HF__dtable_encode(hdr->f, &p, &(hdr->man_dtable)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "unable to encode managed obj. doubling table info")
/* Check for I/O filter information to encode */
@@ -780,7 +765,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space for the fractal heap indirect block */
if(NULL == (iblock = H5FL_CALLOC(H5HF_indirect_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Get the pointer to the shared heap header */
hdr = udata->par_info->hdr;
@@ -791,7 +776,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Share common heap information */
iblock->hdr = hdr;
if(H5HF_hdr_incr(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
/* Set block's internal information */
iblock->rc = 0;
@@ -818,17 +803,17 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Magic number */
if(HDmemcmp(p, H5HF_IBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap indirect block signature")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap indirect block signature")
p += H5_SIZEOF_MAGIC;
/* Version */
if(*p++ != H5HF_IBLOCK_VERSION)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
/* Address of heap that owns this block */
H5F_addr_decode(udata->f, &p, &heap_addr);
if(H5F_addr_ne(heap_addr, hdr->heap_addr))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
/* Address of parent block */
iblock->parent = udata->par_info->iblock;
@@ -856,9 +841,10 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate & decode child block entry tables */
HDassert(iblock->nrows > 0);
if(NULL == (iblock->ents = H5FL_SEQ_MALLOC(H5HF_indirect_ent_t, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for direct entries")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for direct entries")
+
if(hdr->filter_len > 0) {
- unsigned dir_rows; /* Number of direct rows in this indirect block */
+ unsigned dir_rows; /* Number of direct rows in this indirect block */
/* Compute the number of direct rows for this indirect block */
dir_rows = MIN(iblock->nrows, hdr->man_dtable.max_direct_rows);
@@ -869,6 +855,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end if */
else
iblock->filt_ents = NULL;
+
for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) {
/* Decode child block address */
H5F_addr_decode(udata->f, &p, &(iblock->ents[u].addr));
@@ -903,7 +890,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end for */
/* Sanity check */
- HDassert(iblock->nchildren); /* indirect blocks w/no children should have been deleted */
+ HDassert(iblock->nchildren); /* indirect blocks w/no children should have been deleted */
/* Compute checksum on indirect block */
computed_chksum = H5_checksum_metadata(buf, (size_t)(p - (const uint8_t *)buf), 0);
@@ -916,7 +903,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Verify checksum */
if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
/* Check if we have any indirect block children */
if(iblock->nrows > hdr->man_dtable.max_direct_rows) {
@@ -986,31 +973,26 @@ H5HF_cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
size_t u; /* Local index variable */
#ifndef NDEBUG
- /* verify that flush dependencies are working correctly. Do this
+{
+ /* Verify that flush dependencies are working correctly. Do this
* by verifying that all children of this iblock are clean.
*/
hbool_t descendants_clean = TRUE;
unsigned iblock_status;
- if ( H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0 )
-
+ if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
- /* since the current iblock is the guest of honor in a flush, we know
+ /* since the current iblock is the guest of honor in a flush, we know
* that it is locked into the cache for the duration of the call. Hence
- * there is no need to check to see if it is pinned or protected, or to
+ * there is no need to check to see if it is pinned or protected, or to
* protect it if it is not.
*/
-
- if ( H5HF_cache_verify_iblock_descendants_clean(f, dxpl_id,
- iblock, &iblock_status,
- &descendants_clean) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "can't verify descendants clean.")
+ if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, iblock, &iblock_status, &descendants_clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
HDassert(descendants_clean);
-
+}
#endif /* NDEBUG */
/* Get the pointer to the shared heap header */
@@ -1276,99 +1258,78 @@ H5HF_cache_iblock_notify(H5C_notify_action_t action, H5HF_indirect_t *iblock)
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->hdr);
- if ( action == H5C_NOTIFY_ACTION_BEFORE_EVICT )
- HDassert((iblock->parent == iblock->fd_parent) ||
- ((NULL == iblock->parent) && (iblock->fd_parent)));
+
+ if(action == H5AC_NOTIFY_ACTION_BEFORE_EVICT)
+ HDassert((iblock->parent == iblock->fd_parent) || ((NULL == iblock->parent) && (iblock->fd_parent)));
else
- HDassert(iblock->parent == iblock->fd_parent);
+ HDassert(iblock->parent == iblock->fd_parent);
/* further sanity checks */
- if ( iblock->parent == NULL ) {
-
- /* Either this is the root iblock, or the parent pointer is */
+ if(iblock->parent == NULL) {
+ /* Either this is the root iblock, or the parent pointer is */
/* invalid. Since we save a copy of the parent pointer on */
/* the insertion event, it doesn't matter if the parent pointer */
/* is invalid just before eviction. However, we will not be */
/* able to function if it is invalid on the insertion event. */
- /* Scream and die if this is the case. */
-
- HDassert((action == H5C_NOTIFY_ACTION_BEFORE_EVICT) ||
- (iblock->block_off == 0));
+ /* Scream and die if this is the case. */
+ HDassert((action == H5C_NOTIFY_ACTION_BEFORE_EVICT) || (iblock->block_off == 0));
- /* pointer from hdr to root iblock will not be set up unless */
- /* the fractal heap has already pinned the hdr. Do what */
+ /* pointer from hdr to root iblock will not be set up unless */
+ /* the fractal heap has already pinned the hdr. Do what */
/* sanity checking we can. */
-
- if ( ( iblock->block_off == 0 ) &&
- ( iblock->hdr->root_iblock_flags & H5HF_ROOT_IBLOCK_PINNED ) )
+ if((iblock->block_off == 0) && (iblock->hdr->root_iblock_flags & H5HF_ROOT_IBLOCK_PINNED))
HDassert(iblock->hdr->root_iblock == iblock);
-
- } else {
- /* if this is a child iblock, verify that the pointers are */
+ } /* end if */
+ else {
+ /* if this is a child iblock, verify that the pointers are */
/* either uninitialized or set up correctly. */
H5HF_indirect_t *par_iblock = iblock->parent;
unsigned indir_idx; /* Index in parent's child iblock pointer array */
/* Sanity check */
HDassert(par_iblock->child_iblocks);
- HDassert(iblock->par_entry >= (iblock->hdr->man_dtable.max_direct_rows
- * iblock->hdr->man_dtable.cparam.width));
+ HDassert(iblock->par_entry >= (iblock->hdr->man_dtable.max_direct_rows * iblock->hdr->man_dtable.cparam.width));
/* Compute index in parent's child iblock pointer array */
- indir_idx = iblock->par_entry -
- (iblock->hdr->man_dtable.max_direct_rows
- * iblock->hdr->man_dtable.cparam.width);
+ indir_idx = iblock->par_entry - (iblock->hdr->man_dtable.max_direct_rows * iblock->hdr->man_dtable.cparam.width);
- /* The pointer to iblock in the parent may not be set yet -- */
+ /* The pointer to iblock in the parent may not be set yet -- */
/* verify that it is either NULL, or that it has been set to */
/* iblock. */
- HDassert((NULL == par_iblock->child_iblocks[indir_idx]) ||
- (par_iblock->child_iblocks[indir_idx] == iblock));
- }
-
- switch ( action )
- {
- case H5C_NOTIFY_ACTION_AFTER_INSERT:
- if ( iblock->parent ) /* this is a child iblock */
- {
- /* create flush dependency with parent iblock */
- if(H5AC_create_flush_dependency(iblock->parent, iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, \
- "unable to create flush dependency")
- }
- else /* this is the root iblock */
- {
- /* create flush dependency with header */
- if(H5AC_create_flush_dependency(iblock->hdr, iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, \
- "unable to create flush dependency")
- }
- break;
-
- case H5C_NOTIFY_ACTION_BEFORE_EVICT:
- if ( iblock->fd_parent ) /* this is a child iblock */
- {
- /* destroy flush dependency with parent iblock */
- if(H5AC_destroy_flush_dependency(iblock->fd_parent,
- iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, \
- "unable to destroy flush dependency")
- }
- else /* this is the root iblock */
- {
- /* destroy flush dependency with header */
- if(H5AC_destroy_flush_dependency(iblock->hdr, iblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, \
- "unable to destroy flush dependency")
-
- }
- break;
-
- default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "unknown action from metadata cache")
- break;
- }
+ HDassert((NULL == par_iblock->child_iblocks[indir_idx]) || (par_iblock->child_iblocks[indir_idx] == iblock));
+ } /* end else */
+
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ if(iblock->parent) { /* this is a child iblock */
+ /* create flush dependency with parent iblock */
+ if(H5AC_create_flush_dependency(iblock->parent, iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+ else { /* this is the root iblock */
+ /* create flush dependency with header */
+ if(H5AC_create_flush_dependency(iblock->hdr, iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end else */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ if(iblock->fd_parent) { /* this is a child iblock */
+ /* destroy flush dependency with parent iblock */
+ if(H5AC_destroy_flush_dependency(iblock->fd_parent, iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ } /* end if */
+ else { /* this is the root iblock */
+ /* destroy flush dependency with header */
+ if(H5AC_destroy_flush_dependency(iblock->hdr, iblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ } /* end else */
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+ break;
+ } /* end switch */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1424,8 +1385,8 @@ H5HF_cache_iblock_size(const H5F_t UNUSED *f, const H5HF_indirect_t *iblock, siz
static H5HF_direct_t *
H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
{
- H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* pointer to user data */
H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
H5HF_parent_t *par_info; /* Pointer to parent information */
H5HF_direct_t *dblock = NULL; /* Direct block info */
const uint8_t *p; /* Pointer into raw data buffer */
@@ -1443,7 +1404,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space for the fractal heap direct block */
if(NULL == (dblock = H5FL_MALLOC(H5HF_direct_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&dblock->cache_info, 0, sizeof(H5AC_info_t));
/* Get the pointer to the shared heap header */
@@ -1456,7 +1417,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Share common heap information */
dblock->hdr = hdr;
if(H5HF_hdr_incr(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
/* Set block's internal information */
dblock->size = udata->dblock_size;
@@ -1473,7 +1434,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
size_t nbytes; /* Number of bytes used in buffer, after applying reverse filters */
void *read_buf; /* Pointer to buffer to read in */
size_t read_size; /* Size of filtered direct block to read */
- unsigned filter_mask; /* Excluded filters for direct block */
+ unsigned filter_mask; /* Excluded filters for direct block */
/* Check for root direct block */
if(par_info->iblock == NULL) {
@@ -1525,17 +1486,17 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Magic number */
if(HDmemcmp(p, H5HF_DBLOCK_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap direct block signature")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong fractal heap direct block signature")
p += H5_SIZEOF_MAGIC;
/* Version */
if(*p++ != H5HF_DBLOCK_VERSION)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap direct block version")
/* Address of heap that owns this block (just for file integrity checks) */
H5F_addr_decode(udata->f, &p, &heap_addr);
if(H5F_addr_ne(heap_addr, hdr->heap_addr))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
/* Address of parent block */
dblock->parent = par_info->iblock;
@@ -1973,57 +1934,43 @@ H5HF_cache_dblock_notify(H5C_notify_action_t action, H5HF_direct_t *dblock)
HDassert(dblock);
HDassert(dblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(dblock->hdr);
- HDassert((dblock->fd_parent) ||
- ((dblock->hdr->man_dtable.curr_root_rows == 0) &&
- (dblock->block_off == (hsize_t)0)));
-
- switch ( action )
- {
- case H5C_NOTIFY_ACTION_AFTER_INSERT:
- HDassert(dblock->parent == dblock->fd_parent);
-
- if ( dblock->parent ) /* this is a leaf dblock */
- {
- /* create flush dependency with parent iblock */
- if(H5AC_create_flush_dependency(dblock->parent, dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, \
- "unable to create flush dependency")
- }
- else /* this is a root dblock */
- {
- /* create flush dependency with header */
- if(H5AC_create_flush_dependency(dblock->hdr, dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, \
- "unable to create flush dependency")
- }
- break;
-
- case H5C_NOTIFY_ACTION_BEFORE_EVICT:
+ HDassert((dblock->fd_parent) ||
+ ((dblock->hdr->man_dtable.curr_root_rows == 0) && (dblock->block_off == (hsize_t)0)));
+
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ HDassert(dblock->parent == dblock->fd_parent);
+ if(dblock->parent) { /* this is a leaf dblock */
+ /* create flush dependency with parent iblock */
+ if(H5AC_create_flush_dependency(dblock->parent, dblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+ else { /* this is a root dblock */
+ /* create flush dependency with header */
+ if(H5AC_create_flush_dependency(dblock->hdr, dblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end else */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
HDassert((dblock->parent == dblock->fd_parent) ||
- ((NULL == dblock->parent) && (dblock->fd_parent)));
- if ( dblock->fd_parent ) /* this is a leaf dblock */
- {
- /* destroy flush dependency with parent iblock */
- if(H5AC_destroy_flush_dependency(dblock->fd_parent,
- dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, \
- "unable to destroy flush dependency")
- }
- else /* this is a root dblock */
- {
- /* destroy flush dependency with header */
- if(H5AC_destroy_flush_dependency(dblock->hdr, dblock) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, \
- "unable to destroy flush dependency")
-
- }
- break;
-
- default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "unknown action from metadata cache")
- break;
- }
+ ((NULL == dblock->parent) && (dblock->fd_parent)));
+ if(dblock->fd_parent) { /* this is a leaf dblock */
+ /* destroy flush dependency with parent iblock */
+ if(H5AC_destroy_flush_dependency(dblock->fd_parent, dblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ } /* end if */
+ else { /* this is a root dblock */
+ /* destroy flush dependency with header */
+ if(H5AC_destroy_flush_dependency(dblock->hdr, dblock) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ } /* end else */
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+ break;
+ } /* end switch */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2063,11 +2010,11 @@ H5HF_cache_dblock_size(const H5F_t UNUSED *f, const H5HF_direct_t *dblock, size_
/*------------------------------------------------------------------------
- * Function: H5HF_cache_verify_hdr_descendants_clean
+ * Function: H5HF__cache_verify_hdr_descendants_clean
*
* Purpose: Sanity checking routine that verifies that all indirect
* and direct blocks that are descendants of the supplied
- * instance of H5HF_hdr_t are clean. Set *clean_ptr to
+ * instance of H5HF_hdr_t are clean. Set *clean to
* TRUE if this is the case, and to FALSE otherwise.
*
* Return: Non-negative on success/Negative on failure
@@ -2079,41 +2026,26 @@ H5HF_cache_dblock_size(const H5F_t UNUSED *f, const H5HF_direct_t *dblock, size_
*/
#ifndef NDEBUG
static herr_t
-H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_hdr_t * hdr,
- hbool_t *clean_ptr)
+H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_hdr_t * hdr, hbool_t *clean)
{
- hbool_t in_cache;
- hbool_t type_ok;
- hbool_t root_iblock_in_cache = FALSE;
- hbool_t unprotect_root_iblock = FALSE;
- unsigned hdr_status = 0;
- unsigned root_iblock_status = 0;
- unsigned root_dblock_status = 0;
- H5HF_indirect_t * root_iblock = NULL;
- haddr_t hdr_addr;
- haddr_t root_iblock_addr = HADDR_UNDEF;
- haddr_t root_dblock_addr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
+ haddr_t hdr_addr; /* Address of header */
+ unsigned hdr_status = 0; /* Header cache entry status */
+ herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
HDassert(f);
HDassert(hdr);
HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert((const H5AC_class_t *)(hdr->cache_info.type) == \
- &(H5AC_FHEAP_HDR[0]));
- HDassert(clean_ptr);
-
+ HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(clean);
hdr_addr = hdr->cache_info.addr;
-
HDassert(hdr_addr == hdr->heap_addr);
- if ( H5AC_get_entry_status(f, hdr_addr, &hdr_status) < 0 )
-
+ if(H5AC_get_entry_status(f, hdr_addr, &hdr_status) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get hdr status")
-
HDassert(hdr_status & H5AC_ES__IN_CACHE);
/* We have three basic scenarios we have to deal with:
@@ -2146,64 +2078,50 @@ H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
* Since the former case is far and away the most common, we don't
* worry too much about efficiency in the second case.
*/
-
- if ( ( hdr->root_iblock ) ||
- ( ( hdr->man_dtable.curr_root_rows > 0 ) &&
- ( HADDR_UNDEF != hdr->man_dtable.table_addr ) ) ) {
-
- root_iblock = hdr->root_iblock;
-
- /* make note of the on disk address of the root iblock */
-
- if ( root_iblock == NULL ) {
-
+ if(hdr->root_iblock ||
+ ((hdr->man_dtable.curr_root_rows > 0) &&
+ (HADDR_UNDEF != hdr->man_dtable.table_addr))) {
+ H5HF_indirect_t *root_iblock = hdr->root_iblock;
+ haddr_t root_iblock_addr;
+ unsigned root_iblock_status = 0;
+ hbool_t root_iblock_in_cache;
+
+ /* make note of the on disk address of the root iblock */
+ if(root_iblock == NULL)
/* hdr->man_dtable.table_addr must contain address of root
* iblock. Check to see if it is in cache. If it is,
* protect it and put its address in root_iblock.
*/
root_iblock_addr = hdr->man_dtable.table_addr;
-
- } else {
-
+ else
root_iblock_addr = root_iblock->addr;
- }
/* get the status of the root iblock */
HDassert(root_iblock_addr != HADDR_UNDEF);
+ if(H5AC_get_entry_status(f, root_iblock_addr, &root_iblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get root iblock status")
- if ( H5AC_get_entry_status(f, root_iblock_addr,
- &root_iblock_status) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, \
- "can't get root iblock status")
-
- root_iblock_in_cache = ( (root_iblock_status & H5AC_ES__IN_CACHE) != 0 );
-
+ root_iblock_in_cache = ((root_iblock_status & H5AC_ES__IN_CACHE) != 0);
HDassert(root_iblock_in_cache || (root_iblock == NULL));
- if ( ! root_iblock_in_cache ) { /* we are done */
-
- *clean_ptr = TRUE;
-
- } else if ( root_iblock_status & H5AC_ES__IS_DIRTY ) {
-
- *clean_ptr = FALSE;
-
- } else { /* must examine children */
+ if(!root_iblock_in_cache) /* we are done */
+ *clean = TRUE;
+ else if(root_iblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
+ else { /* must examine children */
+ hbool_t unprotect_root_iblock = FALSE;
/* At this point, the root iblock may be pinned, protected,
* both, or neither, and we may or may not have a pointer
* to root iblock in memory.
*
- * Before we call H5HF_cache_verify_iblock_descendants_clean(),
+ * Before we call H5HF__cache_verify_iblock_descendants_clean(),
* we must ensure that the root iblock is either pinned or
* protected or both, and that we have a pointer to it.
* Do this as follows:
*/
- if ( root_iblock == NULL ) { /* we don't have ptr to root iblock */
-
- if ( 0 == (root_iblock_status & H5AC_ES__IS_PROTECTED) ) {
-
+ if(root_iblock == NULL) { /* we don't have ptr to root iblock */
+ if(0 == (root_iblock_status & H5AC_ES__IS_PROTECTED)) {
/* just protect the root iblock -- this will give us
* the pointer we need to proceed, and ensure that
* it is locked into the metadata cache for the
@@ -2214,21 +2132,11 @@ H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
* in this case, since we know that the entry is in cache,
* we can pass NULL udata.
*/
-
- root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id,
- H5AC_FHEAP_IBLOCK,
- root_iblock_addr,
- NULL, H5AC_READ);
-
- if ( NULL == root_iblock )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, \
- "H5AC_protect() faild.")
-
+ if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
unprotect_root_iblock = TRUE;
-
- } else {
-
+ } /* end if */
+ else {
/* the root iblock is protected, and we have no
* legitimate way of getting a pointer to it.
*
@@ -2263,25 +2171,20 @@ H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
* code, I expect that we will use this approach until it
* causes problems, or we think of a better way.
*/
- if ( H5AC_get_entry_ptr_from_addr(f, root_iblock_addr,
- (void **)(&root_iblock)) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, \
- "H5AC_get_entry_ptr_from_addr() failed.")
-
+ if(H5AC_get_entry_ptr_from_addr(f, root_iblock_addr, (void **)(&root_iblock)) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
HDassert(root_iblock);
- }
- } else /* root_iblock != NULL */ {
-
+ } /* end else */
+ } /* end if */
+ else { /* root_iblock != NULL */
/* we have the pointer to the root iblock. Protect it
* if it is neither pinned nor protected -- otherwise we
* are ready to go.
*/
H5HF_indirect_t * iblock = NULL;
- if ( ( (root_iblock_status & H5AC_ES__IS_PINNED) == 0 ) &&
- ( (root_iblock_status & H5AC_ES__IS_PROTECTED) == 0 ) ) {
-
+ if(((root_iblock_status & H5AC_ES__IS_PINNED) == 0) &&
+ ((root_iblock_status & H5AC_ES__IS_PROTECTED) == 0)) {
/* the root iblock is neither pinned nor protected -- hence
* we must protect it before we proceed
*
@@ -2290,82 +2193,49 @@ H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
* in this case, since we know that the entry is in cache,
* we can pass NULL udata.
*/
-
- iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id,
- H5AC_FHEAP_IBLOCK,
- root_iblock_addr,
- NULL, H5AC_READ);
-
- if ( NULL == iblock )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, \
- "H5AC_protect() faild.")
-
+ if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
unprotect_root_iblock = TRUE;
-
HDassert(iblock == root_iblock);
-
- }
- }
+ } /* end if */
+ } /* end else */
/* at this point, one way or another, the root iblock is locked
* in memory for the duration of the call. Do some sanity checks,
- * and then call H5HF_cache_verify_iblock_descendants_clean().
+ * and then call H5HF__cache_verify_iblock_descendants_clean().
*/
+ HDassert(hdr->root_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->root_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(hdr->root_iblock->cache_info.magic == \
- H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert((const H5AC_class_t *)(hdr->root_iblock->cache_info.type) \
- == &(H5AC_FHEAP_IBLOCK[0]));
-
- if ( H5HF_cache_verify_iblock_descendants_clean(f, dxpl_id,
- root_iblock, &root_iblock_status,
- clean_ptr) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "can't verify root iblock & descendants clean.")
-
+ if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, root_iblock, &root_iblock_status, clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify root iblock & descendants clean.")
/* unprotect the root indirect block if required */
- if ( unprotect_root_iblock ) {
-
+ if(unprotect_root_iblock) {
HDassert(root_iblock);
-
- if ( H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK,
- root_iblock_addr, root_iblock,
- H5AC__NO_FLAGS_SET) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, \
- "H5AC_unprotect() faild.")
- }
- }
- } else if ( ( hdr->man_dtable.curr_root_rows == 0 ) &&
- ( HADDR_UNDEF != hdr->man_dtable.table_addr ) ) {
+ if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ else if((hdr->man_dtable.curr_root_rows == 0) &&
+ (HADDR_UNDEF != hdr->man_dtable.table_addr)) {
+ haddr_t root_dblock_addr;
+ unsigned root_dblock_status = 0;
+ hbool_t in_cache;
+ hbool_t type_ok;
/* this is scenario 2 -- we have a root dblock */
-
root_dblock_addr = hdr->man_dtable.table_addr;
+ if(H5AC_get_entry_status(f, root_dblock_addr, &root_dblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get root dblock status")
- if ( H5AC_get_entry_status(f, root_dblock_addr,
- &root_dblock_status) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, \
- "can't get root dblock status")
-
- if ( root_dblock_status & H5AC_ES__IN_CACHE ) {
-
- if ( H5AC_verify_entry_type(f, root_dblock_addr,
- &H5AC_FHEAP_DBLOCK[0],
- &in_cache, &type_ok) < 0 )
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL,
- "can't check dblock type")
-
+ if(root_dblock_status & H5AC_ES__IN_CACHE) {
+ if(H5AC_verify_entry_type(f, root_dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
HDassert(in_cache);
-
- if ( !type_ok )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "root dblock addr doesn't refer to a dblock?!?")
+ if(!type_ok)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock addr doesn't refer to a dblock?!?")
/* If a root dblock is in cache, it must have a flush
* dependency relationship with the header, and it
@@ -2376,45 +2246,34 @@ H5HF_cache_verify_hdr_descendants_clean(H5F_t *f,
* the root iblock is a child in some flush dependency
* relationship.
*/
- if ( 0 == (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "root dblock in cache and not a flush dep child.")
-
- if ( 0 != (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "root dblock in cache and is a flush dep parent.")
-
-
- *clean_ptr = ! (root_dblock_status & H5AC_ES__IS_DIRTY);
-
- } else { /* root dblock not in cache */
-
- *clean_ptr = TRUE;
- }
- } else {
+ if(0 == (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and not a flush dep child.")
+ if(0 != (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and is a flush dep parent.")
+
+ *clean = !(root_dblock_status & H5AC_ES__IS_DIRTY);
+ } /* end if */
+ else /* root dblock not in cache */
+ *clean = TRUE;
+ } /* end else-if */
+ else
/* this is scenario 3 -- the fractal heap is empty, and we
* have nothing to do.
*/
- *clean_ptr = TRUE;
- }
+ *clean = TRUE;
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5HF_cache_verify_hdr_descendants_clean() */
-
+} /* H5HF__cache_verify_hdr_descendants_clean() */
#endif /* NDEBUG */
/*------------------------------------------------------------------------
- * Function: H5HF_cache_verify_iblock_descendants_clean
+ * Function: H5HF__cache_verify_iblock_descendants_clean
*
* Purpose: Sanity checking routine that verifies that all indirect
 * and direct blocks that are descendants of the supplied
- * instance of H5HF_indirect_t are clean. Set *clean_ptr
+ * instance of H5HF_indirect_t are clean. Set *clean
* to TRUE if this is the case, and to FALSE otherwise.
*
* In passing, the function also does a cursory check to
@@ -2433,7 +2292,7 @@ done:
* met.
*
* Note that this function and
- * H5HF_cache_verify_descendant_iblocks_clean() are
+ * H5HF__cache_verify_descendant_iblocks_clean() are
* recursive co-routines.
*
* Return: Non-negative on success/Negative on failure
@@ -2445,72 +2304,49 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF_cache_verify_iblock_descendants_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_indirect_t * iblock,
- unsigned * iblock_status_ptr,
- hbool_t *clean_ptr)
+H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean)
{
hbool_t has_dblocks = FALSE;
hbool_t has_iblocks = FALSE;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
HDassert(f);
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert((const H5AC_class_t *)(iblock->cache_info.type) == \
- &(H5AC_FHEAP_IBLOCK[0]));
- HDassert(iblock_status_ptr);
- HDassert(clean_ptr);
- HDassert(*clean_ptr);
-
- if ( ( *clean_ptr ) &&
- ( H5HF_cache_verify_iblocks_dblocks_clean(f, iblock, clean_ptr,
- &has_dblocks) < 0 ) )
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(iblock_status);
+ HDassert(clean);
+ HDassert(*clean);
+
+ if((*clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, iblock, clean, &has_dblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify dblocks clean.")
- if ( ( *clean_ptr ) &&
- ( H5HF_cache_verify_descendant_iblocks_clean(f, dxpl_id, iblock,
- clean_ptr, &has_iblocks) < 0 ) )
+ if((*clean) && H5HF__cache_verify_descendant_iblocks_clean(f, dxpl_id, iblock, clean, &has_iblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify iblocks clean.")
- if ( ( NULL == iblock_status_ptr ) &&
- ( H5AC_get_entry_status(f, iblock->addr, iblock_status_ptr) < 0 ) )
-
+ if((NULL == iblock_status) && H5AC_get_entry_status(f, iblock->addr, iblock_status) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
/* verify that flush dependency setup is plausible */
-
- if ( 0 == (*iblock_status_ptr & H5AC_ES__IS_FLUSH_DEP_CHILD) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "iblock is not a flush dep child.")
-
- if ( ( ( has_dblocks || has_iblocks ) ) &&
- ( 0 == (*iblock_status_ptr & H5AC_ES__IS_FLUSH_DEP_PARENT) ) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "iblock has children and is not a flush dep parent.")
-
- if ( ( ( has_dblocks || has_iblocks ) ) &&
- ( 0 == (*iblock_status_ptr & H5AC_ES__IS_PINNED) ) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "iblock has children and is not pinned.")
+ if(0 == (*iblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock is not a flush dep child.")
+ if((has_dblocks || has_iblocks) && (0 == (*iblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT)))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock has children and is not a flush dep parent.")
+ if((has_dblocks || has_iblocks) && (0 == (*iblock_status & H5AC_ES__IS_PINNED)))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock has children and is not pinned.")
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5HF_cache_verify_iblock_descendants_clean() */
-
+} /* H5HF__cache_verify_iblock_descendants_clean() */
#endif /* NDEBUG */
/*------------------------------------------------------------------------
- * Function: H5HF_cache_verify_iblocks_dblocks_clean
+ * Function: H5HF__cache_verify_iblocks_dblocks_clean
*
* Purpose: Sanity checking routine that attempts to verify that all
* direct blocks pointed to by the supplied indirect block
@@ -2538,71 +2374,53 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF_cache_verify_iblocks_dblocks_clean(H5F_t *f,
- H5HF_indirect_t * iblock,
- hbool_t *clean_ptr,
- hbool_t *has_dblocks_ptr)
+H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
+ hbool_t *clean, hbool_t *has_dblocks)
{
- hbool_t in_cache;
- hbool_t type_ok;
- unsigned i;
unsigned num_direct_rows;
unsigned max_dblock_index;
- haddr_t dblock_addr;
- unsigned dblock_status = 0;
+ unsigned i;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
HDassert(f);
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(clean_ptr);
- HDassert(*clean_ptr);
- HDassert(has_dblocks_ptr);
+ HDassert(clean);
+ HDassert(*clean);
+ HDassert(has_dblocks);
i = 0;
-
- num_direct_rows =
- MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
-
- HDassert(num_direct_rows <= iblock->nrows );
-
- max_dblock_index =
- (num_direct_rows * iblock->hdr->man_dtable.cparam.width) - 1;
-
- while ( ( *clean_ptr ) && ( i <= max_dblock_index ) ) {
+ num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
+ HDassert(num_direct_rows <= iblock->nrows);
+ max_dblock_index = (num_direct_rows * iblock->hdr->man_dtable.cparam.width) - 1;
+ while((*clean) && (i <= max_dblock_index)) {
+ haddr_t dblock_addr;
dblock_addr = iblock->ents[i].addr;
+ if(H5F_addr_defined(dblock_addr)) {
+ hbool_t in_cache;
+ hbool_t type_ok;
- if ( H5F_addr_defined(dblock_addr) ) {
-
- if ( H5AC_verify_entry_type(f, dblock_addr, &H5AC_FHEAP_DBLOCK[0],
- &in_cache, &type_ok) < 0 )
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL,
- "can't check dblock type")
-
- if ( in_cache ) { /* dblock is in cache */
-
- if ( ! type_ok )
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "dblock addr doesn't refer to a dblock?!?")
+ if(H5AC_verify_entry_type(f, dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
- if ( H5AC_get_entry_status(f, dblock_addr,
- &dblock_status) < 0 )
+ if(in_cache) { /* dblock is in cache */
+ unsigned dblock_status = 0;
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL,
- "can't get dblock status")
+ if(!type_ok)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock addr doesn't refer to a dblock?!?")
- HDassert(dblock_status & H5AC_ES__IN_CACHE );
+ if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
+ HDassert(dblock_status & H5AC_ES__IN_CACHE);
- *has_dblocks_ptr = TRUE;
-
- if ( dblock_status & H5AC_ES__IS_DIRTY ) {
-
- *clean_ptr = FALSE;
- }
+ *has_dblocks = TRUE;
+ if(dblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
/* If a child dblock is in cache, it must have a flush
* dependency relationship with this iblock, and it
@@ -2613,33 +2431,26 @@ H5HF_cache_verify_iblocks_dblocks_clean(H5F_t *f,
* the child iblock is a child in some flush dependency
* relationship.
*/
- if ( 0 == (dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "dblock in cache and not a flush dep child.")
+ if(0 == (dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child.")
- if ( 0 != (dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT) )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "dblock in cache and is a flush dep parent.")
+ if(0 != (dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and is a flush dep parent.")
- }
- }
+ } /* end if */
+ } /* end if */
i++;
- }
+ } /* end while */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5HF_cache_verify_iblocks_dblocks_clean() */
-
+} /* H5HF__cache_verify_iblocks_dblocks_clean() */
#endif /* NDEBUG */
/*------------------------------------------------------------------------
- * Function: H5HF_cache_verify_descendant_iblocks_clean
+ * Function: H5HF__cache_verify_descendant_iblocks_clean
*
* Purpose: Sanity checking routine that attempts to verify that all
* direct blocks pointed to by the supplied indirect block
@@ -2667,70 +2478,52 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF_cache_verify_descendant_iblocks_clean(H5F_t *f,
- hid_t dxpl_id,
- H5HF_indirect_t * iblock,
- hbool_t *clean_ptr,
- hbool_t *has_iblocks_ptr)
+H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks)
{
- hbool_t unprotect_child_iblock;
- unsigned i;
unsigned first_iblock_index;
unsigned last_iblock_index;
unsigned num_direct_rows;
- unsigned child_iblock_status = 0;
- haddr_t child_iblock_addr;
- H5HF_indirect_t * child_iblock_ptr;
+ unsigned i;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
+ /* Sanity checks */
HDassert(f);
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(iblock->cache_info.type == &(H5AC_FHEAP_IBLOCK[0]));
- HDassert(clean_ptr);
- HDassert(*clean_ptr);
- HDassert(has_iblocks_ptr);
-
- num_direct_rows =
- MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
-
- HDassert(num_direct_rows <= iblock->nrows );
+ HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(clean);
+ HDassert(*clean);
+ HDassert(has_iblocks);
+ num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
+ HDassert(num_direct_rows <= iblock->nrows);
first_iblock_index = num_direct_rows * iblock->hdr->man_dtable.cparam.width;
- last_iblock_index =
- (iblock->nrows * iblock->hdr->man_dtable.cparam.width) - 1;
+ last_iblock_index = (iblock->nrows * iblock->hdr->man_dtable.cparam.width) - 1;
i = first_iblock_index;
+ while((*clean) && (i <= last_iblock_index)) {
+ haddr_t child_iblock_addr = iblock->ents[i].addr;
- while ( ( *clean_ptr ) && ( i <= last_iblock_index ) ) {
-
- child_iblock_addr = iblock->ents[i].addr;
-
- if ( H5F_addr_defined(child_iblock_addr) ) {
+ if(H5F_addr_defined(child_iblock_addr)) {
+ unsigned child_iblock_status = 0;
- if ( H5AC_get_entry_status(f, child_iblock_addr,
- &child_iblock_status) < 0 )
+ if(H5AC_get_entry_status(f, child_iblock_addr, &child_iblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, \
- "can't get iblock status")
+ if(child_iblock_status & H5AC_ES__IN_CACHE) {
+ *has_iblocks = TRUE;
+ if(child_iblock_status & H5AC_ES__IS_DIRTY)
+ *clean = FALSE;
- if ( child_iblock_status & H5AC_ES__IN_CACHE ) {
-
- *has_iblocks_ptr = TRUE;
-
- if ( child_iblock_status & H5AC_ES__IS_DIRTY ) {
-
- *clean_ptr = FALSE;
- }
-
- /* if the child iblock is in cache and *clean_ptr is TRUE,
+ /* if the child iblock is in cache and *clean is TRUE,
* we must continue to explore down the fractal heap tree
* structure to verify that all descendant blocks are either
* clean, or not in the metadata cache. We do this with a
* recursive call to
- * H5HF_cache_verify_iblock_descendants_clean().
+ * H5HF__cache_verify_iblock_descendants_clean().
* However, we can't make this call unless the child iblock
* is somehow locked into the cache -- typically via either
* pinning or protecting.
@@ -2776,102 +2569,69 @@ H5HF_cache_verify_descendant_iblocks_clean(H5F_t *f,
* expect that we will use this approach until it causes
* problems, or we think of a better way.
*/
- if ( *clean_ptr ) {
-
- child_iblock_ptr = NULL;
- unprotect_child_iblock = FALSE;
+ if(*clean) {
+ H5HF_indirect_t *child_iblock = NULL;
+ hbool_t unprotect_child_iblock = FALSE;
- if ( 0 == (child_iblock_status & H5AC_ES__IS_PINNED)) {
-
+ if(0 == (child_iblock_status & H5AC_ES__IS_PINNED)) {
/* child iblock is not pinned */
-
- if (0 == (child_iblock_status & H5AC_ES__IS_PROTECTED)){
-
+ if(0 == (child_iblock_status & H5AC_ES__IS_PROTECTED)) {
/* child iblock is unprotected, and unpinned */
/* protect it. Note that the udata is only */
/* used in the load callback. While the */
/* fractal heap makes heavy use of the udata */
/* in this case, since we know that the */
/* entry is in cache, we can pass NULL udata */
- child_iblock_ptr = (H5HF_indirect_t *)
- H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK,
- child_iblock_addr,
- NULL, H5AC_READ);
-
- if ( NULL == child_iblock_ptr )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, \
- "H5AC_protect() faild.")
+ if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
unprotect_child_iblock = TRUE;
-
- } else {
-
+ } /* end if */
+ else {
/* child iblock is protected -- use */
/* H5AC_get_entry_ptr_from_addr() to get a */
/* pointer to the entry. This is very slimy -- */
/* come up with a better solution. */
- if ( H5AC_get_entry_ptr_from_addr(f,
- child_iblock_addr,
- (void **)(&child_iblock_ptr)) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, \
- "H5AC_get_entry_ptr_from_addr() faild.")
-
- HDassert ( child_iblock_ptr );
- }
- } else {
+ if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
+ HDassert(child_iblock);
+ } /* end else */
+ } /* end if */
+ else {
/* child iblock is pinned -- look it up in the */
/* parent iblocks child_iblocks array. */
-
HDassert(iblock->child_iblocks);
-
- child_iblock_ptr =
- iblock->child_iblocks[i - first_iblock_index];
- }
+ child_iblock = iblock->child_iblocks[i - first_iblock_index];
+ } /* end else */
/* At this point, one way or another we should have
* a pointer to the child iblock. Verify that we
* that we have the correct one.
*/
- HDassert(child_iblock_ptr);
- HDassert(child_iblock_ptr->cache_info.magic ==
- H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(child_iblock_ptr->cache_info.type ==
- H5AC_FHEAP_IBLOCK);
- HDassert(child_iblock_ptr->addr == child_iblock_addr);
+ HDassert(child_iblock);
+ HDassert(child_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(child_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(child_iblock->addr == child_iblock_addr);
/* now make the recursive call */
- if ( H5HF_cache_verify_iblock_descendants_clean(f, dxpl_id,
- child_iblock_ptr, &child_iblock_status,
- clean_ptr) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, \
- "can't verify child iblock clean.")
+ if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, child_iblock, &child_iblock_status, clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify child iblock clean.")
/* if we protected the child iblock, unprotect it now */
- if ( unprotect_child_iblock ) {
-
- if ( H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK,
- child_iblock_addr, child_iblock_ptr,
- H5AC__NO_FLAGS_SET) < 0 )
-
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, \
- "H5AC_unprotect() faild.")
+ if(unprotect_child_iblock) {
+ if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
- }
- }
- }
- }
+ } /* end if */
+ } /* end if */
+ } /* end if */
+ } /* end if */
i++;
- }
+ } /* end while */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5HF_cache_verify_descendant_iblocks_clean() */
-
+} /* H5HF__cache_verify_descendant_iblocks_clean() */
#endif /* NDEBUG */
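
The three H5HF__cache_verify_*_clean() debug helpers above share one shape: check whether an entry is resident in the cache and dirty, stop as soon as anything dirty turns up, and keep descending only while everything still looks clean. A minimal, self-contained sketch of that early-exit traversal follows; the node_t type and the driver are invented for illustration and are not the HDF5 cache API.

    /* Early-exit "are all descendants clean?" traversal, modelled on the
     * H5HF__cache_verify_*_clean() helpers above. Hypothetical types only. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct node_t {
        bool in_cache;              /* is the entry resident in the cache? */
        bool dirty;                 /* does it carry unflushed changes? */
        struct node_t **children;   /* NULL-terminated list of child entries */
    } node_t;

    static void
    verify_descendants_clean(const node_t *node, bool *clean)
    {
        if(!node->in_cache)         /* not resident -- nothing to examine below it */
            return;
        if(node->dirty) {           /* dirty entry found -- stop searching */
            *clean = false;
            return;
        }
        if(node->children)
            for(size_t u = 0; *clean && node->children[u]; u++)
                verify_descendants_clean(node->children[u], clean);
    }

    int
    main(void)
    {
        node_t  dirty_leaf = { true, true, NULL };
        node_t *kids[]     = { &dirty_leaf, NULL };
        node_t  root       = { true, false, kids };
        bool    clean      = true;

        verify_descendants_clean(&root, &clean);
        printf("clean = %d\n", clean);      /* prints 0 */
        return 0;
    }
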
diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c
index 7d22ddd..8620f6f 100644
--- a/src/H5HFdbg.c
+++ b/src/H5HFdbg.c
@@ -387,11 +387,11 @@ H5HF_dblock_debug_cb(H5FS_section_info_t *_sect, void *_udata)
if(sect_start < dblock_start)
start = 0;
else
- H5_ASSIGN_OVERFLOW(/* To: */ start, /* From: */ (sect_start - dblock_start), /* From: */ hsize_t, /* To: */ size_t)
+ H5_CHECKED_ASSIGN(start, size_t, (sect_start - dblock_start), hsize_t)
if(sect_end > dblock_end)
- H5_ASSIGN_OVERFLOW(/* To: */ end, /* From: */ udata->dblock_size, /* From: */ hsize_t, /* To: */ size_t)
+ H5_CHECKED_ASSIGN(end, size_t, udata->dblock_size, hsize_t)
else
- H5_ASSIGN_OVERFLOW(/* To: */ end, /* From: */ ((sect_end - dblock_start) + 1), /* From: */ hsize_t, /* To: */ size_t)
+ H5_CHECKED_ASSIGN(end, size_t, ((sect_end - dblock_start) + 1), hsize_t)
/* Calculate the length */
len = end - start;
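
This hunk, and several that follow in H5HFdblock.c, H5HFdtable.c, H5HFhdr.c, H5HFhuge.c, H5MF.c, H5O.c, H5Oattr.c and H5Ocache.c, swaps H5_ASSIGN_OVERFLOW for H5_CHECKED_ASSIGN and reorders the arguments to (destination, destination type, source, source type). The sketch below is a hypothetical stand-in for such a macro, not the actual H5private.h definition; it only illustrates the idea of asserting, in debug builds, that a narrowing assignment preserves the value.

    /* Hypothetical checked-assignment macro with the argument order used
     * above: (dst, dsttype, src, srctype). Not the real H5_CHECKED_ASSIGN. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MY_CHECKED_ASSIGN(dst, dsttype, src, srctype)                  \
        do {                                                               \
            srctype _chk_src = (src);           /* evaluate src once */    \
            (dst) = (dsttype)_chk_src;                                     \
            assert((srctype)(dst) == _chk_src); /* value survived cast */  \
        } while(0)

    int
    main(void)
    {
        uint64_t block_size = 4096;     /* wide source type */
        size_t   dblock_size;           /* narrower destination */

        MY_CHECKED_ASSIGN(dblock_size, size_t, block_size, uint64_t);
        return (int)(dblock_size != 4096);
    }
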
diff --git a/src/H5HFdblock.c b/src/H5HFdblock.c
index c4ae573..9a8da4a 100644
--- a/src/H5HFdblock.c
+++ b/src/H5HFdblock.c
@@ -131,7 +131,7 @@ H5HF_man_dblock_create(hid_t dxpl_id, H5HF_hdr_t *hdr, H5HF_indirect_t *par_iblo
dblock->block_off = par_iblock->block_off;
dblock->block_off += hdr->man_dtable.row_block_off[par_row];
dblock->block_off += hdr->man_dtable.row_block_size[par_row] * (par_entry % hdr->man_dtable.cparam.width);
- H5_ASSIGN_OVERFLOW(/* To: */ dblock->size, /* From: */ hdr->man_dtable.row_block_size[par_row], /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(dblock->size, size_t, hdr->man_dtable.row_block_size[par_row], hsize_t);
} /* end if */
else {
/* Must be the root direct block */
@@ -397,7 +397,7 @@ H5HF_man_dblock_new(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t request,
if(H5HF_man_iter_curr(&hdr->next_block, &next_row, NULL, &next_entry, &iblock) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to retrieve current block iterator location")
HDassert(next_row < iblock->nrows);
- H5_ASSIGN_OVERFLOW(/* To: */ next_size, /* From: */ hdr->man_dtable.row_block_size[next_row], /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(next_size, size_t, hdr->man_dtable.row_block_size[next_row], hsize_t);
/* Check for skipping over blocks */
if(min_dblock_size > next_size) {
diff --git a/src/H5HFdtable.c b/src/H5HFdtable.c
index 607862a..c523396 100644
--- a/src/H5HFdtable.c
+++ b/src/H5HFdtable.c
@@ -168,7 +168,7 @@ HDfprintf(stderr, "%s: off = %Hu\n", "H5HF_dtable_lookup", off);
/* Check for offset in first row */
if(off < dtable->num_id_first_row) {
*row = 0;
- H5_ASSIGN_OVERFLOW(/* To: */ *col, /* From: */ (off / dtable->cparam.start_block_size), /* From: */ hsize_t, /* To: */ unsigned);
+ H5_CHECKED_ASSIGN(*col, unsigned, (off / dtable->cparam.start_block_size), hsize_t);
} /* end if */
else {
unsigned high_bit = H5VM_log2_gen(off); /* Determine the high bit in the offset */
@@ -178,7 +178,7 @@ HDfprintf(stderr, "%s: off = %Hu\n", "H5HF_dtable_lookup", off);
HDfprintf(stderr, "%s: high_bit = %u, off_mask = %Hu\n", "H5HF_dtable_lookup", high_bit, off_mask);
#endif /* QAK */
*row = (high_bit - dtable->first_row_bits) + 1;
- H5_ASSIGN_OVERFLOW(/* To: */ *col, /* From: */ ((off - off_mask) / dtable->row_block_size[*row]), /* From: */ hsize_t, /* To: */ unsigned);
+ H5_CHECKED_ASSIGN(*col, unsigned, ((off - off_mask) / dtable->row_block_size[*row]), hsize_t);
} /* end else */
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index d3c4473..cf8da23 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -262,7 +262,7 @@ H5HF_hdr_finish_init_phase2(H5HF_hdr_t *hdr)
if(u < hdr->man_dtable.max_direct_rows) {
hdr->man_dtable.row_tot_dblock_free[u] = hdr->man_dtable.row_block_size[u] -
H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr);
- H5_ASSIGN_OVERFLOW(/* To: */ hdr->man_dtable.row_max_dblock_free[u], /* From: */ hdr->man_dtable.row_tot_dblock_free[u], /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(hdr->man_dtable.row_max_dblock_free[u], size_t, hdr->man_dtable.row_tot_dblock_free[u], hsize_t);
} /* end if */
else
if(H5HF_hdr_compute_free_space(hdr, u) < 0)
diff --git a/src/H5HFhuge.c b/src/H5HFhuge.c
index 6f0b48e..7f0da02 100644
--- a/src/H5HFhuge.c
+++ b/src/H5HFhuge.c
@@ -653,7 +653,7 @@ H5HF_huge_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Retrieve the object's address & length */
obj_addr = found_rec.addr;
- H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
filter_mask = found_rec.filter_mask;
} /* end if */
else {
@@ -669,7 +669,7 @@ H5HF_huge_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Retrieve the object's address & length */
obj_addr = found_rec.addr;
- H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
} /* end else */
} /* end else */
@@ -797,7 +797,7 @@ H5HF_huge_write(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Retrieve the object's address & length */
obj_addr = found_rec.addr;
- H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
} /* end else */
/* Write the object's data to the file */
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index a3c43ac..d946815 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -121,7 +121,7 @@ const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{
/*-------------------------------------------------------------------------
- * Function: H5HL_fl_deserialize
+ * Function: H5HL__fl_deserialize
*
* Purpose: Deserialize the free list for a heap data block
*
@@ -134,13 +134,13 @@ const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL_fl_deserialize(H5HL_t *heap)
+H5HL__fl_deserialize(H5HL_t *heap)
{
H5HL_free_t *fl = NULL, *tail = NULL; /* Heap free block nodes */
hsize_t free_block; /* Offset of free block */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check arguments */
HDassert(heap);
@@ -188,11 +188,11 @@ done:
fl = H5FL_FREE(H5HL_free_t, fl);
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5HL_fl_deserialize() */
+} /* end H5HL__fl_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5HL_fl_serialize
+ * Function: H5HL__fl_serialize
*
* Purpose: Serialize the free list for a heap data block
*
@@ -206,11 +206,11 @@ done:
*-------------------------------------------------------------------------
*/
static void
-H5HL_fl_serialize(const H5HL_t *heap)
+H5HL__fl_serialize(const H5HL_t *heap)
{
H5HL_free_t *fl; /* Pointer to heap free list node */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
/* check arguments */
HDassert(heap);
@@ -231,7 +231,7 @@ H5HL_fl_serialize(const H5HL_t *heap)
} /* end for */
FUNC_LEAVE_NOAPI_VOID
-} /* end H5HL_fl_serialize() */
+} /* end H5HL__fl_serialize() */
/*-------------------------------------------------------------------------
@@ -258,6 +258,7 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
size_t spec_read_size; /* Size of buffer to speculatively read in */
const uint8_t *p; /* Pointer into decoding buffer */
haddr_t eoa; /* Relative end of file address */
+ hsize_t min; /* temp min value to avoid macro nesting */
H5HL_prfx_t *ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -276,7 +277,8 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, NULL, "unable to determine file size")
/* Compute the size of the speculative local heap prefix buffer */
- H5_ASSIGN_OVERFLOW(spec_read_size, MIN(eoa - addr, H5HL_SPEC_READ_SIZE), /* From: */ hsize_t, /* To: */ size_t);
+ min = MIN(eoa - addr, H5HL_SPEC_READ_SIZE);
+ H5_CHECKED_ASSIGN(spec_read_size, size_t, min, hsize_t);
HDassert(spec_read_size >= udata->sizeof_prfx);
/* Attempt to speculatively read both local heap prefix and heap data */
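
The new `min` temporary above exists, per its comment, to avoid nesting MIN() inside H5_CHECKED_ASSIGN(). Function-like macros expand their arguments textually, so a macro that names its source argument more than once would repeat the whole MIN() comparison at each mention, bloating the expansion and re-evaluating any side effects. A small, self-contained illustration with invented macro names:

    /* Why the MIN() result is computed into a temporary first. Both macros
     * here are hypothetical stand-ins (the assignment macro names (src)
     * twice), not the HDF5 definitions. */
    #include <assert.h>
    #include <stdint.h>

    #define MY_MIN(a, b)  (((a) < (b)) ? (a) : (b))
    #define MY_CHECKED_ASSIGN(dst, dsttype, src) \
        do { (dst) = (dsttype)(src); assert((uint64_t)(dst) == (uint64_t)(src)); } while(0)

    int
    main(void)
    {
        uint64_t remaining = 8192;      /* bytes left before the EOA */
        uint64_t spec_size = 512;       /* speculative read size */
        uint64_t min_size;              /* temporary, as in the patch above */
        size_t   read_size;

        /* Passing MY_MIN(remaining, spec_size) straight into the assignment
         * macro would expand the comparison twice; the temporary keeps the
         * expansion small and evaluates it exactly once. */
        min_size = MY_MIN(remaining, spec_size);
        MY_CHECKED_ASSIGN(read_size, size_t, min_size);
        return (int)(read_size != 512);
    }
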
@@ -298,11 +300,11 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space in memory for the heap */
if(NULL == (heap = H5HL_new(udata->sizeof_size, udata->sizeof_addr, udata->sizeof_prfx)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap structure")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap structure")
/* Allocate the heap prefix */
if(NULL == (prfx = H5HL_prfx_new(heap)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap prefix")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap prefix")
/* Store the prefix's address & length */
heap->prfx_addr = udata->prfx_addr;
@@ -313,8 +315,9 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Free list head */
H5F_DECODE_LENGTH_LEN(p, heap->free_block, udata->sizeof_size);
- if(heap->free_block != H5HL_FREE_NULL && heap->free_block >= heap->dblk_size)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap free list")
+
+ if((heap->free_block != H5HL_FREE_NULL) && (heap->free_block >= heap->dblk_size))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap free list")
/* Heap data address */
H5F_addr_decode_len(udata->sizeof_addr, &p, &(heap->dblk_addr));
@@ -347,7 +350,7 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
} /* end else */
/* Build free list */
- if(H5HL_fl_deserialize(heap) < 0)
+ if(H5HL__fl_deserialize(heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't initialize free list")
} /* end if */
else
@@ -453,7 +456,7 @@ H5HL_prefix_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
} /* end if */
/* Serialize the free list into the heap data's image */
- H5HL_fl_serialize(heap);
+ H5HL__fl_serialize(heap);
/* Copy the heap data block into the cache image */
HDmemcpy(p, heap->dblk_image, heap->dblk_size);
@@ -632,7 +635,7 @@ H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
FUNC_ENTER_NOAPI_NOINIT
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata);
@@ -642,7 +645,7 @@ H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space in memory for the heap data block */
if(NULL == (dblk = H5HL_dblk_new(udata->heap)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed")
/* Check for heap still retaining image */
if(NULL == udata->heap->dblk_image) {
@@ -655,7 +658,7 @@ H5HL_datablock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read local heap data block")
/* Build free list */
- if(H5HL_fl_deserialize(udata->heap) < 0)
+ if(H5HL__fl_deserialize(udata->heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't initialize free list")
} /* end if */
@@ -669,7 +672,7 @@ done:
/* Release the [possibly partially initialized] local heap on errors */
if(!ret_value && dblk)
if(H5HL_dblk_dest(dblk) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, NULL, "unable to destroy local heap data block")
+ HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, NULL, "unable to destroy local heap data block")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HL_datablock_load() */
@@ -712,7 +715,7 @@ H5HL_datablock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
heap->free_block = heap->freelist ? heap->freelist->offset : H5HL_FREE_NULL;
/* Serialize the free list into the heap data's image */
- H5HL_fl_serialize(heap);
+ H5HL__fl_serialize(heap);
/* Write the data block to the file */
if(H5F_block_write(f, H5FD_MEM_LHEAP, heap->dblk_addr, heap->dblk_size, dxpl_id, heap->dblk_image) < 0)
@@ -752,7 +755,7 @@ H5HL_datablock_dest(H5F_t *f, void *_thing)
FUNC_ENTER_NOAPI_NOINIT
- /* check arguments */
+ /* Check arguments */
HDassert(dblk);
HDassert(dblk->heap);
HDassert(!dblk->heap->single_cache_obj);
@@ -802,7 +805,7 @@ H5HL_datablock_clear(H5F_t *f, void *_thing, hbool_t destroy)
FUNC_ENTER_NOAPI_NOINIT
- /* check arguments */
+ /* Check arguments */
HDassert(dblk);
/* Mark local heap data block as clean */
diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h
index bf9be2c..880cc05 100644
--- a/src/H5HLpkg.h
+++ b/src/H5HLpkg.h
@@ -114,9 +114,9 @@ struct H5HL_dblk_t {
/* Struct for heap prefix */
struct H5HL_prfx_t {
- H5AC_info_t cache_info; /* Information for H5AC cache functions, _must_ be */
- /* first field in structure */
- H5HL_t *heap; /* Pointer to heap for prefix */
+ H5AC_info_t cache_info; /* Information for H5AC cache functions, */
+ /* _must_ be first field in structure */
+ H5HL_t *heap; /* Pointer to heap for prefix */
};
/* Callback information for loading local heap prefix from disk */
diff --git a/src/H5I.c b/src/H5I.c
index 54b7ecc..528be81 100644
--- a/src/H5I.c
+++ b/src/H5I.c
@@ -433,7 +433,7 @@ H5Inmembers(H5I_type_t type, hsize_t *num_members)
if((members = H5I_nmembers(type)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTCOUNT, FAIL, "can't compute number of members")
- H5_ASSIGN_OVERFLOW(*num_members, members, int64_t, hsize_t);
+ H5_CHECKED_ASSIGN(*num_members, hsize_t, members, int64_t);
} /* end if */
done:
@@ -460,7 +460,7 @@ int64_t
H5I_nmembers(H5I_type_t type)
{
H5I_id_type_t *type_ptr = NULL;
- int ret_value;
+ int64_t ret_value;
FUNC_ENTER_NOAPI(FAIL)
@@ -470,7 +470,7 @@ H5I_nmembers(H5I_type_t type)
HGOTO_DONE(0);
/* Set return value */
- H5_ASSIGN_OVERFLOW(ret_value, type_ptr->id_count, uint64_t, int64_t);
+ H5_CHECKED_ASSIGN(ret_value, int64_t, type_ptr->id_count, uint64_t);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -593,15 +593,9 @@ H5I_clear_type(H5I_type_t type, hbool_t force, hbool_t app_ref)
/* Check if we should delete this node or not */
if(delete_node) {
- /* Decrement the number of IDs in the type */
- (type_ptr->id_count)--;
-
- /* Remove the node from the list */
- if(NULL == H5SL_remove(type_ptr->ids, &cur->id))
- HGOTO_ERROR(H5E_ATOM, H5E_CANTDELETE, FAIL, "can't remove ID node from skip list")
-
- /* Free the node */
- cur = H5FL_FREE(H5I_id_info_t, cur);
+ /* Remove the node from the type */
+ if(NULL == H5I__remove_common(type_ptr, cur->id))
+ HGOTO_ERROR(H5E_ATOM, H5E_CANTDELETE, FAIL, "can't remove ID node")
} /* end if */
} /* end for */
@@ -1208,8 +1202,6 @@ done:
int
H5I_dec_ref(hid_t id)
{
- H5I_type_t type; /*type the object is in*/
- H5I_id_type_t *type_ptr; /*ptr to the type */
H5I_id_info_t *id_ptr; /*ptr to the new ID */
int ret_value; /* Return value */
@@ -1218,17 +1210,9 @@ H5I_dec_ref(hid_t id)
/* Sanity check */
HDassert(id >= 0);
- /* Check arguments */
- type = H5I_TYPE(id);
- if(type <= H5I_BADID || type >= H5I_next_type)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
- type_ptr = H5I_id_type_list_g[type];
- if(NULL == type_ptr || type_ptr->init_count <= 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
-
/* General lookup of the ID */
- if(NULL == (id_ptr = (H5I_id_info_t *)H5SL_search(type_ptr->ids, &id)))
- HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't locate ID")
+ if(NULL == (id_ptr = H5I__find_id(id)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't locate ID")
/*
* If this is the last reference to the object then invoke the type's
@@ -1246,6 +1230,11 @@ H5I_dec_ref(hid_t id)
* file. We have to close the dataset anyway. (SLU - 2010/9/7)
*/
if(1 == id_ptr->count) {
+ H5I_id_type_t *type_ptr; /*ptr to the type */
+
+ /* Get the ID's type */
+ type_ptr = H5I_id_type_list_g[H5I_TYPE(id)];
+
/* (Casting away const OK -QAK) */
if(!type_ptr->cls->free_func || (type_ptr->cls->free_func)((void *)id_ptr->obj_ptr) >= 0) {
/* Remove the node from the type */
@@ -1409,8 +1398,6 @@ done:
int
H5I_inc_ref(hid_t id, hbool_t app_ref)
{
- H5I_type_t type; /*type the object is in*/
- H5I_id_type_t *type_ptr; /*ptr to the type */
H5I_id_info_t *id_ptr; /*ptr to the ID */
int ret_value; /* Return value */
@@ -1419,16 +1406,8 @@ H5I_inc_ref(hid_t id, hbool_t app_ref)
/* Sanity check */
HDassert(id >= 0);
- /* Check arguments */
- type = H5I_TYPE(id);
- if(type <= H5I_BADID || type >= H5I_next_type)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
- type_ptr = H5I_id_type_list_g[type];
- if(!type_ptr || type_ptr->init_count <= 0)
- HGOTO_ERROR(H5E_ATOM, H5E_BADGROUP, FAIL, "invalid type")
-
/* General lookup of the ID */
- if(NULL == (id_ptr = (H5I_id_info_t *)H5SL_search(type_ptr->ids, &id)))
+ if(NULL == (id_ptr = H5I__find_id(id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't locate ID")
/* Adjust reference counts */
@@ -1494,8 +1473,6 @@ done:
int
H5I_get_ref(hid_t id, hbool_t app_ref)
{
- H5I_type_t type; /*type the object is in*/
- H5I_id_type_t *type_ptr; /*ptr to the type */
H5I_id_info_t *id_ptr; /*ptr to the ID */
int ret_value; /* Return value */
@@ -1504,16 +1481,8 @@ H5I_get_ref(hid_t id, hbool_t app_ref)
/* Sanity check */
HDassert(id >= 0);
- /* Check arguments */
- type = H5I_TYPE(id);
- if(type <= H5I_BADID || type >= H5I_next_type)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
- type_ptr = H5I_id_type_list_g[type];
- if(!type_ptr || type_ptr->init_count <= 0)
- HGOTO_ERROR(H5E_ATOM, H5E_BADGROUP, FAIL, "invalid type")
-
/* General lookup of the ID */
- if(NULL == (id_ptr = (H5I_id_info_t *)H5SL_search(type_ptr->ids, &id)))
+ if(NULL == (id_ptr = H5I__find_id(id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't locate ID")
/* Set return value */
@@ -2016,20 +1985,20 @@ done:
static H5I_id_info_t *
H5I__find_id(hid_t id)
{
- H5I_id_type_t *type_ptr; /*ptr to the type */
H5I_type_t type; /*ID's type */
+ H5I_id_type_t *type_ptr; /*ptr to the type */
H5I_id_info_t *ret_value; /*return value */
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
type = H5I_TYPE(id);
- if (type <= H5I_BADID || type >= H5I_next_type)
- HGOTO_DONE(NULL);
+ if(type <= H5I_BADID || type >= H5I_next_type)
+ HGOTO_DONE(NULL)
type_ptr = H5I_id_type_list_g[type];
- if (!type_ptr || type_ptr->init_count <= 0)
- HGOTO_DONE(NULL);
+ if(!type_ptr || type_ptr->init_count <= 0)
+ HGOTO_DONE(NULL)
/* Locate the ID node for the ID */
ret_value = (H5I_id_info_t *)H5SL_search(type_ptr->ids, &id);
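
The H5I.c hunks above fold the repeated type-validation and skip-list search out of H5I_dec_ref(), H5I_inc_ref() and H5I_get_ref() and into the static H5I__find_id() helper, with node removal likewise routed through H5I__remove_common(). A rough, self-contained sketch of that consolidation pattern follows; the types and the array-backed registry are invented, standing in for the skip list.

    /* Lookup consolidation sketch: one static find helper owns the argument
     * checks and the search; the ref-count routines just use its result. */
    #include <stddef.h>
    #include <stdio.h>

    typedef struct id_info_t {
        int id;
        int count;                  /* reference count */
    } id_info_t;

    static id_info_t registry[] = { { 1, 2 }, { 2, 1 } };

    static id_info_t *
    find_id(int id)
    {
        if(id < 0)                  /* validity checks live here, once */
            return NULL;
        for(size_t u = 0; u < sizeof(registry) / sizeof(registry[0]); u++)
            if(registry[u].id == id)
                return &registry[u];
        return NULL;
    }

    static int
    dec_ref(int id)
    {
        id_info_t *info;

        if(NULL == (info = find_id(id)))    /* same shape as the H5I__find_id() use */
            return -1;
        return --info->count;
    }

    static int
    inc_ref(int id)
    {
        id_info_t *info;

        if(NULL == (info = find_id(id)))
            return -1;
        return ++info->count;
    }

    int
    main(void)
    {
        printf("%d %d\n", inc_ref(1), dec_ref(2));   /* prints "3 0" */
        return 0;
    }
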
diff --git a/src/H5MF.c b/src/H5MF.c
index 29e9ece..eecd724 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1383,7 +1383,7 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
/* Query how many sections of this type */
if(H5FS_sect_stats(f->shared->fs_man[ty], NULL, &hnums) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
- H5_ASSIGN_OVERFLOW(nums, hnums, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(nums, size_t, hnums, hsize_t);
/* Increment total # of sections */
total_sects += nums;
diff --git a/src/H5O.c b/src/H5O.c
index 1fd7225..6745716 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -2716,8 +2716,8 @@ H5O_get_hdr_info_real(const H5O_t *oh, H5O_hdr_info_t *hdr)
hdr->version = oh->version;
/* Set the number of messages & chunks */
- H5_ASSIGN_OVERFLOW(hdr->nmesgs, oh->nmesgs, size_t, unsigned);
- H5_ASSIGN_OVERFLOW(hdr->nchunks, oh->nchunks, size_t, unsigned);
+ H5_CHECKED_ASSIGN(hdr->nmesgs, unsigned, oh->nmesgs, size_t);
+ H5_CHECKED_ASSIGN(hdr->nchunks, unsigned, oh->nchunks, size_t);
/* Set the status flags */
hdr->flags = oh->flags;
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 2269bd3..9710f5d 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -218,7 +218,7 @@ H5O_attr_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned UNUSED mesg_fl
p += attr->shared->ds_size;
/* Compute the size of the data */
- H5_ASSIGN_OVERFLOW(attr->shared->data_size, H5S_GET_EXTENT_NPOINTS(attr->shared->ds) * H5T_get_size(attr->shared->dt), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(attr->shared->data_size, size_t, H5S_GET_EXTENT_NPOINTS(attr->shared->ds) * H5T_get_size(attr->shared->dt), hsize_t);
/* Go get the data */
if(attr->shared->data_size) {
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index b3dea03..3ccdf11 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -81,15 +81,15 @@ static herr_t H5O_cache_chk_clear(H5F_t *f, H5O_chunk_proxy_t *chk_proxy, hbool_
static herr_t H5O_cache_chk_size(const H5F_t *f, const H5O_chunk_proxy_t *chk_proxy, size_t *size_ptr);
/* Chunk proxy routines */
-static herr_t H5O_chunk_proxy_dest(H5O_chunk_proxy_t *chunk_proxy);
+static herr_t H5O__chunk_proxy_dest(H5O_chunk_proxy_t *chunk_proxy);
/* Chunk routines */
-static herr_t H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len,
+static herr_t H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len,
const uint8_t *image, H5O_common_cache_ud_t *udata, hbool_t *dirty);
-static herr_t H5O_chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno);
+static herr_t H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno);
/* Misc. routines */
-static herr_t H5O_add_cont_msg(H5O_cont_msgs_t *cont_msg_info,
+static herr_t H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info,
const H5O_cont_t *cont);
@@ -184,7 +184,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to determine file size")
/* Compute the size of the speculative object header buffer */
- H5_ASSIGN_OVERFLOW(spec_read_size, MIN(eoa - addr, H5O_SPEC_READ_SIZE), /* From: */ hsize_t, /* To: */ size_t);
+ H5_CHECKED_ASSIGN(spec_read_size, size_t, MIN(eoa - addr, H5O_SPEC_READ_SIZE), hsize_t);
/* Attempt to speculatively read both object header prefix and first chunk */
if(H5F_block_read(f, H5FD_MEM_OHDR, addr, spec_read_size, dxpl_id, read_buf) < 0)
@@ -193,7 +193,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Allocate space for the object header data structure */
if(NULL == (oh = H5FL_CALLOC(H5O_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* File-specific, non-stored information */
oh->sizeof_size = H5F_SIZEOF_SIZE(udata->common.f);
@@ -333,7 +333,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
buf = read_buf;
/* Parse the first chunk */
- if(H5O_chunk_deserialize(oh, udata->common.addr, oh->chunk0_size, buf, &(udata->common), &oh->cache_info.is_dirty) < 0)
+ if(H5O__chunk_deserialize(oh, udata->common.addr, oh->chunk0_size, buf, &(udata->common), &oh->cache_info.is_dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk")
/* Note that we've loaded the object header from the file */
@@ -350,7 +350,7 @@ done:
/* Release the [possibly partially initialized] object header on errors */
if(!ret_value && oh)
if(H5O_free(oh) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header data")
+ HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header data")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O_load() */
@@ -480,7 +480,7 @@ H5O_assert(oh);
HDassert((size_t)(p - oh->chunk[0].image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
/* Serialize messages for this chunk */
- if(H5O_chunk_serialize(f, oh, (unsigned)0) < 0)
+ if(H5O__chunk_serialize(f, oh, (unsigned)0) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize first object header chunk")
/* Write the chunk out */
@@ -522,7 +522,7 @@ H5O_dest(H5F_t *f, H5O_t *oh)
FUNC_ENTER_NOAPI_NOINIT
- /* check args */
+ /* Check arguments */
HDassert(oh);
HDassert(oh->rc == 0);
@@ -585,7 +585,7 @@ H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy)
FUNC_ENTER_NOAPI_NOINIT
- /* check args */
+ /* Check arguments */
HDassert(oh);
#ifdef H5_HAVE_PARALLEL
@@ -603,16 +603,13 @@ H5O_clear(H5F_t *f, H5O_t *oh, hbool_t destroy)
for ( i = 0; i < oh->nchunks; i++ ) {
- if ( H5O_chunk_serialize(f, oh, i) < 0 ) {
-
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL,
- "unable to serialize object header chunk")
- }
+ if(H5O__chunk_serialize(f, oh, i) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
}
}
#endif /* H5_HAVE_PARALLEL */
- /* Mark messages as clean */
+ /* Mark messages stored with the object header (i.e. messages in chunk 0) as clean */
for(u = 0; u < oh->nmesgs; u++)
oh->mesg[u].dirty = FALSE;
@@ -722,7 +719,7 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata->common.cont_msg_info);
/* Parse the chunk */
- if(H5O_chunk_deserialize(udata->oh, udata->common.addr, udata->size, buf, &(udata->common), &chk_proxy->cache_info.is_dirty) < 0)
+ if(H5O__chunk_deserialize(udata->oh, udata->common.addr, udata->size, buf, &(udata->common), &chk_proxy->cache_info.is_dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize object header chunk")
/* Set the fields for the chunk proxy */
@@ -737,8 +734,8 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
chk_proxy->oh = udata->oh;
chk_proxy->chunkno = udata->chunkno;
- /* Sanity check that the chunk representation we have in memory is the same
- * as the one being brought in from disk.
+ /* Sanity check that the chunk representation we have in memory is
+ * the same as the one being brought in from disk.
*/
HDassert(0 == HDmemcmp(buf, chk_proxy->oh->chunk[chk_proxy->chunkno].image, chk_proxy->oh->chunk[chk_proxy->chunkno].size));
} /* end else */
@@ -757,7 +754,7 @@ done:
/* Release the [possibly partially initialized] object header on errors */
if(!ret_value && chk_proxy)
- if(H5O_chunk_proxy_dest(chk_proxy) < 0)
+ if(H5O__chunk_proxy_dest(chk_proxy) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header chunk proxy")
FUNC_LEAVE_NOAPI(ret_value)
@@ -788,7 +785,7 @@ H5O_cache_chk_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
/* flush */
if(chk_proxy->cache_info.is_dirty) {
/* Serialize messages for this chunk */
- if(H5O_chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
+ if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header continuation chunk")
/* Write the chunk out */
@@ -850,7 +847,7 @@ H5O_cache_chk_dest(H5F_t *f, H5O_chunk_proxy_t *chk_proxy)
} /* end if */
/* Destroy object header chunk proxy */
- if(H5O_chunk_proxy_dest(chk_proxy) < 0)
+ if(H5O__chunk_proxy_dest(chk_proxy) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header chunk proxy")
done:
@@ -903,18 +900,13 @@ H5O_cache_chk_clear(H5F_t *f, H5O_chunk_proxy_t *chk_proxy, hbool_t destroy)
FUNC_ENTER_NOAPI_NOINIT
- /* check args */
+ /* Check arguments */
HDassert(chk_proxy);
#ifdef H5_HAVE_PARALLEL
- if ( ( chk_proxy->oh->cache_info.is_dirty ) && ( ! destroy ) ) {
-
- if ( H5O_chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0 ) {
-
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL,
- "unable to serialize object header chunk")
- }
- }
+ if((chk_proxy->oh->cache_info.is_dirty) && (!destroy))
+ if(H5O__chunk_serialize(f, chk_proxy->oh, chk_proxy->chunkno) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "unable to serialize object header chunk")
#endif /* H5_HAVE_PARALLEL */
/* Mark messages in chunk as clean */
@@ -966,7 +958,7 @@ H5O_cache_chk_size(const H5F_t UNUSED *f, const H5O_chunk_proxy_t *chk_proxy, si
/*-------------------------------------------------------------------------
- * Function: H5O_add_cont_msg
+ * Function: H5O__add_cont_msg
*
* Purpose: Add information from a continuation message to the list of
* continuation messages in the object header
@@ -981,12 +973,12 @@ H5O_cache_chk_size(const H5F_t UNUSED *f, const H5O_chunk_proxy_t *chk_proxy, si
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont)
+H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont)
{
size_t contno; /* Continuation message index */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(cont_msg_info);
@@ -1011,11 +1003,11 @@ H5O_add_cont_msg(H5O_cont_msgs_t *cont_msg_info, const H5O_cont_t *cont)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5O_add_cont_msg() */
+} /* H5O__add_cont_msg() */
/*-------------------------------------------------------------------------
- * Function: H5O_chunk_deserialize
+ * Function: H5O__chunk_deserialize
*
* Purpose: Deserialize a chunk for an object header
*
@@ -1029,7 +1021,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
+H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
H5O_common_cache_ud_t *udata, hbool_t *dirty)
{
const uint8_t *p; /* Pointer into buffer to decode */
@@ -1042,7 +1034,7 @@ H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
#endif /* NDEBUG */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(oh);
@@ -1063,7 +1055,7 @@ H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
} /* end if */
/* Init the chunk data info */
- chunkno = oh->nchunks++;
+ chunkno = (unsigned)oh->nchunks++;
oh->chunk[chunkno].gap = 0;
if(chunkno == 0) {
/* First chunk's 'image' includes room for the object header prefix */
@@ -1294,7 +1286,7 @@ H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
oh->mesg[curmesg].native = cont;
/* Add to continuation messages left to interpret */
- if(H5O_add_cont_msg(udata->cont_msg_info, cont) < 0)
+ if(H5O__add_cont_msg(udata->cont_msg_info, cont) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add continuation message")
/* Mark the message & chunk as dirty if the message was changed by decoding */
@@ -1350,11 +1342,11 @@ H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5O_chunk_deserialize() */
+} /* H5O__chunk_deserialize() */
/*-------------------------------------------------------------------------
- * Function: H5O_chunk_serialize
+ * Function: H5O__chunk_serialize
*
* Purpose: Serialize a chunk for an object header
*
@@ -1368,13 +1360,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno)
+H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno)
{
H5O_mesg_t *curr_msg; /* Pointer to current message being operated on */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(f);
@@ -1415,11 +1407,11 @@ H5O_chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5O_chunk_serialize() */
+} /* H5O__chunk_serialize() */
/*-------------------------------------------------------------------------
- * Function: H5O_chunk_proxy_dest
+ * Function: H5O__chunk_proxy_dest
*
* Purpose: Destroy a chunk proxy object
*
@@ -1433,11 +1425,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_chunk_proxy_dest(H5O_chunk_proxy_t *chk_proxy)
+H5O__chunk_proxy_dest(H5O_chunk_proxy_t *chk_proxy)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(chk_proxy);
diff --git a/src/H5Ofill.c b/src/H5Ofill.c
index a0fc42e..33022fc 100644
--- a/src/H5Ofill.c
+++ b/src/H5Ofill.c
@@ -977,7 +977,7 @@ H5O_fill_convert(H5O_fill_t *fill, H5T_t *dset_type, hbool_t *fill_changed, hid_
} /* end if */
H5T_close(fill->type);
fill->type = NULL;
- H5_ASSIGN_OVERFLOW(fill->size, H5T_get_size(dset_type), size_t, ssize_t);
+ H5_CHECKED_ASSIGN(fill->size, ssize_t, H5T_get_size(dset_type), size_t);
/* Note that the fill value info has changed */
*fill_changed = TRUE;
diff --git a/src/H5Omessage.c b/src/H5Omessage.c
index 1369583..d361194 100644
--- a/src/H5Omessage.c
+++ b/src/H5Omessage.c
@@ -73,10 +73,10 @@ typedef struct {
/* Local Prototypes */
/********************/
-static herr_t H5O_msg_reset_real(const H5O_msg_class_t *type, void *native);
-static herr_t H5O_msg_remove_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
+static herr_t H5O__msg_reset_real(const H5O_msg_class_t *type, void *native);
+static herr_t H5O__msg_remove_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
unsigned sequence, unsigned *oh_modified, void *_udata/*in,out*/);
-static herr_t H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
+static herr_t H5O__copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
const H5O_msg_class_t *type, const void *mesg, unsigned mesg_flags,
unsigned update_flags);
@@ -224,7 +224,7 @@ H5O_msg_append_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh, const H5O_msg_class_t *t
HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "unable to create new message")
/* Copy the information for the message */
- if(H5O_copy_mesg(f, dxpl_id, oh, idx, type, mesg, mesg_flags, update_flags) < 0)
+ if(H5O__copy_mesg(f, dxpl_id, oh, idx, type, mesg, mesg_flags, update_flags) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to write message")
#ifdef H5O_DEBUG
H5O_assert(oh);
@@ -426,7 +426,7 @@ H5O_msg_write_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh, const H5O_msg_class_t *ty
} /* end if */
/* Copy the information for the message */
- if(H5O_copy_mesg(f, dxpl_id, oh, idx, type, mesg, mesg_flags, update_flags) < 0)
+ if(H5O__copy_mesg(f, dxpl_id, oh, idx, type, mesg, mesg_flags, update_flags) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to write message")
#ifdef H5O_DEBUG
H5O_assert(oh);
@@ -586,7 +586,7 @@ H5O_msg_reset(unsigned type_id, void *native)
HDassert(type);
/* Call the "real" reset routine */
- if(H5O_msg_reset_real(type, native) < 0)
+ if(H5O__msg_reset_real(type, native) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTRESET, FAIL, "unable to reset object header")
done:
@@ -595,7 +595,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5O_msg_reset_real
+ * Function: H5O__msg_reset_real
*
* Purpose: Some message data structures have internal fields that
* need to be freed. This function does that if appropriate
@@ -610,11 +610,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_msg_reset_real(const H5O_msg_class_t *type, void *native)
+H5O__msg_reset_real(const H5O_msg_class_t *type, void *native)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check args */
HDassert(type);
@@ -630,7 +630,7 @@ H5O_msg_reset_real(const H5O_msg_class_t *type, void *native)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_msg_reset_real() */
+} /* end H5O__msg_reset_real() */
/*-------------------------------------------------------------------------
@@ -719,7 +719,7 @@ H5O_msg_free_real(const H5O_msg_class_t *type, void *msg_native)
HDassert(type);
if(msg_native) {
- H5O_msg_reset_real(type, msg_native);
+ H5O__msg_reset_real(type, msg_native);
if(NULL != (type->free))
(type->free)(msg_native);
else
@@ -807,7 +807,7 @@ H5O_msg_count(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
/* Count the messages of the correct type */
msg_count = H5O_msg_count_real(oh, type);
- H5_ASSIGN_OVERFLOW(ret_value, msg_count, unsigned, int);
+ H5_CHECKED_ASSIGN(ret_value, int, msg_count, unsigned);
done:
if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
@@ -1049,7 +1049,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5O_msg_remove_cb
+ * Function: H5O__msg_remove_cb
*
* Purpose: Object header iterator callback routine to remove messages
* of a particular type that match a particular sequence number,
@@ -1064,14 +1064,14 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_msg_remove_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/, unsigned sequence,
+H5O__msg_remove_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/, unsigned sequence,
unsigned *oh_modified, void *_udata/*in,out*/)
{
H5O_iter_rm_t *udata = (H5O_iter_rm_t *)_udata; /* Operator user data */
htri_t try_remove = FALSE; /* Whether to try removing a message */
herr_t ret_value = H5_ITER_CONT; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check args */
HDassert(mesg);
@@ -1109,7 +1109,7 @@ H5O_msg_remove_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/, unsigned sequence,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_msg_remove_cb() */
+} /* end H5O__msg_remove_cb() */
/*-------------------------------------------------------------------------
@@ -1160,7 +1160,7 @@ H5O_msg_remove_real(H5F_t *f, H5O_t *oh, const H5O_msg_class_t *type,
/* Iterate over the messages, deleting appropriate one(s) */
op.op_type = H5O_MESG_OP_LIB;
- op.u.lib_op = H5O_msg_remove_cb;
+ op.u.lib_op = H5O__msg_remove_cb;
if(H5O_msg_iterate_real(f, oh, type, &op, &udata, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "error iterating over messages")
@@ -1950,7 +1950,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5O_copy_mesg
+ * Function: H5O__copy_mesg
*
* Purpose: Make a copy of the native object for an object header's
* native message info
@@ -1963,7 +1963,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
+H5O__copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
const H5O_msg_class_t *type, const void *mesg, unsigned mesg_flags,
unsigned update_flags)
{
@@ -1972,7 +1972,7 @@ H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
/* check args */
HDassert(f);
@@ -1986,7 +1986,7 @@ H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t idx,
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk")
/* Reset existing native information for the header's message */
- H5O_msg_reset_real(type, idx_msg->native);
+ H5O__msg_reset_real(type, idx_msg->native);
/* Copy the native object for the message */
if(NULL == (idx_msg->native = (type->copy)(mesg, idx_msg->native)))
@@ -2015,7 +2015,7 @@ done:
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5O_copy_mesg() */
+} /* end H5O__copy_mesg() */
/*-------------------------------------------------------------------------
@@ -2300,7 +2300,7 @@ H5O_msg_get_chunkno(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "message type not found")
/* Set return value */
- H5_ASSIGN_OVERFLOW(ret_value, idx_msg->chunkno, unsigned, int);
+ H5_CHECKED_ASSIGN(ret_value, int, idx_msg->chunkno, unsigned);
done:
if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 09a5b9c..1059e8b 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -467,8 +467,8 @@ typedef struct H5O_layout_chunk_t {
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
uint32_t size; /* Size of chunk in bytes */
hsize_t nchunks; /* Number of chunks in dataset */
- hsize_t chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in dataset dimensions */
- hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
+ hsize_t chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in each dataset dimension */
+ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
} H5O_layout_chunk_t;
typedef struct H5O_layout_t {
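The comment fix above distinguishes chunks[] (number of chunks in each dataset dimension) from down_chunks[] (the accumulated "down" products of those counts, built by H5VM_array_down() and consumed by H5VM_array_offset_pre()). A hedged sketch of how the two arrays relate for a 2-D dataset, assuming the usual row-major "down" products; the values are illustrative only:

    hsize_t chunks[2]      = {3, 6};   /* 3 chunks in the slow dim, 6 in the fast dim */
    hsize_t down_chunks[2] = {6, 1};   /* product of the chunk counts below each dim */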
diff --git a/src/H5PL.c b/src/H5PL.c
index 619dd84..f4cd3e7 100644
--- a/src/H5PL.c
+++ b/src/H5PL.c
@@ -237,10 +237,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5PLget_loading_state(unsigned int* plugin_type)
+H5PLget_loading_state(unsigned int *plugin_type)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "*Iu", plugin_type);
if(plugin_type)
*plugin_type = H5PL_plugin_g;
diff --git a/src/H5Pencdec.c b/src/H5Pencdec.c
index 3285ae3..bd0a260 100644
--- a/src/H5Pencdec.c
+++ b/src/H5Pencdec.c
@@ -509,7 +509,7 @@ H5P__decode_size_t(const void **_pp, void *_value)
/* Decode the value */
UINT64DECODE_VAR(*pp, enc_value, enc_size);
- H5_ASSIGN_OVERFLOW(*value, enc_value, uint64_t, size_t);
+ H5_CHECKED_ASSIGN(*value, size_t, enc_value, uint64_t);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5P__decode_size_t() */
@@ -550,7 +550,7 @@ H5P__decode_hsize_t(const void **_pp, void *_value)
/* Decode the value */
UINT64DECODE_VAR(*pp, enc_value, enc_size);
- H5_ASSIGN_OVERFLOW(*value, enc_value, uint64_t, hsize_t);
+ H5_CHECKED_ASSIGN(*value, hsize_t, enc_value, uint64_t);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5P__decode_hsize_t() */
diff --git a/src/H5Pgcpl.c b/src/H5Pgcpl.c
index b7d5106..1e3a278 100644
--- a/src/H5Pgcpl.c
+++ b/src/H5Pgcpl.c
@@ -179,7 +179,7 @@ H5Pset_local_heap_size_hint(hid_t plist_id, size_t size_hint)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get group info")
/* Update field */
- H5_ASSIGN_OVERFLOW(ginfo.lheap_size_hint, size_hint, size_t, uint32_t);
+ H5_CHECKED_ASSIGN(ginfo.lheap_size_hint, uint32_t, size_hint, size_t);
/* Set value */
if(H5P_set(plist, H5G_CRT_GROUP_INFO_NAME, &ginfo) < 0)
diff --git a/src/H5Rpublic.h b/src/H5Rpublic.h
index 997309f..e990661 100644
--- a/src/H5Rpublic.h
+++ b/src/H5Rpublic.h
@@ -14,7 +14,7 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * This file contains public declarations for the H5S module.
+ * This file contains public declarations for the H5R module.
*/
#ifndef _H5Rpublic_H
#define _H5Rpublic_H
diff --git a/src/H5S.c b/src/H5S.c
index 045ab51..4a4ed8d 100644
--- a/src/H5S.c
+++ b/src/H5S.c
@@ -1564,7 +1564,7 @@ H5S_encode(H5S_t *obj, unsigned char *buf, size_t *nalloc)
/* Find out the size of buffer needed for selection */
if((sselect_size = H5S_SELECT_SERIAL_SIZE(f, obj)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSIZE, FAIL, "can't find dataspace selection size")
- H5_ASSIGN_OVERFLOW(select_size, sselect_size, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(select_size, size_t, sselect_size, hssize_t);
/* Verify the size of buffer. If it's not big enough, simply return the
* right size without filling the buffer. */
@@ -2201,14 +2201,13 @@ H5S_extend(H5S_t *space, const hsize_t *size)
HDassert(size);
/* Check through all the dimensions to see if modifying the dataspace is allowed */
- for(u = 0; u < space->extent.rank; u++) {
- if(space->extent.size[u]<size[u]) {
- if(space->extent.max && H5S_UNLIMITED!=space->extent.max[u] &&
- space->extent.max[u]<size[u])
+ for(u = 0; u < space->extent.rank; u++)
+ if(space->extent.size[u] < size[u]) {
+ if(space->extent.max && H5S_UNLIMITED != space->extent.max[u] &&
+ space->extent.max[u] < size[u])
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dimension cannot be increased")
ret_value++;
} /* end if */
- } /* end for */
/* Update */
if(ret_value) {
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index 9955f39..2e69899 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -13,6 +13,17 @@
* access to either file, you may request a copy from help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5SMcache.c
+ * Nov 13 2006
+ * James Laird <jlaird@hdfgroup.org>
+ *
+ * Purpose: Implement shared message metadata cache methods.
+ *
+ *-------------------------------------------------------------------------
+ */
+
/****************/
/* Module Setup */
/****************/
@@ -68,6 +79,7 @@ static herr_t H5SM_list_size(const H5F_t *f, const H5SM_list_t UNUSED *list, siz
/*********************/
/* Package Variables */
/*********************/
+
/* H5SM inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_SOHM_TABLE[1] = {{
H5AC_SOHM_TABLE_ID,
@@ -136,7 +148,7 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
/* Allocate space for the master table in memory */
if(NULL == (table = H5FL_CALLOC(H5SM_master_table_t)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed")
/* Read number of indexes and version from file superblock */
table->num_indexes = H5F_SOHM_NINDEXES(f);
@@ -172,7 +184,7 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
/* Allocate space for the index headers in memory*/
if(NULL == (table->indexes = (H5SM_index_header_t *)H5FL_ARR_MALLOC(H5SM_index_header_t, (size_t)table->num_indexes)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed for SOHM indexes")
+ HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed for SOHM indexes")
/* Read in the index headers */
for(x = 0; x < table->num_indexes; ++x) {
@@ -230,7 +242,7 @@ done:
HDONE_ERROR(H5E_SOHM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value && table)
if(H5SM_table_free(table) < 0)
- HDONE_ERROR(H5E_SOHM, H5E_CANTFREE, NULL, "unable to destroy sohm table")
+ HDONE_ERROR(H5E_SOHM, H5E_CANTFREE, NULL, "unable to destroy sohm table")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5SM_table_load() */
@@ -363,7 +375,7 @@ H5SM_table_dest(H5F_t UNUSED *f, H5SM_master_table_t* table)
FUNC_ENTER_NOAPI_NOINIT
- /* Sanity check */
+ /* Check arguments */
HDassert(table);
HDassert(table->indexes);
@@ -429,7 +441,7 @@ H5SM_table_size(const H5F_t UNUSED *f, const H5SM_master_table_t *table, size_t
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
- /* check arguments */
+ /* Check arguments */
HDassert(f);
HDassert(table);
HDassert(size_ptr);
@@ -470,18 +482,17 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
FUNC_ENTER_NOAPI_NOINIT
- /* Sanity check */
+ /* Check arguments */
HDassert(udata->header);
/* Allocate space for the SOHM list data structure */
if(NULL == (list = H5FL_MALLOC(H5SM_list_t)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&list->cache_info, 0, sizeof(H5AC_info_t));
/* Allocate list in memory as an array*/
- if((list->messages = (H5SM_sohm_t *)H5FL_ARR_MALLOC(H5SM_sohm_t, udata->header->list_max)) == NULL)
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "file allocation failed for SOHM list")
-
+ if(NULL == (list->messages = (H5SM_sohm_t *)H5FL_ARR_MALLOC(H5SM_sohm_t, udata->header->list_max)))
+ HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "file allocation failed for SOHM list")
list->header = udata->header;
/* Wrap the local buffer for serialized list index info */
@@ -701,9 +712,7 @@ H5SM_list_clear(H5F_t *f, H5SM_list_t *list, hbool_t destroy)
FUNC_ENTER_NOAPI_NOINIT
- /*
- * Check arguments.
- */
+ /* Check arguments */
HDassert(list);
/* Reset the dirty flag. */
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 4e82aed..a483c21 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -8032,7 +8032,7 @@ H5S_hyper_get_seq_list_gen(const H5S_t *space,H5S_sel_iter_t *iter,
/* Finish the span in the fastest changing dimension */
/* Compute the number of bytes to attempt in this span */
- H5_ASSIGN_OVERFLOW(span_size,((curr_span->high-abs_arr[fast_dim])+1)*elem_size,hsize_t,size_t);
+ H5_CHECKED_ASSIGN(span_size, size_t, ((curr_span->high-abs_arr[fast_dim])+1)*elem_size, hsize_t);
/* Check number of bytes against upper bounds allowed */
if(span_size>io_bytes_left)
@@ -8175,7 +8175,7 @@ H5S_hyper_get_seq_list_gen(const H5S_t *space,H5S_sel_iter_t *iter,
loc_off += curr_span->pstride;
/* Compute the number of elements to attempt in this span */
- H5_ASSIGN_OVERFLOW(span_size, curr_span->nelem, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(span_size, size_t, curr_span->nelem, hsize_t);
/* Check number of elements against upper bounds allowed */
if(span_size >= io_bytes_left) {
@@ -8505,7 +8505,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
loc += offset[u] * slab[u];
/* Set the number of elements to write each time */
- H5_ASSIGN_OVERFLOW(actual_elem, tdiminfo[fast_dim].block, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(actual_elem, size_t, tdiminfo[fast_dim].block, hsize_t);
/* Set the number of actual bytes */
actual_bytes = actual_elem * elem_size;
@@ -8514,7 +8514,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
fast_dim_start = tdiminfo[fast_dim].start;
fast_dim_stride = tdiminfo[fast_dim].stride;
fast_dim_block = tdiminfo[fast_dim].block;
- H5_ASSIGN_OVERFLOW(fast_dim_buf_off, slab[fast_dim] * fast_dim_stride, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(fast_dim_buf_off, size_t, slab[fast_dim] * fast_dim_stride, hsize_t);
fast_dim_offset = (hsize_t)((hssize_t)fast_dim_start + sel_off[fast_dim]);
/* Compute the number of blocks which would fit into the buffer */
@@ -8535,7 +8535,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
/* Check if there is a partial row left (with full blocks) */
if(tmp_count[fast_dim] > 0) {
/* Get number of blocks in fastest dimension */
- H5_ASSIGN_OVERFLOW(fast_dim_count, tdiminfo[fast_dim].count - tmp_count[fast_dim], hsize_t, size_t);
+ H5_CHECKED_ASSIGN(fast_dim_count, size_t, tdiminfo[fast_dim].count - tmp_count[fast_dim], hsize_t);
/* Make certain this entire row will fit into buffer */
fast_dim_count = MIN(fast_dim_count, tot_blk_count);
@@ -8620,7 +8620,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
curr_rows = total_rows = (size_t)(tot_blk_count / tdiminfo[fast_dim].count);
/* Reset copy of number of blocks in fastest dimension */
- H5_ASSIGN_OVERFLOW(fast_dim_count, tdiminfo[fast_dim].count, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(fast_dim_count, size_t, tdiminfo[fast_dim].count, hsize_t);
/* Read in data until an entire sequence can't be written out any longer */
while(curr_rows > 0) {
@@ -8925,7 +8925,7 @@ H5S_hyper_get_seq_list_single(const H5S_t *space, H5S_sel_iter_t *iter,
tot_blk_count = MIN(tot_blk_count, maxseq);
/* Set the number of elements to write each time */
- H5_ASSIGN_OVERFLOW(actual_elem, fast_dim_block, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(actual_elem, size_t, fast_dim_block, hsize_t);
/* Check for blocks to operate on */
if(tot_blk_count > 0) {
@@ -9182,10 +9182,10 @@ H5S_hyper_get_seq_list(const H5S_t *space, unsigned UNUSED flags, H5S_sel_iter_t
/* Calculate the number of elements left in the sequence */
if(tdiminfo[fast_dim].count == 1) {
- H5_ASSIGN_OVERFLOW(leftover, tdiminfo[fast_dim].block - (iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(leftover, size_t, tdiminfo[fast_dim].block - (iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start), hsize_t);
} /* end if */
else {
- H5_ASSIGN_OVERFLOW(leftover, tdiminfo[fast_dim].block - ((iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start) % tdiminfo[fast_dim].stride), hsize_t, size_t);
+ H5_CHECKED_ASSIGN(leftover, size_t, tdiminfo[fast_dim].block - ((iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start) % tdiminfo[fast_dim].stride), hsize_t);
} /* end else */
/* Make certain that we don't write too many */
@@ -9204,7 +9204,7 @@ H5S_hyper_get_seq_list(const H5S_t *space, unsigned UNUSED flags, H5S_sel_iter_t
/* Add a new sequence */
off[0] = loc;
- H5_ASSIGN_OVERFLOW(len[0], actual_elem * elem_size, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(len[0], size_t, actual_elem * elem_size, hsize_t);
/* Increment sequence array locations */
off++;
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index def7598..c2190b4 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -94,13 +94,13 @@ H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
/* Just treat the entire extent as a block of bytes */
if((snelmts = (hssize_t)H5S_GET_EXTENT_NPOINTS(space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
- H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
+ H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);
total_bytes = (hsize_t)elmt_size * nelmts;
/* fill in the return values */
*new_type = MPI_BYTE;
- H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, int);
+ H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t);
*is_derived_type = FALSE;
done:
@@ -388,7 +388,7 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
sel_iter_init = TRUE; /* Selection iteration info has been initialized */
/* Set the number of elements to iterate over */
- H5_ASSIGN_OVERFLOW(max_elem, num_points, hsize_t, size_t);
+ H5_CHECKED_ASSIGN(max_elem, size_t, num_points, hsize_t);
/* Loop, while elements left in selection */
u = 0;
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index b22ea64..c120769 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -1373,7 +1373,7 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
space_size[ndims] = elmt_size;
/* Compute the maximum number of bytes required */
- H5_ASSIGN_OVERFLOW(max_elem, nelmts, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(max_elem, size_t, nelmts, hssize_t);
/* Loop, while elements left in selection */
while(max_elem > 0 && user_ret == 0) {
@@ -2132,7 +2132,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
/* Compute the number of bytes to process */
- H5_ASSIGN_OVERFLOW(max_elem, nelmts, hssize_t, size_t);
+ H5_CHECKED_ASSIGN(max_elem, size_t, nelmts, hssize_t);
/* Loop, while elements left in selection */
while(max_elem > 0) {
diff --git a/src/H5T.c b/src/H5T.c
index 9320d28..2758395 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -1,3 +1,4 @@
+
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
@@ -67,10 +68,6 @@
*
*/
-/* Define the code template for types which need no extra initialization for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_NONE_CORE { \
-}
-
/* Define the code template for bitfields for the "GUTS" in the H5T_INIT_TYPE macro */
#define H5T_INIT_TYPE_BITFIELD_CORE { \
dt->shared->type = H5T_BITFIELD; \
@@ -1986,7 +1983,7 @@ H5T_detect_class(const H5T_t *dt, H5T_class_t cls, hbool_t from_api)
case H5T_VLEN:
case H5T_ENUM:
HGOTO_DONE(H5T_detect_class(dt->shared->parent, cls, from_api));
-
+ break;
case H5T_NO_CLASS:
case H5T_INTEGER:
case H5T_FLOAT:
@@ -3211,7 +3208,7 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
switch(new_dt->shared->type) {
case H5T_COMPOUND:
{
- int accum_change = 0; /* Amount of change in the offset of the fields */
+ ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
/*
* Copy all member fields to new type, then overwrite the
@@ -3239,13 +3236,17 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
new_dt->shared->u.compnd.memb[i].type = tmp;
HDassert(tmp != NULL);
+ /* Range check against compound member's offset */
+ if ((accum_change < 0) && ((ssize_t) new_dt->shared->u.compnd.memb[i].offset < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
+
/* Apply the accumulated size change to the offset of the field */
- new_dt->shared->u.compnd.memb[i].offset += accum_change;
+ new_dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
if(old_dt->shared->u.compnd.sorted != H5T_SORT_VALUE) {
for(old_match = -1, j = 0; j < old_dt->shared->u.compnd.nmembs; j++) {
if(!HDstrcmp(new_dt->shared->u.compnd.memb[i].name, old_dt->shared->u.compnd.memb[j].name)) {
- old_match = j;
+ old_match = (int) j;
break;
} /* end if */
} /* end for */
@@ -3255,20 +3256,23 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "fields in datatype corrupted");
} /* end if */
else
- old_match = i;
+ old_match = (int) i;
/* If the field changed size, add that change to the accumulated size change */
if(new_dt->shared->u.compnd.memb[i].type->shared->size != old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
/* Adjust the size of the member */
new_dt->shared->u.compnd.memb[i].size = (old_dt->shared->u.compnd.memb[old_match].size*tmp->shared->size)/old_dt->shared->u.compnd.memb[old_match].type->shared->size;
- accum_change += (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
+ accum_change += (ssize_t) (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
} /* end if */
} /* end for */
- /* Apply the accumulated size change to the size of the compound struct */
- new_dt->shared->size += accum_change;
+ /* Range check against datatype size */
+ if ((accum_change < 0) && ((ssize_t) new_dt->shared->size < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
+ /* Apply the accumulated size change to the size of the compound struct */
+ new_dt->shared->size += (size_t) accum_change;
}
break;
@@ -3313,6 +3317,13 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
new_dt->shared->size=new_dt->shared->u.array.nelem*new_dt->shared->parent->shared->size;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -3530,6 +3541,16 @@ H5T__free(H5T_t *dt)
H5MM_xfree(dt->shared->u.opaque.tag);
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_REFERENCE:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -3796,11 +3817,14 @@ H5T_set_size(H5T_t *dt, size_t size)
case H5T_ARRAY:
case H5T_REFERENCE:
HDassert("can't happen" && 0);
+ break;
case H5T_NO_CLASS:
case H5T_NCLASSES:
HDassert("invalid type" && 0);
+ break;
default:
HDassert("not implemented yet" && 0);
+ break;
}
/* Commit (if we didn't convert this type to a VL string) */
@@ -3875,7 +3899,6 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
unsigned *idx1 = NULL, *idx2 = NULL;
size_t base_size;
hbool_t swapped;
- int i, j;
unsigned u;
int tmp;
int ret_value = 0;
@@ -3930,24 +3953,32 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
for(u = 0; u < dt1->shared->u.compnd.nmembs; u++)
idx1[u] = idx2[u] = u;
if(dt1->shared->u.enumer.nmembs > 1) {
- for(i = dt1->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i)
+ int i;
+
+ for(i = (int) dt1->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
for(j = 0, swapped=FALSE; j < i; j++)
if(HDstrcmp(dt1->shared->u.compnd.memb[idx1[j]].name,
dt1->shared->u.compnd.memb[idx1[j + 1]].name) > 0) {
- tmp = idx1[j];
+ unsigned tmp_idx = idx1[j];
idx1[j] = idx1[j + 1];
- idx1[j + 1] = tmp;
+ idx1[j + 1] = tmp_idx;
swapped = TRUE;
}
- for(i = dt2->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i)
+ }
+ for(i = (int) dt2->shared->u.compnd.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
for(j = 0, swapped = FALSE; j<i; j++)
if(HDstrcmp(dt2->shared->u.compnd.memb[idx2[j]].name,
dt2->shared->u.compnd.memb[idx2[j + 1]].name) > 0) {
- tmp = idx2[j];
+ unsigned tmp_idx = idx2[j];
idx2[j] = idx2[j + 1];
- idx2[j + 1] = tmp;
+ idx2[j + 1] = tmp_idx;
swapped = TRUE;
}
+ }
} /* end if */
#ifdef H5T_DEBUG
@@ -4007,28 +4038,39 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed");
for (u=0; u<dt1->shared->u.enumer.nmembs; u++)
idx1[u] = u;
- if(dt1->shared->u.enumer.nmembs > 1)
- for (i=dt1->shared->u.enumer.nmembs-1, swapped=TRUE; swapped && i>=0; --i)
- for (j=0, swapped=FALSE; j<i; j++)
+ if(dt1->shared->u.enumer.nmembs > 1) {
+ int i;
+ for (i = (int) dt1->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
+ for (j = 0, swapped = FALSE; j < i; j++)
if (HDstrcmp(dt1->shared->u.enumer.name[idx1[j]],
dt1->shared->u.enumer.name[idx1[j+1]]) > 0) {
- tmp = idx1[j];
+ unsigned tmp_idx = idx1[j];
idx1[j] = idx1[j+1];
- idx1[j+1] = tmp;
+ idx1[j+1] = tmp_idx;
swapped = TRUE;
}
+ }
+ }
for (u=0; u<dt2->shared->u.enumer.nmembs; u++)
idx2[u] = u;
- if(dt2->shared->u.enumer.nmembs > 1)
- for (i=dt2->shared->u.enumer.nmembs-1, swapped=TRUE; swapped && i>=0; --i)
- for (j=0, swapped=FALSE; j<i; j++)
+ if(dt2->shared->u.enumer.nmembs > 1) {
+ int i;
+
+ for (i = (int) dt2->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ int j;
+
+ for (j = 0, swapped = FALSE; j < i; j++)
if (HDstrcmp(dt2->shared->u.enumer.name[idx2[j]],
dt2->shared->u.enumer.name[idx2[j+1]]) > 0) {
- tmp = idx2[j];
+ unsigned tmp_idx = idx2[j];
idx2[j] = idx2[j+1];
- idx2[j+1] = tmp;
+ idx2[j+1] = tmp_idx;
swapped = TRUE;
}
+ }
+ }
#ifdef H5T_DEBUG
/* I don't quite trust the code above yet :-) --RPM */
@@ -4148,6 +4190,14 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
HGOTO_DONE(1);
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_REFERENCE:
+ case H5T_NCLASSES:
default:
/*
* Atomic datatypes...
@@ -4255,13 +4305,23 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
case H5R_BADTYPE:
case H5R_MAXTYPE:
HDassert("invalid type" && 0);
+ break;
default:
HDassert("not implemented yet" && 0);
+ break;
}
break;
+ case H5T_NO_CLASS:
+ case H5T_OPAQUE:
+ case H5T_COMPOUND:
+ case H5T_ENUM:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
HDassert("not implemented yet" && 0);
+ break;
}
break;
} /* end switch */
@@ -4535,7 +4595,7 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
} /* end if */
if(cmp > 0)
md++;
- HDmemmove(H5T_g.path + md + 1, H5T_g.path + md, (H5T_g.npaths - md) * sizeof(H5T_path_t*));
+ HDmemmove(H5T_g.path + md + 1, H5T_g.path + md, (size_t) (H5T_g.npaths - md) * sizeof(H5T_path_t*));
H5T_g.npaths++;
H5T_g.path[md] = path;
table = path;
@@ -5000,6 +5060,17 @@ H5T_is_sensible(const H5T_t *dt)
ret_value=FALSE;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_REFERENCE:
+ case H5T_VLEN:
+ case H5T_ARRAY:
+ case H5T_NCLASSES:
default:
/* Assume all other datatype are sensible to store on disk */
ret_value=TRUE;
@@ -5041,7 +5112,6 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
htri_t changed; /* Whether H5T_set_loc changed the type (even if the size didn't change) */
htri_t ret_value = 0; /* Indicate that success, but no location change */
unsigned i; /* Local index variable */
- int accum_change; /* Amount of change in the offset of the fields */
size_t old_size; /* Previous size of a field */
FUNC_ENTER_NOAPI(FAIL)
@@ -5075,14 +5145,21 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
break;
case H5T_COMPOUND: /* Check each field and recurse on VL, compound and array type */
+ {
+ ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
+
/* Sort the fields based on offsets */
H5T__sort_value(dt, NULL);
- for (i=0,accum_change=0; i<dt->shared->u.compnd.nmembs; i++) {
+ for (i=0; i<dt->shared->u.compnd.nmembs; i++) {
H5T_t *memb_type; /* Member's datatype pointer */
+ /* Range check against compound member's offset */
+ if ((accum_change < 0) && ((ssize_t) dt->shared->u.compnd.memb[i].offset < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
+
/* Apply the accumulated size change to the offset of the field */
- dt->shared->u.compnd.memb[i].offset += accum_change;
+ dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
/* Set the member type pointer (for convenience) */
memb_type=dt->shared->u.compnd.memb[i].type;
@@ -5105,13 +5182,18 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
dt->shared->u.compnd.memb[i].size = (dt->shared->u.compnd.memb[i].size*memb_type->shared->size)/old_size;
/* Add that change to the accumulated size change */
- accum_change += (memb_type->shared->size - (int)old_size);
+ accum_change += (ssize_t) (memb_type->shared->size - old_size);
} /* end if */
} /* end if */
} /* end for */
+ /* Range check against datatype size */
+ if ((accum_change < 0) && ((ssize_t) dt->shared->size < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
+
/* Apply the accumulated size change to the datatype */
- dt->shared->size = (size_t)(dt->shared->size + accum_change);
+ dt->shared->size += (size_t) accum_change;
+ }
break;
case H5T_VLEN: /* Recurse on the VL information if it's VL, compound or array, then free VL sequence */
@@ -5145,6 +5227,15 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
} /* end if */
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_ENUM:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
@@ -5235,6 +5326,15 @@ H5T_upgrade_version_cb(H5T_t *dt, void *op_value)
dt->shared->version = dt->shared->parent->shared->version;
break;
+ case H5T_NO_CLASS:
+ case H5T_INTEGER:
+ case H5T_FLOAT:
+ case H5T_TIME:
+ case H5T_STRING:
+ case H5T_BITFIELD:
+ case H5T_OPAQUE:
+ case H5T_REFERENCE:
+ case H5T_NCLASSES:
default:
break;
} /* end switch */
diff --git a/src/H5Tarray.c b/src/H5Tarray.c
index c73b934..2e9dec0 100644
--- a/src/H5Tarray.c
+++ b/src/H5Tarray.c
@@ -202,7 +202,7 @@ H5T__array_create(H5T_t *base, unsigned ndims, const hsize_t dim[/* ndims */])
/* Copy the array dimensions & compute the # of elements in the array */
for(u = 0, ret_value->shared->u.array.nelem = 1; u < ndims; u++) {
- H5_ASSIGN_OVERFLOW(ret_value->shared->u.array.dim[u], dim[u], hsize_t, size_t);
+ H5_CHECKED_ASSIGN(ret_value->shared->u.array.dim[u], size_t, dim[u], hsize_t);
ret_value->shared->u.array.nelem *= (size_t)dim[u];
} /* end for */
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index df85553..1c51b69 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -1952,7 +1952,7 @@ H5T_conv_struct_init(H5T_t *src, H5T_t *dst, H5T_cdata_t *cdata, hid_t dxpl_id)
src2dst[i] = -1;
for(j = 0; j < dst_nmembs; j++) {
if(!HDstrcmp(src->shared->u.compnd.memb[i].name, dst->shared->u.compnd.memb[j].name)) {
- H5_ASSIGN_OVERFLOW(src2dst[i],j,unsigned,int);
+ H5_CHECKED_ASSIGN(src2dst[i], int, j, unsigned);
break;
} /* end if */
} /* end for */
@@ -2187,16 +2187,16 @@ H5T__conv_struct(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
* Direction of conversion and striding through background.
*/
if(buf_stride) {
- H5_ASSIGN_OVERFLOW(src_delta, buf_stride, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(src_delta, ssize_t, buf_stride, size_t);
if(!bkg_stride) {
- H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);
} /* end if */
else
- H5_ASSIGN_OVERFLOW(bkg_delta, bkg_stride, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(bkg_delta, ssize_t, bkg_stride, size_t);
} /* end if */
else if(dst->shared->size <= src->shared->size) {
- H5_ASSIGN_OVERFLOW(src_delta, src->shared->size, size_t, ssize_t);
- H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(src_delta, ssize_t, src->shared->size, size_t);
+ H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);
} /* end else-if */
else {
H5_CHECK_OVERFLOW(src->shared->size, size_t, ssize_t);
@@ -2278,7 +2278,7 @@ H5T__conv_struct(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
/* If the bkg_delta was set to -(dst->shared->size), make it positive now */
if(buf_stride == 0 && dst->shared->size > src->shared->size)
- H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);
/*
* Copy the background buffer back into the in-place conversion
@@ -2833,8 +2833,8 @@ H5T__conv_enum(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
src_delta = dst_delta = (ssize_t)buf_stride;
s = d = buf;
} else if(dst->shared->size <= src->shared->size) {
- H5_ASSIGN_OVERFLOW(src_delta, src->shared->size, size_t, ssize_t);
- H5_ASSIGN_OVERFLOW(dst_delta, dst->shared->size, size_t, ssize_t);
+ H5_CHECKED_ASSIGN(src_delta, ssize_t, src->shared->size, size_t);
+ H5_CHECKED_ASSIGN(dst_delta, ssize_t, dst->shared->size, size_t);
s = d = buf;
} else {
H5_CHECK_OVERFLOW(src->shared->size, size_t, ssize_t);
diff --git a/src/H5Tfloat.c b/src/H5Tfloat.c
index 7d08d62..fff413d 100644
--- a/src/H5Tfloat.c
+++ b/src/H5Tfloat.c
@@ -203,7 +203,7 @@ H5Tget_ebias(hid_t type_id)
HGOTO_ERROR(H5E_DATATYPE, H5E_BADTYPE, 0, "operation not defined for datatype class")
/* bias */
- H5_ASSIGN_OVERFLOW(ret_value, dt->shared->u.atomic.u.f.ebias, uint64_t, size_t);
+ H5_CHECKED_ASSIGN(ret_value, size_t, dt->shared->u.atomic.u.f.ebias, uint64_t);
done:
FUNC_LEAVE_API(ret_value)
diff --git a/src/H5Tnative.c b/src/H5Tnative.c
index 960a811..bf6ba08 100644
--- a/src/H5Tnative.c
+++ b/src/H5Tnative.c
@@ -276,7 +276,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
if((snmemb = H5T_get_nmembers(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "compound data type doesn't have any member")
- H5_ASSIGN_OVERFLOW(nmemb, snmemb, int, unsigned);
+ H5_CHECKED_ASSIGN(nmemb, unsigned, snmemb, int);
if(NULL == (memb_list = (H5T_t **)H5MM_calloc(nmemb * sizeof(H5T_t *))))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot allocate memory")
@@ -388,7 +388,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
/* Retrieve member info and insert members into new enum type */
if((snmemb = H5T_get_nmembers(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "enumerate data type doesn't have any member")
- H5_ASSIGN_OVERFLOW(nmemb, snmemb, int, unsigned);
+ H5_CHECKED_ASSIGN(nmemb, unsigned, snmemb, int);
for(u = 0; u < nmemb; u++) {
if(NULL == (memb_name = H5T__get_member_name(dtype, u)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot get member name")
@@ -429,7 +429,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
/* Retrieve dimension information for array data type */
if((sarray_rank = H5T__get_array_ndims(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot get dimension rank")
- H5_ASSIGN_OVERFLOW(array_rank, sarray_rank, int, unsigned);
+ H5_CHECKED_ASSIGN(array_rank, unsigned, sarray_rank, int);
if(NULL == (dims = (hsize_t*)H5MM_malloc(array_rank * sizeof(hsize_t))))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot allocate memory")
if(H5T__get_array_dims(dtype, dims) < 0)
diff --git a/src/H5VM.c b/src/H5VM.c
index 2fa49ba..c546609 100644
--- a/src/H5VM.c
+++ b/src/H5VM.c
@@ -386,55 +386,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5VM_hyper_disjointp
- *
- * Purpose: Determines if two hyperslabs are disjoint.
- *
- * Return: Success: FALSE if they are not disjoint.
- * TRUE if they are disjoint.
- *
- * Failure: A hyperslab of zero size is disjoint from all
- * other hyperslabs.
- *
- * Programmer: Robb Matzke
- * Thursday, October 16, 1997
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-htri_t
-H5VM_hyper_disjointp(unsigned n,
- const hsize_t *offset1, const uint32_t *size1,
- const hsize_t *offset2, const uint32_t *size2)
-{
- unsigned u;
- htri_t ret_value = FALSE; /* Return value */
-
- /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- if(!n || !size1 || !size2)
- HGOTO_DONE(TRUE)
-
- for(u = 0; u < n; u++) {
- HDcompile_assert(sizeof(uint32_t) <= sizeof(hsize_t));
-
- if(0 == size1[u] || 0 == size2[u])
- HGOTO_DONE(TRUE)
- if(((offset1 ? offset1[u] : 0) < (offset2 ? offset2[u] : 0) &&
- ((offset1 ? offset1[u] : 0) + size1[u] <= (offset2 ? offset2[u] : 0))) ||
- ((offset2 ? offset2[u] : 0) < (offset1 ? offset1[u] : 0) &&
- ((offset2 ? offset2[u] : 0) + size2[u] <= (offset1 ? offset1[u] : 0))))
- HGOTO_DONE(TRUE)
- } /* end for */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5VM_hyper_disjointp() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5VM_hyper_fill
*
* Purpose: Similar to memset() except it operates on hyperslabs...
@@ -1068,17 +1019,12 @@ H5VM_array_down(unsigned n, const hsize_t *total_size, hsize_t *down)
* Programmer: Quincey Koziol
* Tuesday, June 22, 1999
*
- * Modifications:
- * Use precomputed accumulator array
- * Quincey Koziol
- * Saturday, April 26, 2003
- *
*-------------------------------------------------------------------------
*/
hsize_t
H5VM_array_offset_pre(unsigned n, const hsize_t *acc, const hsize_t *offset)
{
- int i; /*counter */
+ unsigned u; /* Local index variable */
hsize_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1088,8 +1034,8 @@ H5VM_array_offset_pre(unsigned n, const hsize_t *acc, const hsize_t *offset)
HDassert(offset);
/* Compute offset in array */
- for(i = (int)(n - 1), ret_value = 0; i >= 0; --i)
- ret_value += acc[i] * offset[i];
+ for(u = 0, ret_value = 0; u < n; u++)
+ ret_value += acc[u] * offset[u];
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5VM_array_offset_pre() */
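The loop above now walks the dimensions upward with an unsigned index instead of downward with a signed one; since it only accumulates acc[u] * offset[u], the direction does not change the result. A small usage sketch with illustrative values (not taken from the diff):

    hsize_t acc[2]    = {6, 1};    /* precomputed "down" accumulator array */
    hsize_t offset[2] = {2, 4};    /* coordinate offset */
    hsize_t linear    = H5VM_array_offset_pre(2, acc, offset);   /* 6*2 + 1*4 == 16 */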
@@ -1157,7 +1103,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+herr_t
H5VM_array_calc_pre(hsize_t offset, unsigned n, const hsize_t *down,
hsize_t *coords)
{
@@ -1258,18 +1204,51 @@ done:
* The chunk index is placed in the CHUNK_IDX location for return
* from this function
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Chunk index on success (can't fail)
*
* Programmer: Quincey Koziol
* Monday, April 21, 2003
*
*-------------------------------------------------------------------------
*/
-herr_t
+hsize_t
H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
- const hsize_t *down_nchunks, hsize_t *chunk_idx)
+ const hsize_t *down_nchunks)
{
hsize_t scaled_coord[H5VM_HYPER_NDIMS]; /* Scaled, coordinates, in terms of chunks */
+ hsize_t chunk_idx; /* Chunk index computed */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(ndims <= H5VM_HYPER_NDIMS);
+ HDassert(coord);
+ HDassert(chunk);
+ HDassert(down_nchunks);
+
+ /* Defer to H5VM_chunk_index_scaled */
+ chunk_idx = H5VM_chunk_index_scaled(ndims, coord, chunk, down_nchunks, scaled_coord);
+
+ FUNC_LEAVE_NOAPI(chunk_idx)
+} /* end H5VM_chunk_index() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5VM_chunk_scaled
+ *
+ * Purpose: Compute the scaled coordinates for a chunk offset
+ *
+ * Return: <none>
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, November 19, 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5VM_chunk_scaled(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
+ hsize_t *scaled)
+{
unsigned u; /* Local index variable */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1278,17 +1257,86 @@ H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
HDassert(ndims <= H5VM_HYPER_NDIMS);
HDassert(coord);
HDassert(chunk);
- HDassert(chunk_idx);
+ HDassert(scaled);
/* Compute the scaled coordinates for actual coordinates */
+ /* (Note that the 'scaled' array is an 'OUT' parameter) */
for(u = 0; u < ndims; u++)
- scaled_coord[u] = coord[u] / chunk[u];
+ scaled[u] = coord[u] / chunk[u];
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* end H5VM_chunk_scaled() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5VM_chunk_index_scaled
+ *
+ * Purpose: Given a coordinate offset (COORD), the size of each chunk
+ * (CHUNK), the number of chunks in each dimension (NCHUNKS)
+ * and the number of dimensions of all of these (NDIMS), calculate
+ * a "chunk index" for the chunk that the coordinate offset is
+ * located in.
+ *
+ * The chunk index starts at 0 and increases according to the
+ * fastest changing dimension, then the next fastest, etc.
+ *
+ * For example, with a 3x5 chunk size and 6 chunks in the fastest
+ * changing dimension and 3 chunks in the slowest changing
+ * dimension, the chunk indices are as follows:
+ *
+ * +-----+-----+-----+-----+-----+-----+
+ * | | | | | | |
+ * | 0 | 1 | 2 | 3 | 4 | 5 |
+ * | | | | | | |
+ * +-----+-----+-----+-----+-----+-----+
+ * | | | | | | |
+ * | 6 | 7 | 8 | 9 | 10 | 11 |
+ * | | | | | | |
+ * +-----+-----+-----+-----+-----+-----+
+ * | | | | | | |
+ * | 12 | 13 | 14 | 15 | 16 | 17 |
+ * | | | | | | |
+ * +-----+-----+-----+-----+-----+-----+
+ *
+ * The chunk index is placed in the CHUNK_IDX location for return
+ * from this function
+ *
+ * Note: This routine is identical to H5VM_chunk_index(), except for
+ * caching the scaled information. Make changes in both places.
+ *
+ * Return: Chunk index on success (can't fail)
+ *
+ * Programmer: Vailin Choi
+ * Monday, February 9, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+hsize_t
+H5VM_chunk_index_scaled(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
+ const hsize_t *down_nchunks, hsize_t *scaled)
+{
+ hsize_t chunk_idx; /* Computed chunk index */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(ndims <= H5VM_HYPER_NDIMS);
+ HDassert(coord);
+ HDassert(chunk);
+ HDassert(down_nchunks);
+ HDassert(scaled);
+
+ /* Compute the scaled coordinates for actual coordinates */
+ /* (Note that the 'scaled' array is an 'OUT' parameter) */
+ for(u = 0; u < ndims; u++)
+ scaled[u] = coord[u] / chunk[u];
/* Compute the chunk index */
- *chunk_idx = H5VM_array_offset_pre(ndims,down_nchunks,scaled_coord); /*lint !e772 scaled_coord will always be initialized */
+ chunk_idx = H5VM_array_offset_pre(ndims, down_nchunks, scaled); /*lint !e772 scaled_coord will always be initialized */
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5VM_chunk_index() */
+ FUNC_LEAVE_NOAPI(chunk_idx)
+} /* end H5VM_chunk_index_scaled() */
/*-------------------------------------------------------------------------
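The index arithmetic above boils down to dividing each coordinate by the chunk extent and then forming a row-major offset from the pre-computed chunk counts. A minimal standalone sketch follows (plain C integer types stand in for hsize_t/uint32_t, and the down_nchunks values are assumed to be the accumulated chunk counts of the slower-changing dimensions, fastest dimension last with a factor of 1):

#include <assert.h>
#include <stdio.h>

/* Sketch only: mirrors the scaled chunk-index computation with plain C
 * types; down_nchunks is assumed to hold the accumulated "down" products
 * of the chunk counts (fastest-changing dimension last, factor 1). */
static unsigned long long
chunk_index_scaled(unsigned ndims, const unsigned long long *coord,
    const unsigned *chunk, const unsigned long long *down_nchunks,
    unsigned long long *scaled)
{
    unsigned long long idx = 0;
    unsigned u;

    for (u = 0; u < ndims; u++) {
        scaled[u] = coord[u] / chunk[u];     /* which chunk along dimension u */
        idx += scaled[u] * down_nchunks[u];  /* accumulate row-major offset   */
    }
    return idx;
}

int
main(void)
{
    /* The 3x5 chunk example from the comment block: 3 chunks in the slow
     * dimension, 6 chunks in the fast dimension. */
    unsigned long long coord[2] = {7, 22};  /* element coordinates       */
    unsigned chunk[2]           = {3, 5};   /* chunk dimensions          */
    unsigned long long down[2]  = {6, 1};   /* {fast-dim chunk count, 1} */
    unsigned long long scaled[2];
    unsigned long long idx = chunk_index_scaled(2, coord, chunk, down, scaled);

    assert(scaled[0] == 2 && scaled[1] == 4);
    printf("chunk index = %llu\n", idx);    /* 16, matching the grid above */
    return 0;
}

With that layout, coordinate (7, 22) lands in scaled chunk (2, 4), i.e. index 16 in the grid shown in the function header.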
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index 20821b2..5d1e0c2 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -56,8 +56,6 @@ H5_DLL hsize_t H5VM_hyper_stride(unsigned n, const hsize_t *size,
const hsize_t *total_size,
const hsize_t *offset,
hsize_t *stride);
-H5_DLL htri_t H5VM_hyper_disjointp(unsigned n, const hsize_t *offset1,
- const uint32_t *size1, const hsize_t *offset2, const uint32_t *size2);
H5_DLL htri_t H5VM_hyper_eq(unsigned n, const hsize_t *offset1,
const hsize_t *size1, const hsize_t *offset2,
const hsize_t *size2);
@@ -87,10 +85,16 @@ H5_DLL hsize_t H5VM_array_offset_pre(unsigned n,
const hsize_t *acc, const hsize_t *offset);
H5_DLL hsize_t H5VM_array_offset(unsigned n, const hsize_t *total_size,
const hsize_t *offset);
+H5_DLL herr_t H5VM_array_calc_pre(hsize_t offset, unsigned n,
+ const hsize_t *down, hsize_t *coords);
H5_DLL herr_t H5VM_array_calc(hsize_t offset, unsigned n,
const hsize_t *total_size, hsize_t *coords);
-H5_DLL herr_t H5VM_chunk_index(unsigned ndims, const hsize_t *coord,
- const uint32_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx);
+H5_DLL hsize_t H5VM_chunk_index(unsigned ndims, const hsize_t *coord,
+ const uint32_t *chunk, const hsize_t *down_nchunks);
+H5_DLL void H5VM_chunk_scaled(unsigned ndims, const hsize_t *coord,
+ const uint32_t *chunk, hsize_t *scaled);
+H5_DLL hsize_t H5VM_chunk_index_scaled(unsigned ndims, const hsize_t *coord,
+ const uint32_t *chunk, const hsize_t *down_nchunks, hsize_t *scaled);
H5_DLL ssize_t H5VM_opvv(size_t dst_max_nseq, size_t *dst_curr_seq, size_t dst_len_arr[],
hsize_t dst_off_arr[],
size_t src_max_nseq, size_t *src_curr_seq, size_t src_len_arr[],
@@ -410,6 +414,29 @@ H5VM_log2_of2(uint32_t n)
/*-------------------------------------------------------------------------
+ * Function: H5VM_power2up
+ *
+ * Purpose: Round up a number to the next power of 2
+ *
+ * Return: The smallest power of 2 that is greater than or equal to n
+ *
+ * Programmer: Vailin Choi; Nov 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5_inline hsize_t UNUSED
+H5VM_power2up(hsize_t n)
+{
+ hsize_t ret_value = 1; /* Return value */
+
+ while(ret_value < n)
+ ret_value <<= 1;
+
+ return(ret_value);
+} /* H5VM_power2up */
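A quick sanity check of the rounding behaviour (a standalone sketch mirroring the inline routine with a plain C type, not the library code itself):

#include <assert.h>

/* Sketch: exact powers of two come back unchanged, everything else is
 * rounded up to the next power of two. */
static unsigned long long
power2up(unsigned long long n)
{
    unsigned long long ret = 1;

    while (ret < n)
        ret <<= 1;
    return ret;
}

int
main(void)
{
    assert(power2up(1)   == 1);
    assert(power2up(5)   == 8);
    assert(power2up(64)  == 64);
    assert(power2up(100) == 128);
    return 0;
}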
+
+
+/*-------------------------------------------------------------------------
* Function: H5VM_limit_enc_size
*
* Purpose: Determine the # of bytes needed to encode values within a
diff --git a/src/H5Zdeflate.c b/src/H5Zdeflate.c
index 82ad1ba..06d9866 100644
--- a/src/H5Zdeflate.c
+++ b/src/H5Zdeflate.c
@@ -102,9 +102,9 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
/* Set the uncompression parameters */
HDmemset(&z_strm, 0, sizeof(z_strm));
z_strm.next_in = (Bytef *)*buf;
- H5_ASSIGN_OVERFLOW(z_strm.avail_in,nbytes,size_t,unsigned);
+ H5_CHECKED_ASSIGN(z_strm.avail_in, unsigned, nbytes, size_t);
z_strm.next_out = (Bytef *)outbuf;
- H5_ASSIGN_OVERFLOW(z_strm.avail_out,nalloc,size_t,unsigned);
+ H5_CHECKED_ASSIGN(z_strm.avail_out, unsigned, nalloc, size_t);
/* Initialize the uncompression routines */
if (Z_OK!=inflateInit(&z_strm))
@@ -169,7 +169,7 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
int aggression; /* Compression aggression setting */
/* Set the compression aggression level */
- H5_ASSIGN_OVERFLOW(aggression,cd_values[0],unsigned,int);
+ H5_CHECKED_ASSIGN(aggression, int, cd_values[0], unsigned);
/* Allocate output (compressed) buffer */
if(NULL == (outbuf = H5MM_malloc(z_dst_nbytes)))
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index 9506ace..ff3ead2 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -803,7 +803,7 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
cd_values_index = 2;
/* Set "local" parameter for number of elements in the chunk */
- H5_ASSIGN_OVERFLOW(cd_values[cd_values_index++], npoints, hssize_t, unsigned);
+ H5_CHECKED_ASSIGN(cd_values[cd_values_index++], unsigned, npoints, hssize_t);
/* Assume no need to compress now, will be changed to FALSE later if not */
need_not_compress = TRUE;
diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c
index 38e2986..cda0a11 100644
--- a/src/H5Zscaleoffset.c
+++ b/src/H5Zscaleoffset.c
@@ -901,7 +901,7 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id)
HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "unable to get number of points in the dataspace")
/* Set "local" parameter for this dataset's number of elements */
- H5_ASSIGN_OVERFLOW(cd_values[H5Z_SCALEOFFSET_PARM_NELMTS],npoints,hssize_t,unsigned);
+ H5_CHECKED_ASSIGN(cd_values[H5Z_SCALEOFFSET_PARM_NELMTS], unsigned, npoints, hssize_t);
/* Get datatype's class */
if((dtype_class = H5T_get_class(type, TRUE)) == H5T_NO_CLASS)
diff --git a/src/H5Zszip.c b/src/H5Zszip.c
index 4544ec3..631667b 100644
--- a/src/H5Zszip.c
+++ b/src/H5Zszip.c
@@ -225,7 +225,7 @@ H5Z_set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id)
} /* end else */
/* Assign the final value to the scanline */
- H5_ASSIGN_OVERFLOW(cd_values[H5Z_SZIP_PARM_PPS],scanline,hsize_t,unsigned);
+ H5_CHECKED_ASSIGN(cd_values[H5Z_SZIP_PARM_PPS], unsigned, scanline, hsize_t);
/* Get datatype's endianness order */
if((dtype_order = H5T_get_order(type)) == H5T_ORDER_ERROR)
@@ -301,10 +301,10 @@ H5Z_filter_szip (unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "invalid deflate aggression level")
/* Copy the filter parameters into the szip parameter block */
- H5_ASSIGN_OVERFLOW(sz_param.options_mask,cd_values[H5Z_SZIP_PARM_MASK],unsigned,int);
- H5_ASSIGN_OVERFLOW(sz_param.bits_per_pixel,cd_values[H5Z_SZIP_PARM_BPP],unsigned,int);
- H5_ASSIGN_OVERFLOW(sz_param.pixels_per_block,cd_values[H5Z_SZIP_PARM_PPB],unsigned,int);
- H5_ASSIGN_OVERFLOW(sz_param.pixels_per_scanline,cd_values[H5Z_SZIP_PARM_PPS],unsigned,int);
+ H5_CHECKED_ASSIGN(sz_param.options_mask, int, cd_values[H5Z_SZIP_PARM_MASK], unsigned);
+ H5_CHECKED_ASSIGN(sz_param.bits_per_pixel, int, cd_values[H5Z_SZIP_PARM_BPP], unsigned);
+ H5_CHECKED_ASSIGN(sz_param.pixels_per_block, int, cd_values[H5Z_SZIP_PARM_PPB], unsigned);
+ H5_CHECKED_ASSIGN(sz_param.pixels_per_scanline, int, cd_values[H5Z_SZIP_PARM_PPS], unsigned);
/* Input; uncompress */
if (flags & H5Z_FLAG_REVERSE) {
@@ -314,7 +314,7 @@ H5Z_filter_szip (unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
/* Get the size of the uncompressed buffer */
newbuf = *buf;
UINT32DECODE(newbuf,stored_nalloc);
- H5_ASSIGN_OVERFLOW(nalloc,stored_nalloc,uint32_t,size_t);
+ H5_CHECKED_ASSIGN(nalloc, size_t, stored_nalloc, uint32_t);
/* Allocate space for the uncompressed buffer */
if(NULL==(outbuf = H5MM_malloc(nalloc)))
diff --git a/src/H5detect.c b/src/H5detect.c
index fc0570c..15736bd 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -73,7 +73,7 @@ static const char *FileHeader = "\n\
#define H5JMP_BUF sigjmp_buf
#define H5SETJMP(buf) HDsigsetjmp(buf, 1)
#define H5LONGJMP(buf, val) HDsiglongjmp(buf, val)
-#define H5HAVE_SIGJMP # sigsetjmp/siglongjmp are supported.
+#define H5HAVE_SIGJMP /* sigsetjmp/siglongjmp are supported. */
#elif defined(H5_HAVE_LONGJMP)
#define H5JMP_BUF jmp_buf
#define H5SETJMP(buf) HDsetjmp(buf)
@@ -90,41 +90,41 @@ static const char *FileHeader = "\n\
* was detected.
*/
typedef struct detected_t {
- const char *varname;
- int size; /*total byte size */
- int precision; /*meaningful bits */
- int offset; /*bit offset to meaningful bits */
- int perm[32]; /*for detection of byte order */
- int is_vax; /*for vax (float & double) only */
- int sign; /*location of sign bit */
- int mpos, msize, imp;/*information about mantissa */
- int epos, esize; /*information about exponent */
- unsigned long bias; /*exponent bias for floating pt.*/
- size_t align; /*required byte alignment */
- size_t comp_align; /*alignment for structure */
+ const char *varname;
+ unsigned int size; /* total byte size */
+ unsigned int precision; /* meaningful bits */
+ unsigned int offset; /* bit offset to meaningful bits */
+ int perm[32]; /* for detection of byte order */
+ hbool_t is_vax; /* for vax (float & double) only */
+ unsigned int sign; /* location of sign bit */
+ unsigned int mpos, msize, imp; /* information about mantissa */
+ unsigned int epos, esize; /* information about exponent */
+ unsigned long bias; /* exponent bias for floating pt */
+ unsigned int align; /* required byte alignment */
+ unsigned int comp_align; /* alignment for structure */
} detected_t;
/* This structure holds structure alignment for pointers, hvl_t, hobj_ref_t,
* hdset_reg_ref_t */
typedef struct malign_t {
const char *name;
- size_t comp_align; /*alignment for structure */
+ unsigned int comp_align; /* alignment for structure */
} malign_t;
/* global variables types detection code */
-static detected_t d_g[MAXDETECT];
-static malign_t m_g[MAXDETECT];
-static volatile int nd_g = 0, na_g = 0;
+H5_GCC_DIAG_OFF(larger-than=)
+static detected_t d_g[MAXDETECT];
+H5_GCC_DIAG_ON(larger-than=)
+static malign_t m_g[MAXDETECT];
+static volatile int nd_g = 0, na_g = 0;
static void print_results(int nd, detected_t *d, int na, malign_t *m);
static void iprint(detected_t *);
static int byte_cmp(int, const void *, const void *, const unsigned char *);
-static int bit_cmp(int, int *, volatile void *, volatile void *,
- const unsigned char *);
+static unsigned int bit_cmp(unsigned int, int *, void *, void *, const unsigned char *);
static void fix_order(int, int, int *, const char **);
-static int imp_bit(int, int *, volatile void *, volatile void *,
- const unsigned char *);
-static unsigned long find_bias(int, int, int *, volatile void *);
+static unsigned int imp_bit(unsigned int, int *, void *, void *, const unsigned char *);
+static unsigned int find_bias(unsigned int, unsigned int, int *, void *);
static void precision (detected_t*);
static void print_header(void);
static void detect_C89_integers(void);
@@ -136,7 +136,7 @@ static void detect_C99_integers16(void);
static void detect_C99_integers32(void);
static void detect_C99_integers64(void);
static void detect_alignments(void);
-static size_t align_g[] = {1, 2, 4, 8, 16};
+static unsigned int align_g[] = {1, 2, 4, 8, 16};
static int align_status_g = 0; /* ALIGNMENT Signal Status */
static int sigbus_handler_called_g = 0; /* how many times called */
static int sigsegv_handler_called_g = 0;/* how many times called */
@@ -167,39 +167,41 @@ static H5JMP_BUF jbuf_g;
static void
precision (detected_t *d)
{
- int n;
+ unsigned int n;
- if (0==d->msize) {
- /*
- * An integer. The permutation can have negative values at the
- * beginning or end which represent padding of bytes. We must adjust
- * the precision and offset accordingly.
- */
- if (d->perm[0] < 0) {
- /*
- * Lower addresses are padded.
- */
- for (n=0; n<d->size && d->perm[n]<0; n++) /*void*/;
- d->precision = 8*(d->size-n);
- d->offset = 0;
- } else if (d->perm[d->size - 1] < 0) {
- /*
- * Higher addresses are padded.
- */
- for (n=0; n<d->size && d->perm[d->size-(n+1)]; n++) /*void*/;
- d->precision = 8*(d->size-n);
- d->offset = 8*n;
- } else {
- /*
- * No padding.
- */
- d->precision = 8*d->size;
- d->offset = 0;
- }
+ if (0 == d->msize) {
+ /*
+ * An integer. The permutation can have negative values at the
+ * beginning or end which represent padding of bytes. We must adjust
+ * the precision and offset accordingly.
+ */
+ if (d->perm[0] < 0) {
+ /*
+ * Lower addresses are padded.
+ */
+ for (n = 0; n < d->size && d->perm[n] < 0; n++)
+ /*void*/;
+ d->precision = 8 * (d->size - n);
+ d->offset = 0;
+ } else if (d->perm[d->size - 1] < 0) {
+ /*
+ * Higher addresses are padded.
+ */
+ for (n = 0; n < d->size && d->perm[d->size - (n + 1)]; n++)
+ /*void*/;
+ d->precision = 8 * (d->size - n);
+ d->offset = 8 * n;
+ } else {
+ /*
+ * No padding.
+ */
+ d->precision = 8 * d->size;
+ d->offset = 0;
+ }
} else {
- /* A floating point */
- d->offset = MIN3 (d->mpos, d->epos, d->sign);
- d->precision = d->msize + d->esize + 1;
+ /* A floating point */
+ d->offset = MIN3(d->mpos, d->epos, d->sign);
+ d->precision = d->msize + d->esize + 1;
}
}
@@ -251,7 +253,8 @@ precision (detected_t *d)
INFO.size = sizeof(TYPE); \
\
for(_i = sizeof(DETECT_TYPE), _v = 0; _i > 0; --_i) \
- _v = (_v << 8) + _i; \
+ _v = (DETECT_TYPE) ((DETECT_TYPE) (_v << 8) + (DETECT_TYPE) _i); \
+ \
for(_i = 0, _x = (unsigned char *)&_v; _i < (signed)sizeof(DETECT_TYPE); _i++) { \
_j = (*_x++) - 1; \
HDassert(_j < (signed)sizeof(DETECT_TYPE)); \
@@ -290,9 +293,6 @@ precision (detected_t *d)
* absence of implicit mantissa bit, and exponent bias and
* initializes a detected_t structure with those properties.
*
- * Note: 'volatile' is used for the variables below to prevent the
- * compiler from optimizing them away.
- *
* Return: void
*
* Programmer: Robb Matzke
@@ -302,12 +302,12 @@ precision (detected_t *d)
*-------------------------------------------------------------------------
*/
#define DETECT_F(TYPE,VAR,INFO) { \
- volatile TYPE _v1, _v2, _v3; \
+ TYPE _v1, _v2, _v3; \
unsigned char _buf1[sizeof(TYPE)], _buf3[sizeof(TYPE)]; \
unsigned char _pad_mask[sizeof(TYPE)]; \
unsigned char _byte_mask; \
int _i, _j, _last = (-1); \
- char *_mesg; \
+ const char *_mesg; \
\
HDmemset(&INFO, 0, sizeof(INFO)); \
INFO.varname = #VAR; \
@@ -326,11 +326,13 @@ precision (detected_t *d)
_v1 = (TYPE)4.0L; \
HDmemcpy(_buf1, (const void *)&_v1, sizeof(TYPE)); \
for(_i = 0; _i < (int)sizeof(TYPE); _i++) \
- for(_byte_mask = (unsigned char)1; _byte_mask; _byte_mask <<= 1) { \
+ for(_byte_mask = (unsigned char)1; _byte_mask; _byte_mask = (unsigned char) (_byte_mask << 1)) { \
_buf1[_i] ^= _byte_mask; \
HDmemcpy((void *)&_v2, (const void *)_buf1, sizeof(TYPE)); \
+ H5_GCC_DIAG_OFF(float-equal) \
if(_v1 != _v2) \
_pad_mask[_i] |= _byte_mask; \
+ H5_GCC_DIAG_ON(float-equal) \
_buf1[_i] ^= _byte_mask; \
} /* end for */ \
\
@@ -368,7 +370,7 @@ precision (detected_t *d)
_v1 = (TYPE)1.0L; \
_v2 = (TYPE)1.5L; \
INFO.msize = bit_cmp (sizeof(TYPE), INFO.perm, &_v1, &_v2, _pad_mask); \
- INFO.msize += 1 + (INFO.imp?0:1) - INFO.mpos; \
+ INFO.msize += 1 + (unsigned int) (INFO.imp ? 0 : 1) - INFO.mpos; \
\
/* Exponent */ \
INFO.epos = INFO.mpos + INFO.msize; \
@@ -415,14 +417,13 @@ precision (detected_t *d)
TYPE x; \
} s; \
\
- COMP_ALIGN = (size_t)((char*)(&(s.x)) - (char*)(&s)); \
+ COMP_ALIGN = (unsigned int)((char*)(&(s.x)) - (char*)(&s)); \
}
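The COMP_ALIGN computation above relies on the padded-struct trick: a leading char forces the compiler to insert exactly the padding that the second member's type requires, so the member's offset equals its alignment requirement. A standalone sketch of the same arithmetic using offsetof():

#include <stddef.h>
#include <stdio.h>

/* Sketch: the offset of 'x' equals the alignment the compiler enforces
 * for the member's type on this platform. */
struct align_int    { char c; int    x; };
struct align_double { char c; double x; };

int
main(void)
{
    printf("int alignment:    %zu\n", offsetof(struct align_int, x));
    printf("double alignment: %zu\n", offsetof(struct align_double, x));
    return 0;
}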
#if defined(H5SETJMP) && defined(H5_HAVE_SIGNAL)
#define ALIGNMENT(TYPE,INFO) { \
- char *volatile _buf = NULL; \
- volatile TYPE _val = 1; \
- volatile TYPE _val2; \
+ char *volatile _buf = NULL; \
+ TYPE _val = 1, _val2; \
volatile size_t _ano = 0; \
void (*_handler)(int) = HDsignal(SIGBUS, sigbus_handler); \
void (*_handler2)(int) = HDsignal(SIGSEGV, sigsegv_handler);\
@@ -445,8 +446,10 @@ precision (detected_t *d)
else /* Little-Endian */ \
HDmemcpy(_buf+align_g[_ano]+(INFO.offset/8),((char *)&_val)+(INFO.offset/8),(size_t)(INFO.precision/8)); \
_val2 = *((TYPE*)(_buf+align_g[_ano])); \
+ H5_GCC_DIAG_OFF(float-equal) \
if(_val!=_val2) \
H5LONGJMP(jbuf_g, 1); \
+ H5_GCC_DIAG_ON(float-equal) \
/* End Cray Check */ \
(INFO.align)=align_g[_ano]; \
} else { \
@@ -881,63 +884,69 @@ done:\n\
static void
iprint(detected_t *d)
{
- int i, j, k, pass;
+ unsigned int pass;
- for (pass=(d->size-1)/4; pass>=0; --pass) {
- /*
- * Print the byte ordering above the bit fields.
- */
- printf(" * ");
- for (i=MIN(pass*4+3,d->size-1); i>=pass*4; --i) {
- printf ("%4d", d->perm[i]);
- if (i>pass*4) HDfputs (" ", stdout);
- }
+ for (pass = (d->size - 1) / 4; ; --pass) {
+ unsigned int i, k;
+ /*
+ * Print the byte ordering above the bit fields.
+ */
+ printf(" * ");
+ for (i = MIN(pass * 4 + 3, d->size - 1); i >= pass * 4; --i) {
+ printf("%4d", d->perm[i]);
+ if (i > pass * 4) HDfputs(" ", stdout);
+ if (!i) break;
+ }
- /*
- * Print the bit fields
- */
- printf("\n * ");
- for (i=MIN(pass*4+3,d->size-1),
- k=MIN(pass*32+31,8*d->size-1);
- i>=pass*4; --i) {
- for (j=7; j>=0; --j) {
- if (k==d->sign && d->msize) {
- HDputchar('S');
- } else if (k>=d->epos && k<d->epos+d->esize) {
- HDputchar('E');
- } else if (k>=d->mpos && k<d->mpos+d->msize) {
- HDputchar('M');
- } else if (d->msize) {
- HDputchar('?'); /*unknown floating point bit */
- } else if (d->sign) {
- HDputchar('I');
- } else {
- HDputchar('U');
- }
- --k;
- }
- if (i>pass*4) HDputchar(' ');
- }
- HDputchar('\n');
+ /*
+ * Print the bit fields
+ */
+ printf("\n * ");
+ for (i = MIN(pass * 4 + 3, d->size - 1), k = MIN(pass * 32 + 31,
+ 8 * d->size - 1); i >= pass * 4; --i) {
+ unsigned int j;
+
+ for (j = 8; j > 0; --j) {
+ if (k == d->sign && d->msize) {
+ HDputchar('S');
+ } else if (k >= d->epos && k < d->epos + d->esize) {
+ HDputchar('E');
+ } else if (k >= d->mpos && k < d->mpos + d->msize) {
+ HDputchar('M');
+ } else if (d->msize) {
+ HDputchar('?'); /*unknown floating point bit */
+ } else if (d->sign) {
+ HDputchar('I');
+ } else {
+ HDputchar('U');
+ }
+ --k;
+ }
+ if (i > pass * 4) HDputchar(' ');
+ if (!i) break;
+ }
+ HDputchar('\n');
+ if (!pass) break;
}
/*
* Is there an implicit bit in the mantissa.
*/
if (d->msize) {
- printf(" * Implicit bit? %s\n", d->imp ? "yes" : "no");
+ printf(" * Implicit bit? %s\n", d->imp ? "yes" : "no");
}
/*
* Alignment
*/
- if (0==d->align) {
- printf(" * Alignment: NOT CALCULATED\n");
- } else if (1==d->align) {
- printf(" * Alignment: none\n");
+ if (0 == d->align) {
+ printf(" * Alignment: NOT CALCULATED\n");
+ } else if (1 == d->align) {
+ printf(" * Alignment: none\n");
} else {
- printf(" * Alignment: %lu\n", (unsigned long)(d->align));
+ printf(" * Alignment: %lu\n", (unsigned long) (d->align));
}
+
}
@@ -985,31 +994,25 @@ byte_cmp(int n, const void *_a, const void *_b, const unsigned char *pad_mask)
* actual order to little endian. Ignores differences where
* the corresponding bit in pad_mask is set to 0.
*
- * Return: Success: Index of first differing bit.
- *
- * Failure: -1
- *
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 13, 1996
- *
- * Modifications:
+ * Return: Index of first differing bit.
*
*-------------------------------------------------------------------------
*/
-static int
-bit_cmp(int nbytes, int *perm, volatile void *_a, volatile void *_b,
- const unsigned char *pad_mask)
+static unsigned int
+bit_cmp(unsigned int nbytes, int *perm, void *_a, void *_b,
+ const unsigned char *pad_mask)
{
- int i, j;
- volatile unsigned char *a = (volatile unsigned char *) _a;
- volatile unsigned char *b = (volatile unsigned char *) _b;
- unsigned char aa, bb;
+ unsigned int i;
+ unsigned char *a = (unsigned char *) _a;
+ unsigned char *b = (unsigned char *) _b;
+ unsigned char aa, bb;
for (i = 0; i < nbytes; i++) {
- HDassert(perm[i] < nbytes);
- if ((aa = a[perm[i]] & pad_mask[perm[i]])
- != (bb = b[perm[i]] & pad_mask[perm[i]])) {
+ HDassert(perm[i] < (int) nbytes);
+ if ((aa = (unsigned char) (a[perm[i]] & pad_mask[perm[i]]))
+ != (bb = (unsigned char) (b[perm[i]] & pad_mask[perm[i]]))) {
+ unsigned int j;
+
for (j = 0; j < 8; j++, aa >>= 1, bb >>= 1) {
if ((aa & 1) != (bb & 1)) return i * 8 + j;
}
@@ -1017,7 +1020,9 @@ bit_cmp(int nbytes, int *perm, volatile void *_a, volatile void *_b,
HDabort();
}
}
- return -1;
+ fprintf(stderr, "INTERNAL ERROR");
+ HDabort();
+ return 0;
}
@@ -1123,21 +1128,19 @@ fix_order(int n, int last, int *perm, const char **mesg)
*
*-------------------------------------------------------------------------
*/
-static int
-imp_bit(int n, int *perm, volatile void *_a, volatile void *_b,
- const unsigned char *pad_mask)
+static unsigned int
+imp_bit(unsigned int n, int *perm, void *_a, void *_b, const unsigned char *pad_mask)
{
- volatile unsigned char *a = (volatile unsigned char *) _a;
- volatile unsigned char *b = (volatile unsigned char *) _b;
- int changed, major, minor;
- int msmb; /*most significant mantissa bit */
+ unsigned char *a = (unsigned char *) _a;
+ unsigned char *b = (unsigned char *) _b;
+ unsigned int changed, major, minor;
+ unsigned int msmb; /* most significant mantissa bit */
/*
* Look for the least significant bit that has changed between
* A and B. This is the least significant bit of the exponent.
*/
changed = bit_cmp(n, perm, a, b, pad_mask);
- HDassert(changed >= 0);
/*
* The bit to the right (less significant) of the changed bit should
@@ -1174,22 +1177,22 @@ imp_bit(int n, int *perm, volatile void *_a, volatile void *_b,
*
*-------------------------------------------------------------------------
*/
-static unsigned long
-find_bias(int epos, int esize, int *perm, volatile void *_a)
+static unsigned int
+find_bias(unsigned int epos, unsigned int esize, int *perm, void *_a)
{
- unsigned char *a = (unsigned char *) _a;
- unsigned char mask;
- unsigned long b, shift = 0, nbits, bias = 0;
+ unsigned char *a = (unsigned char *) _a;
+ unsigned char mask;
+ unsigned int b, shift = 0, nbits, bias = 0;
while (esize > 0) {
- nbits = MIN(esize, (8 - epos % 8));
- mask = (1 << nbits) - 1;
- b = (a[perm[epos / 8]] >> (epos % 8)) & mask;
- bias |= b << shift;
-
- shift += nbits;
- esize -= nbits;
- epos += nbits;
+ nbits = MIN(esize, (8 - epos % 8));
+ mask = (unsigned char) ((1 << nbits) - 1);
+ b = (unsigned int) (a[perm[epos / 8]] >> (epos % 8)) & mask;
+ bias |= b << shift;
+
+ shift += nbits;
+ esize -= nbits;
+ epos += nbits;
}
return bias;
}
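For the common little-endian case the byte permutation is the identity, and the bias extraction above reduces to walking the exponent bits byte by byte. A minimal sketch under that assumption (IEEE-754 single precision, epos = 23, esize = 8):

#include <assert.h>
#include <string.h>

/* Sketch only: assumes a little-endian host so the perm[] lookup can be
 * dropped.  Extracts an esize-bit field starting at bit epos. */
static unsigned
extract_field(const unsigned char *a, unsigned epos, unsigned esize)
{
    unsigned shift = 0, field = 0;

    while (esize > 0) {
        unsigned nbits = esize < (8 - epos % 8) ? esize : (8 - epos % 8);
        unsigned char mask = (unsigned char)((1u << nbits) - 1);
        unsigned b = (unsigned)(a[epos / 8] >> (epos % 8)) & mask;

        field |= b << shift;
        shift += nbits;
        esize -= nbits;
        epos  += nbits;
    }
    return field;
}

int
main(void)
{
    float one = 1.0f;
    unsigned char buf[sizeof(float)];

    memcpy(buf, &one, sizeof(buf));
    assert(extract_field(buf, 23, 8) == 127);   /* exponent bias of binary32 */
    return 0;
}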
@@ -1667,7 +1670,7 @@ detect_alignments(void)
static int verify_signal_handlers(int signum, void (*handler)(int))
{
void (*save_handler)(int) = HDsignal(signum, handler);
- int i, val;
+ volatile int i, val;
int ntries=5;
volatile int nfailures=0;
volatile int nsuccesses=0;
diff --git a/src/H5private.h b/src/H5private.h
index d1bbc24..66bf7eb 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -1523,11 +1523,14 @@ extern char *strdup(const char *s);
/* Include the generated overflow header file */
#include "H5overflow.h"
-#define H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) \
+/* Assign a variable to one of a different size (think of it as a safer "dst = (dsttype)src").
+ * The code generated by the macro checks for overflows.
+ */
+#define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \
H5_GLUE4(ASSIGN_,srctype,_TO_,dsttype)(dst,dsttype,src,srctype)\
#else /* NDEBUG */
-#define H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) \
+#define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \
(dst) = (dsttype)(src);
#endif /* NDEBUG */
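For illustration only, a non-HDF5 sketch of what one expansion of such a checked assignment amounts to for a single type pair; the macro name and the size_t-to-unsigned pairing below are hypothetical:

#include <assert.h>
#include <limits.h>
#include <stddef.h>

/* Sketch: in a debug build the source value is asserted to fit in the
 * destination type before the cast; with NDEBUG it collapses to a plain
 * cast.  Not part of the library. */
#ifndef NDEBUG
#define CHECKED_ASSIGN_SIZE_T_TO_UNSIGNED(dst, src)  \
    do {                                             \
        assert((size_t)(src) <= (size_t)UINT_MAX);   \
        (dst) = (unsigned)(src);                     \
    } while (0)
#else
#define CHECKED_ASSIGN_SIZE_T_TO_UNSIGNED(dst, src)  \
    (dst) = (unsigned)(src)
#endif

int
main(void)
{
    size_t   nbytes = 4096;
    unsigned avail  = 0;

    CHECKED_ASSIGN_SIZE_T_TO_UNSIGNED(avail, nbytes);
    return avail == 4096u ? 0 : 1;
}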
diff --git a/src/H5public.h b/src/H5public.h
index 3b89879..e2d2a50 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -94,10 +94,10 @@ extern "C" {
/* Version numbers */
#define H5_VERS_MAJOR 1 /* For major interface/format changes */
#define H5_VERS_MINOR 9 /* For minor interface/format changes */
-#define H5_VERS_RELEASE 216 /* For tweaks, bug-fixes, or development */
+#define H5_VERS_RELEASE 219 /* For tweaks, bug-fixes, or development */
#define H5_VERS_SUBRELEASE "" /* For pre-releases like snap0 */
/* Empty string for real releases. */
-#define H5_VERS_INFO "HDF5 library version: 1.9.216" /* Full version string */
+#define H5_VERS_INFO "HDF5 library version: 1.9.219" /* Full version string */
#define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \
H5_VERS_RELEASE)
diff --git a/src/H5trace.c b/src/H5trace.c
index de2b969..6b12318 100644
--- a/src/H5trace.c
+++ b/src/H5trace.c
@@ -2352,7 +2352,7 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
            unsigned long iul = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occurring */
fprintf(out, "%lu", iul);
- asize[argno] = iul;
+ asize[argno] = (hssize_t)iul;
} /* end else */
break;
@@ -2376,7 +2376,7 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
            unsigned long long iull = va_arg(ap, unsigned long long); /*lint !e732 Loss of sign not really occurring */
fprintf(out, "%llu", iull);
- asize[argno] = iull;
+ asize[argno] = (hssize_t)iull;
} /* end else */
break;
diff --git a/src/Makefile.in b/src/Makefile.in
index a2a461a..abcb07a 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -724,7 +724,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog *.clog2
# After making changes, run bin/reconfigure to update other configure related
# files like Makefile.in.
LT_VERS_INTERFACE = 6
-LT_VERS_REVISION = 206
+LT_VERS_REVISION = 209
LT_VERS_AGE = 0
# Our main target, the HDF5 library
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index 1b1ae96..1eed645 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -51,7 +51,7 @@ Languages:
@BUILD_CXX_CONDITIONAL_TRUE@ C++ Flags: @CXXFLAGS@
@BUILD_CXX_CONDITIONAL_TRUE@ H5 C++ Flags: @H5_CXXFLAGS@
@BUILD_CXX_CONDITIONAL_TRUE@ AM C++ Flags: @AM_CXXFLAGS@
-@BUILD_CXX_CONDITIONAL_TRUE@ Shared C++ Library: @H5_CXX_SHARED@
+@BUILD_CXX_CONDITIONAL_TRUE@ Shared C++ Library: @enable_shared@
@BUILD_CXX_CONDITIONAL_TRUE@ Static C++ Library: @enable_static@
Features: