-rw-r--r--  MANIFEST                 1
-rw-r--r--  src/H5HF.c              42
-rw-r--r--  src/H5HFbtree2.c       949
-rw-r--r--  src/H5HFcache.c        239
-rw-r--r--  src/H5HFdbg.c           18
-rw-r--r--  src/H5HFhdr.c           84
-rw-r--r--  src/H5HFhuge.c         559
-rw-r--r--  src/H5HFiblock.c       113
-rw-r--r--  src/H5HFman.c            8
-rw-r--r--  src/H5HFpkg.h          130
-rw-r--r--  src/H5HFprivate.h       33
-rw-r--r--  src/H5HFstat.c           2
-rw-r--r--  src/H5HFtest.c         210
-rw-r--r--  src/H5HFtiny.c         326
-rwxr-xr-x  src/Makefile.am          2
-rw-r--r--  src/Makefile.in          9
-rw-r--r--  test/fheap.c          1940
-rw-r--r--  tools/misc/h5debug.c    48
18 files changed, 4333 insertions, 380 deletions
diff --git a/MANIFEST b/MANIFEST
index 5a93c8c..3e366ae 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -515,6 +515,7 @@
./src/H5HFspace.c
./src/H5HFstat.c
./src/H5HFtest.c
+./src/H5HFtiny.c
./src/H5HG.c
./src/H5HGdbg.c
./src/H5HGpkg.h
diff --git a/src/H5HF.c b/src/H5HF.c
index 20d1aef..12822d5 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -334,11 +334,18 @@ HDfprintf(stderr, "%s: size = %Zu\n", FUNC, size);
/* Get the fractal heap header */
hdr = fh->hdr;
- /* Check if object is large enough to be standalone */
+ /* Check for 'huge' object */
if(size > hdr->max_man_size) {
/* Store 'huge' object in heap */
- if(H5HF_huge_insert(hdr, dxpl_id, size, obj, id) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "can't allocate space for 'managed' object in fractal heap")
+ /* (Casting away const OK - QAK) */
+ if(H5HF_huge_insert(hdr, dxpl_id, size, (void *)obj, id) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "can't store 'huge' object in fractal heap")
+ } /* end if */
+ /* Check for 'tiny' object */
+ else if(size <= hdr->tiny_max_len) {
+ /* Store 'tiny' object in heap */
+ if(H5HF_tiny_insert(hdr, size, obj, id) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "can't store 'tiny' object in fractal heap")
} /* end if */
else {
/* Check if we are in "append only" mode, or if there's enough room for the object */
@@ -348,7 +355,7 @@ HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "'write once' managed blocks not su
else {
/* Allocate space for object in 'managed' heap */
if(H5HF_man_insert(hdr, dxpl_id, size, obj, id) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "can't allocate space for 'managed' object in fractal heap")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "can't store 'managed' object in fractal heap")
} /* end else */
} /* end else */
@@ -390,7 +397,7 @@ H5HF_get_obj_len(H5HF_t *fh, hid_t dxpl_id, const void *_id, size_t *obj_len_p)
HDassert(obj_len_p);
/* Get the ID flags */
- id_flags = *id++;
+ id_flags = *id;
/* Check for correct heap ID version */
if((id_flags & H5HF_ID_VERS_MASK) != H5HF_ID_VERS_CURR)
@@ -398,6 +405,9 @@ H5HF_get_obj_len(H5HF_t *fh, hid_t dxpl_id, const void *_id, size_t *obj_len_p)
/* Check type of object in heap */
if((id_flags & H5HF_ID_TYPE_MASK) == H5HF_ID_TYPE_MAN) {
+ /* Skip over the flag byte */
+ id++;
+
/* Skip over object offset */
id += fh->hdr->heap_off_size;
@@ -408,6 +418,10 @@ H5HF_get_obj_len(H5HF_t *fh, hid_t dxpl_id, const void *_id, size_t *obj_len_p)
if(H5HF_huge_get_obj_len(fh->hdr, dxpl_id, id, obj_len_p) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get 'huge' object's length")
} /* end if */
+ else if((id_flags & H5HF_ID_TYPE_MASK) == H5HF_ID_TYPE_TINY) {
+ if(H5HF_tiny_get_obj_len(fh->hdr, id, obj_len_p) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get 'tiny' object's length")
+ } /* end if */
else {
HDfprintf(stderr, "%s: Heap ID type not supported yet!\n", FUNC);
HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "heap ID type not supported yet")
@@ -448,7 +462,7 @@ H5HF_read(H5HF_t *fh, hid_t dxpl_id, const void *_id, void *obj/*out*/)
HDassert(obj);
/* Get the ID flags */
- id_flags = *id++;
+ id_flags = *id;
/* Check for correct heap ID version */
if((id_flags & H5HF_ID_VERS_MASK) != H5HF_ID_VERS_CURR)
@@ -465,6 +479,11 @@ H5HF_read(H5HF_t *fh, hid_t dxpl_id, const void *_id, void *obj/*out*/)
if(H5HF_huge_read(fh->hdr, dxpl_id, id, obj) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't read 'huge' object from fractal heap")
} /* end if */
+ else if((id_flags & H5HF_ID_TYPE_MASK) == H5HF_ID_TYPE_TINY) {
+ /* Read 'tiny' object from file */
+ if(H5HF_tiny_read(fh->hdr, id, obj) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't read 'tiny' object from fractal heap")
+ } /* end if */
else {
HDfprintf(stderr, "%s: Heap ID type not supported yet!\n", FUNC);
HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "heap ID type not supported yet")
@@ -505,7 +524,7 @@ H5HF_remove(H5HF_t *fh, hid_t dxpl_id, const void *_id)
HDassert(id);
/* Get the ID flags */
- id_flags = *id++;
+ id_flags = *id;
/* Check for correct heap ID version */
if((id_flags & H5HF_ID_VERS_MASK) != H5HF_ID_VERS_CURR)
@@ -522,6 +541,11 @@ H5HF_remove(H5HF_t *fh, hid_t dxpl_id, const void *_id)
if(H5HF_huge_remove(fh->hdr, dxpl_id, id) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove 'huge' object from fractal heap")
} /* end if */
+ else if((id_flags & H5HF_ID_TYPE_MASK) == H5HF_ID_TYPE_TINY) {
+ /* Remove 'tiny' object from heap statistics */
+ if(H5HF_tiny_remove(fh->hdr, id) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove 'tiny' object from fractal heap")
+ } /* end if */
else {
HDfprintf(stderr, "%s: Heap ID type not supported yet!\n", FUNC);
HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "heap ID type not supported yet")
@@ -596,7 +620,7 @@ HDfprintf(stderr, "%s; After iterator reset fh->hdr->rc = %Zu\n", FUNC, fh->hdr-
/* Shut down the huge object information */
/* (Can't put this in header "destroy" routine, because it has
* has the address of an object in the file, which might be
- * by the shutdown routine - QAK)
+ * modified by the shutdown routine - QAK)
*/
if(H5HF_huge_term(fh->hdr, dxpl_id) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't release 'huge' object info")
@@ -693,7 +717,7 @@ HDfprintf(stderr, "%s: hdr->huge_bt2_addr = %a\n", FUNC, hdr->huge_bt2_addr);
} /* end if */
/* Release header's disk space */
- if(H5MF_xfree(f, H5FD_MEM_FHEAP_HDR, dxpl_id, fh_addr, (hsize_t)H5HF_HEADER_SIZE(hdr)) < 0)
+ if(H5MF_xfree(f, H5FD_MEM_FHEAP_HDR, dxpl_id, fh_addr, (hsize_t)hdr->heap_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to release fractal heap header")
/* Finished deleting header */
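The H5HF.c changes above share one pattern: the first byte of a heap ID carries version and type flags, and each public routine (insert, get object length, read, remove) now dispatches to the 'managed', 'huge', or 'tiny' code path based on the type bits, leaving the flag byte in place for the per-type routine to skip. A minimal, self-contained sketch of that dispatch is shown below; the mask and flag values are illustrative stand-ins, not the actual H5HFpkg.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the version/type masks in H5HFpkg.h (values are assumptions) */
#define ID_VERS_MASK  0xC0u   /* top two bits: heap ID version */
#define ID_VERS_CURR  0x00u   /* current heap ID version       */
#define ID_TYPE_MASK  0x30u   /* next two bits: object type    */
#define ID_TYPE_MAN   0x00u   /* 'managed' object              */
#define ID_TYPE_HUGE  0x10u   /* 'huge' object                 */
#define ID_TYPE_TINY  0x20u   /* 'tiny' object                 */

/* Dispatch on the heap ID's flag byte, in the spirit of H5HF_get_obj_len() / H5HF_read() */
static int
dispatch_heap_id(const uint8_t *id)
{
    uint8_t id_flags = *id;             /* inspect the flag byte, don't consume it */

    if((id_flags & ID_VERS_MASK) != ID_VERS_CURR)
        return -1;                      /* wrong heap ID version */

    switch(id_flags & ID_TYPE_MASK) {
        case ID_TYPE_MAN:
            puts("managed object: skip flag byte, then offset/length fields");
            break;
        case ID_TYPE_HUGE:
            puts("huge object: look up record in the 'huge' v2 B-tree");
            break;
        case ID_TYPE_TINY:
            puts("tiny object: data is embedded directly in the heap ID");
            break;
        default:
            return -1;                  /* unknown heap ID type */
    }
    return 0;
}

int
main(void)
{
    const uint8_t managed_id[] = { ID_TYPE_MAN,  0x12, 0x34 };
    const uint8_t tiny_id[]    = { ID_TYPE_TINY, 'h', 'i' };

    dispatch_heap_id(managed_id);
    dispatch_heap_id(tiny_id);
    return 0;
}

Routing 'tiny' objects this way lets their data live entirely inside the heap ID, which is why H5HF_tiny_remove() above only needs to update heap statistics rather than free file space.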
diff --git a/src/H5HFbtree2.c b/src/H5HFbtree2.c
index 4a324aa..43fafd1 100644
--- a/src/H5HFbtree2.c
+++ b/src/H5HFbtree2.c
@@ -59,34 +59,104 @@
/********************/
/* v2 B-tree function callbacks */
-herr_t H5HF_huge_bt2_found(const void *nrecord, void *op_data);
-herr_t H5HF_huge_bt2_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_indir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_indir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_indir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_indir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_dir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_dir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_dir_remove(const void *nrecord, void *op_data);
/* v2 B-tree driver callbacks */
-static herr_t H5HF_huge_btree2_store(const H5B2_class_t *cls, void *native, const void *udata);
-static herr_t H5HF_huge_btree2_retrieve(const H5B2_class_t *cls, void *udata, const void *native);
-static herr_t H5HF_huge_btree2_compare(const H5B2_class_t *cls, const void *rec1, const void *rec2);
-static herr_t H5HF_huge_btree2_encode(const H5F_t *f, const H5B2_class_t *cls, uint8_t *raw,
+static herr_t H5HF_huge_btree2_indir_store(void *native, const void *udata);
+static herr_t H5HF_huge_btree2_indir_retrieve(void *udata, const void *native);
+static herr_t H5HF_huge_btree2_indir_compare(const void *rec1, const void *rec2);
+static herr_t H5HF_huge_btree2_indir_encode(const H5F_t *f, uint8_t *raw,
const void *native);
-static herr_t H5HF_huge_btree2_decode(const H5F_t *f, const H5B2_class_t *cls, const uint8_t *raw,
+static herr_t H5HF_huge_btree2_indir_decode(const H5F_t *f, const uint8_t *raw,
void *native);
-static herr_t H5HF_huge_btree2_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
- int indent, int fwidth, const H5B2_class_t *cls, const void *record, const void *_udata);
+static herr_t H5HF_huge_btree2_indir_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+ int indent, int fwidth, const void *record, const void *_udata);
+
+static herr_t H5HF_huge_btree2_filt_indir_store(void *native, const void *udata);
+static herr_t H5HF_huge_btree2_filt_indir_retrieve(void *udata, const void *native);
+static herr_t H5HF_huge_btree2_filt_indir_compare(const void *rec1, const void *rec2);
+static herr_t H5HF_huge_btree2_filt_indir_encode(const H5F_t *f, uint8_t *raw,
+ const void *native);
+static herr_t H5HF_huge_btree2_filt_indir_decode(const H5F_t *f, const uint8_t *raw,
+ void *native);
+static herr_t H5HF_huge_btree2_filt_indir_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+ int indent, int fwidth, const void *record, const void *_udata);
+
+static herr_t H5HF_huge_btree2_dir_store(void *native, const void *udata);
+static herr_t H5HF_huge_btree2_dir_retrieve(void *udata, const void *native);
+static herr_t H5HF_huge_btree2_dir_compare(const void *rec1, const void *rec2);
+static herr_t H5HF_huge_btree2_dir_encode(const H5F_t *f, uint8_t *raw,
+ const void *native);
+static herr_t H5HF_huge_btree2_dir_decode(const H5F_t *f, const uint8_t *raw,
+ void *native);
+static herr_t H5HF_huge_btree2_dir_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+ int indent, int fwidth, const void *record, const void *_udata);
+
+static herr_t H5HF_huge_btree2_filt_dir_store(void *native, const void *udata);
+static herr_t H5HF_huge_btree2_filt_dir_retrieve(void *udata, const void *native);
+static herr_t H5HF_huge_btree2_filt_dir_compare(const void *rec1, const void *rec2);
+static herr_t H5HF_huge_btree2_filt_dir_encode(const H5F_t *f, uint8_t *raw,
+ const void *native);
+static herr_t H5HF_huge_btree2_filt_dir_decode(const H5F_t *f, const uint8_t *raw,
+ void *native);
+static herr_t H5HF_huge_btree2_filt_dir_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+ int indent, int fwidth, const void *record, const void *_udata);
/*********************/
/* Package Variables */
/*********************/
-const H5B2_class_t H5HF_BTREE2[1]={{ /* B-tree class information */
- H5B2_FHEAP_ID, /* Type of B-tree */
- 0, /* Size of native record */
- /* (computed at run-time for each heap) */
- NULL, /* Class private information */
- H5HF_huge_btree2_store, /* Record storage callback */
- H5HF_huge_btree2_retrieve, /* Record retrieval callback */
- H5HF_huge_btree2_compare, /* Record comparison callback */
- H5HF_huge_btree2_encode, /* Record encoding callback */
- H5HF_huge_btree2_decode, /* Record decoding callback */
- H5HF_huge_btree2_debug /* Record debugging callback */
+/* v2 B-tree class for indirectly accessed 'huge' objects */
+const H5B2_class_t H5HF_BT2_INDIR[1]={{ /* B-tree class information */
+ H5B2_FHEAP_HUGE_INDIR_ID, /* Type of B-tree */
+ sizeof(H5HF_huge_bt2_indir_rec_t), /* Size of native record */
+ H5HF_huge_btree2_indir_store, /* Record storage callback */
+ H5HF_huge_btree2_indir_retrieve, /* Record retrieval callback */
+ H5HF_huge_btree2_indir_compare, /* Record comparison callback */
+ H5HF_huge_btree2_indir_encode, /* Record encoding callback */
+ H5HF_huge_btree2_indir_decode, /* Record decoding callback */
+ H5HF_huge_btree2_indir_debug /* Record debugging callback */
+}};
+
+/* v2 B-tree class for indirectly accessed, filtered 'huge' objects */
+const H5B2_class_t H5HF_BT2_FILT_INDIR[1]={{ /* B-tree class information */
+ H5B2_FHEAP_HUGE_FILT_INDIR_ID, /* Type of B-tree */
+ sizeof(H5HF_huge_bt2_filt_indir_rec_t), /* Size of native record */
+ H5HF_huge_btree2_filt_indir_store, /* Record storage callback */
+ H5HF_huge_btree2_filt_indir_retrieve, /* Record retrieval callback */
+ H5HF_huge_btree2_filt_indir_compare, /* Record comparison callback */
+ H5HF_huge_btree2_filt_indir_encode, /* Record encoding callback */
+ H5HF_huge_btree2_filt_indir_decode, /* Record decoding callback */
+ H5HF_huge_btree2_filt_indir_debug /* Record debugging callback */
+}};
+
+/* v2 B-tree class for directly accessed 'huge' objects */
+const H5B2_class_t H5HF_BT2_DIR[1]={{ /* B-tree class information */
+ H5B2_FHEAP_HUGE_DIR_ID, /* Type of B-tree */
+ sizeof(H5HF_huge_bt2_dir_rec_t), /* Size of native record */
+ H5HF_huge_btree2_dir_store, /* Record storage callback */
+ H5HF_huge_btree2_dir_retrieve, /* Record retrieval callback */
+ H5HF_huge_btree2_dir_compare, /* Record comparison callback */
+ H5HF_huge_btree2_dir_encode, /* Record encoding callback */
+ H5HF_huge_btree2_dir_decode, /* Record decoding callback */
+ H5HF_huge_btree2_dir_debug /* Record debugging callback */
+}};
+
+/* v2 B-tree class for directly accessed, filtered 'huge' objects */
+const H5B2_class_t H5HF_BT2_FILT_DIR[1]={{ /* B-tree class information */
+ H5B2_FHEAP_HUGE_FILT_DIR_ID, /* Type of B-tree */
+ sizeof(H5HF_huge_bt2_filt_dir_rec_t),/* Size of native record */
+ H5HF_huge_btree2_filt_dir_store, /* Record storage callback */
+ H5HF_huge_btree2_filt_dir_retrieve, /* Record retrieval callback */
+ H5HF_huge_btree2_filt_dir_compare, /* Record comparison callback */
+ H5HF_huge_btree2_filt_dir_encode, /* Record encoding callback */
+ H5HF_huge_btree2_filt_dir_decode, /* Record decoding callback */
+ H5HF_huge_btree2_filt_dir_debug /* Record debugging callback */
}};
/*****************************/
@@ -100,10 +170,10 @@ const H5B2_class_t H5HF_BTREE2[1]={{ /* B-tree class information */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_bt2_found
+ * Function: H5HF_huge_bt2_indir_found
*
- * Purpose: Retrieve record for 'huge' object, when it's found in the
- * v2 B-tree
+ * Purpose: Retrieve record for indirectly accessed 'huge' object, when
+ * it's found in the v2 B-tree
*
* Return: Success: non-negative
* Failure: negative
@@ -114,26 +184,27 @@ const H5B2_class_t H5HF_BTREE2[1]={{ /* B-tree class information */
*-------------------------------------------------------------------------
*/
herr_t
-H5HF_huge_bt2_found(const void *nrecord, void *op_data)
+H5HF_huge_bt2_indir_found(const void *nrecord, void *op_data)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_bt2_found)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_bt2_indir_found)
#ifdef QAK
-HDfprintf(stderr, "%s: nrecord = {%a, %Hu, %Hu}\n", "H5HF_huge_bt2_store",
- ((const H5HF_huge_bt2_rec_t *)nrecord)->addr,
- ((const H5HF_huge_bt2_rec_t *)nrecord)->len,
- ((const H5HF_huge_bt2_rec_t *)nrecord)->id);
+HDfprintf(stderr, "%s: nrecord = {%a, %Hu, %Hu}\n", "H5HF_huge_bt2_indir_found",
+ ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->addr,
+ ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->len,
+ ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->id);
#endif /* QAK */
- *(H5HF_huge_bt2_rec_t *)op_data = *(const H5HF_huge_bt2_rec_t *)nrecord;
+ *(H5HF_huge_bt2_indir_rec_t *)op_data = *(const H5HF_huge_bt2_indir_rec_t *)nrecord;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_bt2_found() */
+} /* H5HF_huge_bt2_indir_found() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_bt2_remove
+ * Function: H5HF_huge_bt2_indir_remove
*
- * Purpose: Free space for 'huge' object, as v2 B-tree is being deleted
+ * Purpose: Free space for indirectly accessed 'huge' object, as v2 B-tree
+ * is being deleted or v2 B-tree node is removed
*
* Return: Success: non-negative
* Failure: negative
@@ -144,32 +215,55 @@ HDfprintf(stderr, "%s: nrecord = {%a, %Hu, %Hu}\n", "H5HF_huge_bt2_store",
*-------------------------------------------------------------------------
*/
herr_t
-H5HF_huge_bt2_remove(const void *nrecord, void *_udata)
+H5HF_huge_bt2_indir_remove(const void *nrecord, void *_udata)
{
H5HF_huge_remove_ud1_t *udata = (H5HF_huge_remove_ud1_t *)_udata; /* User callback data */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_bt2_remove)
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_bt2_indir_remove)
/* Free the space in the file for the object being removed */
- if(H5MF_xfree(udata->hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, udata->dxpl_id, ((const H5HF_huge_bt2_rec_t *)nrecord)->addr, ((const H5HF_huge_bt2_rec_t *)nrecord)->len) < 0)
+ if(H5MF_xfree(udata->hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, udata->dxpl_id, ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->addr, ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->len) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free space for huge object on disk")
/* Set the length of the object removed */
- udata->obj_len = ((const H5HF_huge_bt2_rec_t *)nrecord)->len;
+ udata->obj_len = ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->len;
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5HF_huge_bt2_remove() */
+} /* H5HF_huge_bt2_indir_remove() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_store
+ * Function: H5HF_huge_btree2_indir_store
*
* Purpose: Store native information into record for v2 B-tree
*
* Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_indir_store(void *nrecord, const void *udata)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_store)
+
+ *(H5HF_huge_bt2_indir_rec_t *)nrecord = *(const H5HF_huge_bt2_indir_rec_t *)udata;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_indir_store() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_indir_retrieve
+ *
+ * Purpose: Retrieve native information from record for v2 B-tree
*
+ * Return: Success: non-negative
* Failure: negative
*
* Programmer: Quincey Koziol
@@ -178,23 +272,319 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_store(const H5B2_class_t UNUSED *cls, void *nrecord, const void *udata)
+H5HF_huge_btree2_indir_retrieve(void *udata, const void *nrecord)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_store)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_retrieve)
- *(H5HF_huge_bt2_rec_t *)nrecord = *(const H5HF_huge_bt2_rec_t *)udata;
+ *(H5HF_huge_bt2_indir_rec_t *)udata = *(const H5HF_huge_bt2_indir_rec_t *)nrecord;
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_btree2_store() */
+} /* H5HF_huge_btree2_indir_retrieve() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_indir_compare
+ *
+ * Purpose: Compare two native information records, according to some key
+ *
+ * Return: <0 if rec1 < rec2
+ * =0 if rec1 == rec2
+ * >0 if rec1 > rec2
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_indir_compare(const void *_rec1, const void *_rec2)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_compare)
+
+#ifdef QAK
+{
+const H5HF_huge_bt2_indir_rec_t *rec1 = (const H5HF_huge_bt2_indir_rec_t *)_rec1;
+const H5HF_huge_bt2_indir_rec_t *rec2 = (const H5HF_huge_bt2_indir_rec_t *)_rec2;
+
+HDfprintf(stderr, "%s: rec1 = {%a, %Hu, %Hu}\n", "H5HF_huge_btree2_indir_compare", rec1->addr, rec1->len, rec1->id);
+HDfprintf(stderr, "%s: rec2 = {%a, %Hu, %Hu}\n", "H5HF_huge_btree2_indir_compare", rec2->addr, rec2->len, rec2->id);
+}
+#endif /* QAK */
+ FUNC_LEAVE_NOAPI((herr_t)(((const H5HF_huge_bt2_indir_rec_t *)_rec1)->id - ((const H5HF_huge_bt2_indir_rec_t *)_rec2)->id))
+} /* H5HF_huge_btree2_indir_compare() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_retrieve
+ * Function: H5HF_huge_btree2_indir_encode
+ *
+ * Purpose: Encode native information into raw form for storing on disk
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_indir_encode(const H5F_t *f, uint8_t *raw, const void *_nrecord)
+{
+ const H5HF_huge_bt2_indir_rec_t *nrecord = (const H5HF_huge_bt2_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_encode)
+
+ /* Encode the record's fields */
+ H5F_addr_encode(f, &raw, nrecord->addr);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->len);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->id);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_indir_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_indir_decode
+ *
+ * Purpose: Decode raw disk form of record into native form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_indir_decode(const H5F_t *f, const uint8_t *raw, void *_nrecord)
+{
+ H5HF_huge_bt2_indir_rec_t *nrecord = (H5HF_huge_bt2_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_decode)
+
+ /* Decode the record's fields */
+ H5F_addr_decode(f, &raw, &nrecord->addr);
+ H5F_DECODE_LENGTH(f, raw, nrecord->len);
+ H5F_DECODE_LENGTH(f, raw, nrecord->id);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_indir_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_indir_debug
+ *
+ * Purpose: Debug native form of record
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_indir_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+ int indent, int fwidth, const void *_nrecord,
+ const void UNUSED *_udata)
+{
+ const H5HF_huge_bt2_indir_rec_t *nrecord = (const H5HF_huge_bt2_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_indir_debug)
+
+ HDfprintf(stream, "%*s%-*s {%a, %Hu, %Hu}\n", indent, "", fwidth, "Record:",
+ nrecord->addr, nrecord->len, nrecord->id);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_indir_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_bt2_filt_indir_found
+ *
+ * Purpose: Retrieve record for indirectly accessed, filtered 'huge' object,
+ * when it's found in the v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 8, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_huge_bt2_filt_indir_found(const void *nrecord, void *op_data)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_bt2_filt_indir_found)
+
+#ifdef QAK
+HDfprintf(stderr, "%s: nrecord = {%a, %Hu, %x, %Hu, %Hu}\n", "H5HF_huge_bt2_filt_indir_found",
+ ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->addr,
+ ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->len,
+ ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->filter_mask,
+ ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->obj_size,
+ ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->id);
+#endif /* QAK */
+ *(H5HF_huge_bt2_filt_indir_rec_t *)op_data = *(const H5HF_huge_bt2_filt_indir_rec_t *)nrecord;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_bt2_filt_indir_found() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_bt2_filt_indir_remove
+ *
+ * Purpose: Free space for indirectly accessed, filtered 'huge' object, as
+ * v2 B-tree is being deleted or v2 B-tree node is removed
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 8, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_huge_bt2_filt_indir_remove(const void *nrecord, void *_udata)
+{
+ H5HF_huge_remove_ud1_t *udata = (H5HF_huge_remove_ud1_t *)_udata; /* User callback data */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_bt2_filt_indir_remove)
+
+ /* Free the space in the file for the object being removed */
+ if(H5MF_xfree(udata->hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, udata->dxpl_id, ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->addr, ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->len) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free space for huge object on disk")
+
+ /* Set the length of the object removed */
+ udata->obj_len = ((const H5HF_huge_bt2_filt_indir_rec_t *)nrecord)->obj_size;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_huge_bt2_filt_indir_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_indir_store
+ *
+ * Purpose: Store native information into record for v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_indir_store(void *nrecord, const void *udata)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_store)
+
+ *(H5HF_huge_bt2_filt_indir_rec_t *)nrecord = *(const H5HF_huge_bt2_filt_indir_rec_t *)udata;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_indir_store() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_indir_retrieve
*
* Purpose: Retrieve native information from record for v2 B-tree
*
* Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_indir_retrieve(void *udata, const void *nrecord)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_retrieve)
+
+ *(H5HF_huge_bt2_filt_indir_rec_t *)udata = *(const H5HF_huge_bt2_filt_indir_rec_t *)nrecord;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_indir_retrieve() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_indir_compare
+ *
+ * Purpose: Compare two native information records, according to some key
+ *
+ * Return: <0 if rec1 < rec2
+ * =0 if rec1 == rec2
+ * >0 if rec1 > rec2
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_indir_compare(const void *_rec1, const void *_rec2)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_compare)
+
+#ifdef QAK
+{
+const H5HF_huge_bt2_filt_indir_rec_t *rec1 = (const H5HF_huge_bt2_filt_indir_rec_t *)_rec1;
+const H5HF_huge_bt2_filt_indir_rec_t *rec2 = (const H5HF_huge_bt2_filt_indir_rec_t *)_rec2;
+
+HDfprintf(stderr, "%s: rec1 = {%a, %Hu, %x, %Hu, %Hu}\n", "H5HF_huge_btree2_filt_indir_compare", rec1->addr, rec1->len, rec1->filter_mask, rec1->obj_size, rec1->id);
+HDfprintf(stderr, "%s: rec2 = {%a, %Hu, %x, %Hu, %Hu}\n", "H5HF_huge_btree2_filt_indir_compare", rec2->addr, rec2->len, rec2->filter_mask, rec2->obj_size, rec2->id);
+}
+#endif /* QAK */
+ FUNC_LEAVE_NOAPI((herr_t)(((const H5HF_huge_bt2_filt_indir_rec_t *)_rec1)->id - ((const H5HF_huge_bt2_filt_indir_rec_t *)_rec2)->id))
+} /* H5HF_huge_btree2_filt_indir_compare() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_indir_encode
+ *
+ * Purpose: Encode native information into raw form for storing on disk
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
*
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_indir_encode(const H5F_t *f, uint8_t *raw, const void *_nrecord)
+{
+ const H5HF_huge_bt2_filt_indir_rec_t *nrecord = (const H5HF_huge_bt2_filt_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_encode)
+
+ /* Encode the record's fields */
+ H5F_addr_encode(f, &raw, nrecord->addr);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->len);
+ UINT32ENCODE(raw, nrecord->filter_mask);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->obj_size);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->id);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_indir_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_indir_decode
+ *
+ * Purpose: Decode raw disk form of record into native form
+ *
+ * Return: Success: non-negative
* Failure: negative
*
* Programmer: Quincey Koziol
@@ -203,18 +593,136 @@ H5HF_huge_btree2_store(const H5B2_class_t UNUSED *cls, void *nrecord, const void
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_retrieve(const H5B2_class_t UNUSED *cls, void *udata, const void *nrecord)
+H5HF_huge_btree2_filt_indir_decode(const H5F_t *f, const uint8_t *raw, void *_nrecord)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_retrieve)
+ H5HF_huge_bt2_filt_indir_rec_t *nrecord = (H5HF_huge_bt2_filt_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_decode)
- *(H5HF_huge_bt2_rec_t *)udata = *(const H5HF_huge_bt2_rec_t *)nrecord;
+ /* Decode the record's fields */
+ H5F_addr_decode(f, &raw, &nrecord->addr);
+ H5F_DECODE_LENGTH(f, raw, nrecord->len);
+ UINT32DECODE(raw, nrecord->filter_mask);
+ H5F_DECODE_LENGTH(f, raw, nrecord->obj_size);
+ H5F_DECODE_LENGTH(f, raw, nrecord->id);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_btree2_retrieve() */
+} /* H5HF_huge_btree2_filt_indir_decode() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_compare
+ * Function: H5HF_huge_btree2_filt_indir_debug
+ *
+ * Purpose: Debug native form of record
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_indir_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+ int indent, int fwidth, const void *_nrecord,
+ const void UNUSED *_udata)
+{
+ const H5HF_huge_bt2_filt_indir_rec_t *nrecord = (const H5HF_huge_bt2_filt_indir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_indir_debug)
+
+ HDfprintf(stream, "%*s%-*s {%a, %Hu, %x, %Hu, %Hu}\n", indent, "", fwidth, "Record:",
+ nrecord->addr, nrecord->len, nrecord->filter_mask, nrecord->obj_size, nrecord->id);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_indir_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_bt2_dir_remove
+ *
+ * Purpose: Free space for directly accessed 'huge' object, as v2 B-tree
+ * is being deleted or v2 B-tree node is being removed
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 8, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_huge_bt2_dir_remove(const void *nrecord, void *_udata)
+{
+ H5HF_huge_remove_ud1_t *udata = (H5HF_huge_remove_ud1_t *)_udata; /* User callback data */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_bt2_dir_remove)
+
+ /* Free the space in the file for the object being removed */
+ if(H5MF_xfree(udata->hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, udata->dxpl_id, ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->addr, ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->len) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free space for huge object on disk")
+
+ /* Set the length of the object removed */
+ udata->obj_len = ((const H5HF_huge_bt2_indir_rec_t *)nrecord)->len;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_huge_bt2_dir_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_dir_store
+ *
+ * Purpose: Store native information into record for v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_dir_store(void *nrecord, const void *udata)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_store)
+
+ *(H5HF_huge_bt2_dir_rec_t *)nrecord = *(const H5HF_huge_bt2_dir_rec_t *)udata;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_dir_store() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_dir_retrieve
+ *
+ * Purpose: Retrieve native information from record for v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 7, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_dir_retrieve(void *udata, const void *nrecord)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_retrieve)
+
+ *(H5HF_huge_bt2_dir_rec_t *)udata = *(const H5HF_huge_bt2_dir_rec_t *)nrecord;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_dir_retrieve() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_dir_compare
*
* Purpose: Compare two native information records, according to some key
*
@@ -228,47 +736,39 @@ H5HF_huge_btree2_retrieve(const H5B2_class_t UNUSED *cls, void *udata, const voi
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_compare(const H5B2_class_t *cls, const void *_rec1, const void *_rec2)
+H5HF_huge_btree2_dir_compare(const void *_rec1, const void *_rec2)
{
- const H5HF_huge_bt2_rec_t *rec1 = (const H5HF_huge_bt2_rec_t *)_rec1;
- const H5HF_huge_bt2_rec_t *rec2 = (const H5HF_huge_bt2_rec_t *)_rec2;
- const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)cls->cls_private;
+ const H5HF_huge_bt2_dir_rec_t *rec1 = (const H5HF_huge_bt2_dir_rec_t *)_rec1;
+ const H5HF_huge_bt2_dir_rec_t *rec2 = (const H5HF_huge_bt2_dir_rec_t *)_rec2;
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_compare)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_compare)
#ifdef QAK
-HDfprintf(stderr, "%s: hdr->huge_ids_direct = %t\n", "H5HF_huge_btree2_compare", hdr->huge_ids_direct);
-HDfprintf(stderr, "%s: rec1 = {%a, %Hu, %Hu}\n", "H5HF_huge_btree2_compare", rec1->addr, rec1->len, rec1->id);
-HDfprintf(stderr, "%s: rec2 = {%a, %Hu, %Hu}\n", "H5HF_huge_btree2_compare", rec2->addr, rec2->len, rec2->id);
+HDfprintf(stderr, "%s: rec1 = {%a, %Hu}\n", "H5HF_huge_btree2_dir_compare", rec1->addr, rec1->len);
+HDfprintf(stderr, "%s: rec2 = {%a, %Hu}\n", "H5HF_huge_btree2_dir_compare", rec2->addr, rec2->len);
#endif /* QAK */
- /* Sort differently, depending on whether 'huge' object directly reference disk */
- if(hdr->huge_ids_direct) {
- if(rec1->addr < rec2->addr)
- ret_value = -1;
- else if(rec1->addr > rec2->addr)
- ret_value = 1;
- else if(rec1->len < rec2->len)
- ret_value = -1;
- else if(rec1->len > rec2->len)
- ret_value = 1;
- else
- ret_value = 0;
- } /* end if */
+ if(rec1->addr < rec2->addr)
+ ret_value = -1;
+ else if(rec1->addr > rec2->addr)
+ ret_value = 1;
+ else if(rec1->len < rec2->len)
+ ret_value = -1;
+ else if(rec1->len > rec2->len)
+ ret_value = 1;
else
- ret_value = (herr_t)(rec1->id - rec2->id);
+ ret_value = 0;
- FUNC_LEAVE_NOAPI(ret_value);
-} /* H5HF_huge_btree2_compare() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_huge_btree2_dir_compare() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_encode
+ * Function: H5HF_huge_btree2_dir_encode
*
* Purpose: Encode native information into raw form for storing on disk
*
* Return: Success: non-negative
- *
* Failure: negative
*
* Programmer: Quincey Koziol
@@ -277,32 +777,26 @@ HDfprintf(stderr, "%s: rec2 = {%a, %Hu, %Hu}\n", "H5HF_huge_btree2_compare", rec
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_encode(const H5F_t *f, const H5B2_class_t *cls, uint8_t *raw, const void *_nrecord)
+H5HF_huge_btree2_dir_encode(const H5F_t *f, uint8_t *raw, const void *_nrecord)
{
- const H5HF_huge_bt2_rec_t *nrecord = (const H5HF_huge_bt2_rec_t *)_nrecord;
- const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)cls->cls_private;
+ const H5HF_huge_bt2_dir_rec_t *nrecord = (const H5HF_huge_bt2_dir_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_encode)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_encode)
- /* Encode the record's common fields */
+ /* Encode the record's fields */
H5F_addr_encode(f, &raw, nrecord->addr);
H5F_ENCODE_LENGTH(f, raw, nrecord->len);
- /* If 'huge' objects in this heap are not accessed directly, encode the ID also */
- if(!hdr->huge_ids_direct)
- UINT64ENCODE_VAR(raw, nrecord->id, hdr->huge_id_size)
-
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_btree2_encode() */
+} /* H5HF_huge_btree2_dir_encode() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_decode
+ * Function: H5HF_huge_btree2_dir_decode
*
* Purpose: Decode raw disk form of record into native form
*
* Return: Success: non-negative
- *
* Failure: negative
*
* Programmer: Quincey Koziol
@@ -311,34 +805,26 @@ H5HF_huge_btree2_encode(const H5F_t *f, const H5B2_class_t *cls, uint8_t *raw, c
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_decode(const H5F_t *f, const H5B2_class_t *cls, const uint8_t *raw, void *_nrecord)
+H5HF_huge_btree2_dir_decode(const H5F_t *f, const uint8_t *raw, void *_nrecord)
{
- H5HF_huge_bt2_rec_t *nrecord = (H5HF_huge_bt2_rec_t *)_nrecord;
- const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)cls->cls_private;
+ H5HF_huge_bt2_dir_rec_t *nrecord = (H5HF_huge_bt2_dir_rec_t *)_nrecord;
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_decode)
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_decode)
- /* Decode the record's common fields */
+ /* Decode the record's fields */
H5F_addr_decode(f, &raw, &nrecord->addr);
H5F_DECODE_LENGTH(f, raw, nrecord->len);
- /* If 'huge' objects in this heap are not accessed directly, decode the ID also */
- if(!hdr->huge_ids_direct)
- UINT64DECODE_VAR(raw, nrecord->id, hdr->huge_id_size)
- else
- nrecord->id = 0;
-
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_btree2_decode() */
+} /* H5HF_huge_btree2_dir_decode() */
/*-------------------------------------------------------------------------
- * Function: H5HF_huge_btree2_debug
+ * Function: H5HF_huge_btree2_dir_debug
*
* Purpose: Debug native form of record
*
* Return: Success: non-negative
- *
* Failure: negative
*
* Programmer: Quincey Koziol
@@ -347,24 +833,261 @@ H5HF_huge_btree2_decode(const H5F_t *f, const H5B2_class_t *cls, const uint8_t *
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_huge_btree2_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
- int indent, int fwidth, const H5B2_class_t *cls, const void *_nrecord,
+H5HF_huge_btree2_dir_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+ int indent, int fwidth, const void *_nrecord,
const void UNUSED *_udata)
{
- const H5HF_huge_bt2_rec_t *nrecord = (const H5HF_huge_bt2_rec_t *)_nrecord;
- const H5HF_hdr_t *hdr = (const H5HF_hdr_t *)cls->cls_private;
+ const H5HF_huge_bt2_dir_rec_t *nrecord = (const H5HF_huge_bt2_dir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_dir_debug)
+
+ HDfprintf(stream, "%*s%-*s {%a, %Hu}\n", indent, "", fwidth, "Record:",
+ nrecord->addr, nrecord->len);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_dir_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_bt2_filt_dir_found
+ *
+ * Purpose: Retrieve record for directly accessed, filtered 'huge' object,
+ * when it's found in the v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_huge_bt2_filt_dir_found(const void *nrecord, void *op_data)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_bt2_filt_dir_found)
+
+#ifdef QAK
+HDfprintf(stderr, "%s: nrecord = {%a, %Hu, %x, %Hu}\n", "H5HF_huge_bt2_filt_dir_found",
+ ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->addr,
+ ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->len,
+ ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->filter_mask,
+ ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->obj_size);
+#endif /* QAK */
+ *(H5HF_huge_bt2_filt_dir_rec_t *)op_data = *(const H5HF_huge_bt2_filt_dir_rec_t *)nrecord;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_bt2_filt_dir_found() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_bt2_filt_dir_remove
+ *
+ * Purpose: Free space for directly accessed, filtered 'huge' object, as
+ * v2 B-tree is being deleted or v2 B-tree node is removed
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_huge_bt2_filt_dir_remove(const void *nrecord, void *_udata)
+{
+ H5HF_huge_remove_ud1_t *udata = (H5HF_huge_remove_ud1_t *)_udata; /* User callback data */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_bt2_filt_dir_remove)
+
+ /* Free the space in the file for the object being removed */
+ if(H5MF_xfree(udata->hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, udata->dxpl_id, ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->addr, ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->len) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free space for huge object on disk")
+
+ /* Set the length of the object removed */
+ udata->obj_len = ((const H5HF_huge_bt2_filt_dir_rec_t *)nrecord)->obj_size;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_huge_bt2_filt_dir_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_store
+ *
+ * Purpose: Store native information into record for v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_store(void *nrecord, const void *udata)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_store)
+
+ *(H5HF_huge_bt2_filt_dir_rec_t *)nrecord = *(const H5HF_huge_bt2_filt_dir_rec_t *)udata;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_dir_store() */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_debug)
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_retrieve
+ *
+ * Purpose: Retrieve native information from record for v2 B-tree
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_retrieve(void *udata, const void *nrecord)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_retrieve)
+
+ *(H5HF_huge_bt2_filt_dir_rec_t *)udata = *(const H5HF_huge_bt2_filt_dir_rec_t *)nrecord;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_dir_retrieve() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_compare
+ *
+ * Purpose: Compare two native information records, according to some key
+ *
+ * Return: <0 if rec1 < rec2
+ * =0 if rec1 == rec2
+ * >0 if rec1 > rec2
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_compare(const void *_rec1, const void *_rec2)
+{
+ const H5HF_huge_bt2_filt_dir_rec_t *rec1 = (const H5HF_huge_bt2_filt_dir_rec_t *)_rec1;
+ const H5HF_huge_bt2_filt_dir_rec_t *rec2 = (const H5HF_huge_bt2_filt_dir_rec_t *)_rec2;
+ herr_t ret_value; /* Return value */
- HDassert(nrecord);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_compare)
- if(hdr->huge_ids_direct)
- HDfprintf(stream, "%*s%-*s {%a, %Hu}\n", indent, "", fwidth, "Record:",
- nrecord->addr, nrecord->len);
+#ifdef QAK
+HDfprintf(stderr, "%s: rec1 = {%a, %Hu, %x, %Hu}\n", "H5HF_huge_btree2_filt_dir_compare", rec1->addr, rec1->len, rec1->filter_mask, rec1->obj_size);
+HDfprintf(stderr, "%s: rec2 = {%a, %Hu, %x, %Hu}\n", "H5HF_huge_btree2_filt_dir_compare", rec2->addr, rec2->len, rec2->filter_mask, rec2->obj_size);
+#endif /* QAK */
+ if(rec1->addr < rec2->addr)
+ ret_value = -1;
+ else if(rec1->addr > rec2->addr)
+ ret_value = 1;
+ else if(rec1->len < rec2->len)
+ ret_value = -1;
+ else if(rec1->len > rec2->len)
+ ret_value = 1;
else
- HDfprintf(stream, "%*s%-*s {%a, %Hu, %Hu}\n", indent, "", fwidth, "Record:",
- nrecord->addr, nrecord->len, nrecord->id);
+ ret_value = 0;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_huge_btree2_filt_dir_compare() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_encode
+ *
+ * Purpose: Encode native information into raw form for storing on disk
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_encode(const H5F_t *f, uint8_t *raw, const void *_nrecord)
+{
+ const H5HF_huge_bt2_filt_dir_rec_t *nrecord = (const H5HF_huge_bt2_filt_dir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_encode)
+
+ /* Encode the record's fields */
+ H5F_addr_encode(f, &raw, nrecord->addr);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->len);
+ UINT32ENCODE(raw, nrecord->filter_mask);
+ H5F_ENCODE_LENGTH(f, raw, nrecord->obj_size);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_dir_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_decode
+ *
+ * Purpose: Decode raw disk form of record into native form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_decode(const H5F_t *f, const uint8_t *raw, void *_nrecord)
+{
+ H5HF_huge_bt2_filt_dir_rec_t *nrecord = (H5HF_huge_bt2_filt_dir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_decode)
+
+ /* Decode the record's fields */
+ H5F_addr_decode(f, &raw, &nrecord->addr);
+ H5F_DECODE_LENGTH(f, raw, nrecord->len);
+ UINT32DECODE(raw, nrecord->filter_mask);
+ H5F_DECODE_LENGTH(f, raw, nrecord->obj_size);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_huge_btree2_filt_dir_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_btree2_filt_dir_debug
+ *
+ * Purpose: Debug native form of record
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF_huge_btree2_filt_dir_debug(FILE *stream, const H5F_t UNUSED *f, hid_t UNUSED dxpl_id,
+ int indent, int fwidth, const void *_nrecord, const void UNUSED *_udata)
+{
+ const H5HF_huge_bt2_filt_dir_rec_t *nrecord = (const H5HF_huge_bt2_filt_dir_rec_t *)_nrecord;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_huge_btree2_filt_dir_debug)
+
+ HDfprintf(stream, "%*s%-*s {%a, %Hu, %x, %Hu}\n", indent, "", fwidth, "Record:",
+ nrecord->addr, nrecord->len, nrecord->filter_mask, nrecord->obj_size);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5HF_huge_btree2_debug() */
+} /* H5HF_huge_btree2_filt_dir_debug() */
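The H5HFbtree2.c rewrite replaces the single parameterized record class (which consulted hdr->huge_ids_direct and cls->cls_private at run time) with four fixed classes, each pairing a native record layout with its own store/retrieve/compare/encode/decode/debug callbacks. The sketch below shows the general shape of that callback-table pattern in plain C; the struct, field, and function names are illustrative, and the fixed 8-byte big-endian encoding stands in for the file's H5F_addr_encode/H5F_ENCODE_LENGTH routines.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative record for a directly addressed object: {file address, length} */
typedef struct {
    uint64_t addr;
    uint64_t len;
} dir_rec_t;

/* Illustrative "class" of per-record callbacks, in the spirit of H5B2_class_t */
typedef struct {
    size_t nrec_size;                               /* size of native record */
    int  (*compare)(const void *rec1, const void *rec2);
    void (*encode)(uint8_t *raw, const void *rec);
    void (*decode)(const uint8_t *raw, void *rec);
} rec_class_t;

/* Order records by address, then by length (as the 'dir' compare callbacks do) */
static int
dir_compare(const void *_r1, const void *_r2)
{
    const dir_rec_t *r1 = _r1, *r2 = _r2;

    if(r1->addr != r2->addr)
        return (r1->addr < r2->addr) ? -1 : 1;
    if(r1->len != r2->len)
        return (r1->len < r2->len) ? -1 : 1;
    return 0;
}

/* Helper: append one 64-bit value to the raw buffer, big-endian */
static void
encode_u64(uint8_t **p, uint64_t v)
{
    for(int i = 7; i >= 0; i--)
        *(*p)++ = (uint8_t)(v >> (8 * i));
}

/* Encode the record's fields into the raw on-disk form */
static void
dir_encode(uint8_t *raw, const void *_rec)
{
    const dir_rec_t *rec = _rec;

    encode_u64(&raw, rec->addr);
    encode_u64(&raw, rec->len);
}

/* Decode the raw on-disk form back into the native record */
static void
dir_decode(const uint8_t *raw, void *_rec)
{
    dir_rec_t *rec = _rec;

    rec->addr = rec->len = 0;
    for(int i = 0; i < 8; i++)  rec->addr = (rec->addr << 8) | raw[i];
    for(int i = 8; i < 16; i++) rec->len  = (rec->len  << 8) | raw[i];
}

/* One fixed class per record layout, instead of branching inside shared callbacks */
static const rec_class_t DIR_CLASS = { sizeof(dir_rec_t), dir_compare, dir_encode, dir_decode };

int
main(void)
{
    dir_rec_t in = { 4096, 128 }, out;
    uint8_t raw[16];

    DIR_CLASS.encode(raw, &in);
    DIR_CLASS.decode(raw, &out);
    printf("round trip: addr=%llu len=%llu, compare=%d\n",
           (unsigned long long)out.addr, (unsigned long long)out.len,
           DIR_CLASS.compare(&in, &out));
    return 0;
}

Splitting the classes this way moves the direct-vs-indirect and filtered-vs-unfiltered decisions to the point where each B-tree is created, so the per-record callbacks no longer need class-private heap state at run time.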
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index e174260..63e4e46 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -35,6 +35,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5HFpkg.h" /* Fractal heaps */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Vprivate.h" /* Vectors and arrays */
/****************/
@@ -309,8 +310,9 @@ HDfprintf(stderr, "%s: Load heap header, addr = %a\n", FUNC, addr);
if(metadata_chksum != 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect metadata checksum for fractal heap header")
- /* Heap ID length */
- UINT16DECODE(p, hdr->id_len);
+ /* General heap information */
+ UINT16DECODE(p, hdr->id_len); /* Heap ID length */
+ UINT16DECODE(p, hdr->filter_len); /* I/O filters' encoded length */
/* Heap status flags */
/* (bit 0: "huge" object IDs have wrapped) */
@@ -333,13 +335,62 @@ HDfprintf(stderr, "%s: Load heap header, addr = %a\n", FUNC, addr);
H5F_DECODE_LENGTH(f, p, hdr->man_nobjs);
H5F_DECODE_LENGTH(f, p, hdr->huge_size);
H5F_DECODE_LENGTH(f, p, hdr->huge_nobjs);
+ H5F_DECODE_LENGTH(f, p, hdr->tiny_size);
+ H5F_DECODE_LENGTH(f, p, hdr->tiny_nobjs);
/* Managed objects' doubling-table info */
if(H5HF_dtable_decode(hdr->f, &p, &(hdr->man_dtable)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, NULL, "unable to encode managed obj. doubling table info")
+ /* Sanity check */
HDassert((size_t)(p - buf) == size);
+ /* Check for I/O filter information to decode */
+ if(hdr->filter_len > 0) {
+ size_t filter_info_size; /* Size of filter information */
+ H5O_pline_t *pline; /* Pipeline information from the header on disk */
+
+ /* Compute the size of the extra filter information */
+ filter_info_size = hdr->sizeof_size /* Size of size for filtered root direct block */
+ + 4 /* Size of filter mask for filtered root direct block */
+ + hdr->filter_len; /* Size of encoded I/O filter info */
+
+ /* Check if the current buffer can be re-used, or needs to be re-sized */
+ if(filter_info_size > size) {
+ if((buf = H5FL_BLK_REALLOC(header_block, buf, filter_info_size)) == NULL)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "can't allocate space to decode I/O pipeline filters")
+ } /* end if */
+
+ /* Read in I/O filter information */
+ if(H5F_block_read(f, H5FD_MEM_FHEAP_HDR, (addr + size), filter_info_size, dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "can't read fractal heap header's I/O pipeline filter info")
+
+ p = buf;
+
+ /* Decode the size of a filtered root direct block */
+ H5F_DECODE_LENGTH(f, p, hdr->pline_root_direct_size);
+
+ /* Decode the filter mask for a filtered root direct block */
+ UINT32DECODE(p, hdr->pline_root_direct_filter_mask);
+
+ /* Decode I/O filter information */
+ if(NULL == (pline = H5O_decode(hdr->f, p, H5O_PLINE_ID)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode I/O pipeline filters")
+
+ /* Copy the information into the header's I/O pipeline structure */
+ if(NULL == H5O_copy(H5O_PLINE_ID, pline, &(hdr->pline)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTCOPY, NULL, "can't copy I/O filter pipeline")
+
+ /* Release the space allocated for the I/O pipeline filters */
+ H5O_free(H5O_PLINE_ID, pline);
+
+ /* Compute the heap header's size */
+ hdr->heap_size = size + filter_info_size;
+ } /* end if */
+ else
+ /* Set the heap header's size */
+ hdr->heap_size = size;
+
/* Finish initialization of heap header */
if(H5HF_hdr_finish_init(hdr) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't finish initializing shared fractal heap header")
@@ -398,7 +449,7 @@ HDfprintf(stderr, "%s: Flushing heap header, addr = %a, destroy = %u\n", FUNC, a
HDassert(hdr->dirty);
/* Compute the size of the heap header on disk */
- size = H5HF_HEADER_SIZE(hdr);
+ size = hdr->heap_size;
/* Allocate temporary buffer */
if((buf = H5FL_BLK_MALLOC(header_block, size)) == NULL)
@@ -422,8 +473,9 @@ HDfprintf(stderr, "%s: Flushing heap header, addr = %a, destroy = %u\n", FUNC, a
HDmemset(p, 0, (size_t)4);
p += 4;
- /* Heap ID length */
- UINT16ENCODE(p, hdr->id_len);
+ /* General heap information */
+ UINT16ENCODE(p, hdr->id_len); /* Heap ID length */
+ UINT16ENCODE(p, hdr->filter_len); /* I/O filters' encoded length */
/* Heap status flags */
/* (bit 0: "huge" object IDs have wrapped) */
@@ -447,11 +499,27 @@ HDfprintf(stderr, "%s: Flushing heap header, addr = %a, destroy = %u\n", FUNC, a
H5F_ENCODE_LENGTH(f, p, hdr->man_nobjs);
H5F_ENCODE_LENGTH(f, p, hdr->huge_size);
H5F_ENCODE_LENGTH(f, p, hdr->huge_nobjs);
+ H5F_ENCODE_LENGTH(f, p, hdr->tiny_size);
+ H5F_ENCODE_LENGTH(f, p, hdr->tiny_nobjs);
/* Managed objects' doubling-table info */
if(H5HF_dtable_encode(hdr->f, &p, &(hdr->man_dtable)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "unable to encode managed obj. doubling table info")
+ /* Check for I/O filter information to encode */
+ if(hdr->filter_len > 0) {
+ /* Encode the size of a filtered root direct block */
+ H5F_ENCODE_LENGTH(f, p, hdr->pline_root_direct_size);
+
+ /* Encode the filter mask for a filtered root direct block */
+ UINT32ENCODE(p, hdr->pline_root_direct_filter_mask);
+
+ /* Encode I/O filter information */
+ if(H5O_encode(hdr->f, p, &(hdr->pline), H5O_PLINE_ID) < 0)
+            HGOTO_ERROR(H5E_HEAP, H5E_CANTENCODE, FAIL, "can't encode I/O pipeline filters")
+ p += hdr->filter_len;
+ } /* end if */
+
/* Write the heap header. */
HDassert((size_t)(p - buf) == size);
if(H5F_block_write(f, H5FD_MEM_FHEAP_HDR, addr, size, dxpl_id, buf) < 0)
@@ -500,6 +568,10 @@ H5HF_cache_hdr_dest(H5F_t UNUSED *f, H5HF_hdr_t *hdr)
/* Free the block size lookup table for the doubling table */
H5HF_dtable_dest(&hdr->man_dtable);
+ /* Release any I/O pipeline filter information */
+ if(hdr->pline.nused)
+ H5O_reset(H5O_PLINE_ID, &(hdr->pline));
+
/* Free the shared info itself */
H5FL_FREE(H5HF_hdr_t, hdr);
@@ -570,7 +642,7 @@ H5HF_cache_hdr_size(const H5F_t UNUSED *f, const H5HF_hdr_t *hdr, size_t *size_p
HDassert(size_ptr);
/* Set size value */
- *size_ptr = H5HF_HEADER_SIZE(hdr);
+ *size_ptr = hdr->heap_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5HF_cache_hdr_size() */
@@ -595,6 +667,7 @@ static H5HF_direct_t *
H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size, void *_par_info)
{
const size_t *size = (const size_t *)_size; /* Size of block */
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
H5HF_parent_t *par_info = (H5HF_parent_t *)_par_info; /* Pointer to parent information */
H5HF_direct_t *dblock = NULL; /* Direct block info */
const uint8_t *p; /* Pointer into raw data buffer */
@@ -614,11 +687,22 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size,
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&dblock->cache_info, 0, sizeof(H5AC_info_t));
+ /* Get the pointer to the shared heap header */
+ hdr = par_info->hdr;
+
/* Share common heap information */
- dblock->hdr = par_info->hdr;
- if(H5HF_hdr_incr(dblock->hdr) < 0)
+ dblock->hdr = hdr;
+ if(H5HF_hdr_incr(hdr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
+#ifdef LATER
+ /* Check for I/O filters on this heap */
+ if(hdr->filter_len > 0) {
+HDfprintf(stderr, "%s: I/O filters not supported yet!\n", FUNC);
+HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, NULL, "I/O filters not supported yet")
+ } /* end if */
+#endif /* LATER */
+
/* Set block's internal information */
dblock->size = *size;
dblock->blk_off_size = H5HF_SIZEOF_OFFSET_LEN(dblock->size);
@@ -656,7 +740,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size,
/* Address of heap that owns this block (skip) */
H5F_addr_decode(f, &p, &heap_addr);
- if(H5F_addr_ne(heap_addr, dblock->hdr->heap_addr))
+ if(H5F_addr_ne(heap_addr, hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
/* Address of parent block */
@@ -669,7 +753,7 @@ H5HF_cache_dblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_size,
} /* end if */
/* Offset of heap within the heap's address space */
- UINT64DECODE_VAR(p, dblock->block_off, dblock->hdr->heap_off_size);
+ UINT64DECODE_VAR(p, dblock->block_off, hdr->heap_off_size);
/* Set return value */
ret_value = dblock;
@@ -709,6 +793,8 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
if(dblock->cache_info.is_dirty) {
H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ void *write_buf; /* Pointer to buffer to write out */
+ size_t write_size; /* Size of buffer to write out */
uint8_t *p; /* Pointer into raw data buffer */
/* Get the pointer to the shared heap header */
@@ -742,10 +828,46 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
/* Sanity check */
HDassert((size_t)(p - dblock->blk) == H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr));
+#ifdef LATER
+ /* Check for I/O filters on this heap */
+ if(hdr->filter_len > 0) {
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+ size_t nbytes; /* Number of bytes used */
+ unsigned filter_mask; /* Filter mask for block */
+
+ /* Allocate buffer to perform I/O filtering on */
+ write_size = dblock->size;
+ if(NULL == (write_buf = H5MM_malloc(write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
+ HDmemcpy(write_buf, dblock->blk, write_size);
+
+ /* Push direct block data through I/O filter pipeline */
+ nbytes = write_size;
+ if(H5Z_pipeline(&(hdr->pline), 0, &filter_mask, H5Z_ENABLE_EDC,
+ filter_cb, &nbytes, &write_size, &write_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "output pipeline failed")
+HDfprintf(stderr, "%s: nbytes = %Zu, write_size = %Zu, write_buf = %p\n", FUNC, nbytes, write_size, write_buf);
+HDfprintf(stderr, "%s: dblock->size = %Zu, dblock->blk = %p\n", FUNC, dblock->size, dblock->blk);
+
+HDfprintf(stderr, "%s: I/O filters not supported yet!\n", FUNC);
+HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "I/O filters not supported yet")
+ } /* end if */
+ else {
+#endif /* LATER */
+ write_buf = dblock->blk;
+ write_size = dblock->size;
+#ifdef LATER
+ } /* end else */
+#endif /* LATER */
+
/* Write the direct block */
- if(H5F_block_write(f, H5FD_MEM_FHEAP_DBLOCK, addr, (size_t)dblock->size, dxpl_id, dblock->blk) < 0)
+ if(H5F_block_write(f, H5FD_MEM_FHEAP_DBLOCK, addr, write_size, dxpl_id, write_buf) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFLUSH, FAIL, "unable to save fractal heap direct block to disk")
+ /* Release the write buffer, if it was allocated */
+ if(write_buf != dblock->blk)
+ H5MM_xfree(write_buf);
+
dblock->cache_info.is_dirty = FALSE;
} /* end if */
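The flush path above, together with the #ifdef LATER branch it anticipates, follows a simple buffer-ownership pattern: filter into a scratch buffer only when the heap has filters, write whichever buffer holds the final bytes, and free the scratch buffer only if one was allocated. The sketch below compresses that control flow; the toy copy-through routine and stdio I/O are stand-ins for H5Z_pipeline and H5F_block_write, not the HDF5 calls themselves.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for H5Z_pipeline: "filters" by copying the input (assumption) */
static int toy_filter(const void *in, size_t in_size, void **out, size_t *out_size)
{
    if(NULL == (*out = malloc(in_size)))
        return -1;
    memcpy(*out, in, in_size);
    *out_size = in_size;
    return 0;
}

static int flush_block(FILE *f, long addr, void *blk, size_t blk_size, int have_filters)
{
    void *write_buf = blk;              /* default: write the block as-is */
    size_t write_size = blk_size;
    int ret = 0;

    if(have_filters && toy_filter(blk, blk_size, &write_buf, &write_size) < 0)
        return -1;

    if(fseek(f, addr, SEEK_SET) < 0 || fwrite(write_buf, 1, write_size, f) != write_size)
        ret = -1;

    if(write_buf != blk)                /* release the scratch buffer only if allocated */
        free(write_buf);
    return ret;
}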
@@ -892,6 +1014,7 @@ H5HF_cache_dblock_size(const H5F_t UNUSED *f, const H5HF_direct_t *dblock, size_
static H5HF_indirect_t *
H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *_nrows, void *_par_info)
{
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
const unsigned *nrows = (const unsigned *)_nrows; /* # of rows in indirect block */
H5HF_parent_t *par_info = (H5HF_parent_t *)_par_info; /* Shared parent information */
H5HF_indirect_t *iblock = NULL; /* Indirect block info */
@@ -917,9 +1040,12 @@ HDfprintf(stderr, "%s: Load indirect block, addr = %a\n", FUNC, addr);
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
HDmemset(&iblock->cache_info, 0, sizeof(H5AC_info_t));
+ /* Get the pointer to the shared heap header */
+ hdr = par_info->hdr;
+
/* Share common heap information */
- iblock->hdr = par_info->hdr;
- if(H5HF_hdr_incr(iblock->hdr) < 0)
+ iblock->hdr = hdr;
+ if(H5HF_hdr_incr(hdr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINC, NULL, "can't increment reference count on shared heap header")
/* Set block's internal information */
@@ -929,7 +1055,7 @@ HDfprintf(stderr, "%s: Load indirect block, addr = %a\n", FUNC, addr);
iblock->nchildren = 0;
/* Compute size of indirect block */
- iblock->size = H5HF_MAN_INDIRECT_SIZE(iblock->hdr, iblock);
+ iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
/* Allocate buffer to decode block */
/* XXX: Use free list factories? */
@@ -964,7 +1090,7 @@ HDfprintf(stderr, "%s: Load indirect block, addr = %a\n", FUNC, addr);
/* Address of heap that owns this block */
H5F_addr_decode(f, &p, &heap_addr);
- if(H5F_addr_ne(heap_addr, iblock->hdr->heap_addr))
+ if(H5F_addr_ne(heap_addr, hdr->heap_addr))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "incorrect heap header address for direct block")
/* Address of parent block */
@@ -980,31 +1106,59 @@ HDfprintf(stderr, "%s: Load indirect block, addr = %a\n", FUNC, addr);
} /* end if */
else {
/* Set max. # of rows in this block */
- iblock->max_rows = iblock->hdr->man_dtable.max_root_rows;
+ iblock->max_rows = hdr->man_dtable.max_root_rows;
} /* end else */
/* Offset of heap within the heap's address space */
- UINT64DECODE_VAR(p, iblock->block_off, iblock->hdr->heap_off_size);
+ UINT64DECODE_VAR(p, iblock->block_off, hdr->heap_off_size);
- /* Allocate & decode indirect block entry tables */
+ /* Allocate & decode child block entry tables */
HDassert(iblock->nrows > 0);
- if(NULL == (iblock->ents = H5FL_SEQ_MALLOC(H5HF_indirect_ent_t, (size_t)(iblock->nrows * iblock->hdr->man_dtable.cparam.width))))
+ if(NULL == (iblock->ents = H5FL_SEQ_MALLOC(H5HF_indirect_ent_t, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for direct entries")
- for(u = 0; u < (iblock->nrows * iblock->hdr->man_dtable.cparam.width); u++) {
+ if(hdr->filter_len > 0) {
+ unsigned dir_rows; /* Number of direct rows in this indirect block */
+
+ /* Compute the number of direct rows for this indirect block */
+ dir_rows = MIN(iblock->nrows, hdr->man_dtable.max_direct_rows);
+
+ /* Allocate indirect block filtered entry array */
+ if(NULL == (iblock->filt_ents = H5FL_SEQ_MALLOC(H5HF_indirect_filt_ent_t, (size_t)(dir_rows * hdr->man_dtable.cparam.width))))
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for filtered block entries")
+ } /* end if */
+ else
+ iblock->filt_ents = NULL;
+ for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++) {
/* Decode child block address */
H5F_addr_decode(f, &p, &(iblock->ents[u].addr));
+ /* Check for heap with I/O filters */
+ if(hdr->filter_len > 0) {
+ /* Sanity check */
+ HDassert(iblock->filt_ents);
+
+ /* Decode extra information for direct blocks */
+ if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) {
+ /* Size of filtered direct block */
+ H5F_DECODE_LENGTH(f, p, iblock->filt_ents[u].size);
+
+ /* Sanity check */
+ /* (either both the address & size are defined or both are
+ * not defined)
+ */
+ HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
+ || (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
+
+ /* I/O filter mask for filtered direct block */
+ UINT32DECODE(p, iblock->filt_ents[u].filter_mask);
+ } /* end if */
+ } /* end if */
+
/* Count child blocks */
if(H5F_addr_defined(iblock->ents[u].addr)) {
iblock->nchildren++;
iblock->max_child = u;
} /* end if */
-
-#ifdef LATER
- /* Decode direct & indirect blocks differently (later, when direct blocks can be compressed) */
- if(u < (iblock->hdr->man_dtable.max_direct_rows * iblock->hdr->man_dtable.cparam.width))
- UINT32DECODE_VAR(p, iblock->ents[u].free_space, iblock->hdr->man_dtable.max_dir_blk_off_size)
-#endif /* LATER */
#ifdef QAK
HDfprintf(stderr, "%s: iblock->ents[%Zu] = {%a}\n", FUNC, u, iblock->ents[u].addr);
#endif /* QAK */
@@ -1113,6 +1267,31 @@ HDfprintf(stderr, "%s: iblock->ents[%Zu] = {%a}\n", FUNC, u, iblock->ents[u].add
/* Encode child block address */
H5F_addr_encode(f, &p, iblock->ents[u].addr);
+ /* Check for heap with I/O filters */
+ if(hdr->filter_len > 0) {
+ /* Sanity check */
+ HDassert(iblock->filt_ents);
+
+ /* Encode extra information for direct blocks */
+ if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width)) {
+#ifdef QAK
+HDfprintf(stderr, "%s: iblock->filt_ents[%Zu] = {%Zu, %x}\n", FUNC, u, iblock->filt_ents[u].size, iblock->filt_ents[u].filter_mask);
+#endif /* QAK */
+ /* Sanity check */
+ /* (either both the address & size are defined or both are
+ * not defined)
+ */
+ HDassert((H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size)
+ || (!H5F_addr_defined(iblock->ents[u].addr) && iblock->filt_ents[u].size == 0));
+
+ /* Size of filtered direct block */
+ H5F_ENCODE_LENGTH(f, p, iblock->filt_ents[u].size);
+
+ /* I/O filter mask for filtered direct block */
+ UINT32ENCODE(p, iblock->filt_ents[u].filter_mask);
+ } /* end if */
+ } /* end if */
+
#ifndef NDEBUG
/* Count child blocks */
if(H5F_addr_defined(iblock->ents[u].addr)) {
@@ -1121,12 +1300,6 @@ HDfprintf(stderr, "%s: iblock->ents[%Zu] = {%a}\n", FUNC, u, iblock->ents[u].add
max_child = u;
} /* end if */
#endif /* NDEBUG */
-
-#ifdef LATER
- /* Encode direct & indirect blocks differently (when direct blocks can be compressed) */
- if(u < (hdr->man_dtable.max_direct_rows * hdr->man_dtable.cparam.width))
- UINT32ENCODE_VAR(p, iblock->ents[u].free_space, hdr->man_dtable.max_dir_blk_off_size)
-#endif /* LATER */
} /* end for */
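As the encode loop above shows, every entry carries the child block address, and when the heap has I/O filters the entries that can reference direct blocks additionally carry the filtered block length plus a 4-byte filter mask. A small sketch of the resulting per-entry on-disk size; the widths are plain parameters rather than the real header fields.

#include <stddef.h>

/* Bytes needed on disk for entry 'u' of an indirect block (sketch) */
static size_t entry_disk_size(unsigned u, unsigned width, unsigned max_direct_rows,
    size_t sizeof_addr, size_t sizeof_size, int have_filters)
{
    size_t n = sizeof_addr;                     /* child block address, always present */

    /* Entries in direct-block rows also store the filtered size + 4-byte mask */
    if(have_filters && u < (max_direct_rows * width))
        n += sizeof_size + 4;
    return n;
}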
/* Sanity check */
@@ -1197,6 +1370,8 @@ HDfprintf(stderr, "%s: Destroying indirect block\n", FUNC);
/* Release entry tables */
if(iblock->ents)
H5FL_SEQ_FREE(H5HF_indirect_ent_t, iblock->ents);
+ if(iblock->filt_ents)
+ H5FL_SEQ_FREE(H5HF_indirect_filt_ent_t, iblock->filt_ents);
/* Free fractal heap indirect block info */
H5FL_FREE(H5HF_indirect_t, iblock);
diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c
index cfff2ca..b7d8b79 100644
--- a/src/H5HFdbg.c
+++ b/src/H5HFdbg.c
@@ -216,9 +216,6 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
"Objects stored in 'debugging' format:",
hdr->debug_objs);
HDfprintf(stream, "%*s%-*s %t\n", indent, "", fwidth,
- "I/O filters present:",
- hdr->have_io_filter);
- HDfprintf(stream, "%*s%-*s %t\n", indent, "", fwidth,
"'Write once' flag:",
hdr->write_once);
HDfprintf(stream, "%*s%-*s %t\n", indent, "", fwidth,
@@ -257,9 +254,22 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
"Address of v2 B-tree for 'huge' objects:",
hdr->huge_bt2_addr);
+ HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
+ "'Tiny' object space used:",
+ hdr->tiny_size);
+ HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
+ "Number of 'tiny' objects in heap:",
+ hdr->tiny_nobjs);
HDfprintf(stream, "%*sManaged Objects Doubling-Table Info...\n", indent, "");
- H5HF_dtable_debug(&hdr->man_dtable, stream, indent + 3, MAX(0, fwidth -3));
+ H5HF_dtable_debug(&hdr->man_dtable, stream, indent + 3, MAX(0, fwidth - 3));
+
+ /* Print information about I/O filters */
+ if(hdr->filter_len > 0) {
+ HDfprintf(stream, "%*sI/O filter Info...\n", indent, "");
+ H5O_debug_id(H5O_PLINE_ID, f, dxpl_id, &(hdr->pline), stream,
+ indent + 3, MAX(0, fwidth - 3));
+ } /* end if */
done:
if(hdr && H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_HDR, addr, hdr, H5AC__NO_FLAGS_SET) < 0)
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index a0af4b1..8376da4 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -232,7 +232,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5HF_hdr_finish_init_pahse2
+ * Function: H5HF_hdr_finish_init_phase2
*
* Purpose: Second phase to finish initializing info in shared heap header
*
@@ -281,7 +281,11 @@ HDfprintf(stderr, "%s: row_max_dblock_free[%Zu] = %Zu\n", FUNC, u, hdr->man_dtab
/* Initialize the information for tracking 'huge' objects */
if(H5HF_huge_init(hdr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't informan for tracking huge objects")
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't initialize info for tracking huge objects")
+
+ /* Initialize the information for tracking 'tiny' objects */
+ if(H5HF_tiny_init(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't initialize info for tracking tiny objects")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -343,7 +347,6 @@ haddr_t
H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
{
H5HF_hdr_t *hdr = NULL; /* The new fractal heap header information */
- haddr_t hdr_addr; /* Heap header address */
size_t dblock_overhead; /* Direct block's overhead */
haddr_t ret_value; /* Return value */
@@ -388,12 +391,7 @@ H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, HADDR_UNDEF, "max. heap size too large for file")
#endif /* NDEBUG */
- /* Allocate space for the header on disk */
- if(HADDR_UNDEF == (hdr_addr = H5MF_alloc(f, H5FD_MEM_FHEAP_HDR, dxpl_id, (hsize_t)H5HF_HEADER_SIZE(hdr))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for fractal heap header")
-
/* Set the creation parameters for the heap */
- hdr->heap_addr = hdr_addr;
hdr->max_man_size = cparam->max_man_size;
HDmemcpy(&(hdr->man_dtable.cparam), &(cparam->managed), sizeof(H5HF_dtable_cparam_t));
@@ -410,9 +408,37 @@ H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
hdr->dirty = TRUE;
/* First phase of header final initialization */
+ /* (doesn't need ID length set up) */
if(H5HF_hdr_finish_init_phase1(hdr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, HADDR_UNDEF, "can't finish phase #1 of header final initialization")
+ /* Copy any I/O filter pipeline */
+ /* (This code is not in the "finish init phase" routines because those
+ * routines are also called from the cache 'load' callback, and the filter
+     * length is already set in that case (it's stored in the header on disk))
+ */
+ if(cparam->pline.nused > 0) {
+ /* Copy the I/O filter pipeline from the creation parameters to the header */
+ if(NULL == H5O_copy(H5O_PLINE_ID, &(cparam->pline), &(hdr->pline)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTCOPY, HADDR_UNDEF, "can't copy I/O filter pipeline")
+
+ /* Compute the I/O filters' encoded size */
+ if(0 == (hdr->filter_len = H5O_raw_size(H5O_PLINE_ID, hdr->f, &(hdr->pline))))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGETSIZE, HADDR_UNDEF, "can't get I/O filter pipeline size")
+#ifdef QAK
+HDfprintf(stderr, "%s: hdr->filter_len = %u\n", FUNC, hdr->filter_len);
+#endif /* QAK */
+
+ /* Compute size of header on disk */
+ hdr->heap_size = H5HF_HEADER_SIZE(hdr) /* Base header size */
+ + hdr->sizeof_size /* Size of size for filtered root direct block */
+ + 4 /* Size of filter mask for filtered root direct block */
+ + hdr->filter_len; /* Size of encoded I/O filter info */
+ } /* end if */
+ else
+ /* Set size of header on disk */
+ hdr->heap_size = H5HF_HEADER_SIZE(hdr);
+
/* Set the length of IDs in the heap */
/* (This code is not in the "finish init phase" routines because those
* routines are also called from the cache 'load' callback, and the ID
@@ -423,39 +449,55 @@ H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
hdr->id_len = 1 + hdr->heap_off_size + hdr->heap_len_size;
break;
- case 1: /* Set the length of heap IDs to just enough to hold the file offset & length of 'huge' objects in the heap */
- hdr->id_len = 1 + hdr->sizeof_size + hdr->sizeof_addr;
+ case 1: /* Set the length of heap IDs to just enough to hold the information needed to directly access 'huge' objects in the heap */
+ if(hdr->filter_len > 0)
+ hdr->id_len = 1 /* ID flags */
+ + hdr->sizeof_addr /* Address of filtered object */
+ + hdr->sizeof_size /* Length of filtered object */
+ + 4 /* Filter mask for filtered object */
+ + hdr->sizeof_size; /* Size of de-filtered object in memory */
+ else
+ hdr->id_len = 1 /* ID flags */
+ + hdr->sizeof_addr /* Address of object */
+ + hdr->sizeof_size; /* Length of object */
break;
default: /* Use the requested size for the heap ID */
-/* XXX: Limit heap ID length to 4096 + 1, due to # of bits required to store
- * length of 'tiny' objects (12 bits)
- */
-HDfprintf(stderr, "%s: Varying size of heap IDs not supported yet!\n", FUNC);
-HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, HADDR_UNDEF, "varying size of heap IDs not supported yet")
+ /* Check boundaries */
+ if(cparam->id_len < (1 + hdr->heap_off_size + hdr->heap_len_size))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, HADDR_UNDEF, "ID length not large enough to hold object IDs")
+ else if(cparam->id_len > H5HF_MAX_ID_LEN)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, HADDR_UNDEF, "ID length too large to store tiny object lengths")
+
+ /* Use the requested size for the heap ID */
+ hdr->id_len = cparam->id_len;
break;
} /* end switch */
+#ifdef QAK
+HDfprintf(stderr, "%s: hdr->id_len = %Zu\n", FUNC, hdr->id_len);
+#endif /* QAK */
/* Second phase of header final initialization */
+ /* (needs ID and filter lengths set up) */
if(H5HF_hdr_finish_init_phase2(hdr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, HADDR_UNDEF, "can't finish phase #2 of header final initialization")
-#ifdef QAK
-HDfprintf(stderr, "%s: hdr->id_len = %Zu\n", FUNC, hdr->id_len);
-#endif /* QAK */
-
/* Extra checking for possible gap between max. direct block size minus
* overhead and "huge" object size */
dblock_overhead = H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr);
if((cparam->managed.max_direct_size - dblock_overhead) < cparam->max_man_size)
HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, HADDR_UNDEF, "max. direct block size not large enough to hold all managed blocks")
+ /* Allocate space for the header on disk */
+ if(HADDR_UNDEF == (hdr->heap_addr = H5MF_alloc(f, H5FD_MEM_FHEAP_HDR, dxpl_id, (hsize_t)hdr->heap_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for fractal heap header")
+
/* Cache the new fractal heap header */
- if(H5AC_set(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_set(f, dxpl_id, H5AC_FHEAP_HDR, hdr->heap_addr, hdr, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, HADDR_UNDEF, "can't add fractal heap header to cache")
/* Set address of heap header to return */
- ret_value = hdr_addr;
+ ret_value = hdr->heap_addr;
done:
if(!H5F_addr_defined(ret_value))
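The id_len switch above now accepts a caller-chosen heap ID length in its default branch, bounded below by the managed-object encoding (one flag byte plus the heap offset and length widths) and above by the limit that keeps 'tiny' object lengths encodable (the removed comment put that ceiling around 4096 + 1 bytes). A standalone sketch of just that range check; the 4097 used here is a placeholder for H5HF_MAX_ID_LEN, whose exact definition is not shown in this patch, and the special 'huge'-direct case (case 1) is not modeled.

#include <stddef.h>

#define SKETCH_MAX_ID_LEN 4097      /* placeholder for H5HF_MAX_ID_LEN */

/* Return the heap ID length to use, or 0 if the request is out of range (sketch) */
static size_t choose_id_len(size_t requested, size_t heap_off_size, size_t heap_len_size)
{
    size_t min_len = 1 + heap_off_size + heap_len_size;  /* flag byte + offset + length */

    if(requested == 0)                  /* "default" size: just managed objects */
        return min_len;
    if(requested < min_len || requested > SKETCH_MAX_ID_LEN)
        return 0;                       /* too small to hold an ID, or too large for 'tiny' lengths */
    return requested;                   /* honor the caller's request */
}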
diff --git a/src/H5HFhuge.c b/src/H5HFhuge.c
index 7a47dba..cc5b824 100644
--- a/src/H5HFhuge.c
+++ b/src/H5HFhuge.c
@@ -37,6 +37,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5HFpkg.h" /* Fractal heaps */
#include "H5MFprivate.h" /* File memory management */
+#include "H5MMprivate.h" /* Memory management */
/****************/
@@ -63,20 +64,25 @@
/* Local Prototypes */
/********************/
-/* local v2 B-tree operations */
+/* Local v2 B-tree operations */
static herr_t H5HF_huge_bt2_create(H5HF_hdr_t *hdr, hid_t dxpl_id);
/* v2 B-tree function callbacks (in H5HFbtree2.c) */
-herr_t H5HF_huge_bt2_found(const void *nrecord, void *op_data);
-herr_t H5HF_huge_bt2_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_indir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_indir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_indir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_indir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_dir_remove(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_dir_found(const void *nrecord, void *op_data);
+herr_t H5HF_huge_bt2_filt_dir_remove(const void *nrecord, void *op_data);
+
+/* Local 'huge' object support routines */
+static hsize_t H5HF_huge_new_id(H5HF_hdr_t *hdr);
/*********************/
/* Package Variables */
/*********************/
-/* The v2 B-tree class for tracking huge objects */
-H5_DLLVAR const H5B2_class_t H5HF_BTREE2[1];
-
/*****************************/
/* Library Private Variables */
@@ -104,6 +110,7 @@ H5_DLLVAR const H5B2_class_t H5HF_BTREE2[1];
static herr_t
H5HF_huge_bt2_create(H5HF_hdr_t *hdr, hid_t dxpl_id)
{
+ const H5B2_class_t *bt2_class; /* v2 B-tree class to use */
size_t rrec_size; /* Size of 'raw' records on disk */
herr_t ret_value = SUCCEED; /* Return value */
@@ -115,13 +122,45 @@ H5HF_huge_bt2_create(H5HF_hdr_t *hdr, hid_t dxpl_id)
HDassert(hdr);
/* Compute the size of 'raw' records on disk */
- if(hdr->huge_ids_direct)
- rrec_size = hdr->sizeof_addr + hdr->sizeof_size;
- else
- rrec_size = hdr->sizeof_addr + hdr->sizeof_size + hdr->huge_id_size;
+ /* (Note: the size for huge IDs could be set to 'huge_id_size', instead
+ * of 'sizeof_size', but that would make the v2 B-tree callback routines
+ * depend on the heap header, which makes the v2 B-tree flush routines
+     * difficult to write. "Waste" an extra byte or two for small heaps (where
+     * the 'huge_id_size' is < 'sizeof_size') in order to make this easier -QAK)
+ */
+ if(hdr->huge_ids_direct) {
+ if(hdr->filter_len > 0) {
+ rrec_size = hdr->sizeof_addr /* Address of object */
+ + hdr->sizeof_size /* Length of object */
+ + 4 /* Filter mask for filtered object */
+ + hdr->sizeof_size; /* Size of de-filtered object in memory */
+ bt2_class = H5HF_BT2_FILT_DIR;
+ } /* end if */
+ else {
+ rrec_size = hdr->sizeof_addr /* Address of object */
+ + hdr->sizeof_size; /* Length of object */
+ bt2_class = H5HF_BT2_DIR;
+ } /* end else */
+ } /* end if */
+ else {
+        if(hdr->filter_len > 0) {
+ rrec_size = hdr->sizeof_addr /* Address of filtered object */
+ + hdr->sizeof_size /* Length of filtered object */
+ + 4 /* Filter mask for filtered object */
+ + hdr->sizeof_size /* Size of de-filtered object in memory */
+ + hdr->sizeof_size; /* Unique ID for object */
+ bt2_class = H5HF_BT2_FILT_INDIR;
+ } /* end if */
+ else {
+ rrec_size = hdr->sizeof_addr /* Address of object */
+ + hdr->sizeof_size /* Length of object */
+ + hdr->sizeof_size; /* Unique ID for object */
+ bt2_class = H5HF_BT2_INDIR;
+ } /* end else */
+ } /* end else */
/* Create v2 B-tree for tracking 'huge' objects */
- if(H5B2_create(hdr->f, dxpl_id, &hdr->huge_bt2_class, (size_t)H5HF_HUGE_BT2_NODE_SIZE, rrec_size,
+ if(H5B2_create(hdr->f, dxpl_id, bt2_class, (size_t)H5HF_HUGE_BT2_NODE_SIZE, rrec_size,
H5HF_HUGE_BT2_SPLIT_PERC, H5HF_HUGE_BT2_MERGE_PERC, &hdr->huge_bt2_addr/*out*/) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking 'huge' heap objects")
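Spelled out, the four raw record sizes computed above differ only in which optional fields a record carries: a 4-byte filter mask plus the de-filtered (memory) size when the heap is filtered, and a heap-wide unique ID when objects are reached indirectly through the v2 B-tree. A sketch of that arithmetic, with the address and length widths passed in as plain parameters:

#include <stddef.h>

/* Raw v2 B-tree record size for 'huge' object tracking (sketch of the four classes) */
static size_t huge_bt2_rrec_size(int ids_direct, int filtered,
    size_t sizeof_addr, size_t sizeof_size)
{
    size_t n = sizeof_addr + sizeof_size;   /* object address + length, common to all */

    if(filtered)
        n += 4 + sizeof_size;               /* filter mask + de-filtered size in memory */
    if(!ids_direct)
        n += sizeof_size;                   /* unique heap ID for indirect access */
    return n;
}

For example, with 8-byte addresses and lengths the four classes come out to 16 (direct), 28 (filtered direct), 24 (indirect) and 36 (filtered indirect) bytes per record.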
@@ -156,22 +195,38 @@ H5HF_huge_init(H5HF_hdr_t *hdr)
/* Compute information about 'huge' objects for the heap */
/* Check if we can completely hold the 'huge' object's offset & length in
- * the file in the heap ID (which will speed up accessing it)
+     * the file in the heap ID (which will speed up accessing it); when I/O
+     * filters are present the ID must also hold the filter mask and memory size.
*/
#ifdef QAK
HDfprintf(stderr, "%s: hdr->id_len = %u\n", "H5HF_huge_init", (unsigned)hdr->id_len);
+HDfprintf(stderr, "%s: hdr->filter_len = %u\n", "H5HF_huge_init", (unsigned)hdr->filter_len);
#endif /* QAK */
- if((hdr->sizeof_addr + hdr->sizeof_size) <= (hdr->id_len - 1)) {
- /* Indicate that v2 B-tree doesn't have to be used to locate object */
- hdr->huge_ids_direct = TRUE;
-
- /* Set the size of 'huge' object IDs */
- hdr->huge_id_size = hdr->sizeof_addr + hdr->sizeof_size;
+ if(hdr->filter_len > 0) {
+ if((hdr->id_len - 1) >= (hdr->sizeof_addr + hdr->sizeof_size + 4 + hdr->sizeof_size)) {
+ /* Indicate that v2 B-tree doesn't have to be used to locate object */
+ hdr->huge_ids_direct = TRUE;
+
+ /* Set the size of 'huge' object IDs */
+ hdr->huge_id_size = hdr->sizeof_addr + hdr->sizeof_size + hdr->sizeof_size;
+ } /* end if */
+ else
+ /* Indicate that v2 B-tree must be used to access object */
+ hdr->huge_ids_direct = FALSE;
} /* end if */
else {
- /* Indicate that v2 B-tree must be used to locate object */
- hdr->huge_ids_direct = FALSE;
-
+ if((hdr->sizeof_addr + hdr->sizeof_size) <= (hdr->id_len - 1)) {
+ /* Indicate that v2 B-tree doesn't have to be used to locate object */
+ hdr->huge_ids_direct = TRUE;
+
+ /* Set the size of 'huge' object IDs */
+ hdr->huge_id_size = hdr->sizeof_addr + hdr->sizeof_size;
+ } /* end if */
+ else
+ /* Indicate that v2 B-tree must be used to locate object */
+ hdr->huge_ids_direct = FALSE;
+ } /* end else */
+ if(!hdr->huge_ids_direct) {
/* Set the size and maximum value of 'huge' object ID */
if((hdr->id_len - 1) < sizeof(hsize_t)) {
hdr->huge_id_size = hdr->id_len - 1;
@@ -181,28 +236,65 @@ HDfprintf(stderr, "%s: hdr->id_len = %u\n", "H5HF_huge_init", (unsigned)hdr->id_
hdr->huge_id_size = sizeof(hsize_t);
hdr->huge_max_id = HSIZET_MAX;
} /* end else */
- } /* end else */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF_huge_init() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_huge_new_id
+ *
+ * Purpose: Determine a new ID for an indirectly accessed 'huge' object
+ * (either filtered or not)
+ *
+ * Return:	Success: new heap ID for the object; Failure: 0
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 15 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static hsize_t
+H5HF_huge_new_id(H5HF_hdr_t *hdr)
+{
+ hsize_t new_id; /* New object's ID */
+ hsize_t ret_value; /* Return value */
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_new_id)
- /* Set up the v2 B-tree for tracking 'huge' objects in the heap */
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
- /* Copy the standard v2 B-tree class */
- HDmemcpy(&hdr->huge_bt2_class, H5HF_BTREE2, sizeof(H5B2_class_t));
+ /* Check for wrapping around 'huge' object ID space */
+ if(hdr->huge_ids_wrapped)
+ /* Fail for now - eventually should iterate through v2 B-tree, looking for available ID */
+ HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, 0, "wrapping 'huge' object IDs not supported yet")
+ else {
+ /* Get new 'huge' object ID to use for object */
+ /* (avoids using ID 0) */
+ new_id = ++hdr->huge_next_id;
- /* Set the native record size for the v2 B-tree */
- hdr->huge_bt2_class.nrec_size = sizeof(H5HF_huge_bt2_rec_t);
+ /* Check for wrapping 'huge' object IDs around */
+ if(hdr->huge_next_id == hdr->huge_max_id)
+ hdr->huge_ids_wrapped = TRUE;
+ } /* end else */
- /* Set v2 B-tree class's "class private" pointer to the heap header */
- hdr->huge_bt2_class.cls_private = hdr;
+ /* Set return value */
+ ret_value = new_id;
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5HF_huge_init() */
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_huge_new_id() */
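H5HF_huge_new_id above hands out sequential non-zero IDs and merely flags exhaustion; reclaiming freed IDs by walking the v2 B-tree is left for later. The sketch below captures just that counter behavior; the struct and field names are illustrative, not the heap header's.

#include <stdint.h>

struct id_state {
    uint64_t next_id;       /* last ID handed out (starts at 0) */
    uint64_t max_id;        /* largest ID representable in the ID width */
    int      wrapped;       /* set once the counter reaches max_id */
};

/* Return a new non-zero ID, or 0 once the ID space is exhausted (sketch) */
static uint64_t new_huge_id(struct id_state *s)
{
    if(s->wrapped)
        return 0;               /* would need a search of the B-tree for a free ID */
    if(++s->next_id == s->max_id)
        s->wrapped = 1;         /* the last usable ID is being handed out now */
    return s->next_id;
}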
/*-------------------------------------------------------------------------
* Function: H5HF_huge_insert
*
- * Purpose: Insert a huge object into the file and track it
+ * Purpose: Insert a 'huge' object into the file and track it
*
* Return: SUCCEED/FAIL
*
@@ -213,12 +305,14 @@ HDfprintf(stderr, "%s: hdr->id_len = %u\n", "H5HF_huge_init", (unsigned)hdr->id_
*-------------------------------------------------------------------------
*/
herr_t
-H5HF_huge_insert(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t obj_size, const void *obj,
+H5HF_huge_insert(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t obj_size, void *obj,
void *_id)
{
- H5HF_huge_bt2_rec_t obj_rec; /* Record for tracking object */
uint8_t *id = (uint8_t *)_id; /* Pointer to ID buffer */
haddr_t obj_addr; /* Address of object in the file */
+ void *write_buf; /* Pointer to buffer to write */
+ size_t write_size; /* Size of [possibly filtered] object written to file */
+ unsigned filter_mask = 0; /* Filter mask for object (only used for filtered objects) */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_insert)
@@ -239,54 +333,142 @@ HDfprintf(stderr, "%s: obj_size = %Zu\n", FUNC, obj_size);
if(H5HF_huge_bt2_create(hdr, dxpl_id) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking 'huge' heap objects")
+ /* Check for I/O pipeline filter on heap */
+ if(hdr->filter_len > 0) {
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+ size_t nbytes; /* Number of bytes used */
+
+ /* Allocate buffer to perform I/O filtering on */
+ write_size = obj_size;
+ if(NULL == (write_buf = H5MM_malloc(write_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
+ HDmemcpy(write_buf, obj, write_size);
+
+ /* Push direct block data through I/O filter pipeline */
+ nbytes = write_size;
+ if(H5Z_pipeline(&(hdr->pline), 0, &filter_mask, H5Z_NO_EDC,
+ filter_cb, &nbytes, &write_size, &write_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, FAIL, "output pipeline failed")
+#ifdef QAK
+HDfprintf(stderr, "%s: nbytes = %Zu, write_size = %Zu, write_buf = %p\n", FUNC, nbytes, write_size, write_buf);
+HDfprintf(stderr, "%s: obj_size = %Zu, obj = %p\n", FUNC, obj_size, obj);
+#endif /* QAK */
+
+ /* Update size of object on disk */
+ write_size = nbytes;
+ } /* end if */
+ else {
+ write_buf = obj;
+ write_size = obj_size;
+ } /* end else */
+
/* Allocate space in the file for storing the 'huge' object */
- if(HADDR_UNDEF == (obj_addr = H5MF_alloc(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, dxpl_id, (hsize_t)obj_size)))
+ if(HADDR_UNDEF == (obj_addr = H5MF_alloc(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, dxpl_id, (hsize_t)write_size)))
HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap huge object")
/* Write the object's data to disk */
- if(H5F_block_write(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, obj_addr, obj_size, dxpl_id, obj) < 0)
+ if(H5F_block_write(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, obj_addr, write_size, dxpl_id, write_buf) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "writing 'huge' object to file failed")
- /* Initialize shared part of record for tracking object in v2 B-tree */
- obj_rec.addr = obj_addr;
- obj_rec.len = obj_size;
+ /* Release buffer for writing, if we had one */
+ if(write_buf != obj) {
+ HDassert(hdr->filter_len > 0);
+ H5MM_xfree(write_buf);
+ } /* end if */
+
+ /* Perform different actions for directly & indirectly accessed 'huge' objects */
+ if(hdr->huge_ids_direct) {
+ if(hdr->filter_len > 0) {
+ H5HF_huge_bt2_filt_dir_rec_t obj_rec; /* Record for tracking object */
+
+ /* Initialize record for tracking object in v2 B-tree */
+ obj_rec.addr = obj_addr;
+ obj_rec.len = write_size;
+ obj_rec.filter_mask = filter_mask;
+ obj_rec.obj_size = obj_size;
+#ifdef QAK
+HDfprintf(stderr, "%s: obj_rec = {%a, %Hu, %x, %Hu}\n", FUNC, obj_rec.addr, obj_rec.len, obj_rec.filter_mask, obj_rec.obj_size);
+#endif /* QAK */
- /* If the 'huge' object will be indirectly accessed, through the v2 B-tree,
- * create an ID for it, otherwise put a zero in for ID
- */
- if(hdr->huge_ids_direct)
- obj_rec.id = 0;
- else {
- /* Check for wrapping around 'huge' object ID space */
- if(hdr->huge_ids_wrapped)
- /* Fail for now - eventually should iterate through v2 B-tree, looking for available ID */
- HGOTO_ERROR(H5E_HEAP, H5E_UNSUPPORTED, FAIL, "wrapping 'huge' object IDs not supported yet")
+ /* Insert record for object in v2 B-tree */
+ if(H5B2_insert(hdr->f, dxpl_id, H5HF_BT2_FILT_DIR, hdr->huge_bt2_addr, &obj_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "couldn't insert object tracking record in v2 B-tree")
+
+ /* Encode ID for user */
+ *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_HUGE;
+ H5F_addr_encode(hdr->f, &id, obj_addr);
+ H5F_ENCODE_LENGTH(hdr->f, id, (hsize_t)write_size);
+ UINT32ENCODE(id, filter_mask);
+ H5F_ENCODE_LENGTH(hdr->f, id, (hsize_t)obj_size);
+ } /* end if */
else {
- /* Get new 'huge' object ID to use for object */
- /* (avoid using ID 0) */
- obj_rec.id = ++hdr->huge_next_id;
+ H5HF_huge_bt2_dir_rec_t obj_rec; /* Record for tracking object */
- /* Check for wrapping 'huge' object IDs around */
- if(hdr->huge_next_id == hdr->huge_max_id)
- hdr->huge_ids_wrapped = TRUE;
- } /* end else */
- } /* end else */
+ /* Initialize record for tracking object in v2 B-tree */
+ obj_rec.addr = obj_addr;
+ obj_rec.len = write_size;
#ifdef QAK
-HDfprintf(stderr, "%s: obj_rec = {%a, %Hu, %Hu}\n", FUNC, obj_rec.addr, obj_rec.len, obj_rec.id);
+HDfprintf(stderr, "%s: obj_rec = {%a, %Hu}\n", FUNC, obj_rec.addr, obj_rec.len);
#endif /* QAK */
- /* Insert record for object in v2 B-tree */
- if(H5B2_insert(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr, &obj_rec) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "couldn't insert object tracking record in v2 B-tree")
+ /* Insert record for object in v2 B-tree */
+ if(H5B2_insert(hdr->f, dxpl_id, H5HF_BT2_DIR, hdr->huge_bt2_addr, &obj_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "couldn't insert object tracking record in v2 B-tree")
- /* Encode ID for user */
- *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_HUGE;
- if(hdr->huge_ids_direct) {
- H5F_addr_encode(hdr->f, &id, obj_addr);
- H5F_ENCODE_LENGTH(hdr->f, id, (hsize_t)obj_size);
+ /* Encode ID for user */
+ *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_HUGE;
+ H5F_addr_encode(hdr->f, &id, obj_addr);
+ H5F_ENCODE_LENGTH(hdr->f, id, (hsize_t)write_size);
+        } /* end else */
} /* end if */
- else
- UINT64ENCODE_VAR(id, obj_rec.id, hdr->huge_id_size)
+ else {
+ H5HF_huge_bt2_filt_indir_rec_t filt_indir_rec; /* Record for tracking filtered object */
+ H5HF_huge_bt2_indir_rec_t indir_rec; /* Record for tracking non-filtered object */
+ const H5B2_class_t *bt2_class; /* v2 B-tree class to use */
+ void *ins_rec; /* Pointer to record to insert */
+ hsize_t new_id; /* New ID for object */
+
+ /* Get new ID for object */
+ if(0 == (new_id = H5HF_huge_new_id(hdr)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't generate new ID for object")
+
+ if(hdr->filter_len > 0) {
+ /* Initialize record for object in v2 B-tree */
+ filt_indir_rec.addr = obj_addr;
+ filt_indir_rec.len = write_size;
+ filt_indir_rec.filter_mask = filter_mask;
+ filt_indir_rec.obj_size = obj_size;
+ filt_indir_rec.id = new_id;
+#ifdef QAK
+HDfprintf(stderr, "%s: filt_indir_rec = {%a, %Hu, %x, %Hu, %Hu}\n", FUNC, filt_indir_rec.addr, filt_indir_rec.len, filt_indir_rec.filter_mask, filt_indir_rec.obj_size, filt_indir_rec.id);
+#endif /* QAK */
+
+ /* Set pointer to record to insert */
+ ins_rec = &filt_indir_rec;
+ bt2_class = H5HF_BT2_FILT_INDIR;
+ } /* end if */
+ else {
+ /* Initialize record for object in v2 B-tree */
+ indir_rec.addr = obj_addr;
+ indir_rec.len = write_size;
+ indir_rec.id = new_id;
+#ifdef QAK
+HDfprintf(stderr, "%s: indir_rec = {%a, %Hu, %Hu}\n", FUNC, indir_rec.addr, indir_rec.len, indir_rec.id);
+#endif /* QAK */
+
+ /* Set pointer to record to insert */
+ ins_rec = &indir_rec;
+ bt2_class = H5HF_BT2_INDIR;
+ } /* end else */
+
+ /* Insert record for tracking object in v2 B-tree */
+ if(H5B2_insert(hdr->f, dxpl_id, bt2_class, hdr->huge_bt2_addr, ins_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "couldn't insert object tracking record in v2 B-tree")
+
+ /* Encode ID for user */
+ *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_HUGE;
+ UINT64ENCODE_VAR(id, new_id, hdr->huge_id_size)
+ } /* end else */
/* Update statistics about heap */
hdr->huge_size += obj_size;
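For directly accessed 'huge' objects the heap ID produced above is self-describing: a flag byte, the file address, the on-disk (possibly filtered) length, and, for filtered heaps, the 32-bit filter mask plus the de-filtered size in memory. The sketch below lays those bytes out with a little-endian helper standing in for the H5F encode macros; the helper and the fixed byte order are assumptions for illustration.

#include <stdint.h>
#include <stddef.h>

static uint8_t *put_le(uint8_t *p, uint64_t v, unsigned width)
{
    while(width--) { *p++ = (uint8_t)(v & 0xff); v >>= 8; }
    return p;
}

/* Encode a direct-access 'huge' heap ID; returns the number of bytes written (sketch) */
static size_t encode_huge_direct_id(uint8_t *id, uint8_t flags,
    uint64_t obj_addr, uint64_t disk_len, uint32_t filter_mask, uint64_t mem_len,
    unsigned sizeof_addr, unsigned sizeof_size, int filtered)
{
    uint8_t *p = id;

    *p++ = flags;                               /* version + 'huge' type bits */
    p = put_le(p, obj_addr, sizeof_addr);       /* where the object lives in the file */
    p = put_le(p, disk_len, sizeof_size);       /* length as stored (after filtering) */
    if(filtered) {
        p = put_le(p, filter_mask, 4);          /* which filters were skipped */
        p = put_le(p, mem_len, sizeof_size);    /* de-filtered size in memory */
    }
    return (size_t)(p - id);
}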
@@ -320,7 +502,7 @@ H5HF_huge_get_obj_len(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5HF_huge_get_obj_len, FAIL)
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_get_obj_len)
/*
* Check arguments.
@@ -330,28 +512,57 @@ H5HF_huge_get_obj_len(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
HDassert(id);
HDassert(obj_len_p);
+ /* Skip over the flag byte */
+ id++;
+
/* Check if 'huge' object ID encodes address & length directly */
if(hdr->huge_ids_direct) {
- /* Skip over object offset in file */
- id += hdr->sizeof_addr;
+ if(hdr->filter_len > 0) {
+ /* Skip over filtered object info */
+ id += hdr->sizeof_addr + hdr->sizeof_size + 4;
+
+ /* Retrieve the object's length */
+ H5F_DECODE_LENGTH(hdr->f, id, *obj_len_p);
+ } /* end if */
+ else {
+ /* Skip over object offset in file */
+ id += hdr->sizeof_addr;
- /* Retrieve the object's length */
- H5F_DECODE_LENGTH(hdr->f, id, *obj_len_p);
+ /* Retrieve the object's length */
+ H5F_DECODE_LENGTH(hdr->f, id, *obj_len_p);
+ } /* end else */
} /* end if */
else {
- H5HF_huge_bt2_rec_t found_rec; /* Record found from tracking object */
- H5HF_huge_bt2_rec_t search_rec; /* Record for searching for object */
+ if(hdr->filter_len > 0) {
+ H5HF_huge_bt2_filt_indir_rec_t found_rec; /* Record found from tracking object */
+ H5HF_huge_bt2_filt_indir_rec_t search_rec; /* Record for searching for object */
+
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+
+ /* Look up object in v2 B-tree */
+ if(H5B2_find(hdr->f, dxpl_id, H5HF_BT2_FILT_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_filt_indir_found, &found_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
+
+ /* Retrieve the object's length */
+ *obj_len_p = (size_t)found_rec.obj_size;
+ } /* end if */
+ else {
+ H5HF_huge_bt2_indir_rec_t found_rec; /* Record found from tracking object */
+ H5HF_huge_bt2_indir_rec_t search_rec; /* Record for searching for object */
- /* Get ID for looking up 'huge' object in v2 B-tree */
- UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
- /* Look up object in v2 B-tree */
- if(H5B2_find(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr,
- &search_rec, H5HF_huge_bt2_found, &found_rec) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
+ /* Look up object in v2 B-tree */
+ if(H5B2_find(hdr->f, dxpl_id, H5HF_BT2_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_indir_found, &found_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
- /* Retrieve the object's length */
- *obj_len_p = (size_t)found_rec.len;
+ /* Retrieve the object's length */
+ *obj_len_p = (size_t)found_rec.len;
+ } /* end else */
} /* end else */
done:
@@ -375,8 +586,10 @@ done:
herr_t
H5HF_huge_read(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id, void *obj)
{
+ void *read_buf; /* Pointer to buffer for reading */
haddr_t obj_addr; /* Object's address in the file */
hsize_t obj_size = 0; /* Object's size in the file */
+ unsigned filter_mask = 0; /* Filter mask for object (only used for filtered objects) */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5HF_huge_read)
@@ -388,33 +601,85 @@ H5HF_huge_read(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id, void *obj)
HDassert(id);
HDassert(obj);
+ /* Skip over the flag byte */
+ id++;
+
/* Check for 'huge' object ID that encodes address & length directly */
if(hdr->huge_ids_direct) {
- /* Retrieve the object's address and length */
+ /* Retrieve the object's address and length (common) */
H5F_addr_decode(hdr->f, &id, &obj_addr);
H5F_DECODE_LENGTH(hdr->f, id, obj_size);
+
+ /* Retrieve extra information needed for filtered objects */
+ if(hdr->filter_len > 0)
+ UINT32DECODE(id, filter_mask);
} /* end if */
else {
- H5HF_huge_bt2_rec_t found_rec; /* Record found from tracking object */
- H5HF_huge_bt2_rec_t search_rec; /* Record for searching for object */
+ if(hdr->filter_len > 0) {
+ H5HF_huge_bt2_filt_indir_rec_t found_rec; /* Record found from tracking object */
+ H5HF_huge_bt2_filt_indir_rec_t search_rec; /* Record for searching for object */
+
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+
+ /* Look up object in v2 B-tree */
+ if(H5B2_find(hdr->f, dxpl_id, H5HF_BT2_FILT_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_filt_indir_found, &found_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
+
+ /* Retrieve the object's address & length */
+ obj_addr = found_rec.addr;
+ obj_size = found_rec.len;
+ filter_mask = found_rec.filter_mask;
+ } /* end if */
+ else {
+ H5HF_huge_bt2_indir_rec_t found_rec; /* Record found from tracking object */
+ H5HF_huge_bt2_indir_rec_t search_rec; /* Record for searching for object */
- /* Get ID for looking up 'huge' object in v2 B-tree */
- UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
- /* Look up object in v2 B-tree */
- if(H5B2_find(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr,
- &search_rec, H5HF_huge_bt2_found, &found_rec) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
+ /* Look up object in v2 B-tree */
+ if(H5B2_find(hdr->f, dxpl_id, H5HF_BT2_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_indir_found, &found_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in B-tree")
- /* Retrieve the object's address & length */
- obj_addr = found_rec.addr;
- obj_size = found_rec.len;
+ /* Retrieve the object's address & length */
+ obj_addr = found_rec.addr;
+ obj_size = found_rec.len;
+ } /* end else */
} /* end else */
- /* Read the object's data from the file */
- if (H5F_block_read(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, obj_addr, (size_t)obj_size, dxpl_id, obj) < 0)
+ /* Set up buffer for reading */
+ if(hdr->filter_len > 0) {
+ if(NULL == (read_buf = H5MM_malloc((size_t)obj_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
+ } /* end if */
+ else
+ read_buf = obj;
+
+ /* Read the object's (possibly filtered) data from the file */
+ if(H5F_block_read(hdr->f, H5FD_MEM_FHEAP_HUGE_OBJ, obj_addr, (size_t)obj_size, dxpl_id, read_buf) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_READERROR, FAIL, "can't read 'huge' object's data from the file")
+ /* Check for I/O pipeline filter on heap */
+ if(hdr->filter_len > 0) {
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+        size_t read_size;       /* Allocated size of the read buffer */
+ size_t nbytes; /* Number of bytes used */
+
+ /* De-filter the object */
+ read_size = nbytes = obj_size;
+ if(H5Z_pipeline(&(hdr->pline), H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, filter_cb, &nbytes, &read_size, &read_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, FAIL, "input filter failed")
+
+ /* Copy object to user's buffer */
+ HDmemcpy(obj, read_buf, nbytes);
+
+ /* Release read buffer */
+ H5MM_xfree(read_buf);
+ } /* end if */
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF_huge_read() */
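Reading mirrors insertion: unfiltered objects are read straight into the caller's buffer, while filtered objects go through a scratch buffer, are pushed back through the pipeline in reverse, and are then copied out. A compressed sketch of that flow; the toy copy-through routine and stdio I/O stand in for H5Z_pipeline and H5F_block_read and are assumptions, not the HDF5 calls.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for running the pipeline in reverse: a plain copy (assumption) */
static int toy_unfilter(void *buf, size_t disk_size, size_t *mem_size)
{
    (void)buf;                  /* a real filter would expand/verify the data here */
    *mem_size = disk_size;
    return 0;
}

/* Read a (possibly filtered) 'huge' object into the caller's buffer (sketch) */
static int read_huge_object(FILE *f, long addr, size_t disk_size, int filtered, void *obj)
{
    void *read_buf = obj;       /* unfiltered data can land directly in 'obj' */
    size_t mem_size = disk_size;
    int ret = 0;

    if(filtered && NULL == (read_buf = malloc(disk_size)))
        return -1;

    if(fseek(f, addr, SEEK_SET) < 0 || fread(read_buf, 1, disk_size, f) != disk_size)
        ret = -1;
    else if(filtered) {
        if(toy_unfilter(read_buf, disk_size, &mem_size) < 0)
            ret = -1;
        else
            memcpy(obj, read_buf, mem_size);    /* hand the de-filtered bytes back */
    }

    if(read_buf != obj)         /* release the scratch buffer only if allocated */
        free(read_buf);
    return ret;
}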
@@ -436,7 +701,6 @@ done:
herr_t
H5HF_huge_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
{
- H5HF_huge_bt2_rec_t search_rec; /* Record for searching for object */
H5HF_huge_remove_ud1_t udata; /* User callback data for v2 B-tree remove call */
herr_t ret_value = SUCCEED; /* Return value */
@@ -448,26 +712,70 @@ H5HF_huge_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
HDassert(hdr);
HDassert(id);
+ /* Skip over the flag byte */
+ id++;
+
+ /* Set up the common callback info */
+ udata.hdr = hdr;
+ udata.dxpl_id = dxpl_id;
+
/* Check for 'huge' object ID that encodes address & length directly */
if(hdr->huge_ids_direct) {
- /* Retrieve the object's address and length */
- /* (used as key in v2 B-tree record) */
- H5F_addr_decode(hdr->f, &id, &search_rec.addr);
- H5F_DECODE_LENGTH(hdr->f, id, search_rec.len);
+ if(hdr->filter_len > 0) {
+ H5HF_huge_bt2_filt_dir_rec_t search_rec; /* Record for searching for object */
+
+ /* Retrieve the object's address and length */
+ /* (used as key in v2 B-tree record) */
+ H5F_addr_decode(hdr->f, &id, &search_rec.addr);
+ H5F_DECODE_LENGTH(hdr->f, id, search_rec.len);
+
+ /* Remove the record for tracking the 'huge' object from the v2 B-tree */
+ /* (space in the file for the object is freed in the 'remove' callback) */
+ if(H5B2_remove(hdr->f, dxpl_id, H5HF_BT2_FILT_DIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_filt_dir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+ } /* end if */
+ else {
+ H5HF_huge_bt2_dir_rec_t search_rec; /* Record for searching for object */
+
+ /* Retrieve the object's address and length */
+ /* (used as key in v2 B-tree record) */
+ H5F_addr_decode(hdr->f, &id, &search_rec.addr);
+ H5F_DECODE_LENGTH(hdr->f, id, search_rec.len);
+
+ /* Remove the record for tracking the 'huge' object from the v2 B-tree */
+ /* (space in the file for the object is freed in the 'remove' callback) */
+ if(H5B2_remove(hdr->f, dxpl_id, H5HF_BT2_DIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_dir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+ } /* end else */
} /* end if */
- else
- /* Get ID for looking up 'huge' object in v2 B-tree */
- UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+ else {
+ if(hdr->filter_len > 0) {
+ H5HF_huge_bt2_filt_indir_rec_t search_rec; /* Record for searching for object */
+
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
+
+ /* Remove the record for tracking the 'huge' object from the v2 B-tree */
+ /* (space in the file for the object is freed in the 'remove' callback) */
+ if(H5B2_remove(hdr->f, dxpl_id, H5HF_BT2_FILT_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_filt_indir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+ } /* end if */
+ else {
+ H5HF_huge_bt2_indir_rec_t search_rec; /* Record for searching for object */
- /* Set up the callback info */
- udata.hdr = hdr;
- udata.dxpl_id = dxpl_id;
+ /* Get ID for looking up 'huge' object in v2 B-tree */
+ UINT64DECODE_VAR(id, search_rec.id, hdr->huge_id_size)
- /* Remove the record for tracking the 'huge' object from the v2 B-tree */
- /* (space in the file for the object is freed in the 'remove' callback) */
- if(H5B2_remove(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr,
- &search_rec, H5HF_huge_bt2_remove, &udata) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+ /* Remove the record for tracking the 'huge' object from the v2 B-tree */
+ /* (space in the file for the object is freed in the 'remove' callback) */
+ if(H5B2_remove(hdr->f, dxpl_id, H5HF_BT2_INDIR, hdr->huge_bt2_addr,
+ &search_rec, H5HF_huge_bt2_indir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+ } /* end else */
+ } /* end else */
/* Update statistics about heap */
hdr->huge_size -= udata.obj_len;
@@ -515,7 +823,8 @@ H5HF_huge_term(H5HF_hdr_t *hdr, hid_t dxpl_id)
HDassert(hdr->huge_size == 0);
/* Delete the v2 B-tree */
- if(H5B2_delete(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr, NULL, NULL) < 0)
+ /* (any v2 B-tree class will work here) */
+ if(H5B2_delete(hdr->f, dxpl_id, H5HF_BT2_INDIR, hdr->huge_bt2_addr, NULL, NULL) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
/* Reset the information about 'huge' objects in the file */
@@ -568,8 +877,26 @@ H5HF_huge_delete(H5HF_hdr_t *hdr, hid_t dxpl_id)
udata.dxpl_id = dxpl_id;
/* Delete the v2 B-tree */
- if(H5B2_delete(hdr->f, dxpl_id, &hdr->huge_bt2_class, hdr->huge_bt2_addr, H5HF_huge_bt2_remove, &udata) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+ if(hdr->huge_ids_direct) {
+ if(hdr->filter_len > 0) {
+ if(H5B2_delete(hdr->f, dxpl_id, H5HF_BT2_FILT_DIR, hdr->huge_bt2_addr, H5HF_huge_bt2_filt_dir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+ } /* end if */
+ else {
+ if(H5B2_delete(hdr->f, dxpl_id, H5HF_BT2_DIR, hdr->huge_bt2_addr, H5HF_huge_bt2_dir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+ } /* end else */
+ } /* end if */
+ else {
+ if(hdr->filter_len > 0) {
+ if(H5B2_delete(hdr->f, dxpl_id, H5HF_BT2_FILT_INDIR, hdr->huge_bt2_addr, H5HF_huge_bt2_filt_indir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+ } /* end if */
+ else {
+ if(H5B2_delete(hdr->f, dxpl_id, H5HF_BT2_INDIR, hdr->huge_bt2_addr, H5HF_huge_bt2_indir_remove, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+ } /* end else */
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c
index 7bb7884..70e3aac 100644
--- a/src/H5HFiblock.c
+++ b/src/H5HFiblock.c
@@ -70,6 +70,9 @@ H5FL_DEFINE(H5HF_indirect_t);
/* Declare a free list to manage the H5HF_indirect_ent_t sequence information */
H5FL_SEQ_DEFINE(H5HF_indirect_ent_t);
+/* Declare a free list to manage the H5HF_indirect_filt_ent_t sequence information */
+H5FL_SEQ_DEFINE(H5HF_indirect_filt_ent_t);
+
/*****************************/
/* Library Private Variables */
@@ -154,24 +157,12 @@ HDfprintf(stderr, "%s: iblock->block_off = %Hu\n", FUNC, iblock->block_off);
/* Mark block as evictable again when no child blocks depend on it */
if(iblock->rc == 0) {
- H5HF_indirect_t *tmp_iblock = NULL; /* Temporary pointer to indirect block */
-
#ifdef QAK
HDfprintf(stderr, "%s: indirect block ref. count at zero, iblock->addr = %a\n", FUNC, iblock->addr);
#endif /* QAK */
- /* Lock indirect block */
- if(iblock->nchildren == 0) {
- if(NULL == (tmp_iblock = H5HF_man_iblock_protect(iblock->hdr, H5AC_dxpl_id, iblock->addr, iblock->nrows, NULL, 0, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
- HDassert(tmp_iblock == iblock);
- } /* end if */
-
if(H5AC_unpin_entry(iblock->hdr->f, iblock) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPIN, FAIL, "unable to unpin fractal heap indirect block")
-/* XXX: If the indirect block has no children, delete indirect block's entry
- * from cache.
- */
if(iblock->nchildren == 0) {
#ifdef QAK
HDfprintf(stderr, "%s: Removing indirect block from cache, iblock->addr = %a\n", FUNC, iblock->addr);
@@ -200,10 +191,9 @@ HDfprintf(stderr, "%s: Removing indirect block from cache, iblock->addr = %a\n",
if(H5MF_xfree(iblock->hdr->f, H5FD_MEM_FHEAP_IBLOCK, H5AC_dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block disk space")
- /* Unlock indirect block with delete flag */
- if(H5AC_unprotect(iblock->hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock->addr, tmp_iblock, H5AC__DIRTIED_FLAG|H5AC__DELETED_FLAG) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to release fractal heap indirect block")
- tmp_iblock = NULL;
+ /* Evict the indirect block from the metadata cache */
+ if(H5AC_expunge_entry(iblock->hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock->addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove indirect block from cache")
} /* end if */
} /* end if */
@@ -488,15 +478,14 @@ HDfprintf(stderr, "%s: Check 1.0 - iblock->addr = %a, new_addr = %a\n", FUNC, ib
iblock->addr = new_addr;
} /* end if */
- /* Re-allocate direct block entry table */
+ /* Re-allocate child block entry array */
if(NULL == (iblock->ents = H5FL_SEQ_REALLOC(H5HF_indirect_ent_t, iblock->ents, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for direct entries")
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for direct entries")
/* Check for skipping over rows and add free section for skipped rows */
if(skip_direct_rows) {
/* Add skipped blocks to heap's free space */
- if(H5HF_hdr_skip_blocks(hdr, dxpl_id, iblock, next_entry,
- (new_next_entry - next_entry)) < 0)
+ if(H5HF_hdr_skip_blocks(hdr, dxpl_id, iblock, next_entry, (new_next_entry - next_entry)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDEC, FAIL, "can't add skipped blocks to heap's free space")
} /* end if */
@@ -509,6 +498,26 @@ HDfprintf(stderr, "%s: Check 1.0 - iblock->addr = %a, new_addr = %a\n", FUNC, ib
acc_dblock_free += hdr->man_dtable.row_tot_dblock_free[row];
} /* end for */
+ /* Check for needing to re-allocate filtered entry array */
+ if(hdr->filter_len > 0 && old_nrows < hdr->man_dtable.max_direct_rows) {
+ unsigned dir_rows; /* Number of direct rows in this indirect block */
+
+ /* Compute the number of direct rows for this indirect block */
+ dir_rows = MIN(iblock->nrows, hdr->man_dtable.max_direct_rows);
+HDfprintf(stderr, "%s: dir_rows = %u\n", FUNC, dir_rows);
+ HDassert(dir_rows > old_nrows);
+
+ /* Re-allocate filtered direct block entry array */
+ if(NULL == (iblock->filt_ents = H5FL_SEQ_REALLOC(H5HF_indirect_filt_ent_t, iblock->filt_ents, (size_t)(dir_rows * hdr->man_dtable.cparam.width))))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for filtered direct entries")
+
+ /* Initialize new entries allocated */
+ for(u = (old_nrows * hdr->man_dtable.cparam.width); u < (dir_rows * hdr->man_dtable.cparam.width); u++) {
+ iblock->filt_ents[u].size = 0;
+ iblock->filt_ents[u].filter_mask = 0;
+ } /* end for */
+ } /* end if */
+
/* Mark indirect block as dirty */
if(H5HF_iblock_dirty(iblock) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark indirect block as dirty")
@@ -612,10 +621,17 @@ HDfprintf(stderr, "%s: new_addr = %a\n", FUNC, new_addr);
iblock->addr = new_addr;
} /* end if */
- /* Re-allocate direct block entry table */
+ /* Re-allocate child block entry array */
if(NULL == (iblock->ents = H5FL_SEQ_REALLOC(H5HF_indirect_ent_t, iblock->ents, (iblock->nrows * hdr->man_dtable.cparam.width))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for direct entries")
+ /* Check for needing to re-allocate filtered entry array */
+ if(hdr->filter_len > 0 && new_nrows < hdr->man_dtable.max_direct_rows) {
+ /* Re-allocate filtered direct block entry array */
+ if(NULL == (iblock->filt_ents = H5FL_SEQ_REALLOC(H5HF_indirect_filt_ent_t, iblock->filt_ents, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for filtered direct entries")
+ } /* end if */
+
/* Mark indirect block as dirty */
if(H5HF_iblock_dirty(iblock) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark indirect block as dirty")
@@ -842,7 +858,7 @@ HDfprintf(stderr, "%s: nrows = %u, max_rows = %u\n", FUNC, nrows, max_rows);
/* Compute size of buffer needed for indirect block */
iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
- /* Allocate indirect block entry tables */
+ /* Allocate child block entry array */
if(NULL == (iblock->ents = H5FL_SEQ_MALLOC(H5HF_indirect_ent_t, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for block entries")
@@ -850,6 +866,23 @@ HDfprintf(stderr, "%s: nrows = %u, max_rows = %u\n", FUNC, nrows, max_rows);
for(u = 0; u < (iblock->nrows * hdr->man_dtable.cparam.width); u++)
iblock->ents[u].addr = HADDR_UNDEF;
+ /* Check for I/O filters to apply to this heap */
+ if(hdr->filter_len > 0) {
+ unsigned dir_rows; /* Number of direct rows in this indirect block */
+
+ /* Compute the number of direct rows for this indirect block */
+ dir_rows = MIN(iblock->nrows, hdr->man_dtable.max_direct_rows);
+#ifdef QAK
+HDfprintf(stderr, "%s: dir_rows = %u\n", FUNC, dir_rows);
+#endif /* QAK */
+
+ /* Allocate & initialize indirect block filtered entry array */
+ if(NULL == (iblock->filt_ents = H5FL_SEQ_CALLOC(H5HF_indirect_filt_ent_t, (size_t)(dir_rows * hdr->man_dtable.cparam.width))))
+            HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for filtered block entries")
+ } /* end if */
+ else
+ iblock->filt_ents = NULL;
+
/* Allocate space for the indirect block on disk */
if(HADDR_UNDEF == (*addr_p = H5MF_alloc(hdr->f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, (hsize_t)iblock->size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for fractal heap indirect block")
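Only rows that can hold direct blocks ever need the extra (size, filter mask) bookkeeping, so the filtered-entry array allocated above is sized by MIN(nrows, max_direct_rows) * width rather than by the full entry count, and calloc gives it the zeroed initial state the heap expects. A small sketch of that sizing; the struct name and the NULL-for-unfiltered convention are illustrative choices.

#include <stdlib.h>

struct filt_ent {               /* per-direct-block filtered info (sketch) */
    size_t   size;              /* on-disk size of the filtered direct block */
    unsigned filter_mask;       /* filters skipped when writing that block */
};

/* Allocate the filtered-entry array for an indirect block; NULL when unfiltered */
static struct filt_ent *alloc_filt_ents(unsigned nrows, unsigned max_direct_rows,
    unsigned width, int have_filters)
{
    unsigned dir_rows;

    if(!have_filters)
        return NULL;                                    /* nothing extra to track */
    dir_rows = nrows < max_direct_rows ? nrows : max_direct_rows;
    return calloc((size_t)dir_rows * width, sizeof(struct filt_ent)); /* zero-initialized */
}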
@@ -979,6 +1012,21 @@ HDfprintf(stderr, "%s: iblock->block_off = %Hu, iblock->nchildren = %u\n", FUNC,
/* Point at the direct block */
iblock->ents[entry].addr = child_addr;
+ /* Check for I/O filters on this heap */
+ if(iblock->hdr->filter_len > 0) {
+ unsigned row; /* Row for entry */
+
+ /* Sanity check */
+ HDassert(iblock->filt_ents);
+
+ /* Compute row for entry */
+ row = entry / iblock->hdr->man_dtable.cparam.width;
+
+ /* If this is a direct block, set its initial size */
+ if(row < iblock->hdr->man_dtable.max_direct_rows)
+ iblock->filt_ents[entry].size = iblock->hdr->man_dtable.row_block_size[row];
+ } /* end if */
+
/* Check for max. entry used */
if(entry > iblock->max_child)
iblock->max_child = entry;
@@ -1028,9 +1076,26 @@ HDfprintf(stderr, "%s: iblock->block_off = %Hu, iblock->nchildren = %u\n", FUNC,
/* Reset address of entry */
iblock->ents[entry].addr = HADDR_UNDEF;
+ /* Check for I/O filters on this heap */
+ if(iblock->hdr->filter_len > 0) {
+ unsigned row; /* Row for entry */
+
+ /* Sanity check */
+ HDassert(iblock->filt_ents);
+
+ /* Compute row for entry */
+ row = entry / iblock->hdr->man_dtable.cparam.width;
+
+        /* If this is a direct block, reset its size and filter mask */
+ if(row < iblock->hdr->man_dtable.max_direct_rows) {
+ iblock->filt_ents[entry].size = 0;
+ iblock->filt_ents[entry].filter_mask = 0;
+ } /* end if */
+ } /* end if */
+
/* Decrement the # of child blocks */
/* (If the number of children drop to 0, the indirect block will be
- * removed from the heap when it's ref. count drops to zero and the
+ * removed from the heap when its ref. count drops to zero and the
* metadata cache calls the indirect block destructor)
*/
iblock->nchildren--;
@@ -1113,7 +1178,7 @@ H5HF_man_iblock_entry_addr(H5HF_indirect_t *iblock, unsigned entry, haddr_t *chi
HDassert(iblock);
HDassert(child_addr);
- /* Reset address of entry */
+ /* Retrieve address of entry */
*child_addr = iblock->ents[entry].addr;
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5HFman.c b/src/H5HFman.c
index 30eb31c..38e8d43 100644
--- a/src/H5HFman.c
+++ b/src/H5HFman.c
@@ -18,7 +18,7 @@
* Feb 24 2006
* Quincey Koziol <koziol@ncsa.uiuc.edu>
*
- * Purpose: "Internal" routines for fractal heaps.
+ * Purpose: "Managed" object routines for fractal heaps.
*
*-------------------------------------------------------------------------
*/
@@ -394,6 +394,9 @@ H5HF_man_read(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id, void *obj)
HDassert(id);
HDassert(obj);
+ /* Skip over the flag byte */
+ id++;
+
/* Decode the object offset within the heap & its length */
UINT64DECODE_VAR(id, obj_off, hdr->heap_off_size);
UINT64DECODE_VAR(id, obj_len, hdr->heap_len_size);
@@ -494,6 +497,9 @@ H5HF_man_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
HDassert(hdr);
HDassert(id);
+ /* Skip over the flag byte */
+ id++;
+
/* Decode the object offset within the heap & its length */
#ifdef QAK
HDfprintf(stderr, "%s: fh->hdr->heap_off_size = %u, fh->hdr->heap_len_size = %u\n", FUNC, (unsigned)fh->hdr->heap_off_size, (unsigned)fh->hdr->heap_len_size);
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index 21e5468..947f3ad 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -72,6 +72,9 @@
#define H5HF_HDR_FLAGS_HUGE_ID_WRAPPED 0x01
/* Size of the fractal heap header on disk */
+/* (this is the fixed-length portion; the variable-length I/O filter information
+ * follows it, if the heap has I/O filters)
+ */
#define H5HF_HEADER_SIZE(h) ( \
/* General metadata fields */ \
H5HF_METADATA_PREFIX_SIZE \
@@ -80,6 +83,7 @@
\
/* General heap information */ \
+ 2 /* Heap ID len */ \
+ + 2 /* I/O filters' encoded len */ \
+ 1 /* Status flags */ \
\
/* "Huge" object fields */ \
@@ -98,6 +102,8 @@
+ (h)->sizeof_size /* Number of man. objects in heap */ \
+ (h)->sizeof_size /* Size of huge space in heap */ \
+ (h)->sizeof_size /* Number of huge objects in heap */ \
+ + (h)->sizeof_size /* Size of tiny space in heap */ \
+ + (h)->sizeof_size /* Number of tiny objects in heap */ \
\
/* "Managed" object doubling table info */ \
+ H5HF_DTABLE_INFO_SIZE(h) /* Size of managed obj. doubling-table info */ \
@@ -113,7 +119,14 @@
+ (h)->heap_off_size /* Offset of the block in the heap */ \
)
-/* Size of managed indirect block (absolute & mapped) */
+/* Size of managed indirect block entry for a child direct block */
+#define H5HF_MAN_INDIRECT_CHILD_DIR_ENTRY_SIZE(h) ( \
+ ((h)->filter_len > 0 ? \
+ ((h)->sizeof_addr + (h)->sizeof_size + 4) : /* Size of entries for filtered direct blocks */ \
+ (h)->sizeof_addr) /* Size of entries for un-filtered direct blocks */ \
+ )
+
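(Illustration, not part of the patch: a minimal sketch of the entry-size arithmetic in
H5HF_MAN_INDIRECT_CHILD_DIR_ENTRY_SIZE above, assuming 8-byte file addresses and sizes;
the helper name is hypothetical.)

    #include <stddef.h>

    /* Sketch only: per-child entry size for a direct block.  A filtered heap
     * also records the on-disk (compressed) size and a 4-byte I/O filter mask
     * alongside the child's address. */
    static size_t child_dir_entry_size(size_t sizeof_addr, size_t sizeof_size, unsigned filter_len)
    {
        return filter_len > 0 ? (sizeof_addr + sizeof_size + 4)  /* e.g. 8 + 8 + 4 = 20 bytes */
                              : sizeof_addr;                     /* e.g. 8 bytes, unfiltered */
    }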
+/* Size of managed indirect block */
#define H5HF_MAN_INDIRECT_SIZE(h, i) ( \
/* General metadata fields */ \
H5HF_METADATA_PREFIX_SIZE \
@@ -121,7 +134,7 @@
/* Fractal heap managed, absolutely mapped indirect block specific fields */ \
+ (h)->sizeof_addr /* File address of heap owning the block */ \
+ (h)->heap_off_size /* Offset of the block in the heap */ \
- + (MIN((i)->nrows, (h)->man_dtable.max_direct_rows) * (h)->man_dtable.cparam.width * (h)->sizeof_addr) /* Size of entries for direct blocks */ \
+ + (MIN((i)->nrows, (h)->man_dtable.max_direct_rows) * (h)->man_dtable.cparam.width * H5HF_MAN_INDIRECT_CHILD_DIR_ENTRY_SIZE(h)) /* Size of entries for direct blocks */ \
+ ((((i)->nrows > (h)->man_dtable.max_direct_rows) ? ((i)->nrows - (h)->man_dtable.max_direct_rows) : 0) * (h)->man_dtable.cparam.width * (h)->sizeof_addr) /* Size of entries for indirect blocks */ \
)
@@ -289,12 +302,12 @@ typedef struct H5HF_hdr_t {
/* General header information (stored in header) */
unsigned id_len; /* Size of heap IDs (in bytes) */
+ unsigned filter_len; /* Size of I/O filter information (in bytes) */
/* Flags for heap settings (stored in status byte in header) */
hbool_t debug_objs; /* Is the heap storing objects in 'debug' format */
- hbool_t have_io_filter; /* Does the heap have I/O filters for the direct blocks? */
hbool_t write_once; /* Is heap being written in "write once" mode? */
- hbool_t huge_ids_wrapped; /* Have "huge" object IDs wrapped around? */
+ hbool_t huge_ids_wrapped; /* Have "huge" object IDs wrapped around? */
/* Doubling table information (partially stored in header) */
/* (Partially set by user, partially derived/updated internally) */
@@ -309,38 +322,54 @@ typedef struct H5HF_hdr_t {
hsize_t huge_next_id; /* Next ID to use for indirectly tracked 'huge' object */
haddr_t huge_bt2_addr; /* Address of v2 B-tree for tracking "huge" object info */
+ /* I/O filter support (stored in header, if any are used) */
+ H5O_pline_t pline; /* I/O filter pipeline for heap objects */
+ size_t pline_root_direct_size; /* Size of filtered root direct block */
+ unsigned pline_root_direct_filter_mask; /* I/O filter mask for filtered root direct block */
+
/* Statistics for heap (stored in header) */
- hsize_t man_size; /* Total amount of managed space in heap */
- hsize_t man_alloc_size; /* Total amount of allocated managed space in heap */
- hsize_t man_iter_off; /* Offset of iterator in managed heap space */
- hsize_t man_nobjs; /* Number of "managed" objects in heap */
- hsize_t huge_size; /* Total size of "huge" objects in heap */
- hsize_t huge_nobjs; /* Number of "huge" objects in heap */
+ hsize_t man_size; /* Total amount of 'managed' space in heap */
+ hsize_t man_alloc_size; /* Total amount of allocated 'managed' space in heap */
+ hsize_t man_iter_off; /* Offset of iterator in 'managed' heap space */
+ hsize_t man_nobjs; /* Number of 'managed' objects in heap */
+ hsize_t huge_size; /* Total size of 'huge' objects in heap */
+ hsize_t huge_nobjs; /* Number of 'huge' objects in heap */
+ hsize_t tiny_size; /* Total size of 'tiny' objects in heap */
+ hsize_t tiny_nobjs; /* Number of 'tiny' objects in heap */
/* Cached/computed values (not stored in header) */
size_t rc; /* Reference count of objects using heap header */
hbool_t dirty; /* Shared info is modified */
haddr_t heap_addr; /* Address of heap header in the file */
+ size_t heap_size; /* Size of heap header in the file */
H5AC_protect_t mode; /* Access mode for heap */
H5F_t *f; /* Pointer to file for heap */
size_t sizeof_size; /* Size of file sizes */
size_t sizeof_addr; /* Size of file addresses */
H5FS_t *fspace; /* Free space list for objects in heap */
H5HF_block_iter_t next_block; /* Block iterator for searching for next block with space */
- H5B2_class_t huge_bt2_class; /* v2 B-tree class information for "huge" object tracking */
hsize_t huge_max_id; /* Max. 'huge' heap ID before rolling 'huge' heap IDs over */
hbool_t huge_ids_direct; /* Flag to indicate that 'huge' object's offset & length are stored directly in heap ID */
- unsigned char huge_id_size; /* Size of 'huge' heap IDs (in bytes) */
+ size_t tiny_max_len; /* Max. size of tiny objects for this heap */
+ hbool_t tiny_len_extended; /* Flag to indicate that 'tiny' object's length is stored in extended form (i.e. w/extra byte) */
+ unsigned char huge_id_size; /* Size of 'huge' heap IDs (in bytes) */
unsigned char heap_off_size; /* Size of heap offsets (in bytes) */
unsigned char heap_len_size; /* Size of heap ID lengths (in bytes) */
} H5HF_hdr_t;
-/* Indirect block entry */
+/* Common indirect block doubling table entry */
+/* (common between entries pointing to direct & indirect child blocks) */
typedef struct H5HF_indirect_ent_t {
haddr_t addr; /* Direct block's address */
-/* XXX: Will need space for block size, for blocks with I/O filters */
} H5HF_indirect_ent_t;
+/* Extra indirect block doubling table entry for compressed direct blocks */
+/* (only exists for indirect blocks in heaps that have I/O filters) */
+typedef struct H5HF_indirect_filt_ent_t {
+ size_t size; /* Size of child direct block, after passing through I/O filters */
+ unsigned filter_mask; /* Excluded filters for child direct block */
+} H5HF_indirect_filt_ent_t;
+
/* Fractal heap indirect block */
struct H5HF_indirect_t {
/* Information for H5AC cache functions, _must_ be first field in structure */
@@ -361,6 +390,7 @@ struct H5HF_indirect_t {
/* Stored values */
hsize_t block_off; /* Offset of the block within the heap's address space */
H5HF_indirect_ent_t *ents; /* Pointer to block entry table */
+ H5HF_indirect_filt_ent_t *filt_ents; /* Pointer to filtered information for direct blocks */
};
/* A fractal heap direct block */
@@ -393,6 +423,36 @@ typedef struct H5HF_parent_t {
unsigned entry; /* Location of block in parent's entry table */
} H5HF_parent_t;
+/* Typedef for indirectly accessed 'huge' object's records in the v2 B-tree */
+typedef struct H5HF_huge_bt2_indir_rec_t {
+ haddr_t addr; /* Address of the object in the file */
+ hsize_t len; /* Length of the object in the file */
+ hsize_t id; /* ID used for object (not used for 'huge' objects directly accessed) */
+} H5HF_huge_bt2_indir_rec_t;
+
+/* Typedef for indirectly accessed, filtered 'huge' object's records in the v2 B-tree */
+typedef struct H5HF_huge_bt2_filt_indir_rec_t {
+ haddr_t addr; /* Address of the filtered object in the file */
+ hsize_t len; /* Length of the filtered object in the file */
+ unsigned filter_mask; /* I/O pipeline filter mask for filtered object in the file */
+ hsize_t obj_size; /* Size of the de-filtered object in memory */
+ hsize_t id; /* ID used for object (not used for 'huge' objects directly accessed) */
+} H5HF_huge_bt2_filt_indir_rec_t;
+
+/* Typedef for directly accessed 'huge' object's records in the v2 B-tree */
+typedef struct H5HF_huge_bt2_dir_rec_t {
+ haddr_t addr; /* Address of the object in the file */
+ hsize_t len; /* Length of the object in the file */
+} H5HF_huge_bt2_dir_rec_t;
+
+/* Typedef for directly accessed, filtered 'huge' object's records in the v2 B-tree */
+typedef struct H5HF_huge_bt2_filt_dir_rec_t {
+ haddr_t addr; /* Address of the filtered object in the file */
+ hsize_t len; /* Length of the filtered object in the file */
+ unsigned filter_mask; /* I/O pipeline filter mask for filtered object in the file */
+ hsize_t obj_size; /* Size of the de-filtered object in memory */
+} H5HF_huge_bt2_filt_dir_rec_t;
+
/* User data for free space section 'add' callback */
typedef struct {
H5HF_hdr_t *hdr; /* Fractal heap header */
@@ -406,13 +466,6 @@ typedef struct {
hsize_t obj_len; /* Length of object removed (out) */
} H5HF_huge_remove_ud1_t;
-/* Typedef for 'huge' object's records in the v2 B-tree */
-typedef struct H5HF_huge_bt2_rec_t {
- haddr_t addr; /* Address of the object in the file */
- hsize_t len; /* Length of the object in the file */
- hsize_t id; /* ID used for object (not used for 'huge' objects directly accessed) */
-} H5HF_huge_bt2_rec_t;
-
/*****************************/
/* Package Private Variables */
/*****************************/
@@ -426,6 +479,18 @@ H5_DLLVAR const H5AC_class_t H5AC_FHEAP_DBLOCK[1];
/* H5HF indirect block inherits cache-like properties from H5AC */
H5_DLLVAR const H5AC_class_t H5AC_FHEAP_IBLOCK[1];
+/* The v2 B-tree class for tracking indirectly accessed 'huge' objects */
+H5_DLLVAR const H5B2_class_t H5HF_BT2_INDIR[1];
+
+/* The v2 B-tree class for tracking indirectly accessed filtered 'huge' objects */
+H5_DLLVAR const H5B2_class_t H5HF_BT2_FILT_INDIR[1];
+
+/* The v2 B-tree class for tracking directly accessed 'huge' objects */
+H5_DLLVAR const H5B2_class_t H5HF_BT2_DIR[1];
+
+/* The v2 B-tree class for tracking directly accessed filtered 'huge' objects */
+H5_DLLVAR const H5B2_class_t H5HF_BT2_FILT_DIR[1];
+
/* H5HF single section inherits serializable properties from H5FS_section_class_t */
H5_DLLVAR H5FS_section_class_t H5HF_FSPACE_SECT_CLS_SINGLE[1];
@@ -453,6 +518,9 @@ H5FL_EXTERN(H5HF_indirect_t);
/* Declare a free list to manage the H5HF_indirect_ent_t sequence information */
H5FL_SEQ_EXTERN(H5HF_indirect_ent_t);
+/* Declare a free list to manage the H5HF_indirect_filt_ent_t sequence information */
+H5FL_SEQ_EXTERN(H5HF_indirect_filt_ent_t);
+
/******************************/
/* Package Private Prototypes */
@@ -543,10 +611,10 @@ H5_DLL herr_t H5HF_man_read(H5HF_hdr_t *fh, hid_t dxpl_id, const uint8_t *id,
void *obj);
H5_DLL herr_t H5HF_man_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id);
-/* "Huge" object routines */
+/* 'Huge' object routines */
H5_DLL herr_t H5HF_huge_init(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_huge_insert(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t obj_size,
- const void *obj, void *id);
+ void *obj, void *id);
H5_DLL herr_t H5HF_huge_get_obj_len(H5HF_hdr_t *hdr, hid_t dxpl_id,
const uint8_t *id, size_t *obj_len_p);
H5_DLL herr_t H5HF_huge_read(H5HF_hdr_t *fh, hid_t dxpl_id, const uint8_t *id,
@@ -555,6 +623,15 @@ H5_DLL herr_t H5HF_huge_remove(H5HF_hdr_t *fh, hid_t dxpl_id, const uint8_t *id)
H5_DLL herr_t H5HF_huge_term(H5HF_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5HF_huge_delete(H5HF_hdr_t *hdr, hid_t dxpl_id);
+/* 'Tiny' object routines */
+H5_DLL herr_t H5HF_tiny_init(H5HF_hdr_t *hdr);
+H5_DLL herr_t H5HF_tiny_insert(H5HF_hdr_t *hdr, size_t obj_size, const void *obj,
+ void *id);
+H5_DLL herr_t H5HF_tiny_get_obj_len(H5HF_hdr_t *hdr, const uint8_t *id,
+ size_t *obj_len_p);
+H5_DLL herr_t H5HF_tiny_read(H5HF_hdr_t *fh, const uint8_t *id, void *obj);
+H5_DLL herr_t H5HF_tiny_remove(H5HF_hdr_t *fh, const uint8_t *id);
+
/* Metadata cache callbacks */
H5_DLL herr_t H5HF_cache_hdr_dest(H5F_t *f, H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_cache_dblock_dest(H5F_t *f, H5HF_direct_t *dblock);
@@ -619,6 +696,7 @@ H5_DLL herr_t H5HF_sect_indirect_add(H5HF_hdr_t *hdr, hid_t dxpl_id,
/* Testing routines */
#ifdef H5HF_TESTING
H5_DLL herr_t H5HF_get_cparam_test(const H5HF_t *fh, H5HF_create_t *cparam);
+H5_DLL int H5HF_cmp_cparam_test(const H5HF_create_t *cparam1, const H5HF_create_t *cparam2);
H5_DLL unsigned H5HF_get_max_root_rows(const H5HF_t *fh);
H5_DLL unsigned H5HF_get_dtable_width_test(const H5HF_t *fh);
H5_DLL unsigned H5HF_get_dtable_max_drows_test(const H5HF_t *fh);
@@ -626,6 +704,12 @@ H5_DLL unsigned H5HF_get_iblock_max_drows_test(const H5HF_t *fh, unsigned pos);
H5_DLL hsize_t H5HF_get_dblock_size_test(const H5HF_t *fh, unsigned row);
H5_DLL hsize_t H5HF_get_dblock_free_test(const H5HF_t *fh, unsigned row);
H5_DLL herr_t H5HF_get_id_off_test(const H5HF_t *fh, const void *id, hsize_t *obj_off);
+H5_DLL herr_t H5HF_get_id_type_test(const H5HF_t *fh, const void *id,
+ unsigned char *obj_type);
+H5_DLL herr_t H5HF_get_tiny_info_test(const H5HF_t *fh, size_t *max_len,
+ hbool_t *len_extended);
+H5_DLL herr_t H5HF_get_huge_info_test(const H5HF_t *fh, hsize_t *next_id,
+ hbool_t *ids_direct);
#endif /* H5HF_TESTING */
#endif /* _H5HFpkg_H */
diff --git a/src/H5HFprivate.h b/src/H5HFprivate.h
index 1c9a3ba..20415c1 100644
--- a/src/H5HFprivate.h
+++ b/src/H5HFprivate.h
@@ -31,11 +31,17 @@
/* Private headers needed by this file */
#include "H5Fprivate.h" /* File access */
+#include "H5Oprivate.h" /* Object headers */
/**************************/
/* Library Private Macros */
/**************************/
+/* Limit heap ID length to 4096 + 1, due to # of bits required to store
+ * length of 'tiny' objects (12 bits)
+ */
+#define H5HF_MAX_ID_LEN (4096 + 1)
+
/****************************/
/* Library Private Typedefs */
@@ -60,22 +66,27 @@ typedef struct H5HF_create_t {
/* (i.e. min. size of object to store standalone) */
uint16_t id_len; /* Length of IDs to use for heap objects */
/* (0 - make ID just large enough to hold length & offset of object in the heap) */
- /* (1 - make ID just large enough to allow 'huge' objects to hold the file address & length of the 'huge' object) */
+ /* (1 - make ID just large enough to allow 'huge' objects to be accessed directly) */
/* (n - make ID 'n' bytes in size) */
+ H5O_pline_t pline; /* I/O filter pipeline to apply to direct blocks & 'huge' objects */
} H5HF_create_t;
/* Fractal heap metadata statistics info */
typedef struct H5HF_stat_t {
- /* "Managed" object info */
- hsize_t man_size; /* Size of managed space in heap */
- hsize_t man_alloc_size; /* Size of managed space allocated in heap */
- hsize_t man_iter_off; /* Offset of "new block" iterator in managed heap space */
- hsize_t man_free_space; /* Free space within managed heap blocks */
- hsize_t man_nobjs; /* Number of "managed" objects in heap */
-
- /* "Huge" object info */
- hsize_t huge_size; /* Size of "huge" objects in heap */
- hsize_t huge_nobjs; /* Number of "huge" objects in heap */
+ /* 'Managed' object info */
+ hsize_t man_size; /* Size of 'managed' space in heap */
+ hsize_t man_alloc_size; /* Size of 'managed' space allocated in heap */
+ hsize_t man_iter_off; /* Offset of "new block" iterator in 'managed' heap space */
+ hsize_t man_free_space; /* Free space within 'managed' heap blocks */
+ hsize_t man_nobjs; /* Number of 'managed' objects in heap */
+
+ /* 'Huge' object info */
+ hsize_t huge_size; /* Size of 'huge' objects in heap */
+ hsize_t huge_nobjs; /* Number of 'huge' objects in heap */
+
+ /* 'Tiny' object info */
+ hsize_t tiny_size; /* Size of 'tiny' objects in heap */
+ hsize_t tiny_nobjs; /* Number of 'tiny' objects in heap */
} H5HF_stat_t;
/* Fractal heap info (forward decl - defined in H5HFpkg.h) */
diff --git a/src/H5HFstat.c b/src/H5HFstat.c
index 0924dae..1c6e887 100644
--- a/src/H5HFstat.c
+++ b/src/H5HFstat.c
@@ -99,6 +99,8 @@ H5HF_stat_info(const H5HF_t *fh, H5HF_stat_t *stats)
stats->man_free_space = fh->hdr->total_man_free;
stats->huge_size = fh->hdr->huge_size;
stats->huge_nobjs = fh->hdr->huge_nobjs;
+ stats->tiny_size = fh->hdr->tiny_size;
+ stats->tiny_nobjs = fh->hdr->tiny_nobjs;
/* XXX: Add more metadata statistics for the heap */
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5HFtest.c b/src/H5HFtest.c
index d1daa2b..bbe93ca 100644
--- a/src/H5HFtest.c
+++ b/src/H5HFtest.c
@@ -93,14 +93,122 @@ H5HF_get_cparam_test(const H5HF_t *fh, H5HF_create_t *cparam)
HDassert(cparam);
/* Get fractal heap creation parameters */
+ if(fh->hdr->id_len == (unsigned)(1 + fh->hdr->heap_off_size + fh->hdr->heap_len_size))
+ cparam->id_len = 0;
+ else if(fh->hdr->id_len == (1 + fh->hdr->sizeof_size + fh->hdr->sizeof_addr))
+ cparam->id_len = 1;
+ else
+ cparam->id_len = fh->hdr->id_len;
cparam->max_man_size = fh->hdr->max_man_size;
HDmemcpy(&(cparam->managed), &(fh->hdr->man_dtable.cparam), sizeof(H5HF_dtable_cparam_t));
+ H5O_copy(H5O_PLINE_ID, &(fh->hdr->pline), &(cparam->pline));
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5HF_get_cparam_test() */
/*-------------------------------------------------------------------------
+ * Function: H5HF_cmp_cparam_test
+ *
+ * Purpose: Compare the parameters used to create the fractal heap
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+H5HF_cmp_cparam_test(const H5HF_create_t *cparam1, const H5HF_create_t *cparam2)
+{
+ int ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_cmp_cparam_test)
+
+ /* Check arguments. */
+ HDassert(cparam1);
+ HDassert(cparam2);
+
+ /* Compare doubling table parameters */
+ if((ret_value = HDmemcmp(&(cparam1->managed), &(cparam2->managed), sizeof(H5HF_dtable_cparam_t))))
+ HGOTO_DONE(ret_value)
+
+ /* Compare other general parameters for heap */
+ if(cparam1->max_man_size < cparam2->max_man_size)
+ HGOTO_DONE(-1)
+ else if(cparam1->max_man_size > cparam2->max_man_size)
+ HGOTO_DONE(1)
+ if(cparam1->id_len < cparam2->id_len)
+ HGOTO_DONE(-1)
+ else if(cparam1->id_len > cparam2->id_len)
+ HGOTO_DONE(1)
+
+ /* Compare "important" parameters for any I/O pipeline filters */
+ if(cparam1->pline.nused < cparam2->pline.nused)
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.nused > cparam2->pline.nused)
+ HGOTO_DONE(1)
+ else {
+ size_t u, v; /* Local index variables */
+
+ /* Compare each filter */
+ for(u = 0; u < cparam1->pline.nused; u++) {
+ /* Check filter ID */
+ if(cparam1->pline.filter[u].id < cparam2->pline.filter[u].id)
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.filter[u].id > cparam2->pline.filter[u].id)
+ HGOTO_DONE(1)
+
+ /* Check filter flags */
+ if(cparam1->pline.filter[u].flags < cparam2->pline.filter[u].flags)
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.filter[u].flags > cparam2->pline.filter[u].flags)
+ HGOTO_DONE(1)
+
+/* Don't worry about comparing the filter names right now... */
+/* (they are expanded during the encode/decode process, but aren't copied
+ * during the H5Z_append operation, generating false positive failures)
+ */
+#ifdef QAK
+ /* Check filter name */
+HDfprintf(stderr, "%s: Check 1.0\n", "H5HF_cmp_cparam_test");
+HDfprintf(stderr, "%s: cparam1->pline.filter[%Zu].name = %s\n", "H5HF_cmp_cparam_test", u, (cparam1->pline.filter[u].name ? cparam1->pline.filter[u].name : "<nil>"));
+HDfprintf(stderr, "%s: cparam2->pline.filter[%Zu].name = %s\n", "H5HF_cmp_cparam_test", u, (cparam2->pline.filter[u].name ? cparam2->pline.filter[u].name : "<nil>"));
+ if(!cparam1->pline.filter[u].name && cparam2->pline.filter[u].name)
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.filter[u].name && !cparam2->pline.filter[u].name)
+ HGOTO_DONE(1)
+ else if(cparam1->pline.filter[u].name && cparam2->pline.filter[u].name) {
+ if((ret_value = HDstrcmp(cparam1->pline.filter[u].name, cparam2->pline.filter[u].name)))
+ HGOTO_DONE(ret_value)
+ } /* end if */
+#endif /* QAK */
+
+ /* Check # of filter parameters */
+ if(cparam1->pline.filter[u].cd_nelmts < cparam2->pline.filter[u].cd_nelmts)
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.filter[u].cd_nelmts > cparam2->pline.filter[u].cd_nelmts)
+ HGOTO_DONE(1)
+
+ /* Check filter parameters */
+ for(v = 0; v < cparam1->pline.filter[u].cd_nelmts; v++) {
+ if(cparam1->pline.filter[u].cd_values[v] < cparam2->pline.filter[u].cd_values[v])
+ HGOTO_DONE(-1)
+ else if(cparam1->pline.filter[u].cd_values[v] > cparam2->pline.filter[u].cd_values[v])
+ HGOTO_DONE(1)
+ } /* end for */
+
+ } /* end for */
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HF_cmp_cparam_test() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HF_get_max_root_rows
*
* Purpose: Retrieve the max. # of rows in the root indirect block
@@ -296,7 +404,7 @@ H5HF_get_dblock_free_test(const H5HF_t *fh, unsigned row)
/*-------------------------------------------------------------------------
* Function: H5HF_get_id_off_test
*
- * Purpose: Retrieve the offset for a heap ID
+ * Purpose: Retrieve the offset for a [managed] heap ID
*
* Return: Success: non-negative
*
@@ -322,8 +430,106 @@ H5HF_get_id_off_test(const H5HF_t *fh, const void *_id, hsize_t *obj_off)
/* Get the offset for a 'normal' heap ID */
id++;
- UINT64DECODE_VAR(id, *obj_off, fh->hdr->heap_off_size); \
+ UINT64DECODE_VAR(id, *obj_off, fh->hdr->heap_off_size);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5HF_get_id_off_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_get_id_type_test
+ *
+ * Purpose: Retrieve the type of a heap ID
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_get_id_type_test(const H5HF_t *fh, const void *_id, unsigned char *obj_type)
+{
+ const uint8_t *id = (const uint8_t *)_id; /* Object ID */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_get_id_type_test)
+
+ /* Check arguments. */
+ HDassert(fh);
+ HDassert(fh->hdr);
+ HDassert(id);
+ HDassert(obj_type);
+
+ /* Get the type for a heap ID */
+ *obj_type = *id & H5HF_ID_TYPE_MASK;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_get_id_type_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_get_tiny_info_test
+ *
+ * Purpose: Retrieve information about tiny object's ID length
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_get_tiny_info_test(const H5HF_t *fh, size_t *max_len,
+ hbool_t *len_extended)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_get_tiny_info_test)
+
+ /* Check arguments. */
+ HDassert(fh);
+ HDassert(fh->hdr);
+ HDassert(max_len);
+ HDassert(len_extended);
+
+ /* Retrieve information about tiny object's ID encoding in a heap */
+ *max_len = fh->hdr->tiny_max_len;
+ *len_extended = fh->hdr->tiny_len_extended;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_get_tiny_info_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_get_huge_info_test
+ *
+ * Purpose: Retrieve information about 'huge' object IDs in a heap
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_get_huge_info_test(const H5HF_t *fh, hsize_t *next_id, hbool_t *ids_direct)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_get_huge_info_test)
+
+ /* Check arguments. */
+ HDassert(fh);
+ HDassert(fh->hdr);
+ HDassert(ids_direct);
+
+ /* Retrieve information about 'huge' object IDs in a heap */
+ if(next_id)
+ *next_id = fh->hdr->huge_next_id;
+ *ids_direct = fh->hdr->huge_ids_direct;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5HF_get_huge_info_test() */
+
diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c
new file mode 100644
index 0000000..5183a39
--- /dev/null
+++ b/src/H5HFtiny.c
@@ -0,0 +1,326 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5HFtiny.c
+ * Aug 14 2006
+ * Quincey Koziol <koziol@hdfgroup.org>
+ *
+ * Purpose: Routines for "tiny" objects in fractal heap
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#define H5HF_PACKAGE /*suppress error about including H5HFpkg */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5HFpkg.h" /* Fractal heaps */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* Tiny object length information */
+#define H5HF_TINY_LEN_SHORT 16 /* Max. length able to be encoded in first heap ID byte */
+#define H5HF_TINY_MASK_SHORT 0x0F /* Mask for length in first heap ID byte */
+#define H5HF_TINY_MASK_EXT 0x0FFF /* Mask for length in two heap ID bytes */
+#define H5HF_TINY_MASK_EXT_1 0x0F00 /* Mask for length in first byte of two heap ID bytes */
+#define H5HF_TINY_MASK_EXT_2 0x00FF /* Mask for length in second byte of two heap ID bytes */
+
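(Illustration, not part of the patch: a worked example of how these masks pack a 'tiny'
object's length into the heap ID prefix.  Lengths are stored biased by one, so the short
form covers 1-16 bytes and the extended form up to 4096 bytes.)

    /* Sketch only: worked examples of the two encodings.
     *
     * Short form, 10-byte object:      enc = 10 - 1 = 9 = 0x9
     *   flag byte   = vers | type | (0x9 & H5HF_TINY_MASK_SHORT)
     *
     * Extended form, 300-byte object:  enc = 300 - 1 = 299 = 0x12B
     *   flag byte   = vers | type | ((0x12B & H5HF_TINY_MASK_EXT_1) >> 8)  -> low nibble 0x1
     *   second byte = 0x12B & H5HF_TINY_MASK_EXT_2                         -> 0x2B
     *
     * The object data follows the one- or two-byte prefix, as done in
     * H5HF_tiny_insert() / H5HF_tiny_read() below.
     */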
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_tiny_init
+ *
+ * Purpose: Initialize information for tracking 'tiny' objects
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 14 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_tiny_init(H5HF_hdr_t *hdr)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_tiny_init)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+
+ /* Compute information about 'tiny' objects for the heap */
+
+ /* Check if tiny objects need an extra byte for their length */
+ /* (account for the boundary condition where the length of an object would need an
+ * extra byte, but using that byte for the length means the extra length byte is
+ * unnecessary)
+ */
+ if((hdr->id_len - 1) <= H5HF_TINY_LEN_SHORT) {
+ hdr->tiny_max_len = hdr->id_len - 1;
+ hdr->tiny_len_extended = FALSE;
+ } /* end if */
+ else if((hdr->id_len - 1) == (H5HF_TINY_LEN_SHORT + 1)) {
+ hdr->tiny_max_len = H5HF_TINY_LEN_SHORT;
+ hdr->tiny_len_extended = FALSE;
+ } /* end if */
+ else {
+ hdr->tiny_max_len = hdr->id_len - 2;
+ hdr->tiny_len_extended = TRUE;
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF_tiny_init() */
+
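(Illustration, not part of the patch: how the boundary condition above plays out for a
few ID lengths; the same values are exercised by test_id_limits() in test/fheap.c later
in this patch.)

    /* Sketch only:
     *   id_len = 17  ->  tiny_max_len = 16, tiny_len_extended = FALSE
     *   id_len = 18  ->  tiny_max_len = 16, tiny_len_extended = FALSE  (boundary: the
     *                    extra length byte would only buy back the byte it consumes)
     *   id_len = 19  ->  tiny_max_len = 17, tiny_len_extended = TRUE
     */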
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_tiny_insert
+ *
+ * Purpose: Pack a 'tiny' object in a heap ID
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 14 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_tiny_insert(H5HF_hdr_t *hdr, size_t obj_size, const void *obj, void *_id)
+{
+ uint8_t *id = (uint8_t *)_id; /* Pointer to ID buffer */
+ size_t enc_obj_size; /* Encoded object size */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_tiny_insert)
+#ifdef QAK
+HDfprintf(stderr, "%s: obj_size = %Zu\n", FUNC, obj_size);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(obj_size <= hdr->tiny_max_len);
+ HDassert(obj_size <= (H5HF_TINY_MASK_EXT + 1));
+ HDassert(obj);
+ HDassert(id);
+
+ /* Adjust object's size for encoding it */
+ enc_obj_size = obj_size - 1;
+
+ /* Encode object into ID */
+ if(!hdr->tiny_len_extended) {
+ *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_TINY |
+ (enc_obj_size & H5HF_TINY_MASK_SHORT);
+ } /* end if */
+ else {
+ *id++ = H5HF_ID_VERS_CURR | H5HF_ID_TYPE_TINY |
+ ((enc_obj_size & H5HF_TINY_MASK_EXT_1) >> 8);
+ *id++ = enc_obj_size & H5HF_TINY_MASK_EXT_2;
+ } /* end else */
+ HDmemcpy(id, obj, obj_size);
+
+ /* Update statistics about heap */
+ hdr->tiny_size += obj_size;
+ hdr->tiny_nobjs++;
+
+ /* Mark heap header as modified */
+ if(H5HF_hdr_dirty(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_tiny_insert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_tiny_get_obj_len
+ *
+ * Purpose: Get the size of a 'tiny' object in a fractal heap
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 14 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_tiny_get_obj_len(H5HF_hdr_t *hdr, const uint8_t *id, size_t *obj_len_p)
+{
+ size_t enc_obj_size; /* Encoded object size */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_tiny_get_obj_len)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(id);
+ HDassert(obj_len_p);
+
+ /* Check if 'tiny' object ID is in extended form, and retrieve encoded size */
+ if(!hdr->tiny_len_extended)
+ enc_obj_size = *id & H5HF_TINY_MASK_SHORT;
+ else
+ enc_obj_size = ((*id & H5HF_TINY_MASK_EXT_1) << 8) | (*(id + 1) & H5HF_TINY_MASK_EXT_2);
+
+ /* Set the object's length */
+ *obj_len_p = enc_obj_size + 1;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF_tiny_get_obj_len() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_tiny_read
+ *
+ * Purpose: Read a 'tiny' object from the heap
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 8 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_tiny_read(H5HF_hdr_t *hdr, const uint8_t *id, void *obj)
+{
+ size_t enc_obj_size; /* Encoded object size */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5HF_tiny_read)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(id);
+ HDassert(obj);
+
+ /* Check if 'tiny' object ID is in extended form */
+ if(!hdr->tiny_len_extended) {
+ /* Retrieve the object's encoded length */
+ enc_obj_size = *id & H5HF_TINY_MASK_SHORT;
+
+ /* Advance past flag byte(s) */
+ id++;
+ } /* end if */
+ else {
+ /* Retrieve the object's encoded length */
+ enc_obj_size = ((*id & H5HF_TINY_MASK_EXT_1) << 8) | (*(id + 1) & H5HF_TINY_MASK_EXT_2);
+
+ /* Advance past flag byte(s) */
+ id += 2;
+ } /* end else */
+
+ /* Retrieve the object's data */
+ HDmemcpy(obj, id, (enc_obj_size + 1));
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5HF_tiny_read() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_tiny_remove
+ *
+ * Purpose: Remove a 'tiny' object from the heap statistics
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Aug 14 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_tiny_remove(H5HF_hdr_t *hdr, const uint8_t *id)
+{
+ size_t enc_obj_size; /* Encoded object size */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_tiny_remove)
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(id);
+
+ /* Check if 'tiny' object ID is in extended form */
+ if(!hdr->tiny_len_extended)
+ enc_obj_size = *id & H5HF_TINY_MASK_SHORT;
+ else
+ enc_obj_size = ((*id & H5HF_TINY_MASK_EXT_1) << 8) | (*(id + 1) & H5HF_TINY_MASK_EXT_2);
+
+ /* Update statistics about heap */
+ hdr->tiny_size -= (enc_obj_size + 1);
+ hdr->tiny_nobjs--;
+
+ /* Mark heap header as modified */
+ if(H5HF_hdr_dirty(hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDIRTY, FAIL, "can't mark heap header as dirty")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_tiny_remove() */
+
diff --git a/src/Makefile.am b/src/Makefile.am
index 4d464bb..86ce039 100755
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -53,7 +53,7 @@ libhdf5_la_SOURCES= H5.c H5dbg.c H5A.c H5AC.c H5B.c H5Bcache.c \
H5Gobj.c H5Goh.c H5Gstab.c H5Gtest.c H5Gtraverse.c \
H5HF.c H5HFbtree2.c H5HFcache.c H5HFdbg.c H5HFdblock.c H5HFdtable.c \
H5HFhdr.c H5HFhuge.c H5HFiblock.c H5HFiter.c H5HFman.c H5HFsection.c \
- H5HFspace.c H5HFstat.c H5HFtest.c \
+ H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
H5HG.c H5HGdbg.c H5HL.c H5HLdbg.c H5HP.c H5I.c H5MF.c H5MM.c \
H5MP.c H5MPtest.c H5L.c H5Lexternal.c H5O.c H5Oattr.c H5Obogus.c H5Ocache.c \
H5Ocont.c H5Odtype.c H5Oefl.c H5Ofill.c H5Oginfo.c H5Olayout.c \
diff --git a/src/Makefile.in b/src/Makefile.in
index 990023b..6b5d981 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -96,9 +96,9 @@ am_libhdf5_la_OBJECTS = H5.lo H5dbg.lo H5A.lo H5AC.lo H5B.lo \
H5Gtest.lo H5Gtraverse.lo H5HF.lo H5HFbtree2.lo H5HFcache.lo \
H5HFdbg.lo H5HFdblock.lo H5HFdtable.lo H5HFhdr.lo H5HFhuge.lo \
H5HFiblock.lo H5HFiter.lo H5HFman.lo H5HFsection.lo \
- H5HFspace.lo H5HFstat.lo H5HFtest.lo H5HG.lo H5HGdbg.lo \
- H5HL.lo H5HLdbg.lo H5HP.lo H5I.lo H5MF.lo H5MM.lo H5MP.lo \
- H5MPtest.lo H5L.lo H5Lexternal.lo H5O.lo H5Oattr.lo \
+ H5HFspace.lo H5HFstat.lo H5HFtest.lo H5HFtiny.lo H5HG.lo \
+ H5HGdbg.lo H5HL.lo H5HLdbg.lo H5HP.lo H5I.lo H5MF.lo H5MM.lo \
+ H5MP.lo H5MPtest.lo H5L.lo H5Lexternal.lo H5O.lo H5Oattr.lo \
H5Obogus.lo H5Ocache.lo H5Ocont.lo H5Odtype.lo H5Oefl.lo \
H5Ofill.lo H5Oginfo.lo H5Olayout.lo H5Olinfo.lo H5Olink.lo \
H5Omtime.lo H5Oname.lo H5Onull.lo H5Opline.lo H5Osdspace.lo \
@@ -402,7 +402,7 @@ libhdf5_la_SOURCES = H5.c H5dbg.c H5A.c H5AC.c H5B.c H5Bcache.c \
H5Gobj.c H5Goh.c H5Gstab.c H5Gtest.c H5Gtraverse.c \
H5HF.c H5HFbtree2.c H5HFcache.c H5HFdbg.c H5HFdblock.c H5HFdtable.c \
H5HFhdr.c H5HFhuge.c H5HFiblock.c H5HFiter.c H5HFman.c H5HFsection.c \
- H5HFspace.c H5HFstat.c H5HFtest.c \
+ H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
H5HG.c H5HGdbg.c H5HL.c H5HLdbg.c H5HP.c H5I.c H5MF.c H5MM.c \
H5MP.c H5MPtest.c H5L.c H5Lexternal.c H5O.c H5Oattr.c H5Obogus.c H5Ocache.c \
H5Ocont.c H5Odtype.c H5Oefl.c H5Ofill.c H5Oginfo.c H5Olayout.c \
@@ -624,6 +624,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HFspace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HFstat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HFtest.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HFtiny.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HG.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HGdbg.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/H5HL.Plo@am__quote@
diff --git a/test/fheap.c b/test/fheap.c
index 49baf02..ade01a5 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -111,6 +111,8 @@ typedef struct fheap_heap_state_t {
hsize_t man_free_space; /* Managed object free space within heap */
size_t huge_nobjs; /* # of 'huge' objects within heap */
hsize_t huge_size; /* Size of 'huge' object heap */
+ size_t tiny_nobjs; /* # of 'tiny' objects within heap */
+ hsize_t tiny_size; /* Size of 'tiny' object heap */
} fheap_heap_state_t;
/* Heap IDs to retain */
@@ -220,6 +222,14 @@ check_stats(const H5HF_t *fh, const fheap_heap_state_t *state)
HDfprintf(stdout, "heap_stats.huge_size = %Hu, state->huge_size = %Hu\n", heap_stats.huge_size, state->huge_size);
FAIL_STACK_ERROR
} /* end if */
+ if(heap_stats.tiny_nobjs != state->tiny_nobjs) {
+ HDfprintf(stdout, "heap_stats.tiny_nobjs = %Hu, state->tiny_nobjs = %Hu\n", heap_stats.tiny_nobjs, state->tiny_nobjs);
+ FAIL_STACK_ERROR
+ } /* end if */
+ if(heap_stats.tiny_size != state->tiny_size) {
+ HDfprintf(stdout, "heap_stats.tiny_size = %Hu, state->tiny_size = %Hu\n", heap_stats.tiny_size, state->tiny_size);
+ FAIL_STACK_ERROR
+ } /* end if */
/* All tests passed */
return(0);
@@ -1684,7 +1694,7 @@ test_create(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t UNUSED *tparam
HDmemset(&test_cparam, 0, sizeof(H5HF_create_t));
if(H5HF_get_cparam_test(fh, &test_cparam) < 0)
FAIL_STACK_ERROR
- if(HDmemcmp(cparam, &test_cparam, sizeof(H5HF_create_t)))
+ if(H5HF_cmp_cparam_test(cparam, &test_cparam))
TEST_ERROR
/* Close the fractal heap */
@@ -1792,7 +1802,7 @@ test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t UNUSED *tparam
HDmemset(&test_cparam, 0, sizeof(H5HF_create_t));
if(H5HF_get_cparam_test(fh, &test_cparam) < 0)
FAIL_STACK_ERROR
- if(HDmemcmp(cparam, &test_cparam, sizeof(H5HF_create_t)))
+ if(H5HF_cmp_cparam_test(cparam, &test_cparam))
TEST_ERROR
/* Close the fractal heap */
@@ -1817,6 +1827,466 @@ error:
return(1);
} /* test_reopen() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_id_limits
+ *
+ * Purpose: Test limits for heap ID lengths
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_id_limits(hid_t fapl, H5HF_create_t *cparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ H5HF_create_t tmp_cparam; /* Local heap creation parameters */
+ unsigned deflate_level; /* Deflation level */
+ size_t id_len; /* Size of fractal heap IDs */
+ size_t tiny_max_len; /* Max. length of tiny objects */
+ hbool_t tiny_len_extended; /* Do tiny objects use two bytes for the length? */
+ hbool_t huge_ids_direct; /* Are 'huge' objects directly accessed? */
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Display testing message */
+ TESTING("limits of heap ID lengths")
+
+ /* Copy heap creation properties */
+ HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
+
+
+ /* Set the 'default' heap ID length */
+ tmp_cparam.id_len = 0;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != HEAP_ID_LEN)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != (HEAP_ID_LEN - 1))
+ TEST_ERROR
+ if(tiny_len_extended != FALSE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != FALSE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to the size needed for directly accessing 'huge' objects */
+ /* (with no I/O pipeline filters) */
+ tmp_cparam.id_len = 1;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 17)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 16)
+ TEST_ERROR
+ if(tiny_len_extended != FALSE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to the size needed for directly accessing 'huge' objects */
+ /* (with I/O pipeline filters) */
+ tmp_cparam.id_len = 1;
+
+ /* Set an I/O filter for heap data */
+ deflate_level = 6;
+ if(H5Z_append(&tmp_cparam.pline, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &deflate_level) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 29)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 27)
+ TEST_ERROR
+ if(tiny_len_extended != TRUE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Release the I/O pipeline filter information */
+ H5O_reset(H5O_PLINE_ID, &tmp_cparam.pline);
+
+
+ /* Set the heap ID length to a size that's too small for 'managed' heap IDs */
+ tmp_cparam.id_len = 3;
+
+ /* Create absolute heap */
+ H5E_BEGIN_TRY {
+ fh = H5HF_create(f, dxpl, &tmp_cparam);
+ } H5E_END_TRY;
+ if(NULL != fh)
+ FAIL_STACK_ERROR
+
+
+ /* Set the heap ID length to a size that's large enough for 'tiny' & 'managed'
+ * objects, but too small for directly accessing 'huge' objects
+ */
+ tmp_cparam.id_len = 8;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 8)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 7)
+ TEST_ERROR
+ if(tiny_len_extended != FALSE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != FALSE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to a size that's large enough for directly
+ * accessing 'huge' objects
+ */
+ tmp_cparam.id_len = 17;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 17)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 16)
+ TEST_ERROR
+ if(tiny_len_extended != FALSE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to the low side of the boundary condition for
+ * encoding 'tiny' objects in one byte
+ */
+ tmp_cparam.id_len = 18;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 18)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 16)
+ TEST_ERROR
+ if(tiny_len_extended != FALSE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to the high side of the boundary condition for
+ * encoding 'tiny' objects in one byte
+ */
+ tmp_cparam.id_len = 19;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 19)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 17)
+ TEST_ERROR
+ if(tiny_len_extended != TRUE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to a size that's larger than what is needed for
+ * directly accessing 'huge' objects
+ */
+ tmp_cparam.id_len = 45;
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Test ID length information for heap */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != 45)
+ FAIL_STACK_ERROR
+ if(H5HF_get_tiny_info_test(fh, &tiny_max_len, &tiny_len_extended) < 0)
+ FAIL_STACK_ERROR
+ if(tiny_max_len != 43)
+ TEST_ERROR
+ if(tiny_len_extended != TRUE)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+
+ /* Set the heap ID length to a size that's too large to encode the length
+ * of 'tiny' objects
+ */
+ tmp_cparam.id_len = H5HF_MAX_ID_LEN + 1;
+
+ /* Create absolute heap */
+ H5E_BEGIN_TRY {
+ fh = H5HF_create(f, dxpl, &tmp_cparam);
+ } H5E_END_TRY;
+ if(NULL != fh)
+ FAIL_STACK_ERROR
+
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_id_limits() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_filtered_create
+ *
+ * Purpose: Test creating a heap with I/O filters
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_filtered_create(hid_t fapl, H5HF_create_t *cparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ H5HF_create_t tmp_cparam; /* Local heap creation parameters */
+ H5HF_create_t test_cparam; /* Temporary local heap creation parameters */
+ unsigned deflate_level; /* Deflation level */
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Display testing message */
+ TESTING("creating heaps with I/O filters")
+
+ /* Copy heap creation properties */
+ HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
+
+
+ /* Set an I/O filter for heap data */
+ deflate_level = 6;
+ if(H5Z_append(&tmp_cparam.pline, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &deflate_level) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create absolute heap */
+ if(NULL == (fh = H5HF_create(f, dxpl, &tmp_cparam)))
+ FAIL_STACK_ERROR
+
+ /* Get heap's address */
+ if(H5HF_get_heap_addr(fh, &fh_addr) < 0)
+ FAIL_STACK_ERROR
+ if(!H5F_addr_defined(fh_addr))
+ TEST_ERROR
+
+/* XXX: Check heap's I/O filter settings? */
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Re-open the heap */
+ if(NULL == (fh = H5HF_open(f, H5P_DATASET_XFER_DEFAULT, fh_addr)))
+ FAIL_STACK_ERROR
+
+ /* Query the heap creation parameters */
+ HDmemset(&test_cparam, 0, sizeof(H5HF_create_t));
+ if(H5HF_get_cparam_test(fh, &test_cparam) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_cmp_cparam_test(&tmp_cparam, &test_cparam))
+ FAIL_STACK_ERROR
+
+/* XXX: Check heap's I/O filter settings? */
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, H5P_DATASET_XFER_DEFAULT) < 0)
+ FAIL_STACK_ERROR
+
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Release the I/O pipeline filter information */
+ H5O_reset(H5O_PLINE_ID, &tmp_cparam.pline);
+ H5O_reset(H5O_PLINE_ID, &test_cparam.pline);
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_filtered_create() */
+
#ifdef ALL_INSERT_TESTS
/*-------------------------------------------------------------------------
@@ -10894,6 +11364,7 @@ test_huge_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char *heap_id = NULL; /* Heap ID for object */
size_t obj_size; /* Size of object */
size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert one huge object, then remove %s"; /* Test description */
@@ -10919,6 +11390,10 @@ test_huge_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11043,6 +11518,7 @@ test_huge_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char *heap_id2 = NULL; /* Heap ID for second object */
size_t obj_size; /* Size of object */
size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert two huge objects, then remove %s"; /* Test description */
@@ -11070,6 +11546,10 @@ test_huge_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11096,6 +11576,10 @@ test_huge_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id2) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id2, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11268,6 +11752,7 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
unsigned char *heap_id3 = NULL; /* Heap ID for third object */
size_t obj_size; /* Size of object */
size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert three huge objects, then remove %s"; /* Test description */
@@ -11297,6 +11782,10 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
obj_size = SMALL_STAND_SIZE + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11323,6 +11812,10 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
obj_size = SMALL_STAND_SIZE + 2;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id2) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id2, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11349,6 +11842,10 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
obj_size = SMALL_STAND_SIZE + 3;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id3) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id3, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11565,6 +12062,7 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char *heap_id5 = NULL; /* Heap ID for fifth object */
size_t obj_size; /* Size of object */
size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
const char *base_desc = "insert mix of normal & huge objects, then remove %s"; /* Test description */
@@ -11598,6 +12096,10 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11624,6 +12126,10 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 2;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id2) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id2, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11650,6 +12156,10 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = SMALL_STAND_SIZE + 3;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id3) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id3, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11676,6 +12186,10 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = DBLOCK_SIZE(fh, 0) + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id4) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id4, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_MAN)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11709,6 +12223,10 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
obj_size = DBLOCK_SIZE(fh, 3) + 1;
if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id5) < 0)
FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id5, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_MAN)
+ TEST_ERROR
/* Check for closing & re-opening the heap */
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
@@ -11823,7 +12341,15 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
TEST_ERROR
+ /* Reset 'managed' object statistics after they are all removed */
+ state.man_nobjs = 0;
+ state.man_size = 0;
+ state.man_alloc_size = 0;
+ state.man_free_space = 0;
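+ /* (Removing the last 'managed' object is expected to release the heap's
+ *  managed blocks entirely, so these statistics drop to zero rather than
+ *  merely decreasing.) */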
+
/* Remove third object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id3, &robj_size) < 0)
+ FAIL_STACK_ERROR
if(H5HF_remove(fh, dxpl, heap_id3) < 0)
FAIL_STACK_ERROR
@@ -11831,7 +12357,15 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
TEST_ERROR
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
/* Remove second object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id2, &robj_size) < 0)
+ FAIL_STACK_ERROR
if(H5HF_remove(fh, dxpl, heap_id2) < 0)
FAIL_STACK_ERROR
@@ -11839,6 +12373,613 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
TEST_ERROR
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove first object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+ } /* end else */
+
+ /* Check up on heap... */
+ HDmemset(&state, 0, sizeof(fheap_heap_state_t));
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Check for deleting the entire heap */
+ if(tparam->del_dir == FHEAP_DEL_HEAP) {
+ /* Delete heap */
+ if(H5HF_delete(f, dxpl, fh_addr) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename)) == 0)
+ TEST_ERROR
+#ifdef QAK
+HDfprintf(stderr, "file_size = %lu\n", (unsigned long)file_size);
+#endif /* QAK */
+
+ /* Verify the file is correct size */
+ if(file_size != empty_size)
+ TEST_ERROR
+
+ /* Free resources */
+ H5MM_xfree(heap_id);
+ H5MM_xfree(heap_id2);
+ H5MM_xfree(heap_id3);
+ H5MM_xfree(heap_id4);
+ H5MM_xfree(heap_id5);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5MM_xfree(heap_id);
+ H5MM_xfree(heap_id2);
+ H5MM_xfree(heap_id3);
+ H5MM_xfree(heap_id4);
+ H5MM_xfree(heap_id5);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_huge_insert_mix() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_filtered_huge
+ *
+ * Purpose: Test storing 'huge' object in a heap with I/O filters
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, August 15, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_filtered_huge(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ H5HF_create_t tmp_cparam; /* Local heap creation parameters */
+ fheap_heap_ids_t keep_ids; /* Structure to retain heap IDs */
+ size_t id_len; /* Size of fractal heap IDs */
+ off_t empty_size; /* Size of a file with an empty heap */
+ off_t file_size; /* Size of file currently */
+ unsigned char *heap_id = NULL; /* Heap ID for object */
+ size_t obj_size; /* Size of object */
+ size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
+ fheap_heap_state_t state; /* State of fractal heap */
+ unsigned deflate_level; /* Deflation level */
+ unsigned old_actual_id_len; /* Old actual ID length */
+ hbool_t huge_ids_direct; /* Are 'huge' objects directly accessed? */
+ const char *base_desc = "insert 'huge' object into heap with I/O filters, then remove %s"; /* Test description */
+
+ /* Copy heap creation properties */
+ HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
+
+ /* Set an I/O filter for heap data */
+ deflate_level = 6;
+ if(H5Z_append(&tmp_cparam.pline, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &deflate_level) < 0)
+ FAIL_STACK_ERROR
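+ /* (This appends a level-6 deflate (gzip) filter to the heap's I/O pipeline;
+ *  the filter is marked optional, so writes may proceed even if it fails.) */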
+
+ /* Adjust actual ID length, if asking for IDs that can directly access 'huge' objects */
+ if(cparam->id_len == 1) {
+ old_actual_id_len = tparam->actual_id_len;
+ tparam->actual_id_len = 29; /* 1 + 8 (file address size) + 8 (file length size) + 4 (filter mask length) + 8 (object length size) */
+ } /* end if */
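+ /* (Illustrative layout, following the breakdown in the comment above, for a
+ *  heap ID that directly addresses a filtered 'huge' object:
+ *
+ *      byte  0       : flag byte (ID version + H5HF_ID_TYPE_HUGE)
+ *      bytes  1 -  8 : file address of the filtered object
+ *      bytes  9 - 16 : length of the filtered object in the file
+ *      bytes 17 - 20 : I/O filter mask
+ *      bytes 21 - 28 : original (unfiltered) object length
+ *
+ *  for a total of 29 bytes.)
+ */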
+
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, &tmp_cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
+ /* Perform common test initialization operations */
+ if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
+ TEST_ERROR
+
+
+ /* Allocate heap ID(s) */
+ if(NULL == (heap_id = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+
+ /* Make certain that 'huge' object's heap IDs are correct form */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != tparam->actual_id_len)
+ TEST_ERROR
+ if(H5HF_get_huge_info_test(fh, NULL, &huge_ids_direct) < 0)
+ FAIL_STACK_ERROR
+ if(cparam->id_len == 1) {
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+ } /* end if */
+ else
+ if(tparam->actual_id_len >= 29) {
+ if(huge_ids_direct != TRUE)
+ TEST_ERROR
+ } /* end if */
+ else {
+ if(huge_ids_direct != FALSE)
+ TEST_ERROR
+ } /* end else */
+
+ /* Insert object too large for managed heap blocks */
+ obj_size = SMALL_STAND_SIZE + 1;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
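+ /* (With a filter pipeline set on the heap, this 'huge' object should pass
+ *  through the deflate filter before being written, and its heap ID should be
+ *  tracked by one of the "filtered" 'huge' object v2 B-tree classes.) */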
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+/* QAK */
+#ifdef QAK
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Re-open the heap */
+ if(NULL == (fh = H5HF_open(f, H5P_DATASET_XFER_DEFAULT, fh_addr)))
+ FAIL_STACK_ERROR
+#endif /* QAK */
+/* QAK */
+
+ /* Check up on heap... */
+ state.huge_size = obj_size;
+ state.huge_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in huge object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Delete individual objects, if we won't be deleting the entire heap later */
+ if(tparam->del_dir != FHEAP_DEL_HEAP) {
+ /* Remove object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size = 0;
+ state.huge_nobjs = 0;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Check for deleting the entire heap */
+ if(tparam->del_dir == FHEAP_DEL_HEAP) {
+ /* Delete heap */
+ if(H5HF_delete(f, dxpl, fh_addr) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename)) == 0)
+ TEST_ERROR
+#ifdef QAK
+HDfprintf(stderr, "empty_size = %lu, file_size = %lu\n", (unsigned long)empty_size, (unsigned long)file_size);
+#endif /* QAK */
+
+ /* Verify the file is correct size */
+ if(file_size != empty_size)
+ TEST_ERROR
+
+ /* Reset actual ID length, if asking for IDs that can directly access 'huge' objects */
+ if(cparam->id_len == 1)
+ tparam->actual_id_len = old_actual_id_len;
+
+ /* Free resources */
+ H5O_reset(H5O_PLINE_ID, &tmp_cparam.pline); /* Release the I/O pipeline filter information */
+ H5MM_xfree(heap_id);
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5MM_xfree(heap_id);
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_filtered_huge() */
+#endif /* QAK */
+
+#ifndef QAK
+
+/*-------------------------------------------------------------------------
+ * Function: test_tiny_insert_one
+ *
+ * Purpose: Test inserting one tiny object in the heap
+ *
+ * Then, remove all the objects, in various ways
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_tiny_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ fheap_heap_ids_t keep_ids; /* Structure to retain heap IDs */
+ size_t id_len; /* Size of fractal heap IDs */
+ off_t empty_size; /* Size of a file with an empty heap */
+ off_t file_size; /* Size of file currently */
+ unsigned char *heap_id = NULL; /* Heap ID for object */
+ size_t obj_size; /* Size of object */
+ size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
+ fheap_heap_state_t state; /* State of fractal heap */
+ const char *base_desc = "insert one tiny object, then remove %s"; /* Test description */
+
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
+ /* Perform common test initialization operations */
+ if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
+ TEST_ERROR
+
+ /* Allocate heap ID(s) */
+ if(NULL == (heap_id = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+
+ /* Make certain that 'tiny' object's heap IDs are correct size */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != tparam->actual_id_len)
+ TEST_ERROR
+
+ /* Insert object small enough to encode in heap ID */
+ obj_size = tparam->actual_id_len - 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_TINY)
+ TEST_ERROR
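+ /* (Illustrative sketch, not part of this change: a 'tiny' object is assumed
+ *  to be serialized directly into the heap ID itself, roughly
+ *
+ *      flag byte | encoded length | object data ...
+ *
+ *  which is why the test leaves two bytes of the ID for overhead and why no
+ *  heap blocks are allocated for it below.)
+ */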
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = obj_size;
+ state.tiny_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in tiny object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Delete individual objects, if we won't be deleting the entire heap later */
+ if(tparam->del_dir != FHEAP_DEL_HEAP) {
+ /* Remove object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = 0;
+ state.tiny_nobjs = 0;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Check for deleting the entire heap */
+ if(tparam->del_dir == FHEAP_DEL_HEAP) {
+ /* Delete heap */
+ if(H5HF_delete(f, dxpl, fh_addr) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename)) == 0)
+ TEST_ERROR
+#ifdef QAK
+HDfprintf(stderr, "file_size = %lu\n", (unsigned long)file_size);
+#endif /* QAK */
+
+ /* Verify the file is correct size */
+ if(file_size != empty_size)
+ TEST_ERROR
+
+ /* Free resources */
+ H5MM_xfree(heap_id);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5MM_xfree(heap_id);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_tiny_insert_one() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_tiny_insert_two
+ *
+ * Purpose: Test inserting two tiny objects in the heap
+ *
+ * Then, remove all the objects, in various ways
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_tiny_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ fheap_heap_ids_t keep_ids; /* Structure to retain heap IDs */
+ size_t id_len; /* Size of fractal heap IDs */
+ off_t empty_size; /* Size of a file with an empty heap */
+ off_t file_size; /* Size of file currently */
+ unsigned char *heap_id = NULL; /* Heap ID for first object */
+ unsigned char *heap_id2 = NULL; /* Heap ID for second object */
+ size_t obj_size; /* Size of object */
+ size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
+ fheap_heap_state_t state; /* State of fractal heap */
+ const char *base_desc = "insert two tiny objects, then remove %s"; /* Test description */
+
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
+ /* Perform common test initialization operations */
+ if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
+ TEST_ERROR
+
+ /* Allocate heap ID(s) */
+ if(NULL == (heap_id = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id2 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+
+ /* Make certain that 'tiny' object's heap IDs are correct size */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != tparam->actual_id_len)
+ TEST_ERROR
+
+ /* Insert object small enough to encode in heap ID */
+ obj_size = tparam->actual_id_len - 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_TINY)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = obj_size;
+ state.tiny_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in tiny object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert second object small enough to encode in heap ID */
+ obj_size = tparam->actual_id_len - 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id2) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id2, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_TINY)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size += obj_size;
+ state.tiny_nobjs = 2;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in second tiny object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id2, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id2, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Delete individual objects, if we won't be deleting the entire heap later */
+ if(tparam->del_dir != FHEAP_DEL_HEAP) {
+ if(tparam->del_dir == FHEAP_DEL_FORWARD) {
+ /* Remove first object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = obj_size;
+ state.tiny_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove second object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = 0;
+ state.tiny_nobjs = 0;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+ else {
+ /* Remove second object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = obj_size;
+ state.tiny_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
/* Remove first object from heap */
if(H5HF_remove(fh, dxpl, heap_id) < 0)
FAIL_STACK_ERROR
@@ -11848,7 +12989,8 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
TEST_ERROR
/* Check up on heap... */
- HDmemset(&state, 0, sizeof(fheap_heap_state_t));
+ state.tiny_size = 0;
+ state.tiny_nobjs = 0;
if(check_stats(fh, &state))
TEST_ERROR
} /* end else */
@@ -11885,9 +13027,563 @@ HDfprintf(stderr, "file_size = %lu\n", (unsigned long)file_size);
/* Free resources */
H5MM_xfree(heap_id);
H5MM_xfree(heap_id2);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5MM_xfree(heap_id);
+ H5MM_xfree(heap_id2);
+ H5MM_xfree(keep_ids.ids);
+ H5MM_xfree(keep_ids.lens);
+ H5MM_xfree(keep_ids.offs);
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_tiny_insert_two() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_tiny_insert_mix
+ *
+ * Purpose: Test inserting a mix of 'normal', 'huge' & 'tiny' objects in
+ * the heap
+ *
+ * Then, remove all the objects, in various ways
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_tiny_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ fheap_heap_ids_t keep_ids; /* Structure to retain heap IDs */
+ size_t id_len; /* Size of fractal heap IDs */
+ off_t empty_size; /* Size of a file with an empty heap */
+ off_t file_size; /* Size of file currently */
+ unsigned char *heap_id = NULL; /* Heap ID for first object */
+ unsigned char *heap_id2 = NULL; /* Heap ID for second object */
+ unsigned char *heap_id3 = NULL; /* Heap ID for third object */
+ unsigned char *heap_id4 = NULL; /* Heap ID for fourth object */
+ unsigned char *heap_id5 = NULL; /* Heap ID for fifth object */
+ unsigned char *heap_id6 = NULL; /* Heap ID for sixth object */
+ unsigned char *heap_id7 = NULL; /* Heap ID for seventh object */
+ size_t obj_size; /* Size of object */
+ size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
+ fheap_heap_state_t state; /* State of fractal heap */
+ const char *base_desc = "insert mix of normal, huge & tiny objects, then remove %s"; /* Test description */
+
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
+ /* Perform common test initialization operations */
+ if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
+ TEST_ERROR
+
+ /* Allocate heap ID(s) */
+ if(NULL == (heap_id = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id2 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id3 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id4 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id5 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id6 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+ if(NULL == (heap_id7 = H5MM_malloc(tparam->actual_id_len)))
+ TEST_ERROR
+
+ /* Make certain that 'tiny' object's heap IDs are correct size */
+ if(H5HF_get_id_len(fh, &id_len) < 0)
+ FAIL_STACK_ERROR
+ if(id_len != tparam->actual_id_len)
+ TEST_ERROR
+
+ /* Insert first object too large for managed heap blocks */
+ obj_size = SMALL_STAND_SIZE + 1;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size += obj_size;
+ state.huge_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in first huge object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert second object too large for managed heap blocks */
+ obj_size = SMALL_STAND_SIZE + 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id2) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id2, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size += obj_size;
+ state.huge_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in second huge object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id2, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id2, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert third object too large for managed heap blocks */
+ obj_size = SMALL_STAND_SIZE + 3;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id3) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id3, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_HUGE)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size += obj_size;
+ state.huge_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in third huge object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id3, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id3, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert fourth object small enough to fit into 'normal' heap blocks */
+ obj_size = DBLOCK_SIZE(fh, 0) + 1;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id4) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id4, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_MAN)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.man_size = cparam->managed.width * DBLOCK_SIZE(fh, 0);
+ state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 1);
+ state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 2);
+ state.man_alloc_size = DBLOCK_SIZE(fh, 2);
+ state.man_free_space = cparam->managed.width * DBLOCK_FREE(fh, 0);
+ state.man_free_space += cparam->managed.width * DBLOCK_FREE(fh, 1);
+ state.man_free_space += DBLOCK_FREE(fh, 2) - obj_size;
+ state.man_free_space += (cparam->managed.width - 1) * DBLOCK_FREE(fh, 2);
+ state.man_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
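+ /* (The accounting above covers every direct block in rows 0-2 of the doubling
+ *  table: each row contributes 'width' blocks of heap size and free space,
+ *  while only the single block actually holding the object counts toward
+ *  man_alloc_size and loses obj_size bytes of free space.) */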
+
+ /* Read in fourth ('normal') object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id4, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id4, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert fifth object small enough to fit into 'normal' heap blocks */
+ obj_size = DBLOCK_SIZE(fh, 3) + 1;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id5) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id5, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_MAN)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ /* (account for doubling of root indirect block) */
+ state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 3);
+ state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 4);
+ state.man_size += cparam->managed.width * DBLOCK_SIZE(fh, 5);
+ state.man_alloc_size += DBLOCK_SIZE(fh, 4);
+ state.man_free_space += cparam->managed.width * DBLOCK_FREE(fh, 3);
+ state.man_free_space += DBLOCK_FREE(fh, 4) - obj_size;
+ state.man_free_space += (cparam->managed.width - 1) * DBLOCK_FREE(fh, 4);
+ state.man_free_space += cparam->managed.width * DBLOCK_FREE(fh, 5);
+ state.man_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in fifth ('normal') object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id5, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id5, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+
+ /* Insert sixth object small enough to encode in heap ID */
+ obj_size = tparam->actual_id_len - 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id6) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id6, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_TINY)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size = obj_size;
+ state.tiny_nobjs = 1;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in tiny object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id6, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id6, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Insert seventh object small enough to encode in heap ID */
+ obj_size = tparam->actual_id_len - 2;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id7) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id7, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_TINY)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size += obj_size;
+ state.tiny_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in tiny object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id7, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id7, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Delete individual objects, if we won't be deleting the entire heap later */
+ if(tparam->del_dir != FHEAP_DEL_HEAP) {
+ if(tparam->del_dir == FHEAP_DEL_FORWARD) {
+ /* Remove first object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove second object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id2, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove third object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id3, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id3) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove fourth ('normal') object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id4) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Remove fifth ('normal') object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id5) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Reset 'managed' object statistics after they are all removed */
+ state.man_nobjs = 0;
+ state.man_size = 0;
+ state.man_alloc_size = 0;
+ state.man_free_space = 0;
+
+ /* Remove sixth object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id6, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id6) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size -= robj_size;
+ state.tiny_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove seventh object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id7) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+ } /* end if */
+ else {
+ /* Remove seventh object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id7, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id7) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size -= robj_size;
+ state.tiny_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove sixth object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id6, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id6) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.tiny_size -= robj_size;
+ state.tiny_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove fifth ('normal') object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id5) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Remove fourth ('normal') object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id4) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Reset 'managed' object statistics after they are all removed */
+ state.man_nobjs = 0;
+ state.man_size = 0;
+ state.man_alloc_size = 0;
+ state.man_free_space = 0;
+
+ /* Remove third object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id3, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id3) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove second object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id2, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ state.huge_size -= robj_size;
+ state.huge_nobjs--;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Remove first object from heap */
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+ } /* end else */
+
+ /* Check up on heap... */
+ HDmemset(&state, 0, sizeof(fheap_heap_state_t));
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Check for deleting the entire heap */
+ if(tparam->del_dir == FHEAP_DEL_HEAP) {
+ /* Delete heap */
+ if(H5HF_delete(f, dxpl, fh_addr) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename)) == 0)
+ TEST_ERROR
+#ifdef QAK
+HDfprintf(stderr, "file_size = %lu\n", (unsigned long)file_size);
+#endif /* QAK */
+
+ /* Verify the file is correct size */
+ if(file_size != empty_size)
+ TEST_ERROR
+
+ /* Free resources */
+ H5MM_xfree(heap_id);
+ H5MM_xfree(heap_id2);
H5MM_xfree(heap_id3);
H5MM_xfree(heap_id4);
H5MM_xfree(heap_id5);
+ H5MM_xfree(heap_id6);
+ H5MM_xfree(heap_id7);
H5MM_xfree(keep_ids.ids);
H5MM_xfree(keep_ids.lens);
H5MM_xfree(keep_ids.offs);
@@ -11904,6 +13600,8 @@ error:
H5MM_xfree(heap_id3);
H5MM_xfree(heap_id4);
H5MM_xfree(heap_id5);
+ H5MM_xfree(heap_id6);
+ H5MM_xfree(heap_id7);
H5MM_xfree(keep_ids.ids);
H5MM_xfree(keep_ids.lens);
H5MM_xfree(keep_ids.offs);
@@ -11912,14 +13610,192 @@ error:
H5Fclose(file);
} H5E_END_TRY;
return(1);
-} /* test_huge_insert_mix() */
+} /* test_tiny_insert_mix() */
#endif /* QAK */
#endif /* QAK2 */
+#ifdef NOT_YET
+
+/*-------------------------------------------------------------------------
+ * Function: test_filtered_man_one
+ *
+ * Purpose: Test storing one 'managed' object in a heap with I/O filters
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Quincey Koziol
+ * Monday, August 14, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_filtered_man_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+{
+ hid_t file = -1; /* File ID */
+ hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
+ char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5HF_t *fh = NULL; /* Fractal heap wrapper */
+ haddr_t fh_addr; /* Address of fractal heap */
+ H5HF_create_t tmp_cparam; /* Local heap creation parameters */
+ fheap_heap_ids_t keep_ids; /* Structure to retain heap IDs */
+ off_t empty_size; /* Size of a file with an empty heap */
+ off_t file_size; /* Size of file currently */
+ unsigned char heap_id[HEAP_ID_LEN]; /* Heap ID for object */
+ size_t obj_size; /* Size of object */
+ size_t robj_size; /* Size of object read */
+ unsigned char obj_type; /* Type of storage for object */
+ fheap_heap_state_t state; /* State of fractal heap */
+ unsigned deflate_level; /* Deflation level */
+ const char *base_desc = "insert one 'managed' object into heap with I/O filters, then remove %s"; /* Test description */
+
+ /* Copy heap creation properties */
+ HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
+
+ /* Set an I/O filter for heap data */
+ deflate_level = 6;
+ if(H5Z_append(&tmp_cparam.pline, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &deflate_level) < 0)
+ FAIL_STACK_ERROR
+
+ /* Perform common file & heap open operations */
+ if(open_heap(filename, fapl, dxpl, &tmp_cparam, tparam, &file, &f, &fh, &fh_addr, &state, &empty_size) < 0)
+ TEST_ERROR
+
+ /* Perform common test initialization operations */
+ if(begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
+ TEST_ERROR
+
+
+ /* Insert object small enough to fit into 'normal' heap blocks */
+ obj_size = DBLOCK_SIZE(fh, 0) + 1;
+ if(H5HF_insert(fh, dxpl, obj_size, shared_wobj_g, heap_id) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_get_id_type_test(fh, heap_id, &obj_type) < 0)
+ FAIL_STACK_ERROR
+ if(obj_type != H5HF_ID_TYPE_MAN)
+ TEST_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+/* QAK */
+#ifdef QAK
+ /* Close the fractal heap */
+ if(H5HF_close(fh, dxpl) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Re-open the heap */
+ if(NULL == (fh = H5HF_open(f, H5P_DATASET_XFER_DEFAULT, fh_addr)))
+ FAIL_STACK_ERROR
+#endif /* QAK */
+/* QAK */
+
+ /* Check up on heap... */
+ state.man_size = tmp_cparam.managed.width * DBLOCK_SIZE(fh, 0);
+ state.man_size += tmp_cparam.managed.width * DBLOCK_SIZE(fh, 1);
+ state.man_size += tmp_cparam.managed.width * DBLOCK_SIZE(fh, 2);
+ state.man_alloc_size = DBLOCK_SIZE(fh, 2);
+ state.man_free_space = tmp_cparam.managed.width * DBLOCK_FREE(fh, 0);
+ state.man_free_space += tmp_cparam.managed.width * DBLOCK_FREE(fh, 1);
+ state.man_free_space += DBLOCK_FREE(fh, 2) - obj_size;
+ state.man_free_space += (tmp_cparam.managed.width - 1) * DBLOCK_FREE(fh, 2);
+ state.man_nobjs++;
+ if(check_stats(fh, &state))
+ TEST_ERROR
+
+ /* Read in ('normal') object */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(obj_size != robj_size)
+ TEST_ERROR
+ HDmemset(shared_robj_g, 0, obj_size);
+ if(H5HF_read(fh, dxpl, heap_id, shared_robj_g) < 0)
+ FAIL_STACK_ERROR
+ if(HDmemcmp(shared_wobj_g, shared_robj_g, obj_size))
+ TEST_ERROR
+
+ /* Delete individual objects, if we won't be deleting the entire heap later */
+ if(tparam->del_dir != FHEAP_DEL_HEAP) {
+ /* Remove object from heap */
+ if(H5HF_get_obj_len(fh, dxpl, heap_id, &robj_size) < 0)
+ FAIL_STACK_ERROR
+ if(H5HF_remove(fh, dxpl, heap_id) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check for closing & re-opening the heap */
+ if(reopen_heap(f, dxpl, &fh, fh_addr, tparam) < 0)
+ TEST_ERROR
+
+ /* Check up on heap... */
+ HDmemset(&state, 0, sizeof(fheap_heap_state_t));
+ if(check_stats(fh, &state))
+ TEST_ERROR
+ } /* end if */
+
+ /* Close the fractal heap */
+ if(H5HF_close(fh, H5P_DATASET_XFER_DEFAULT) < 0)
+ FAIL_STACK_ERROR
+ fh = NULL;
+
+ /* Check for deleting the entire heap */
+ if(tparam->del_dir == FHEAP_DEL_HEAP) {
+ /* Delete heap */
+ if(H5HF_delete(f, dxpl, fh_addr) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename)) == 0)
+ TEST_ERROR
+#ifdef QAK
+HDfprintf(stderr, "empty_size = %lu, file_size = %lu\n", (unsigned long)empty_size, (unsigned long)file_size);
+#endif /* QAK */
+
+ /* Verify the file is correct size */
+ if(file_size != empty_size)
+ TEST_ERROR
+
+ /* Free resources */
+ H5O_reset(H5O_PLINE_ID, &tmp_cparam.pline); /* Release the I/O pipeline filter information */
+
+ /* All tests passed */
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ if(fh)
+ H5HF_close(fh, dxpl);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_filtered_man_one() */
+#endif /* NOT_YET */
+
#ifndef QAK
/*-------------------------------------------------------------------------
- * Function: test_man_random
+ * Function: test_random
*
* Purpose: Test inserting random sized objects into a heap, and read
* them back.
@@ -11936,7 +13812,7 @@ error:
*-------------------------------------------------------------------------
*/
static int
-test_man_random(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+test_random(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
@@ -12085,11 +13961,11 @@ error:
H5Fclose(file);
} H5E_END_TRY;
return(1);
-} /* test_man_random() */
+} /* test_random() */
/*-------------------------------------------------------------------------
- * Function: test_man_random_pow2
+ * Function: test_random_pow2
*
* Purpose: Test inserting random sized objects with a "power of 2
* distribution" (which favors small objects) into a heap,
@@ -12107,7 +13983,7 @@ error:
*-------------------------------------------------------------------------
*/
static int
-test_man_random_pow2(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
+test_random_pow2(hsize_t size_limit, hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
hid_t dxpl = H5P_DATASET_XFER_DEFAULT; /* DXPL to use */
@@ -12268,7 +14144,7 @@ error:
H5Fclose(file);
} H5E_END_TRY;
return(1);
-} /* test_man_random_pow2() */
+} /* test_random_pow2() */
#endif /* QAK */
@@ -12356,6 +14232,8 @@ curr_test = FHEAP_TEST_REOPEN;
/* Test fractal heap creation */
nerrors += test_create(fapl, &cparam, &tparam);
nerrors += test_reopen(fapl, &cparam, &tparam);
+ nerrors += test_id_limits(fapl, &cparam);
+ nerrors += test_filtered_create(fapl, &cparam);
#ifndef QAK2
#ifndef QAK
@@ -12562,15 +14440,15 @@ HDfprintf(stderr, "Uncomment tests!\n");
#endif /* QAK */
/*
- * Test fractal heap huge object insertion & deletion
+ * Test fractal heap 'huge' & 'tiny' object insertion & deletion
*/
#ifndef QAK
{
fheap_test_del_dir_t del_dir; /* Deletion direction */
unsigned id_len; /* Length of heap IDs */
- /* Test "normal" & "direct" storage of 'huge' heap IDs */
- for(id_len = 0; id_len < 2; id_len++) {
+ /* Test "normal" & "direct" storage of 'huge' & 'tiny' heap IDs */
+ for(id_len = 0; id_len < 3; id_len++) {
/* Set the ID length for this test */
cparam.id_len = id_len;
@@ -12586,7 +14464,14 @@ HDfprintf(stderr, "Uncomment tests!\n");
puts("Using 'direct' heap ID format for 'huge' objects");
/* Adjust actual length of heap IDs for directly storing 'huge' object's file offset & length in heap ID */
- tparam.actual_id_len = 17; /* 1 + 8 (address size) + 8 (length size) */
+ tparam.actual_id_len = 17; /* 1 + 8 (file address size) + 8 (file length size) */
+ break;
+
+ /* Use "direct" storage for 'huge' objects and larger IDs for 'tiny' objects */
+ case 2:
+ cparam.id_len = 37;
+ puts("Using 'direct' heap ID format for 'huge' objects and larger IDs for 'tiny' objects");
+ tparam.actual_id_len = 37;
break;
/* An unknown test? */
@@ -12594,14 +14479,25 @@ HDfprintf(stderr, "Uncomment tests!\n");
goto error;
} /* end switch */
- /* Try several different methods of deleting 'huge' objects */
+ /* Try several different methods of deleting objects */
for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; del_dir++) {
tparam.del_dir = del_dir;
+ /* Test 'huge' object insert & delete */
nerrors += test_huge_insert_one(fapl, &cparam, &tparam);
nerrors += test_huge_insert_two(fapl, &cparam, &tparam);
nerrors += test_huge_insert_three(fapl, &cparam, &tparam);
nerrors += test_huge_insert_mix(fapl, &cparam, &tparam);
+ nerrors += test_filtered_huge(fapl, &cparam, &tparam);
+
+#ifndef QAK
+ /* Test 'tiny' object insert & delete */
+ nerrors += test_tiny_insert_one(fapl, &cparam, &tparam);
+ nerrors += test_tiny_insert_two(fapl, &cparam, &tparam);
+ nerrors += test_tiny_insert_mix(fapl, &cparam, &tparam);
+#else /* QAK */
+HDfprintf(stderr, "Uncomment tests!\n");
+#endif /* QAK */
} /* end for */
} /* end for */
@@ -12616,6 +14512,12 @@ HDfprintf(stderr, "Uncomment tests!\n");
HDfprintf(stderr, "Uncomment tests!\n");
#endif /* QAK2 */
+ /* Test I/O filter support */
+#ifdef NOT_YET
+ /* This test isn't working properly yet */
+ nerrors += test_filtered_man_one(fapl, &cparam, &tparam);
+#endif /* NOT_YET */
+
#ifndef QAK
/* Random object insertion & deletion */
if (ExpressMode > 1)
@@ -12624,12 +14526,12 @@ HDfprintf(stderr, "Uncomment tests!\n");
/* (reduce size of tests when re-opening each time) */
/* XXX: Try to speed things up enough that these tests don't have to be reduced */
tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_man_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &cparam, &tparam);
- nerrors += test_man_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &cparam, &tparam);
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &cparam, &tparam);
tparam.del_dir = FHEAP_DEL_HEAP;
- nerrors += test_man_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &cparam, &tparam);
- nerrors += test_man_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &cparam, &tparam);
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &cparam, &tparam);
} /* end else */
#else /* QAK */
HDfprintf(stderr, "Uncomment tests!\n");
diff --git a/tools/misc/h5debug.c b/tools/misc/h5debug.c
index 5cb47e2..bbeb1c2 100644
--- a/tools/misc/h5debug.c
+++ b/tools/misc/h5debug.c
@@ -214,6 +214,22 @@ main(int argc, char *argv[])
status = H5B2_hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5B2_TEST);
break;
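+ /* v2 B-tree classes used by fractal heaps to track 'huge' objects */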
+ case H5B2_FHEAP_HUGE_INDIR_ID:
+ status = H5B2_hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_INDIR);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_INDIR_ID:
+ status = H5B2_hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_INDIR);
+ break;
+
+ case H5B2_FHEAP_HUGE_DIR_ID:
+ status = H5B2_hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_DIR);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_DIR_ID:
+ status = H5B2_hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_DIR);
+ break;
+
default:
fprintf(stderr, "Unknown B-tree subtype %u\n", (unsigned)(subtype));
HDexit(4);
@@ -240,6 +256,22 @@ main(int argc, char *argv[])
status = H5B2_int_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5B2_TEST, extra, (unsigned)extra2);
break;
+ case H5B2_FHEAP_HUGE_INDIR_ID:
+ status = H5B2_int_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_INDIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_INDIR_ID:
+ status = H5B2_int_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_INDIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_DIR_ID:
+ status = H5B2_int_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_DIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_DIR_ID:
+ status = H5B2_int_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_DIR, extra, (unsigned)extra2);
+ break;
+
default:
fprintf(stderr, "Unknown B-tree subtype %u\n", (unsigned)(subtype));
HDexit(4);
@@ -266,6 +298,22 @@ main(int argc, char *argv[])
status = H5B2_leaf_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5B2_TEST, extra, (unsigned)extra2);
break;
+ case H5B2_FHEAP_HUGE_INDIR_ID:
+ status = H5B2_leaf_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_INDIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_INDIR_ID:
+ status = H5B2_leaf_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_INDIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_DIR_ID:
+ status = H5B2_leaf_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_DIR, extra, (unsigned)extra2);
+ break;
+
+ case H5B2_FHEAP_HUGE_FILT_DIR_ID:
+ status = H5B2_leaf_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, H5HF_BT2_FILT_DIR, extra, (unsigned)extra2);
+ break;
+
default:
fprintf(stderr, "Unknown B-tree subtype %u\n", (unsigned)(subtype));
HDexit(4);