Diffstat (limited to 'test')
-rw-r--r--  test/CMakeLists.txt    |   1
-rw-r--r--  test/CMakeTests.cmake  |   2
-rw-r--r--  test/Makefile.am       |   2
-rw-r--r--  test/cmpd_dset.c       |  53
-rw-r--r--  test/dsets.c           |  92
-rw-r--r--  test/dt_arith.c        |  16
-rw-r--r--  test/evict_on_close.c  | 779
-rw-r--r--  test/gheap.c           |  84
-rw-r--r--  test/h5test.c          |  34
-rw-r--r--  test/tattr.c           |  44
-rw-r--r--  test/tchecksum.c       |  23
-rw-r--r--  test/th5o.c            |  32
-rw-r--r--  test/theap.c           |  47
-rw-r--r--  test/titerate.c        |  13
-rw-r--r--  test/tskiplist.c       |  51
-rw-r--r--  test/tvltypes.c        |  12
16 files changed, 1169 insertions, 116 deletions
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 1ab4165..6e6a0b9 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -204,6 +204,7 @@ set (H5_TESTS
ohdr
stab
gheap
+ evict_on_close
farray
earray
btree2
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index ca890ba..1e7940a 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -513,6 +513,7 @@ set (H5TEST_TESTS
ohdr
stab
gheap
+ evict_on_close
farray
earray
btree2
@@ -865,6 +866,7 @@ if (HDF5_TEST_VFD)
ohdr
stab
gheap
+ evict_on_close
pool
# accum
farray
diff --git a/test/Makefile.am b/test/Makefile.am
index 9e0f9c5..7d062f4 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -42,7 +42,7 @@ check_SCRIPTS = $(TEST_SCRIPT)
# As an exception, long-running tests should occur earlier in the list.
# This gives them more time to run when tests are executing in parallel.
TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \
- farray earray btree2 fheap \
+ evict_on_close farray earray btree2 fheap \
pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
big mtime fillval mount flush1 flush2 app_ref enum \
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index 311b9bb..a7f3902 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -154,30 +154,28 @@ static unsigned
test_compound (char *filename, hid_t fapl)
{
/* First dataset */
- static s1_t s1[NX*NY];
+ s1_t *s1 = NULL;
hid_t s1_tid;
/* Second dataset */
- static s2_t s2[NX*NY];
+ s2_t *s2 = NULL;
hid_t s2_tid;
/* Third dataset */
- static s3_t s3[NX*NY];
+ s3_t *s3 = NULL;
hid_t s3_tid;
/* Fourth dataset */
- static s4_t s4[NX*NY];
+ s4_t *s4 = NULL;
hid_t s4_tid;
/* Fifth dataset */
- static s5_t s5[NX*NY];
+ s5_t *s5 = NULL;
hid_t s5_tid;
- static s6_t s6[NX*NY];
- hid_t s6_tid;
-
-
/* Sixth dataset */
+ s6_t *s6 = NULL;
+ hid_t s6_tid;
/* Seventh dataset */
hid_t s7_sid;
@@ -204,6 +202,20 @@ test_compound (char *filename, hid_t fapl)
hsize_t memb_size[1] = {4};
int ret_code;
+ /* Allocate buffers for datasets */
+ if(NULL == (s1 = (s1_t *)HDmalloc(sizeof(s1_t) * NX * NY)))
+ goto error;
+ if(NULL == (s2 = (s2_t *)HDmalloc(sizeof(s2_t) * NX * NY)))
+ goto error;
+ if(NULL == (s3 = (s3_t *)HDmalloc(sizeof(s3_t) * NX * NY)))
+ goto error;
+ if(NULL == (s4 = (s4_t *)HDmalloc(sizeof(s4_t) * NX * NY)))
+ goto error;
+ if(NULL == (s5 = (s5_t *)HDmalloc(sizeof(s5_t) * NX * NY)))
+ goto error;
+ if(NULL == (s6 = (s6_t *)HDmalloc(sizeof(s6_t) * NX * NY)))
+ goto error;
+
/* Create the file */
if ((file = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) {
goto error;
@@ -848,11 +860,34 @@ test_compound (char *filename, hid_t fapl)
H5Dclose (dataset);
H5Fclose (file);
+ /* Release buffers */
+ HDfree(s1);
+ HDfree(s2);
+ HDfree(s3);
+ HDfree(s4);
+ HDfree(s5);
+ HDfree(s6);
+
PASSED();
return 0;
error:
puts("*** DATASET TESTS FAILED ***");
+
+ /* Release resources */
+ if(s1)
+ HDfree(s1);
+ if(s2)
+ HDfree(s2);
+ if(s3)
+ HDfree(s3);
+ if(s4)
+ HDfree(s4);
+ if(s5)
+ HDfree(s5);
+ if(s6)
+ HDfree(s6);
+
return 1;
}
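
The cmpd_dset.c hunks above show the conversion pattern this commit applies across the test suite: large static on-stack arrays become heap buffers allocated with HDmalloc() at the top of the test and released with HDfree() on both the success and error paths. A minimal sketch of the pattern in plain C, using the standard allocator rather than the library's HD wrappers; the type, dimensions, and function name here are illustrative, not taken from the test:

    #include <stdlib.h>

    typedef struct { int a; double b; } elem_t;    /* stand-in for s1_t..s6_t */
    #define NX 100                                  /* stand-in dimensions */
    #define NY 200

    static int
    run_case(void)
    {
        elem_t *buf = NULL;                         /* was: static elem_t buf[NX * NY]; */

        if (NULL == (buf = (elem_t *)malloc(sizeof(elem_t) * NX * NY)))
            goto error;

        /* ... exercise the buffer exactly as the old stack array was used ... */

        free(buf);
        return 0;                                   /* success */

    error:
        if (buf)
            free(buf);                              /* release on the failure path too */
        return 1;
    }

The same allocate / use / free-on-both-paths shape recurs in dsets.c, gheap.c, tattr.c, and the other files below, and it keeps each test's stack frame small.
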
diff --git a/test/dsets.c b/test/dsets.c
index fe6a0c0..8aa073f 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -240,6 +240,10 @@ double points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2];
size_t count_nbytes_read = 0;
size_t count_nbytes_written = 0;
+/* Temporary buffer dimensions */
+#define DSET_TMP_DIM1 50
+#define DSET_TMP_DIM2 100
+
/* Declarations for test_idx_compatible() */
#define DSET "dset"
#define DSET_FILTER "dset_filter"
@@ -7942,11 +7946,10 @@ test_big_chunks_bypass_cache(hid_t fapl)
int fvalue = BYPASS_FILL_VALUE; /* Fill value */
hsize_t count, stride, offset, block; /* Setting for hyperslab (1-D) */
hsize_t t_count[2], t_stride[2], t_offset[2], t_block[2]; /* Setting for hyperslab (2-D) */
- /* Buffer for reading and writing data (1-D) */
- static int wdata[BYPASS_CHUNK_DIM/2], rdata1[BYPASS_DIM],
- rdata2[BYPASS_CHUNK_DIM/2];
+ /* Buffers for reading and writing data (1-D) */
+ int *wdata = NULL, *rdata1 = NULL, *rdata2 = NULL;
/* Buffer for reading and writing data (2-D) */
- static int t_wdata[BYPASS_CHUNK_DIM/2][BYPASS_CHUNK_DIM/2], t_rdata1[BYPASS_DIM][BYPASS_DIM],
+ int t_wdata[BYPASS_CHUNK_DIM/2][BYPASS_CHUNK_DIM/2], t_rdata1[BYPASS_DIM][BYPASS_DIM],
t_rdata2[BYPASS_CHUNK_DIM/2][BYPASS_CHUNK_DIM/2];
int i, j; /* Local index variables */
H5F_libver_t low; /* File format low bound */
@@ -8031,6 +8034,14 @@ test_big_chunks_bypass_cache(hid_t fapl)
if(H5Sselect_hyperslab(t_sid, H5S_SELECT_SET, t_offset, t_stride, t_count, t_block) < 0)
FAIL_STACK_ERROR
+ /* Allocate buffers */
+ if(NULL == (wdata = (int *)HDmalloc(sizeof(int) * (BYPASS_CHUNK_DIM / 2))))
+ TEST_ERROR
+ if(NULL == (rdata1 = (int *)HDmalloc(sizeof(int) * BYPASS_DIM)))
+ TEST_ERROR
+ if(NULL == (rdata2 = (int *)HDmalloc(sizeof(int) * (BYPASS_CHUNK_DIM / 2))))
+ TEST_ERROR
+
/* Initialize data to write for 1-D dataset */
for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
wdata[i] = i;
@@ -8165,6 +8176,11 @@ test_big_chunks_bypass_cache(hid_t fapl)
if(H5Pclose(fapl_local) < 0) FAIL_STACK_ERROR
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+ /* Release buffers */
+ HDfree(wdata);
+ HDfree(rdata1);
+ HDfree(rdata2);
+
PASSED();
return 0;
@@ -8179,6 +8195,12 @@ error:
H5Sclose(t_sid);
H5Fclose(fid);
} H5E_END_TRY;
+ if(wdata)
+ HDfree(wdata);
+ if(rdata1)
+ HDfree(rdata1);
+ if(rdata2)
+ HDfree(rdata2);
return -1;
} /* end test_big_chunks_bypass_cache() */
@@ -9265,9 +9287,9 @@ test_fixed_array(hid_t fapl)
hsize_t msize_big[1] = {POINTS_BIG}; /* Size of memory space for big dataset */
int wbuf[POINTS]; /* write buffer */
- int wbuf_big[POINTS_BIG]; /* write buffer for big dataset */
+ int *wbuf_big = NULL; /* write buffer for big dataset */
int rbuf[POINTS]; /* read buffer */
- int rbuf_big[POINTS_BIG]; /* read buffer for big dataset */
+ int *rbuf_big = NULL; /* read buffer for big dataset */
hsize_t chunk_dim2[2] = {4, 3}; /* Chunk dimensions */
int chunks[12][6]; /* # of chunks for dataset dimensions */
@@ -9309,6 +9331,12 @@ test_fixed_array(hid_t fapl)
if((empty_size = h5_get_file_size(filename, fapl)) < 0)
TEST_ERROR
+ /* Allocate the "big" buffers */
+ if(NULL == (wbuf_big = (int *)HDmalloc(sizeof(int) * POINTS_BIG)))
+ TEST_ERROR
+ if(NULL == (rbuf_big = (int *)HDmalloc(sizeof(int) * POINTS_BIG)))
+ TEST_ERROR
+
#ifdef H5_HAVE_FILTER_DEFLATE
/* Loop over compressing chunks */
for(compress = FALSE; compress <= TRUE; compress++) {
@@ -9567,7 +9595,7 @@ test_fixed_array(hid_t fapl)
/* Verify that written and read data are the same */
for(i = 0; i < POINTS_BIG; i++)
- if(rbuf_big[i] != wbuf_big[i]){
+ if(rbuf_big[i] != wbuf_big[i]) {
printf(" Line %d: Incorrect value, wbuf_bif[%u]=%d, rbuf_big[%u]=%d\n",
__LINE__,(unsigned)i,wbuf_big[i],(unsigned)i,rbuf_big[i]);
TEST_ERROR;
@@ -9599,6 +9627,10 @@ test_fixed_array(hid_t fapl)
} /* end for */
#endif /* H5_HAVE_FILTER_DEFLATE */
+ /* Release buffers */
+ HDfree(wbuf_big);
+ HDfree(rbuf_big);
+
PASSED();
return 0;
@@ -9610,6 +9642,10 @@ error:
H5Sclose(mem_id);
H5Fclose(fid);
} H5E_END_TRY;
+ if(wbuf_big)
+ HDfree(wbuf_big);
+ if(rbuf_big)
+ HDfree(rbuf_big);
return -1;
} /* end test_fixed_array() */
@@ -9650,11 +9686,11 @@ test_single_chunk(hid_t fapl)
hid_t sid = -1, sid_max = -1; /* Dataspace ID for dataset with fixed dimensions */
hid_t did = -1, did_max = -1; /* Dataset ID for dataset with fixed dimensions */
hsize_t dim2[2] = {DSET_DIM1, DSET_DIM2}; /* Dataset dimensions */
- hsize_t t_dim2[2] = {50, 100}; /* Dataset dimensions */
- int wbuf[DSET_DIM1*DSET_DIM2]; /* write buffer */
- int t_wbuf[50*100]; /* write buffer */
- int rbuf[DSET_DIM1*DSET_DIM2]; /* read buffer */
- int t_rbuf[50*100]; /* read buffer */
+ hsize_t t_dim2[2] = {DSET_TMP_DIM1, DSET_TMP_DIM2}; /* Dataset dimensions */
+ int *wbuf = NULL; /* write buffer */
+ int *t_wbuf = NULL; /* write buffer */
+ int *rbuf = NULL; /* read buffer */
+ int *t_rbuf = NULL; /* read buffer */
H5D_chunk_index_t idx_type; /* Dataset chunk index type */
H5F_libver_t low, high; /* File format bounds */
@@ -9686,10 +9722,20 @@ test_single_chunk(hid_t fapl)
if((empty_size = h5_get_file_size(filename, fapl)) < 0)
TEST_ERROR
+ /* Allocate the buffers */
+ if(NULL == (wbuf = (int *)HDmalloc(sizeof(int) * (DSET_DIM1 * DSET_DIM2))))
+ TEST_ERROR
+ if(NULL == (rbuf = (int *)HDmalloc(sizeof(int) * (DSET_DIM1 * DSET_DIM2))))
+ TEST_ERROR
+ if(NULL == (t_wbuf = (int *)HDmalloc(sizeof(int) * (DSET_TMP_DIM1 * DSET_TMP_DIM2))))
+ TEST_ERROR
+ if(NULL == (t_rbuf = (int *)HDmalloc(sizeof(int) * (DSET_TMP_DIM1 * DSET_TMP_DIM2))))
+ TEST_ERROR
+
for(i = n = 0; i < (DSET_DIM1 * DSET_DIM2); i++)
wbuf[i] = (int)n++;
- for(i = n = 0; i < (50* 100); i++)
+ for(i = n = 0; i < (DSET_TMP_DIM1* DSET_TMP_DIM2); i++)
t_wbuf[i] = (int)n++;
#ifdef H5_HAVE_FILTER_DEFLATE
@@ -9800,14 +9846,14 @@ test_single_chunk(hid_t fapl)
/* Open the second dataset */
if((did = H5Dopen2(fid, DSET_SINGLE_NOMAX, H5P_DEFAULT)) < 0) TEST_ERROR;
- HDmemset(rbuf, 0, sizeof(rbuf));
+ HDmemset(rbuf, 0, sizeof(int) * (DSET_DIM1 * DSET_DIM2));
/* Read from dataset */
if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, t_rbuf) < 0) TEST_ERROR;
/* Verify that written and read data are the same */
- for(i = 0; i < (50* 100); i++)
- if(t_rbuf[i] != t_wbuf[i]){
+ for(i = 0; i < (DSET_TMP_DIM1* DSET_TMP_DIM2); i++)
+ if(t_rbuf[i] != t_wbuf[i]) {
printf(" Line %d: Incorrect value, t_wbuf[%u]=%d, t_rbuf[%u]=%d\n",
__LINE__,(unsigned)i,t_wbuf[i],(unsigned)i,t_rbuf[i]);
TEST_ERROR;
@@ -9836,6 +9882,12 @@ test_single_chunk(hid_t fapl)
} /* end for */
#endif /* H5_HAVE_FILTER_DEFLATE */
+ /* Release buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(t_wbuf);
+ HDfree(t_rbuf);
+
PASSED();
return 0;
@@ -9849,6 +9901,14 @@ error:
H5Sclose(sid_max);
H5Fclose(fid);
} H5E_END_TRY;
+ if(wbuf)
+ HDfree(wbuf);
+ if(rbuf)
+ HDfree(rbuf);
+ if(t_wbuf)
+ HDfree(t_wbuf);
+ if(t_rbuf)
+ HDfree(t_rbuf);
return -1;
} /* end test_single_chunk() */
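
One detail worth noting in test_single_chunk() above: once rbuf changes from an array to a pointer, sizeof(rbuf) no longer gives the buffer size, so the HDmemset() call has to spell out the byte count. A small illustration of the difference, with made-up dimensions:

    #include <stdlib.h>
    #include <string.h>

    #define DIM1 50     /* made-up dimensions */
    #define DIM2 100

    static void
    clear_buffers(void)
    {
        int  stack_buf[DIM1 * DIM2];
        int *heap_buf = (int *)malloc(sizeof(int) * DIM1 * DIM2);

        /* For the array, sizeof yields the whole buffer size... */
        memset(stack_buf, 0, sizeof(stack_buf));

        /* ...but for the pointer it yields only sizeof(int *), so the
         * byte count must be written explicitly, as the patch does.
         */
        if (heap_buf != NULL) {
            memset(heap_buf, 0, sizeof(int) * DIM1 * DIM2);
            free(heap_buf);
        }
    }
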
diff --git a/test/dt_arith.c b/test/dt_arith.c
index 064ee69..1ebcf74 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -94,6 +94,9 @@ static int skip_overflow_tests_g = 0;
#define TEST_DENORM 2
#define TEST_SPECIAL 3
+/* Temporary buffer sizes */
+#define TMP_BUF_DIM1 32
+#define TMP_BUF_DIM2 100
/* Don't use hardware conversions if set */
static int without_hardware_g = 0;
@@ -2673,14 +2676,16 @@ test_conv_int_2(void)
{
int i, j;
hid_t src_type, dst_type;
- char buf[32*100];
+ char *buf;
printf("%-70s", "Testing overlap calculations");
HDfflush(stdout);
- HDmemset(buf, 0, sizeof buf);
- for (i=1; i<=32; i++) {
- for (j=1; j<=32; j++) {
+ buf = (char *)HDcalloc(TMP_BUF_DIM1, TMP_BUF_DIM2);
+ HDassert(buf);
+
+ for(i = 1; i <= TMP_BUF_DIM1; i++) {
+ for(j = 1; j <= TMP_BUF_DIM1; j++) {
/* Source type */
src_type = H5Tcopy(H5T_NATIVE_CHAR);
@@ -2694,12 +2699,13 @@ test_conv_int_2(void)
* Conversion. If overlap calculations aren't right then an
* assertion will fail in H5T__conv_i_i()
*/
- H5Tconvert(src_type, dst_type, (size_t)100, buf, NULL, H5P_DEFAULT);
+ H5Tconvert(src_type, dst_type, (size_t)TMP_BUF_DIM2, buf, NULL, H5P_DEFAULT);
H5Tclose(src_type);
H5Tclose(dst_type);
}
}
PASSED();
+ HDfree(buf);
return 0;
}
diff --git a/test/evict_on_close.c b/test/evict_on_close.c
new file mode 100644
index 0000000..1f515b2
--- /dev/null
+++ b/test/evict_on_close.c
@@ -0,0 +1,779 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Dana Robinson
+ * Spring 2016
+ *
+ * Purpose: Tests the basic operation of the evict-on-close cache
+ * behavior. Tests that ensure the tagging is handled correctly
+ * are located in cache.c.
+ */
+
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#define H5I_FRIEND /*suppress error about including H5Ipkg */
+#define H5I_TESTING
+
+
+#include "h5test.h"
+#include "H5Cpkg.h"
+#include "H5Dpkg.h"
+#include "H5Fpkg.h"
+#include "H5Ipkg.h"
+
+/* Uncomment to manually inspect cache states */
+/* #define EOC_MANUAL_INSPECTION */
+
+const char *FILENAMES[] = {
+ "evict-on-close", /* 0 */
+ NULL
+};
+#define FILENAME_BUF_SIZE 1024
+
+/* Dataset names */
+#define DSET_COMPACT_NAME "compact"
+#define DSET_CONTIGUOUS_NAME "contiguous"
+#define DSET_BTREE_NAME "v1_btree"
+#define DSET_EARRAY_NAME "earray"
+#define DSET_BT2_NAME "v2_btree"
+#define DSET_FARRAY_NAME "farray"
+#define DSET_SINGLE_NAME "single"
+
+/* All datasets store 1024 elements */
+#define NELEMENTS 1024
+
+static hbool_t verify_tag_not_in_cache(H5F_t *f, haddr_t tag);
+static herr_t check_evict_on_close_api(void);
+static hid_t generate_eoc_test_file(hid_t fapl_id);
+static herr_t check_configuration(hid_t fid, const char *dset_name);
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_tag_not_in_cache()
+ *
+ * Purpose: Ensure that metadata cache entries with a given tag are not
+ * present in the cache.
+ *
+ * Return: TRUE if an entry with the given tag is found in the cache,
+ * FALSE otherwise
+ *
+ * Programmer: Dana Robinson
+ * Fall 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+verify_tag_not_in_cache(H5F_t *f, haddr_t tag)
+{
+ H5C_t *cache_ptr = NULL; /* cache pointer */
+ int i = 0; /* iterator */
+ H5C_cache_entry_t *entry_ptr = NULL; /* entry pointer */
+
+ cache_ptr = f->shared->cache;
+
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+
+ entry_ptr = cache_ptr->index[i];
+
+ while(entry_ptr != NULL) {
+
+ if(tag == entry_ptr->tag)
+ return TRUE;
+ else
+ entry_ptr = entry_ptr->ht_next;
+
+ } /* end while */
+ } /* end for */
+
+ return FALSE;
+
+} /* end verify_tag_not_in_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_eoc_test_file()
+ *
+ * Purpose: Generate the evict-on-close test file.
+ *
+ * Return: Success: The file ID of the created file
+ * Failure: -1
+ *
+ * Programmer: Dana Robinson
+ * Fall 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+generate_eoc_test_file(hid_t fapl_id)
+{
+ char filename[FILENAME_BUF_SIZE]; /* decorated file name */
+ hid_t fid = -1; /* file ID (returned) */
+ hid_t fapl_copy_id = -1; /* ID of copied fapl */
+ hid_t sid = -1; /* dataspace ID */
+ hid_t dcpl_id = -1; /* dataset creation plist */
+ hid_t did = -1; /* dataset ID */
+ int rank; /* # of array dimensions */
+ hsize_t current_dims[2]; /* current dataset size */
+ hsize_t maximum_dims[2]; /* maximum dataset size */
+ hsize_t chunk_dims[2]; /* chunk dimensions */
+ H5D_chunk_index_t idx_type; /* dataset chunk index type */
+ H5D_layout_t layout_type; /* dataset layout type */
+ int *data = NULL; /* buffer for fake data */
+ int n; /* # of data elements */
+
+ TESTING("generating evict-on-close test file");
+
+ /* Get a VFD-specific filename */
+ h5_fixname(FILENAMES[0], fapl_id, filename, sizeof(filename));
+
+ /* Create file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+ TEST_ERROR;
+
+ /***********************************************************/
+ /* Generate datasets and ensure that the scheme is correct */
+ /***********************************************************/
+
+ /* Create the data buffer */
+ if(NULL == (data = (int *)HDcalloc(NELEMENTS, sizeof(int))))
+ TEST_ERROR;
+
+ /****************************************************/
+ /* Old file format data structures (v1 B-tree only) */
+ /****************************************************/
+
+ /********************/
+ /* Version 1 B-tree */
+ /********************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up chunking */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ chunk_dims[0] = 1;
+ if(H5Pset_chunk(dcpl_id, rank, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_BTREE_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct chunk indexing scheme */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ TEST_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using version 1 B-tree as the chunk index");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /***********************************/
+ /* New file format data structures */
+ /***********************************/
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ /* Copy the fapl and set the latest file format */
+ if((fapl_copy_id = H5Pcopy(fapl_id)) < 0)
+ TEST_ERROR;
+ if(H5Pset_libver_bounds(fapl_copy_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR;
+
+ /* Reopen the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_copy_id)) < 0)
+ TEST_ERROR;
+
+ /********************/
+ /* Extensible Array */
+ /********************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up chunking */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ chunk_dims[0] = 1;
+ if(H5Pset_chunk(dcpl_id, rank, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_EARRAY_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct chunk indexing scheme */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ TEST_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("should be using extensible array as the chunk index");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /********************/
+ /* Version 2 B-Tree */
+ /********************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 2;
+ current_dims[0] = (hsize_t)2;
+ current_dims[1] = (hsize_t)(n/2);
+ maximum_dims[0] = H5S_UNLIMITED;
+ maximum_dims[1] = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up chunking */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ chunk_dims[0] = 1;
+ chunk_dims[1] = 1;
+ if(H5Pset_chunk(dcpl_id, rank, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_BT2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct chunk indexing scheme */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ TEST_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using version 2 B-tree as the chunk index");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /***************/
+ /* Fixed Array */
+ /***************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = (hsize_t)n;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up chunking */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ chunk_dims[0] = 1;
+ chunk_dims[1] = 1;
+ if(H5Pset_chunk(dcpl_id, rank, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_FARRAY_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct chunk indexing scheme */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ TEST_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_FARRAY)
+ FAIL_PUTS_ERROR("should be using fixed array as the chunk index");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /****************/
+ /* Single Chunk */
+ /****************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = (hsize_t)n;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up chunking */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ chunk_dims[0] = (hsize_t)n;
+ chunk_dims[1] = (hsize_t)n;
+ if(H5Pset_chunk(dcpl_id, rank, chunk_dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_SINGLE_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct chunk indexing scheme */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ TEST_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_SINGLE)
+ FAIL_PUTS_ERROR("should be using single chunk as the chunk index");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /**************/
+ /* Contiguous */
+ /**************/
+
+ /* Create dataspace */
+ n = NELEMENTS;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = (hsize_t)n;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_CONTIGUOUS_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct layout scheme */
+ if(H5D__layout_type_test(did, &layout_type) < 0)
+ TEST_ERROR;
+ if(layout_type != H5D_CONTIGUOUS)
+ FAIL_PUTS_ERROR("should be using contiguous layout");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+
+ /***********/
+ /* Compact */
+ /***********/
+
+ /* Create dataspace */
+ n = 1;
+ rank = 1;
+ current_dims[0] = (hsize_t)n;
+ maximum_dims[0] = (hsize_t)n;
+ if((sid = H5Screate_simple(rank, current_dims, maximum_dims)) < 0)
+ TEST_ERROR;
+
+ /* Create dcpl and set up compact layout */
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+ if(H5Pset_layout(dcpl_id, H5D_COMPACT) < 0)
+ TEST_ERROR;
+
+ /* Create dataset */
+ if((did = H5Dcreate2(fid, DSET_COMPACT_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Ensure we're using the correct layout scheme */
+ if(H5D__layout_type_test(did, &layout_type) < 0)
+ TEST_ERROR;
+ if(layout_type != H5D_COMPACT)
+ FAIL_PUTS_ERROR("should be using compact layout");
+
+ /* Write a bunch of fake data */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Close IDs for this dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR;
+ if(H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+
+ /********/
+ /* DONE */
+ /********/
+
+ /* Close/free everything else */
+ if(H5Pclose(fapl_copy_id) < 0)
+ TEST_ERROR;
+
+ HDfree(data);
+
+ PASSED();
+ return fid;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Dclose(did);
+ H5Sclose(sid);
+ H5Pclose(dcpl_id);
+ H5Pclose(fapl_copy_id);
+ } H5E_END_TRY;
+
+ HDfree(data);
+
+ H5_FAILED();
+ return -1;
+
+} /* end generate_eoc_test_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_configuration()
+ *
+ * Purpose: Verify that the evict-on-close feature works for a given
+ * dataset layout and/or chunk index.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+check_configuration(hid_t fid, const char *dset_name)
+{
+ H5F_t *file_ptr = NULL; /* ptr to internal file struct */
+ hid_t did = -1; /* dataset ID */
+ H5D_t *dset_ptr = NULL; /* ptr to internal dset struct */
+ haddr_t tag; /* MD cache tag for dataset */
+ int *data = NULL; /* buffer for fake data */
+ int32_t before, during, after; /* cache sizes */
+
+ /* NOTE: The TESTING() macro is called in main() */
+
+ /* Get a pointer to the file struct */
+ if(NULL == (file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE)))
+ TEST_ERROR;
+
+ /* Create the data buffer */
+ if(NULL == (data = (int *)HDcalloc(NELEMENTS, sizeof(int))))
+ TEST_ERROR;
+
+ /* Record the number of cache entries */
+ before = file_ptr->shared->cache->index_len;
+
+#ifdef EOC_MANUAL_INSPECTION
+ HDprintf("\nCACHE BEFORE DATASET OPEN:\n");
+ if(H5AC_dump_cache(file_ptr) < 0)
+ TEST_ERROR;
+ HDprintf("NUMBER OF CACHE ENTRIES: %d\n", before);
+#endif
+
+ /* Open dataset and get the metadata tag */
+ if((did = H5Dopen2(fid, dset_name, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if(NULL == (dset_ptr = (H5D_t *)H5I_object_verify(did, H5I_DATASET)))
+ TEST_ERROR;
+ tag = dset_ptr->oloc.addr;
+
+ /* Read data from the dataset so the cache gets populated with chunk
+ * entries and the like.
+ */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ TEST_ERROR;
+
+ /* Record the number of cache entries */
+ during = file_ptr->shared->cache->index_len;
+
+#ifdef EOC_MANUAL_INSPECTION
+ HDprintf("\nCACHE AFTER DATA READ (WHILE OPEN):\n");
+ if(H5AC_dump_cache(file_ptr) < 0)
+ TEST_ERROR;
+ HDprintf("TAG: %#X\n", tag);
+ HDprintf("NUMBER OF CACHE ENTRIES: %d\n", during);
+#endif
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR;
+
+ /* Record the number of cache entries */
+ after = file_ptr->shared->cache->index_len;
+
+#ifdef EOC_MANUAL_INSPECTION
+ HDprintf("\nCACHE AFTER DATASET CLOSE:\n");
+ if(H5AC_dump_cache(file_ptr) < 0)
+ TEST_ERROR;
+ HDprintf("NUMBER OF CACHE ENTRIES: %d\n", after);
+#endif
+
+ /* Ensure that the cache does not contain data items with the tag */
+ if(TRUE == verify_tag_not_in_cache(file_ptr, tag))
+ TEST_ERROR;
+
+ /* Compare the number of cache entries */
+ if(before != after || before == during)
+ TEST_ERROR;
+
+ HDfree(data);
+
+ PASSED();
+ return SUCCEED;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ } H5E_END_TRY;
+
+ H5_FAILED();
+ return FAIL;
+
+} /* check_configuration() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_evict_on_close_api()
+ *
+ * Purpose: Verify that the H5Pset/get_evict_on_close() calls behave
+ * correctly.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Spring 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+check_evict_on_close_api(void)
+{
+ hid_t fapl_id = -1;
+ hid_t dapl_id = -1;
+ hbool_t evict_on_close;
+ herr_t status;
+
+ TESTING("evict on close API");
+
+ /* Create a fapl */
+ if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ /* Check the default */
+ evict_on_close = TRUE;
+ if(H5Pget_evict_on_close(fapl_id, &evict_on_close) < 0)
+ TEST_ERROR;
+ if(evict_on_close != FALSE)
+ FAIL_PUTS_ERROR("Incorrect default evict on close value.");
+
+ /* Set the evict on close property */
+ evict_on_close = TRUE;
+ if(H5Pset_evict_on_close(fapl_id, evict_on_close) < 0)
+ TEST_ERROR;
+
+ /* Make sure we can get it back out */
+ evict_on_close = FALSE;
+ if(H5Pget_evict_on_close(fapl_id, &evict_on_close) < 0)
+ TEST_ERROR;
+ if(evict_on_close != TRUE)
+ FAIL_PUTS_ERROR("Incorrect evict on close value.");
+
+ /* close fapl */
+ if(H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ /**********************************************/
+ /* Try passing in a non-fapl property list */
+ /**********************************************/
+
+ if((dapl_id = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ TEST_ERROR;
+
+ /* ensure using an incorrect access plist fails */
+ H5E_BEGIN_TRY {
+ status = H5Pset_evict_on_close(dapl_id, evict_on_close);
+ } H5E_END_TRY;
+ if(status >= 0)
+ FAIL_PUTS_ERROR("H5Pset_evict_on_close() accepted invalid access plist.");
+
+ /* ensure an invalid plist fails */
+ H5E_BEGIN_TRY {
+ status = H5Pget_evict_on_close((hid_t)-1, &evict_on_close);
+ } H5E_END_TRY;
+ if(status >= 0)
+ FAIL_PUTS_ERROR("H5Pget_evict_on_close() accepted invalid hid_t.");
+
+ /* close dapl */
+ if(H5Pclose(dapl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+ return SUCCEED;
+
+error:
+ H5_FAILED();
+ return FAIL;
+
+} /* check_evict_on_close_api() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Return: EXIT_FAILURE/EXIT_SUCCESS
+ *
+ * Programmer: Dana Robinson
+ * Spring 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fapl_id = -1; /* VFD-specific fapl */
+ hid_t fid = -1; /* file ID */
+ unsigned nerrors = 0; /* number of test errors */
+
+ HDprintf("Testing evict-on-close cache behavior.\n");
+
+ /* Initialize */
+ h5_reset();
+
+ /* Test H5P call to set up EoC (does not require VFD-specific fapl) */
+ nerrors += check_evict_on_close_api() < 0 ? 1 : 0;
+
+ /* Set up VFD-specific fapl */
+ if((fapl_id = h5_fileaccess()) < 0) {
+ nerrors++;
+ PUTS_ERROR("Unable to get VFD-specific fapl\n");
+ } /* end if */
+
+ /* Set evict-on-close property */
+ if(H5Pset_evict_on_close(fapl_id, TRUE) < 0) {
+ nerrors++;
+ PUTS_ERROR("Unable to set evict-on-close property\n");
+ } /* end if */
+
+ /*************************/
+ /* Test EoC for datasets */
+ /*************************/
+
+ /* Generate the test file */
+ if((fid = generate_eoc_test_file(fapl_id)) < 0) {
+ nerrors++;
+ PUTS_ERROR("Unable to generate test file\n");
+ } /* end if */
+
+ /* Run tests with a variety of dataset configurations
+ * PASSED() and H5_FAILED() are handled in check_configuration()
+ */
+ TESTING("evict on close with version 1 B-tree chunk index");
+ nerrors += check_configuration(fid, DSET_BTREE_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with extensible array chunk index");
+ nerrors += check_configuration(fid, DSET_EARRAY_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with version 2 B-tree chunk index");
+ nerrors += check_configuration(fid, DSET_BT2_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with fixed array chunk index");
+ nerrors += check_configuration(fid, DSET_FARRAY_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with \'single chunk\' chunk index");
+ nerrors += check_configuration(fid, DSET_SINGLE_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with contiguous layout");
+ nerrors += check_configuration(fid, DSET_CONTIGUOUS_NAME) < 0 ? 1 : 0;
+ TESTING("evict on close with compact layout");
+ nerrors += check_configuration(fid, DSET_COMPACT_NAME) < 0 ? 1 : 0;
+
+ /* Close the test file */
+ if(H5Fclose(fid) < 0) {
+ nerrors++;
+ PUTS_ERROR("Unable to close the test file.\n");
+ } /* end if */
+
+ /* Clean up files and close the VFD-specific fapl */
+ h5_delete_all_test_files(FILENAMES, fapl_id);
+ if(H5Pclose(fapl_id) < 0) {
+ nerrors++;
+ PUTS_ERROR("Unable to close VFD-specific fapl.\n");
+ } /* end if */
+
+ if(nerrors)
+ goto error;
+
+ HDprintf("All evict-on-close tests passed.\n");
+
+ return EXIT_SUCCESS;
+
+error:
+
+ HDprintf("***** %u evict-on-close test%s FAILED! *****\n",
+ nerrors, nerrors > 1 ? "s" : "");
+
+ h5_delete_all_test_files(FILENAMES, fapl_id);
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fapl_id);
+ } H5E_END_TRY;
+
+ return EXIT_FAILURE;
+
+} /* end main() */
+
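
The new test drives the feature through the public H5Pset_evict_on_close() / H5Pget_evict_on_close() calls on a file access property list. A minimal application-side sketch, assuming an existing file named example.h5 (the file name is hypothetical):

    #include "hdf5.h"

    int
    main(void)
    {
        hid_t   fapl_id = -1;
        hid_t   fid     = -1;
        hbool_t evict   = 0;

        /* Enable evict-on-close on a file access property list */
        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return 1;
        if (H5Pset_evict_on_close(fapl_id, 1) < 0)
            return 1;

        /* The setting can be read back the same way */
        if (H5Pget_evict_on_close(fapl_id, &evict) < 0 || !evict)
            return 1;

        /* Files opened with this fapl evict an object's metadata cache
         * entries when that object is closed.
         */
        if ((fid = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl_id)) < 0)
            return 1;

        /* ... open, read, and close datasets as usual ... */

        H5Fclose(fid);
        H5Pclose(fapl_id);
        return 0;
    }

Evict-on-close is disabled by default, which is what the default-value check in check_evict_on_close_api() above verifies.
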
diff --git a/test/gheap.c b/test/gheap.c
index 317e306..eafc49d 100644
--- a/test/gheap.c
+++ b/test/gheap.c
@@ -35,6 +35,9 @@
* GHEAP_REPEATED_ERR_LIM errors, and suppress the rest */
#define GHEAP_REPEATED_ERR_LIM 20
+/* Number of heap objects to test */
+#define GHEAP_TEST_NOBJS 1024
+
#define GHEAP_REPEATED_ERR(MSG) \
{ \
nerrors++; \
@@ -78,9 +81,9 @@ test_1 (hid_t fapl)
{
hid_t file = -1;
H5F_t *f = NULL;
- H5HG_t obj[1024];
- uint8_t out[1024];
- uint8_t in[1024];
+ H5HG_t *obj = NULL;
+ uint8_t out[GHEAP_TEST_NOBJS];
+ uint8_t in[GHEAP_TEST_NOBJS];
size_t u;
size_t size;
herr_t status;
@@ -89,6 +92,10 @@ test_1 (hid_t fapl)
TESTING("monotonically increasing lengths");
+ /* Allocate buffer for H5HG_t */
+ if(NULL == (obj = (H5HG_t *)HDmalloc(sizeof(H5HG_t) * GHEAP_TEST_NOBJS)))
+ goto error;
+
/* Open a clean file */
h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
@@ -104,7 +111,7 @@ test_1 (hid_t fapl)
* a clean file, the addresses allocated for the collections should also
* be monotonically increasing.
*/
- for(u = 0; u < 1024; u++) {
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
size = u + 1;
HDmemset(out, (int)('A' + u % 26), size);
H5Eclear2(H5E_DEFAULT);
@@ -123,7 +130,7 @@ test_1 (hid_t fapl)
/*
* Now try to read each object back.
*/
- for(u = 0; u < 1024; u++) {
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
size = u + 1;
HDmemset(out, (int)('A' + u % 26), size);
H5Eclear2(H5E_DEFAULT);
@@ -138,6 +145,10 @@ test_1 (hid_t fapl)
}
}
+ /* Release buffer */
+ HDfree(obj);
+ obj = NULL;
+
if(H5Fclose(file) < 0) goto error;
if(nerrors) goto error;
@@ -148,6 +159,8 @@ error:
H5E_BEGIN_TRY {
H5Fclose(file);
} H5E_END_TRY;
+ if(obj)
+ HDfree(obj);
return MAX(1, nerrors);
}
@@ -174,9 +187,9 @@ test_2 (hid_t fapl)
{
hid_t file = -1;
H5F_t *f = NULL;
- H5HG_t obj[1024];
- uint8_t out[1024];
- uint8_t in[1024];
+ H5HG_t *obj = NULL;
+ uint8_t out[GHEAP_TEST_NOBJS];
+ uint8_t in[GHEAP_TEST_NOBJS];
size_t u;
size_t size;
int nerrors = 0;
@@ -184,6 +197,10 @@ test_2 (hid_t fapl)
TESTING("monotonically decreasing lengths");
+ /* Allocate buffer for H5HG_t */
+ if(NULL == (obj = (H5HG_t *)HDmalloc(sizeof(H5HG_t) * GHEAP_TEST_NOBJS)))
+ goto error;
+
/* Open a clean file */
h5_fixname(FILENAME[1], fapl, filename, sizeof filename);
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
@@ -197,8 +214,8 @@ test_2 (hid_t fapl)
/*
* Write the objects, monotonically decreasing in length.
*/
- for(u = 0; u < 1024; u++) {
- size = 1024 - u;
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
+ size = GHEAP_TEST_NOBJS - u;
HDmemset(out, (int)('A' + u % 26), size);
H5Eclear2(H5E_DEFAULT);
if (H5HG_insert (f, H5AC_ind_read_dxpl_id, size, out, obj + u) < 0) {
@@ -211,8 +228,8 @@ test_2 (hid_t fapl)
/*
* Now try to read each object back.
*/
- for(u = 0; u < 1024; u++) {
- size = 1024 - u;
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
+ size = GHEAP_TEST_NOBJS - u;
HDmemset(out, (int)('A' + u % 26), size);
H5Eclear2(H5E_DEFAULT);
if (NULL==H5HG_read (f, H5AC_ind_read_dxpl_id, obj + u, in, NULL)) {
@@ -226,8 +243,13 @@ test_2 (hid_t fapl)
}
}
+ /* Release buffer */
+ HDfree(obj);
+ obj = NULL;
+
if (H5Fclose(file)<0) goto error;
if (nerrors) goto error;
+
PASSED();
return 0;
@@ -235,6 +257,8 @@ test_2 (hid_t fapl)
H5E_BEGIN_TRY {
H5Fclose(file);
} H5E_END_TRY;
+ if(obj)
+ HDfree(obj);
return MAX(1, nerrors);
}
@@ -261,8 +285,8 @@ test_3 (hid_t fapl)
{
hid_t file = -1;
H5F_t *f = NULL;
- H5HG_t obj[1024];
- uint8_t out[1024];
+ H5HG_t *obj = NULL;
+ uint8_t out[GHEAP_TEST_NOBJS];
size_t u;
size_t size;
herr_t status;
@@ -271,6 +295,10 @@ test_3 (hid_t fapl)
TESTING("complete object removal");
+ /* Allocate buffer for H5HG_t */
+ if(NULL == (obj = (H5HG_t *)HDmalloc(sizeof(H5HG_t) * GHEAP_TEST_NOBJS)))
+ goto error;
+
/* Open a clean file */
h5_fixname(FILENAME[2], fapl, filename, sizeof filename);
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
@@ -282,7 +310,7 @@ test_3 (hid_t fapl)
}
/* Create some stuff */
- for(u = 0; u < 1024; u++) {
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
size = u % 30 + 100;
HDmemset(out, (int)('A' + u % 26), size);
H5Eclear2(H5E_DEFAULT);
@@ -295,7 +323,7 @@ test_3 (hid_t fapl)
}
/* Remove everything */
- for(u = 0; u < 1024; u++) {
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
status = H5HG_remove (f, H5AC_ind_read_dxpl_id, obj + u);
if (status<0) {
H5_FAILED();
@@ -304,8 +332,13 @@ test_3 (hid_t fapl)
}
}
+ /* Release buffer */
+ HDfree(obj);
+ obj = NULL;
+
if (H5Fclose(file)<0) goto error;
if (nerrors) goto error;
+
PASSED();
return 0;
@@ -313,6 +346,8 @@ test_3 (hid_t fapl)
H5E_BEGIN_TRY {
H5Fclose(file);
} H5E_END_TRY;
+ if(obj)
+ HDfree(obj);
return MAX(1, nerrors);
}
@@ -340,8 +375,8 @@ test_4 (hid_t fapl)
{
hid_t file = -1;
H5F_t *f = NULL;
- H5HG_t obj[1024];
- uint8_t out[1024];
+ H5HG_t *obj = NULL;
+ uint8_t out[GHEAP_TEST_NOBJS];
size_t u;
size_t size;
herr_t status;
@@ -350,6 +385,10 @@ test_4 (hid_t fapl)
TESTING("partial object removal");
+ /* Allocate buffer for H5HG_t */
+ if(NULL == (obj = (H5HG_t *)HDmalloc(sizeof(H5HG_t) * GHEAP_TEST_NOBJS)))
+ goto error;
+
/* Open a clean file */
h5_fixname(FILENAME[3], fapl, filename, sizeof filename);
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
@@ -360,7 +399,7 @@ test_4 (hid_t fapl)
goto error;
}
- for(u = 0; u < 1024; u++) {
+ for(u = 0; u < GHEAP_TEST_NOBJS; u++) {
/* Insert */
size = u % 30 + 100;
HDmemset(out, (int)('A' + u % 26), size);
@@ -389,8 +428,13 @@ test_4 (hid_t fapl)
}
}
+ /* Release buffer */
+ HDfree(obj);
+ obj = NULL;
+
if (H5Fclose(file)<0) goto error;
if (nerrors) goto error;
+
PASSED();
return 0;
@@ -398,6 +442,8 @@ test_4 (hid_t fapl)
H5E_BEGIN_TRY {
H5Fclose(file);
} H5E_END_TRY;
+ if(obj)
+ HDfree(obj);
return MAX(1, nerrors);
}
diff --git a/test/h5test.c b/test/h5test.c
index aea5dc7..c126da8 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -74,7 +74,7 @@ char *paraprefix = NULL; /* for command line option para-prefix */
MPI_Info h5_io_info_g=MPI_INFO_NULL;/* MPI INFO object for IO */
#endif
-#define READ_BUF_SIZE 4096
+#define READ_BUF_SIZE 65536
/*
* These are the letters that are appended to the file name when generating
@@ -90,6 +90,9 @@ MPI_Info h5_io_info_g=MPI_INFO_NULL;/* MPI INFO object for IO */
*/
static const char *multi_letters = "msbrglo";
+/* Length of multi-file VFD filename buffers */
+#define H5TEST_MULTI_FILENAME_LEN 1024
+
/* Previous error reporting function */
static H5E_auto2_t err_func = NULL;
@@ -196,7 +199,7 @@ h5_clean_files(const char *base_name[], hid_t fapl)
*
* Purpose Clean up temporary test files.
*
- * When a test calls h5_fixname() get a VFD-dependent
+ * When a test calls h5_fixname() to get a VFD-dependent
* test file name, this function can be used to clean it up.
*
* Return: void
@@ -855,7 +858,7 @@ h5_fileaccess(void)
H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
hid_t memb_fapl[H5FD_MEM_NTYPES];
const char *memb_name[H5FD_MEM_NTYPES];
- char sv[H5FD_MEM_NTYPES][1024];
+ char *sv[H5FD_MEM_NTYPES];
haddr_t memb_addr[H5FD_MEM_NTYPES];
H5FD_mem_t mt;
@@ -867,6 +870,8 @@ h5_fileaccess(void)
HDassert(HDstrlen(multi_letters)==H5FD_MEM_NTYPES);
for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) {
memb_fapl[mt] = H5P_DEFAULT;
+ if(NULL == (sv[mt] = (char *)HDmalloc(H5TEST_MULTI_FILENAME_LEN)))
+ return -1;
HDsprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
memb_name[mt] = sv[mt];
memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10);
@@ -874,6 +879,9 @@ h5_fileaccess(void)
if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, FALSE) < 0)
return -1;
+
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ HDfree(sv[mt]);
}
else if(!HDstrcmp(name, "family")) {
hsize_t fam_size = 100*1024*1024; /*100 MB*/
@@ -989,7 +997,7 @@ h5_get_vfd_fapl(void)
H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
hid_t memb_fapl[H5FD_MEM_NTYPES];
const char *memb_name[H5FD_MEM_NTYPES];
- char sv[H5FD_MEM_NTYPES][1024];
+ char *sv[H5FD_MEM_NTYPES];
haddr_t memb_addr[H5FD_MEM_NTYPES];
H5FD_mem_t mt;
@@ -1001,15 +1009,18 @@ h5_get_vfd_fapl(void)
HDassert(HDstrlen(multi_letters) == H5FD_MEM_NTYPES);
for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) {
memb_fapl[mt] = H5P_DEFAULT;
+ sv[mt] = (char *)HDmalloc(H5TEST_MULTI_FILENAME_LEN);
+ HDassert(sv[mt]);
HDsprintf(sv[mt], "%%s-%c.h5", multi_letters[mt]);
memb_name[mt] = sv[mt];
memb_addr[mt] = (haddr_t)MAX(mt - 1, 0) * (HADDR_MAX / 10);
} /* end for */
- if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name,
- memb_addr, FALSE) < 0) {
+ if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, FALSE) < 0)
return -1;
- } /* end if */
+
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ HDfree(sv[mt]);
} else if(!HDstrcmp(tok, "family")) {
/* Family of files, each 1MB and using the default driver */
hsize_t fam_size = 100*1024*1024; /*100 MB*/
@@ -1573,9 +1584,13 @@ h5_make_local_copy(const char *origfilename, const char *local_copy_name)
{
int fd_old = (-1), fd_new = (-1); /* File descriptors for copying data */
ssize_t nread; /* Number of bytes read in */
- char buf[READ_BUF_SIZE]; /* Buffer for copying data */
+ void *buf; /* Buffer for copying data */
const char *filename = H5_get_srcdir_filename(origfilename);; /* Get the test file name to copy */
+ /* Allocate copy buffer */
+ if(NULL == (buf = HDmalloc(READ_BUF_SIZE)))
+ return -1;
+
/* Copy old file into temporary file */
if((fd_old = HDopen(filename, O_RDONLY, 0666)) < 0)
return -1;
@@ -1586,6 +1601,9 @@ h5_make_local_copy(const char *origfilename, const char *local_copy_name)
while((nread = HDread(fd_old, buf, (size_t)READ_BUF_SIZE)) > 0)
if(HDwrite(fd_new, buf, (size_t)nread) < 0)
return -1;
+
+ /* Release memory */
+ HDfree(buf);
/* Close files */
if(HDclose(fd_old) < 0) return -1;
diff --git a/test/tattr.c b/test/tattr.c
index e7b3ece..6f55081 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -8029,7 +8029,7 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl)
htri_t is_shared; /* Is attributes shared? */
hsize_t shared_refcount; /* Reference count of shared attribute */
unsigned attr_value; /* Attribute value */
- unsigned big_value[SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3]; /* Data for "big" attribute */
+ unsigned *big_value; /* Data for "big" attribute */
size_t mesg_count; /* # of shared messages */
unsigned test_shared; /* Index over shared component type */
unsigned u; /* Local index variable */
@@ -8040,8 +8040,10 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl)
/* Output message about test being performed */
MESSAGE(5, ("Testing Writing Shared & Unshared Attributes in Compact & Dense Storage\n"));
- /* Initialize "big" attribute data */
- HDmemset(big_value, 1, sizeof(big_value));
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK(big_value, NULL, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
/* Create dataspace for dataset */
sid = H5Screate(H5S_SCALAR);
@@ -8328,6 +8330,9 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl)
CHECK(ret, FAIL, "H5Sclose");
ret = H5Sclose(big_sid);
CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
} /* test_attr_shared_write() */
/****************************************************************
@@ -8355,7 +8360,7 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl)
htri_t is_shared; /* Is attributes shared? */
hsize_t shared_refcount; /* Reference count of shared attribute */
unsigned attr_value; /* Attribute value */
- unsigned big_value[SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3]; /* Data for "big" attribute */
+ unsigned *big_value; /* Data for "big" attribute */
size_t mesg_count; /* # of shared messages */
unsigned test_shared; /* Index over shared component type */
unsigned u; /* Local index variable */
@@ -8366,8 +8371,10 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl)
/* Output message about test being performed */
MESSAGE(5, ("Testing Renaming Shared & Unshared Attributes in Compact & Dense Storage\n"));
- /* Initialize "big" attribute data */
- HDmemset(big_value, 1, sizeof(big_value));
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK(big_value, NULL, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
/* Create dataspace for dataset */
sid = H5Screate(H5S_SCALAR);
@@ -8770,6 +8777,9 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl)
CHECK(ret, FAIL, "H5Sclose");
ret = H5Sclose(big_sid);
CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
} /* test_attr_shared_rename() */
/****************************************************************
@@ -8796,7 +8806,7 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl)
htri_t is_shared; /* Is attributes shared? */
hsize_t shared_refcount; /* Reference count of shared attribute */
unsigned attr_value; /* Attribute value */
- unsigned big_value[SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3]; /* Data for "big" attribute */
+ unsigned *big_value; /* Data for "big" attribute */
size_t mesg_count; /* # of shared messages */
unsigned test_shared; /* Index over shared component type */
unsigned u; /* Local index variable */
@@ -8807,8 +8817,10 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl)
/* Output message about test being performed */
MESSAGE(5, ("Testing Deleting Shared & Unshared Attributes in Compact & Dense Storage\n"));
- /* Initialize "big" attribute data */
- HDmemset(big_value, 1, sizeof(big_value));
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK(big_value, NULL, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
/* Create dataspace for dataset */
sid = H5Screate(H5S_SCALAR);
@@ -9134,6 +9146,9 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl)
CHECK(ret, FAIL, "H5Sclose");
ret = H5Sclose(big_sid);
CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
} /* test_attr_shared_delete() */
/****************************************************************
@@ -9160,7 +9175,7 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
htri_t is_shared; /* Is attributes shared? */
hsize_t shared_refcount; /* Reference count of shared attribute */
unsigned attr_value; /* Attribute value */
- unsigned big_value[SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3]; /* Data for "big" attribute */
+ unsigned *big_value; /* Data for "big" attribute */
size_t mesg_count; /* # of shared messages */
unsigned test_shared; /* Index over shared component type */
unsigned u; /* Local index variable */
@@ -9171,8 +9186,10 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
/* Output message about test being performed */
MESSAGE(5, ("Testing Unlinking Object with Shared Attributes in Compact & Dense Storage\n"));
- /* Initialize "big" attribute data */
- HDmemset(big_value, 1, sizeof(big_value));
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK(big_value, NULL, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
/* Create dataspace for dataset */
sid = H5Screate(H5S_SCALAR);
@@ -9484,6 +9501,9 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
CHECK(ret, FAIL, "H5Sclose");
ret = H5Sclose(big_sid);
CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
} /* test_attr_shared_unlink() */
/****************************************************************
diff --git a/test/tchecksum.c b/test/tchecksum.c
index ca6c227..cf519c0 100644
--- a/test/tchecksum.c
+++ b/test/tchecksum.c
@@ -37,7 +37,6 @@
/*******************/
/* Local variables */
/*******************/
-uint8_t large_buf[BUF_LEN];
/****************************************************************
@@ -184,33 +183,41 @@ test_chksum_size_four(void)
static void
test_chksum_large(void)
{
+ uint8_t *large_buf; /* Buffer for checksum calculations */
uint32_t chksum; /* Checksum value */
size_t u; /* Local index variable */
+ /* Allocate the buffer */
+ large_buf = (uint8_t *)HDmalloc((size_t)BUF_LEN);
+ CHECK(large_buf, NULL, "HDmalloc");
+
/* Initialize buffer w/known data */
for(u = 0; u < BUF_LEN; u++)
large_buf[u] = (uint8_t)(u * 3);
/* Buffer w/real data */
- chksum = H5_checksum_fletcher32(large_buf, sizeof(large_buf));
+ chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN);
VERIFY(chksum, 0x85b4e2a, "H5_checksum_fletcher32");
- chksum = H5_checksum_crc(large_buf, sizeof(large_buf));
+ chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN);
VERIFY(chksum, 0xfbd0f7c0, "H5_checksum_crc");
- chksum = H5_checksum_lookup3(large_buf, sizeof(large_buf), 0);
+ chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0);
VERIFY(chksum, 0x1bd2ee7b, "H5_checksum_lookup3");
/* Buffer w/zero(s) for data */
- HDmemset(large_buf, 0, sizeof(large_buf));
- chksum = H5_checksum_fletcher32(large_buf, sizeof(large_buf));
+ HDmemset(large_buf, 0, (size_t)BUF_LEN);
+ chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN);
VERIFY(chksum, 0, "H5_checksum_fletcher32");
- chksum = H5_checksum_crc(large_buf, sizeof(large_buf));
+ chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN);
VERIFY(chksum, 0xfac8b4c4, "H5_checksum_crc");
- chksum = H5_checksum_lookup3(large_buf, sizeof(large_buf), 0);
+ chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0);
VERIFY(chksum, 0x930c7afc, "H5_checksum_lookup3");
+
+ /* Release memory for buffer */
+ HDfree(large_buf);
} /* test_chksum_large() */
diff --git a/test/th5o.c b/test/th5o.c
index 125e11b..c2c4034 100644
--- a/test/th5o.c
+++ b/test/th5o.c
@@ -777,15 +777,21 @@ test_h5o_link(void)
hsize_t dims[2] = {TEST6_DIM1, TEST6_DIM2};
htri_t committed; /* Whether the named datatype is committed */
unsigned new_format; /* Whether to use the new format or not */
- int wdata[TEST6_DIM1][TEST6_DIM2];
- int rdata[TEST6_DIM1][TEST6_DIM2];
- int i, n, j;
+ int *wdata;
+ int *rdata;
+ int i, n;
herr_t ret; /* Value returned from API calls */
+ /* Allocate memory buffers */
+ /* (These are treated as 2-D buffers) */
+ wdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int));
+ CHECK(wdata, NULL, "HDmalloc");
+ rdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int));
+ CHECK(rdata, NULL, "HDmalloc");
+
/* Initialize the raw data */
- for(i = n = 0; i < TEST6_DIM1; i++)
- for(j = 0; j < TEST6_DIM2; j++)
- wdata[i][j] = n++;
+ for(i = n = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ wdata[i] = n++;
/* Create the dataspace */
space_id = H5Screate_simple(2 ,dims, NULL);
@@ -840,9 +846,8 @@ test_h5o_link(void)
CHECK(ret, FAIL, "H5Dread");
/* Verify the data */
- for(i = 0; i < TEST6_DIM1; i++)
- for(j = 0; j < TEST6_DIM2; j++)
- VERIFY(wdata[i][j], rdata[i][j], "H5Dread");
+ for(i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ VERIFY(wdata[i], rdata[i], "H5Dread");
/* Create a group with no name*/
group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT);
@@ -879,9 +884,8 @@ test_h5o_link(void)
/* Read data from dataset */
ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
CHECK(ret, FAIL, "H5Dread");
- for(i = 0; i < TEST6_DIM1; i++)
- for(j = 0; j < TEST6_DIM2; j++)
- VERIFY(wdata[i][j], rdata[i][j], "H5Dread");
+ for(i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ VERIFY(wdata[i], rdata[i], "H5Dread");
/* Close open IDs */
ret = H5Dclose(dset_id);
@@ -897,6 +901,10 @@ test_h5o_link(void)
CHECK(ret, FAIL, "H5Sclose");
ret = H5Pclose(lcpl_id);
CHECK(ret, FAIL, "H5Pclose");
+
+ /* Release buffers */
+ HDfree(wdata);
+ HDfree(rdata);
} /* end test_h5o_link() */
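
In th5o.c the two-dimensional wdata[i][j] / rdata[i][j] arrays become flat heap buffers, so the nested initialization and verification loops collapse into single loops over TEST6_DIM1 * TEST6_DIM2 elements. For reference, a flat buffer can still be addressed two-dimensionally with row-major indexing; a small sketch with made-up dimensions:

    #include <stdlib.h>

    #define DIM1 100    /* made-up dimensions */
    #define DIM2 200

    static int *
    fill_flat(void)
    {
        /* Element (i, j) of the conceptual 2-D array lives at index i * DIM2 + j */
        int *buf = (int *)malloc(sizeof(int) * DIM1 * DIM2);

        if (buf != NULL)
            for (int i = 0; i < DIM1; i++)
                for (int j = 0; j < DIM2; j++)
                    buf[i * DIM2 + j] = i * DIM2 + j;

        return buf;     /* caller frees */
    }
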
diff --git a/test/theap.c b/test/theap.c
index 2d509bf..9c509a1 100644
--- a/test/theap.c
+++ b/test/theap.c
@@ -44,13 +44,13 @@ typedef struct test_obj {
} test_obj;
/* Array of random element values */
-static test_obj rand_num[NUM_ELEMS];
+static test_obj *rand_num;
/* Array of random elements values, sorted in increasing order */
-static test_obj inc_sort_num[NUM_ELEMS];
+static test_obj *inc_sort_num;
/* Array of random elements values, sorted in decreasing order */
-static test_obj dec_sort_num[NUM_ELEMS];
+static test_obj *dec_sort_num;
static int tst_dec_sort(const void *_i1, const void *_i2)
{
@@ -88,21 +88,29 @@ test_heap_init(void)
time_t curr_time; /* Current time, for seeding random number generator */
size_t u; /* Local index variables */
+ /* Allocate arrays */
+ rand_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
+ CHECK(rand_num, NULL, "HDmalloc");
+ inc_sort_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
+ CHECK(inc_sort_num, NULL, "HDmalloc");
+ dec_sort_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
+ CHECK(dec_sort_num, NULL, "HDmalloc");
+
/* Create randomized set of numbers */
- curr_time=time(NULL);
+ curr_time = HDtime(NULL);
HDsrandom((unsigned)curr_time);
- for(u=0; u<NUM_ELEMS; u++)
+ for(u = 0; u < NUM_ELEMS; u++)
/* Generate random numbers from -1000 to 1000 */
- rand_num[u].val=(int)(HDrandom()%2001)-1001;
+ rand_num[u].val = (int)(HDrandom() % 2001) - 1001;
/* Sort random numbers into increasing order */
- HDmemcpy(inc_sort_num,rand_num,sizeof(test_obj)*NUM_ELEMS);
+ HDmemcpy(inc_sort_num, rand_num, sizeof(test_obj) * NUM_ELEMS);
HDqsort(inc_sort_num, (size_t)NUM_ELEMS, sizeof(test_obj), tst_inc_sort);
/* Sort random numbers into decreasing order */
- HDmemcpy(dec_sort_num,rand_num,sizeof(test_obj)*NUM_ELEMS);
+ HDmemcpy(dec_sort_num, rand_num, sizeof(test_obj) * NUM_ELEMS);
HDqsort(dec_sort_num, (size_t)NUM_ELEMS, sizeof(test_obj), tst_dec_sort);
-} /* end test_tst_init() */
+} /* end test_heap_init() */
/****************************************************************
**
@@ -1023,6 +1031,24 @@ test_heap_incdec(void)
/****************************************************************
**
+** test_heap_term(): Test H5HP (heap) code.
+** Release data for Heap testing
+**
+****************************************************************/
+static void
+test_heap_term(void)
+{
+ /* Release arrays */
+ if(rand_num)
+ HDfree(rand_num);
+ if(inc_sort_num)
+ HDfree(inc_sort_num);
+ if(dec_sort_num)
+ HDfree(dec_sort_num);
+} /* end test_heap_term() */
+
+/****************************************************************
+**
** test_heap(): Main H5HP testing routine.
**
****************************************************************/
@@ -1044,5 +1070,8 @@ test_heap(void)
test_heap_change(); /* Test changing priority of objects on Heap */
test_heap_incdec(); /* Test incrementing & decrementing priority of objects on Heap */
+ /* Release Heap testing data */
+ test_heap_term();
+
} /* end test_heap() */
diff --git a/test/titerate.c b/test/titerate.c
index 9f0b900..3004d62 100644
--- a/test/titerate.c
+++ b/test/titerate.c
@@ -584,9 +584,9 @@ test_iter_group_large(hid_t fapl)
hid_t tid; /* Datatype ID */
hsize_t dims[] = {SPACE1_DIM1};
herr_t ret; /* Generic return value */
- char gname[20]; /* Temporary group name */
- iter_info names[ITER_NGROUPS+2]; /* Names of objects in the root group */
- iter_info *curr_name; /* Pointer to the current name in the root group */
+ char gname[20]; /* Temporary group name */
+ iter_info *names; /* Names of objects in the root group */
+ iter_info *curr_name; /* Pointer to the current name in the root group */
int i;
/* Compound datatype */
@@ -596,7 +596,9 @@ test_iter_group_large(hid_t fapl)
float c;
} s1_t;
- HDmemset(names, 0, sizeof names);
+ /* Allocate & initialize array */
+ names = (iter_info *)HDcalloc(sizeof(iter_info), (ITER_NGROUPS + 2));
+ CHECK(names, NULL, "HDcalloc");
/* Output message about test being performed */
MESSAGE(5, ("Testing Large Group Iteration Functionality\n"));
@@ -686,6 +688,9 @@ test_iter_group_large(hid_t fapl)
/* Close file */
ret = H5Fclose(file);
CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release memory */
+ HDfree(names);
} /* test_iterate_group_large() */
/****************************************************************
diff --git a/test/tskiplist.c b/test/tskiplist.c
index f413674..f30948e 100644
--- a/test/tskiplist.c
+++ b/test/tskiplist.c
@@ -38,9 +38,9 @@
#define NUM_ELEMS 1000
/* Random numbers */
-static int rand_num[NUM_ELEMS];
-static int sort_rand_num[NUM_ELEMS];
-static int rev_sort_rand_num[NUM_ELEMS];
+static int *rand_num;
+static int *sort_rand_num;
+static int *rev_sort_rand_num;
static int tst_sort(const void *i1, const void *i2)
{
@@ -66,23 +66,31 @@ test_skiplist_init(void)
unsigned found; /* Flag to indicate value was inserted already */
size_t u,v; /* Local index variables */
+ /* Allocate arrays */
+ rand_num = (int *)HDmalloc(sizeof(int) * NUM_ELEMS);
+ CHECK(rand_num, NULL, "HDmalloc");
+ sort_rand_num = (int *)HDmalloc(sizeof(int) * NUM_ELEMS);
+ CHECK(sort_rand_num, NULL, "HDmalloc");
+ rev_sort_rand_num = (int *)HDmalloc(sizeof(int) * NUM_ELEMS);
+ CHECK(rev_sort_rand_num, NULL, "HDmalloc");
+
/* Initialize random number seed */
curr_time = HDtime(NULL);
HDsrandom((unsigned)curr_time);
/* Create randomized set of numbers */
- for(u=0; u<NUM_ELEMS; u++) {
+ for(u = 0; u < NUM_ELEMS; u++) {
do {
/* Reset flag */
- found=0;
+ found = 0;
/* Generate random numbers from -5000 to 5000 */
- new_val=(int)(HDrandom()%10001)-5001;
+ new_val = (int)(HDrandom() % 10001) - 5001;
/* Check if the value is already in the array */
- for(v=0; v<u; v++)
- if(rand_num[v]==new_val)
- found=1;
+ for(v = 0; v < u; v++)
+ if(rand_num[v] == new_val)
+ found = 1;
} while(found);
/* Set unique value in array */
@@ -90,7 +98,7 @@ test_skiplist_init(void)
} /* end for */
/* Copy random values to sorted array */
- HDmemcpy(sort_rand_num,rand_num,sizeof(int)*NUM_ELEMS);
+ HDmemcpy(sort_rand_num, rand_num, sizeof(int) * NUM_ELEMS);
/* Sort random numbers */
HDqsort(sort_rand_num, (size_t)NUM_ELEMS, sizeof(int), tst_sort);
@@ -100,7 +108,7 @@ test_skiplist_init(void)
/* Sort random numbers */
HDqsort(rev_sort_rand_num, (size_t)NUM_ELEMS, sizeof(int), tst_rev_sort);
-} /* end test_tst_init() */
+} /* end test_skiplist_init() */
/****************************************************************
**
@@ -1746,6 +1754,24 @@ test_skiplist_remove_first_many(void)
/****************************************************************
**
+** test_skiplist_term(): Test H5SL (skiplist) code.
+** Release data for skip list testing
+**
+****************************************************************/
+static void
+test_skiplist_term(void)
+{
+ /* Release arrays */
+ if(rand_num)
+ HDfree(rand_num);
+ if(sort_rand_num)
+ HDfree(sort_rand_num);
+ if(rev_sort_rand_num)
+ HDfree(rev_sort_rand_num);
+} /* end test_skiplist_term() */
+
+/****************************************************************
+**
** test_skiplist(): Main H5SL testing routine.
**
****************************************************************/
@@ -1784,5 +1810,8 @@ test_skiplist(void)
test_skiplist_remove_first(); /* Test 'remove first' operation */
test_skiplist_remove_first_many(); /* Test 'remove first' operation on large skip lists */
+ /* Release skip list testing data */
+ test_skiplist_term();
+
} /* end test_skiplist() */
diff --git a/test/tvltypes.c b/test/tvltypes.c
index f8a6a91..d00519d 100644
--- a/test/tvltypes.c
+++ b/test/tvltypes.c
@@ -1001,8 +1001,8 @@ test_vltypes_compound_vlen_vlen(void)
float f;
hvl_t v;
} s1;
- s1 wdata[SPACE3_DIM1]; /* data to write */
- s1 rdata[SPACE3_DIM1]; /* data to read */
+ s1 *wdata; /* data to write */
+ s1 *rdata; /* data to read */
hid_t fid1; /* HDF5 File IDs */
hid_t dataset; /* Dataset ID */
hid_t sid1; /* Dataspace ID */
@@ -1016,6 +1016,10 @@ test_vltypes_compound_vlen_vlen(void)
MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n"));
/* Allocate and initialize VL data to write */
+ wdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1);
+ CHECK(wdata, NULL, "HDmalloc");
+ rdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1);
+ CHECK(rdata, NULL, "HDmalloc");
for(i = 0; i < SPACE3_DIM1; i++) {
wdata[i].i = (int)(i * 10);
wdata[i].f = (float)(i * 20) / 3.0F;
@@ -1147,6 +1151,10 @@ test_vltypes_compound_vlen_vlen(void)
/* Close file */
ret = H5Fclose(fid1);
CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release buffers */
+ HDfree(wdata);
+ HDfree(rdata);
} /* end test_vltypes_compound_vlen_vlen() */
/****************************************************************