author     James Laird <jlaird@hdfgroup.org>  2006-12-01 15:51:42 (GMT)
committer  James Laird <jlaird@hdfgroup.org>  2006-12-01 15:51:42 (GMT)
commit     772730f4d744a0c8428f9c447b7b6073410a2880 (patch)
tree       f9fb0a1a5613e3c8705c7ea5e0b991cc2acfb82d /test/tsohm.c
parent     7855afc4b2a1fd4c266f96aa7a8dc21964e3bda3 (diff)
[svn-r13004] Much improved shared object header message test, along with some bug fixes
to make the test pass. These changes alter the SOHM file format, but that's okay because nobody should have been using SOHMs yet anyway. Tested on Windows, kagiso, copper, and heping.
Diffstat (limited to 'test/tsohm.c')
-rw-r--r--  test/tsohm.c | 2240
1 file changed, 2215 insertions(+), 25 deletions(-)
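For orientation, here is a minimal, hypothetical sketch (not part of the patch) of how the shared object header message settings exercised by this test are configured through a file creation property list. It uses only the H5Pset_shared_mesg_* calls that appear in the diff below; the file name "sohm_example.h5" and the simplified error handling are placeholders.

#include "hdf5.h"
#include <stdio.h>

int
main(void)
{
    hid_t fcpl_id = -1;
    hid_t fid = -1;

    /* Create a file creation property list with one shared-message index */
    if((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) < 0) goto error;
    if(H5Pset_shared_mesg_nindexes(fcpl_id, 1) < 0) goto error;

    /* Index 1 holds datatype and dataspace messages of at least 40 bytes.
     * (The 1-based index numbering follows the usage in this revision of
     * the test.)
     */
    if(H5Pset_shared_mesg_index(fcpl_id, 1,
            H5O_MESG_DTYPE_FLAG | H5O_MESG_SDSPACE_FLAG, 40) < 0) goto error;

    /* Store shared messages in a list until it grows past 30 entries, then
     * convert it to a B-tree; convert back to a list if it shrinks below 25.
     */
    if(H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25) < 0) goto error;

    if((fid = H5Fcreate("sohm_example.h5", H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT)) < 0)
        goto error;

    if(H5Pclose(fcpl_id) < 0) goto error;
    if(H5Fclose(fid) < 0) goto error;

    printf("created a file with one shared object header message index\n");
    return 0;

error:
    return 1;
}

Setting both phase-change values to zero, as the B-tree variants of the tests below do, makes the index use a B-tree from the start instead of a list.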
diff --git a/test/tsohm.c b/test/tsohm.c
index c1dcbc5..482b4af 100644
--- a/test/tsohm.c
+++ b/test/tsohm.c
@@ -27,13 +27,16 @@
*/
/* JAMES: get these three from default fcpl */
#define MAX_INDEXES 6
+
/* Default SOHM values */
#define DEF_NUM_INDEXES 0
const unsigned def_type_flags[MAX_INDEXES] = {0,0,0,0,0,0};
+const unsigned def_minsizes[MAX_INDEXES] = {250,250,250,250,250,250};
#define DEF_L2B 50
#define DEF_B2L 40
/* Non-default SOHM values for testing */
+/* JAMES: make these defined in function */
#define TEST_NUM_INDEXES 4
const unsigned test_type_flags[MAX_INDEXES] =
{H5O_MESG_FILL_FLAG,
@@ -41,11 +44,103 @@ const unsigned test_type_flags[MAX_INDEXES] =
H5O_MESG_SDSPACE_FLAG,
H5O_MESG_PLINE_FLAG,
0, 0};
+const unsigned test_minsizes[MAX_INDEXES] = {0, 2, 40, 100, 3, 1000};
#define TEST_L2B 65
#define TEST_B2L 64
#define FILENAME "tsohm.h5"
+#define NAME_BUF_SIZE 512
+
+/* How much overhead counts as "not much" when converting B-trees, etc. */
+#define OVERHEAD_ALLOWED 1.1
+
+#define NUM_DATASETS 10
+#define NUM_ATTRIBUTES 100
+
+typedef struct dtype1_struct {
+ int i1;
+ char str[10]; /* JAMES */
+ int i2;
+ int i3;
+ int i4;
+ int i5;
+ int i6;
+ int i7;
+ int i8;
+ float f1;
+} dtype1_struct;
+#define DTYPE2_SIZE 1024
+const char *DSETNAME[] = {
+ "dataset0", "dataset1",
+ "dataset2", "dataset3",
+ "dataset4", "dataset5",
+ "dataset6", "dataset7",
+ "dataset8", "dataset9",
+ "dataset10", "dataset11",
+ NULL
+};
+const char *EXTRA_DSETNAME[] = {
+ "ex_dataset0", "ex_dataset1",
+ "ex_dataset2", "ex_dataset3",
+ "ex_dataset4", "ex_dataset5",
+ "ex_dataset6", "ex_dataset7",
+ "ex_dataset8", "ex_dataset9",
+ "ex_dataset10", "ex_dataset11",
+ "ex_dataset12", "ex_dataset13",
+ "ex_dataset14", "ex_dataset15",
+ "ex_dataset16", "ex_dataset17",
+ "ex_dataset18", "ex_dataset19",
+ NULL
+};
+#define SOHM_HELPER_NUM_EX_DSETS 20
+typedef struct complex_t {
+ double re;
+ double im;
+} complex_t;
+#define ENUM_NUM_MEMBS 20
+const char *ENUM_NAME[] = {
+ "enum_member0", "enum_member1",
+ "enum_member2", "enum_member3",
+ "enum_member4", "enum_member5",
+ "enum_member6", "enum_member7",
+ "enum_member8", "enum_member9",
+ "enum_member10", "enum_member11",
+ "enum_member12", "enum_member13",
+ "enum_member14", "enum_member15",
+ "enum_member16", "enum_member17",
+ "enum_member18", "enum_member19",
+ NULL
+};
+const int ENUM_VAL[] = {
+ 0, 13,
+ -500, 63,
+ 64, -64,
+ 65, 2048,
+ 1, 2,
+ -1, 7,
+ 130, -5000,
+ 630, 640,
+ -640, 650,
+ 20480, 10,
+ -1001, -10
+};
+#define SIZE2_RANK1 10
+#define SIZE2_RANK2 20
+#define SIZE2_DIMS {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
+
+#define LONG_STRING "00 index. A long string used for testing. To create new strings, set the first two characters to be some ASCII number other than 00, such as 01."
+
+/* Struct returned from size2_helper function */
+typedef struct size2_helper_struct {
+ h5_stat_size_t empty_size;
+ h5_stat_size_t first_dset;
+ h5_stat_size_t dsets1;
+ h5_stat_size_t dsets2;
+ h5_stat_size_t interleaved;
+ h5_stat_size_t attrs1;
+ h5_stat_size_t attrs2;
+} size2_helper_struct;
/****************************************************************
**
@@ -54,12 +149,13 @@ const unsigned test_type_flags[MAX_INDEXES] =
**
****************************************************************/
static void check_fcpl_values(hid_t fcpl_id, const unsigned nindexes_in,
- const unsigned *flags_in, size_t l2b, size_t b2l)
+ const unsigned *flags_in, const unsigned *minsizes_in,
+ size_t l2b, size_t b2l)
{
unsigned num_indexes;
unsigned index_flags, min_mesg_size;
- unsigned list_size, btree_size;
- unsigned x;
+ size_t list_size, btree_size;
+ unsigned x;
herr_t ret;
/* Verify number of indexes is set to default */
@@ -67,7 +163,7 @@ static void check_fcpl_values(hid_t fcpl_id, const unsigned nindexes_in,
CHECK_I(ret, "H5Pget_shared_mesg_nindexes");
VERIFY(num_indexes, nindexes_in, "H5Pget_shared_mesg_nindexes");
- /* Verify index flags are set to default */
+ /* Verify index flags and minsizes are set */
for(x=1; x<=num_indexes; ++x)
{
ret = H5Pget_shared_mesg_index(fcpl_id, x, &index_flags, &min_mesg_size);
@@ -78,9 +174,9 @@ static void check_fcpl_values(hid_t fcpl_id, const unsigned nindexes_in,
/* Check list-to-btree and btree-to-list values */
ret = H5Pget_shared_mesg_phase_change(fcpl_id, &list_size, &btree_size);
- CHECK_I(ret, "H5Pget_shared_mesg_phase_change");
- VERIFY(list_size, l2b, "H5Pget_shared_mesg_phase_change");
- VERIFY(btree_size, b2l, "H5Pget_shared_mesg_phase_change");
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+ VERIFY(list_size, l2b, "H5Pset_shared_mesg_phase_change");
+ VERIFY(btree_size, b2l, "H5Pset_shared_mesg_phase_change");
}
@@ -95,7 +191,7 @@ static void test_sohm_fcpl(void)
hid_t fcpl_id = -1;
hid_t fcpl2_id = -1;
unsigned x;
- unsigned bad_flags[MAX_INDEXES];
+ char filename[NAME_BUF_SIZE];
herr_t ret; /* Generic return value */
/* Output message about test being performed */
@@ -105,18 +201,20 @@ static void test_sohm_fcpl(void)
CHECK_I(fcpl_id, "H5Pcreate");
/* Verify fcpl values */
- check_fcpl_values(fcpl_id, DEF_NUM_INDEXES, def_type_flags, DEF_L2B, DEF_B2L);
+ check_fcpl_values(fcpl_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);
/* Create a file with this fcpl and make sure that all the values can be
* retrieved.
*/
- fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ h5_fixname(FILENAME, H5P_DEFAULT, filename, sizeof filename);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
CHECK_I(fid, "H5Fcreate");
+
fcpl2_id = H5Fget_create_plist(fid);
CHECK_I(fcpl2_id, "H5Fcreate");
/* Verify fcpl values */
- check_fcpl_values(fcpl2_id, DEF_NUM_INDEXES, def_type_flags, DEF_L2B, DEF_B2L);
+ check_fcpl_values(fcpl2_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);
ret = H5Pclose(fcpl2_id);
CHECK_I(ret, "H5Pclose");
@@ -126,14 +224,14 @@ static void test_sohm_fcpl(void)
*/
ret = H5Fclose(fid);
CHECK_I(ret, "H5Fclose");
- fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
CHECK_I(fid, "H5Fopen");
fcpl2_id = H5Fget_create_plist(fid);
CHECK_I(ret, "H5Fcreate");
/* Verify fcpl values */
- check_fcpl_values(fcpl_id, DEF_NUM_INDEXES, def_type_flags, DEF_L2B, DEF_B2L);
+ check_fcpl_values(fcpl2_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);
/* Clean up */
ret = H5Pclose(fcpl2_id);
@@ -153,23 +251,23 @@ static void test_sohm_fcpl(void)
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
for(x=1; x<=TEST_NUM_INDEXES; ++x)
{
- ret = H5Pset_shared_mesg_index(fcpl_id, x, test_type_flags[x-1], 15 /* JAMES */);
+ ret = H5Pset_shared_mesg_index(fcpl_id, x, test_type_flags[x-1], test_minsizes[x-1]);
CHECK_I(ret, "H5Pset_shared_mesg_index");
}
ret = H5Pset_shared_mesg_phase_change(fcpl_id, TEST_L2B, TEST_B2L);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
- check_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, TEST_L2B, TEST_B2L);
+ check_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);
/* Use the fcpl to create a file and get it back again */
- fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
CHECK_I(fid, "H5Fcreate");
fcpl2_id = H5Fget_create_plist(fid);
CHECK_I(fcpl2_id, "H5Fcreate");
/* Verify fcpl values */
- check_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, TEST_L2B, TEST_B2L);
+ check_fcpl_values(fcpl2_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);
ret = H5Pclose(fcpl2_id);
CHECK_I(ret, "H5Pclose");
@@ -179,14 +277,14 @@ static void test_sohm_fcpl(void)
*/
ret = H5Fclose(fid);
CHECK_I(ret, "H5Fclose");
- fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
CHECK_I(fid, "H5Fopen");
fcpl2_id = H5Fget_create_plist(fid);
CHECK_I(ret, "H5Fcreate");
/* Verify fcpl values */
- check_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, TEST_L2B, TEST_B2L);
+ check_fcpl_values(fcpl2_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);
/* Clean up */
ret = H5Pclose(fcpl2_id);
@@ -214,9 +312,13 @@ static void test_sohm_fcpl(void)
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_FILL_FLAG, 15 /* JAMES */);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_FILL_FLAG, 15 /* JAMES */);
- VERIFY(ret, -1, "H5Pset_shared_mesg_index");
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ VERIFY(fid, -1, "H5Fcreate");
ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_DTYPE_FLAG | H5O_MESG_FILL_FLAG, 15 /* JAMES */);
- VERIFY(ret, -1, "H5Pset_shared_mesg_index");
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ VERIFY(fid, -1, "H5Fcreate");
/* Test list/btree cutoffs. We can set these to any positive value,
* but if the list max is less than the btree min we'll get an error
@@ -228,11 +330,14 @@ static void test_sohm_fcpl(void)
/* Actually, the list max can be exactly 1 greater than the
* btree min, but no more. Also, the errors above shouldn't
- * have corrupted the fcpl.
+ * have corrupted the fcpl, although we do need to reset the
+ * second index that we changed above.
*/
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, test_type_flags[1], 15 /* JAMES */);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 11);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
- fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
CHECK_I(fid, "H5Fcreate");
/* Clean up */
@@ -242,10 +347,2091 @@ static void test_sohm_fcpl(void)
CHECK_I(ret, "H5Fclose");
}
+
+/*-------------------------------------------------------------------------
+ * Function: make_dtype_1
+ *
+ * Purpose: Creates a complicated datatype for use in testing
+ * shared object header messages. The important thing is that
+ * the datatypes must take a lot of space to store on disk.
+ *
+ * Return: Success: datatype ID (should be closed by calling function)
+ * Failure: negative
+ *
+ * Programmer: James Laird
+ * Saturday, August 26, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+make_dtype_1()
+{
+ hid_t dtype1_id = -1;
+ hid_t str_id = -1;
+
+    /* Create compound datatype */
+ if((dtype1_id = H5Tcreate( H5T_COMPOUND, sizeof(struct dtype1_struct)))<0) TEST_ERROR
+
+ if(H5Tinsert(dtype1_id,"i1",HOFFSET(struct dtype1_struct,i1),H5T_NATIVE_INT)<0) TEST_ERROR
+
+ str_id = H5Tcopy(H5T_C_S1);
+ if(H5Tset_size(str_id,10)<0) TEST_ERROR
+
+ if(H5Tinsert(dtype1_id,"vl_string",HOFFSET(dtype1_struct,str),str_id)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i2",HOFFSET(struct dtype1_struct,i2),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i3",HOFFSET(struct dtype1_struct,i3),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i4",HOFFSET(struct dtype1_struct,i4),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i5",HOFFSET(struct dtype1_struct,i5),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i6",HOFFSET(struct dtype1_struct,i6),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i7",HOFFSET(struct dtype1_struct,i7),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"i8",HOFFSET(struct dtype1_struct,i8),H5T_NATIVE_INT)<0) TEST_ERROR
+ if(H5Tinsert(dtype1_id,"f1",HOFFSET(struct dtype1_struct,f1),H5T_NATIVE_FLOAT)<0) TEST_ERROR
+
+ if(H5Tclose(str_id) < 0) TEST_ERROR
+
+ return dtype1_id;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Tclose(str_id);
+ H5Tclose(dtype1_id);
+ } H5E_END_TRY
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: make_dtype_2
+ *
+ * Purpose: Creates complicated datatypes for use in testing
+ * shared object header messages. The important thing is that
+ * the datatypes must take a lot of space to store on disk.
+ *
+ * Return: Success: datatype ID (should be closed by calling function)
+ * Failure: negative
+ *
+ * Programmer: James Laird
+ * Saturday, August 26, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+make_dtype_2()
+{
+ hid_t dtype2_id = -1;
+ hid_t enum_id= -1;
+ hid_t int_id=-1;
+ int x;
+ hsize_t dims[] = {2, 1, 2, 4};
+ size_t size;
+
+ /* Create an int with a strange precision */
+ if((int_id = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR
+ if(H5Tset_precision(int_id, 24) < 0) TEST_ERROR
+
+ /* Create an enumeration using that int */
+ if((enum_id = H5Tenum_create(int_id)) < 0) TEST_ERROR
+
+ for(x=0; x<ENUM_NUM_MEMBS; x++)
+ {
+ if(H5Tenum_insert(enum_id, ENUM_NAME[x], &ENUM_VAL[x]) < 0) TEST_ERROR
+ }
+
+ /* Create arrays of arrays of arrays of enums */
+ if((dtype2_id = H5Tarray_create(enum_id, 3, dims, NULL)) < 0) TEST_ERROR
+ if((dtype2_id = H5Tarray_create(dtype2_id, 4, dims, NULL)) < 0) TEST_ERROR
+ if((dtype2_id = H5Tarray_create(dtype2_id, 2, dims, NULL)) < 0) TEST_ERROR
+ if((dtype2_id = H5Tarray_create(dtype2_id, 1, dims, NULL)) < 0) TEST_ERROR
+
+ if(H5Tclose(enum_id) < 0) TEST_ERROR
+ if(H5Tclose(int_id) < 0) TEST_ERROR
+
+    /* Check the datatype size. If this is different from the #defined
+     * size then the fill values will have the wrong size.
+ */
+ size = H5Tget_size(dtype2_id);
+ if(size != DTYPE2_SIZE) TEST_ERROR
+
+ return dtype2_id;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Tclose(dtype2_id);
+ H5Tclose(enum_id);
+ H5Tclose(int_id);
+ } H5E_END_TRY
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: close_reopen_file
+ *
+ * Purpose: Closes a file and then reopens it. Used to ensure that
+ * SOHMs are written to and read from disk
+ *
+ * Return: Success: new hid_t for the file
+ * Failure: Negative
+ *
+ * Programmer: James Laird
+ * Wednesday, October 4, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+close_reopen_file(hid_t file, const char* filename)
+{
+ if(H5Fclose(file) < 0) goto error;
+ return H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+
+error:
+ return -1;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: size1_helper
+ *
+ * Purpose: Creates object headers that use a large datatype message.
+ *
+ * Used in test_sohm_basic. Should close the file ID passed in.
+ * Set test_file_closing to 1 to add file closing and reopening
+ * whenever possible (to test that SOHMs are written correctly
+ * on disk and not just in memory).
+ *
+ * Return: Success: file ID (may not be the same one passed in)
+ * Failure: Negative
+ *
+ * Programmer: James Laird
+ * Monday, April 10, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+size1_helper(hid_t file, char* filename, int test_file_closing)
+{
+ dtype1_struct wdata = {11, "string", 22, 33, 44, 55, 66, 77, 88, 0.0};
+ dtype1_struct rdata;
+ hid_t dtype1_id, dup_tid, type_id;
+ hid_t space_id;
+ hid_t dset_id;
+ hsize_t dim1[1];
+ int x;
+
+    /* Initialize rdata */
+ strcpy(rdata.str, "\0");
+
+ if((dtype1_id = make_dtype_1()) < 0) TEST_ERROR
+
+ /* Create the dataspace and dataset */
+ dim1[0] = 1;
+ if((space_id=H5Screate_simple(1,dim1,NULL))<0) TEST_ERROR
+
+ if((dset_id = H5Dcreate(file,DSETNAME[0],dtype1_id,space_id,H5P_DEFAULT))<0) TEST_ERROR
+
+ /* Test writing and reading */
+ if(H5Dwrite(dset_id,dtype1_id,H5S_ALL,H5S_ALL,H5P_DEFAULT,&wdata)<0) TEST_ERROR
+
+ if(H5Dread(dset_id,dtype1_id,H5S_ALL,H5S_ALL,H5P_DEFAULT,&rdata)<0) TEST_ERROR
+
+ if(rdata.i1!=wdata.i1 || rdata.i2!=wdata.i2 || HDstrcmp(rdata.str, wdata.str)) {
+ H5_FAILED(); AT();
+ printf("incorrect read data\n");
+ goto error;
+ } /* end if */
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+
+ /* Close and re-open the file if requested*/
+ if(test_file_closing) {
+ if((file = close_reopen_file(file, filename)) < 0) TEST_ERROR
+ }
+
+ /* Create more datasets with the same datatype */
+ if((dset_id = H5Dcreate(file,DSETNAME[1],dtype1_id,space_id,H5P_DEFAULT))<0) TEST_ERROR
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+
+ /* Close and re-open the file if requested*/
+ if(test_file_closing) {
+ if((file = close_reopen_file(file, filename)) < 0) TEST_ERROR
+ }
+
+ if((dset_id = H5Dcreate(file,DSETNAME[2],dtype1_id,space_id,H5P_DEFAULT))<0) TEST_ERROR
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+
+ /* Close and re-open the file if requested*/
+ if(test_file_closing) {
+ if((file = close_reopen_file(file, filename)) < 0) TEST_ERROR
+ }
+
+ if((dset_id = H5Dcreate(file,DSETNAME[3],dtype1_id,space_id,H5P_DEFAULT))<0) TEST_ERROR
+
+ /* Write data to dataset 3 for later */
+ if(H5Dwrite(dset_id,dtype1_id,H5S_ALL,H5S_ALL,H5P_DEFAULT,&wdata)<0) TEST_ERROR
+
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+ if(H5Tclose(dtype1_id)<0) TEST_ERROR
+
+ /* Close and re-open the file if requested*/
+ if(test_file_closing) {
+ if((file = close_reopen_file(file, filename)) < 0) TEST_ERROR
+ }
+
+ /* Make sure the data has been written successfully */
+ if((dset_id = H5Dopen(file, DSETNAME[0]))<0) TEST_ERROR
+
+ if((dtype1_id = H5Dget_type(dset_id))<0) TEST_ERROR
+
+ if((dup_tid = H5Tcopy(dtype1_id))<0) TEST_ERROR
+
+ rdata.i1 = rdata.i2 = 0;
+ strcpy(rdata.str, "\0");
+
+ /* Read data back again */
+ if(H5Dread(dset_id,dup_tid,H5S_ALL,H5S_ALL,H5P_DEFAULT,&rdata)<0) {
+ H5_FAILED(); AT();
+ printf("Can't read data\n");
+ goto error;
+ } /* end if */
+
+ if(rdata.i1!=wdata.i1 || rdata.i2!=wdata.i2 || strcmp(rdata.str, wdata.str)) {
+ H5_FAILED(); AT();
+ printf("incorrect read data\n");
+ goto error;
+ } /* end if */
+
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+ if(H5Tclose(dup_tid)<0) TEST_ERROR
+
+ /* Create several copies of the dataset (this increases the amount of space saved by sharing the datatype message) */
+ for(x=0; x<SOHM_HELPER_NUM_EX_DSETS; x++) {
+ if((type_id = H5Tcopy(dtype1_id)) < 0) TEST_ERROR
+ if((dset_id = H5Dcreate(file,EXTRA_DSETNAME[x],type_id,space_id,H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(H5Tclose(type_id)<0) TEST_ERROR
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+ /* Close and re-open the file if requested*/
+ if(test_file_closing) {
+ if((file = close_reopen_file(file, filename)) < 0) TEST_ERROR
+ }
+ }
+
+ if(H5Tclose(dtype1_id)<0) TEST_ERROR
+ if(H5Sclose(space_id)<0) TEST_ERROR
+
+ /* Ensure that we can still read data back from dataset 3 */
+ if((dset_id = H5Dopen(file, DSETNAME[3]))<0) TEST_ERROR
+
+ if((dtype1_id = H5Dget_type(dset_id))<0) TEST_ERROR
+
+ if((dup_tid = H5Tcopy(dtype1_id))<0) TEST_ERROR
+
+ rdata.i1 = rdata.i2 = 0;
+
+ /* Read data back again */
+ if(H5Dread(dset_id,dup_tid,H5S_ALL,H5S_ALL,H5P_DEFAULT,&rdata)<0) {
+ H5_FAILED(); AT();
+ printf("Can't read data\n");
+ goto error;
+ } /* end if */
+
+ if(rdata.i1!=wdata.i1 || rdata.i2!=wdata.i2 || strcmp(rdata.str, wdata.str)) {
+ H5_FAILED(); AT();
+ printf("incorrect read data\n");
+ goto error;
+ } /* end if */
+
+ if(H5Dclose(dset_id)<0) TEST_ERROR
+ if(H5Tclose(dtype1_id)<0) TEST_ERROR
+ if(H5Tclose(dup_tid)<0) TEST_ERROR
+ return file;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Tclose(dtype1_id);
+ H5Tclose(type_id);
+ H5Tclose(dup_tid);
+ H5Dclose(dset_id);
+ H5Fclose(file);
+ } H5E_END_TRY
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_sohm_size1
+ *
+ * Purpose: Tests shared object header messages with a large datatype
+ *
+ * Programmer: James Laird
+ * Monday, April 10, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void test_sohm_size1(void)
+{
+ hid_t file = -1;
+ hid_t fcpl_id = -1;
+ hsize_t norm_oh_size;
+ hsize_t sohm_oh_size;
+ hsize_t sohm_btree_oh_size;
+ h5_stat_size_t norm_empty_filesize;
+ h5_stat_size_t sohm_empty_filesize;
+ h5_stat_size_t sohm_btree_empty_filesize;
+ h5_stat_size_t norm_final_filesize;
+ h5_stat_size_t sohm_final_filesize;
+ h5_stat_size_t sohm_btree_final_filesize;
+ h5_stat_size_t norm_final_filesize2;
+ h5_stat_size_t sohm_final_filesize2;
+ h5_stat_size_t sohm_btree_final_filesize2;
+ H5G_stat_t statbuf;
+ unsigned num_indexes = 1;
+ unsigned index_flags = H5O_MESG_DTYPE_FLAG;
+ unsigned min_mesg_size = 50;
+ unsigned list_max = 11;
+ unsigned btree_min = 10;
+ herr_t ret;
+
+ MESSAGE(5, ("Testing that shared datatypes save space\n"));
+
+
+ /* Create a file with SOHMs disabled and get its size */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the file size */
+ norm_empty_filesize = h5_get_file_size(FILENAME);
+
+ /* Add a bunch of large datatypes to the file */
+ file = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file, "H5Fopen");
+ file = size1_helper(file, FILENAME, 0);
+
+ /* Get the size of a dataset object header */
+ ret = H5Gget_objinfo(file, DSETNAME[0], 0, &statbuf);
+ CHECK_I(ret, "H5Gget_objinfo");
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+ norm_oh_size = statbuf.ohdr.size;
+
+ /* Get the new file size */
+ norm_final_filesize = h5_get_file_size(FILENAME);
+
+ /* Use the same property list to create a new file. */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Add the same large datatypes, but keep closing and re-opening the file */
+ file = size1_helper(file, FILENAME, 1);
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the file size */
+ norm_final_filesize2 = h5_get_file_size(FILENAME);
+
+
+
+ /* Now do the same thing for a file with SOHMs enabled */
+ /* Create FCPL with SOHMs enabled */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ /* Tests one index holding only datatype messages */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, num_indexes);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, index_flags, min_mesg_size);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, list_max, btree_min);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ /* Create a file */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ sohm_empty_filesize = h5_get_file_size(FILENAME);
+
+ /* Add a bunch of datatypes to this file */
+ file = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file, "H5Fopen");
+ file = size1_helper(file, FILENAME, 0);
+
+ /* Get the size of a dataset object header */
+ ret = H5Gget_objinfo(file, DSETNAME[0], 0, &statbuf);
+ CHECK_I(ret, "H5Gget_objinfo");
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+ sohm_oh_size = statbuf.ohdr.size;
+
+ /* Get the new file size */
+ sohm_final_filesize = h5_get_file_size(FILENAME);
+
+ /* Use the same property list to create a new file. */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Add the same large datatypes, but keep closing and re-opening the file */
+ file = size1_helper(file, FILENAME, 1);
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the file size */
+ sohm_final_filesize2 = h5_get_file_size(FILENAME);
+
+
+
+ /* Create FCPL with SOHMs enabled that uses a B-tree index */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ /* Tests one index holding only datatype messages */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, num_indexes);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, index_flags, min_mesg_size);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ /* Create a file */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ sohm_btree_empty_filesize = h5_get_file_size(FILENAME);
+
+ /* Add a bunch of datatypes to this file */
+ file = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file, "H5Fopen");
+ file = size1_helper(file, FILENAME, 0);
+
+ /* Get the size of a dataset object header */
+ ret = H5Gget_objinfo(file, DSETNAME[0], 0, &statbuf);
+ CHECK_I(ret, "H5Gget_objinfo");
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+ sohm_btree_oh_size = statbuf.ohdr.size;
+
+ /* Get the new file size */
+ sohm_btree_final_filesize = h5_get_file_size(FILENAME);
+
+ /* Use the same property list to create a new file. */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file, "H5Fcreate");
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Add the same large datatypes, but keep closing and re-opening the file */
+ file = size1_helper(file, FILENAME, 1);
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the file size */
+ sohm_btree_final_filesize2 = h5_get_file_size(FILENAME);
+
+
+
+ /* Check that all sizes make sense */
+ /* Object headers in SOHM files should be smaller than normal object
+ * headers. How the SOHM messages are stored shouldn't affect the
+ * size of the object header.
+ */
+ if(sohm_oh_size >= norm_oh_size)
+ VERIFY(sohm_oh_size, 1, "H5Fclose");
+ if(sohm_oh_size != sohm_btree_oh_size)
+ VERIFY(sohm_btree_oh_size, 1, "H5Fclose");
+
+ /* Both sohm files should be bigger than a normal file when empty.
+ * It's hard to say whether a B-tree with no nodes allocated should be
+ * smaller than a list with SOHM_HELPER_NUM_DTYPES elements.
+ * JAMES: The sizes here shouldn't really be 1
+ */
+ if(sohm_empty_filesize <= norm_empty_filesize)
+ VERIFY(sohm_empty_filesize, 1, "H5Fclose");
+
+ if(sohm_btree_empty_filesize <= norm_empty_filesize)
+ VERIFY(sohm_btree_empty_filesize, 1, "H5Fclose");
+
+ /* When full, the sohm btree file should be smaller than the normal file.
+ * The sohm list file should be at least as small, since it doesn't need the
+ * overhead of a B-tree.
+ */
+ if(sohm_btree_final_filesize >= norm_final_filesize)
+ VERIFY(sohm_btree_final_filesize, 1, "H5Fclose");
+ if(sohm_final_filesize > sohm_btree_final_filesize)
+ VERIFY(sohm_final_filesize, 1, "H5Fclose");
+
+ /* This shouldn't change even if we open and close the file */
+ if(sohm_btree_final_filesize2 >= norm_final_filesize2)
+ VERIFY(sohm_btree_final_filesize2, 1, "H5Fclose");
+ if(sohm_final_filesize2 > sohm_btree_final_filesize2)
+ VERIFY(sohm_final_filesize2, 1, "H5Fclose");
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: sohm_attr_helper
+ *
+ * Purpose: Given an fcpl, tests creating attributes with and without
+ * committed datatypes.
+ *
+ * Programmer: James Laird
+ * Thursday, November 30, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static void sohm_attr_helper(hid_t fcpl_id)
+{
+ hid_t file_id;
+ hid_t type_id;
+ hid_t space_id;
+ hid_t group_id;
+ hid_t attr_id;
+ hsize_t dims = 2;
+ int wdata[2] = {7, 42};
+ int rdata[2];
+ herr_t ret;
+ hsize_t x;
+
+ /* Create a file using the fcpl */
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fcreate");
+
+ /* Create a normal datatype and dataset */
+ type_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK_I(type_id, "H5Tcopy");
+ space_id = H5Screate_simple(1, &dims, &dims);
+ CHECK_I(space_id, "H5Screate_simple");
+
+ /* Create and verify an attribute on a group */
+ group_id = H5Gcreate(file_id, "group", 100);
+ CHECK_I(group_id, "H5Gcreate");
+ attr_id = H5Acreate(group_id, "attribute", type_id, space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, wdata);
+ CHECK_I(ret, "H5Awrite");
+
+ /* Close the datatype and group */
+ ret = H5Tclose(type_id);
+ CHECK_I(ret, "H5Tclose");
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+
+ /* Flush the file to force data to be written */
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ CHECK_I(ret, "H5Fflush");
+
+ /* Verify */
+ memset(rdata, 0, sizeof(rdata));
+ ret = H5Aread(attr_id, H5T_NATIVE_INT, rdata);
+ CHECK_I(ret, "H5Aread");
+ for(x=0; x<dims; ++x) {
+ VERIFY(rdata[x], wdata[x], "H5Aread");
+ }
+
+ /* Cleanup */
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ /* Repeat with a committed datatype */
+ type_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK_I(type_id, "H5Tcopy");
+ ret = H5Tcommit(file_id, "datatype", type_id);
+ CHECK_I(ret, "H5Tcommit");
+
+ /* Create and verify an attribute */
+ group_id = H5Gcreate(file_id, "another_group", 100);
+ CHECK_I(group_id, "H5Gcreate");
+ attr_id = H5Acreate(group_id, "attribute", type_id, space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, wdata);
+ CHECK_I(ret, "H5Awrite");
+
+ /* Close the datatype and group */
+ ret = H5Tclose(type_id);
+ CHECK_I(ret, "H5Tclose");
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+
+ /* Flush the file to force data to be written */
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ CHECK_I(ret, "H5Fflush");
+
+ /* Verify */
+ memset(rdata, 0, sizeof(rdata));
+ ret = H5Aread(attr_id, H5T_NATIVE_INT, rdata);
+ CHECK_I(ret, "H5Aread");
+ for(x=0; x<dims; ++x) {
+ VERIFY(rdata[x], wdata[x], "H5Aread");
+ }
+
+ /* Cleanup */
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+ ret = H5Sclose(space_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Fclose(file_id);
+ CHECK_I(ret, "H5Fclose");
+}
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_sohm_attrs
+ *
+ * Purpose: Attributes can be shared and can also contain shared
+ * datatype and dataspace messages. Committed datatypes
+ * shouldn't be shared.
+ *
+ * Test permutations of this.
+ *
+ * Programmer: James Laird
+ * Thursday, November 30, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static void test_sohm_attrs()
+{
+ hid_t fcpl_id;
+ herr_t ret;
+
+ MESSAGE(5, ("Testing that shared messages work with attributes\n"));
+
+ /* Create an fcpl with no shared messages */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Make sure attributes can be read with these settings (they'd better!) */
+ sohm_attr_helper(fcpl_id);
+
+
+ /* Run tests with only one kind of message to be shared */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_ATTR_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Verify */
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_DTYPE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+
+ /* Run with any two types shared */
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG | H5O_MESG_DTYPE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_ATTR_FLAG | H5O_MESG_DTYPE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG | H5O_MESG_ATTR_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+
+ /* Run test with all three kinds of message shared */
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG | H5O_MESG_DTYPE_FLAG | H5O_MESG_ATTR_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+
+ /* Try using two indexes */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_ATTR_FLAG | H5O_MESG_DTYPE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_SDSPACE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_DTYPE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_ATTR_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+
+ /* One index for each kind of message */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 3, H5O_MESG_SDSPACE_FLAG, 2);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ sohm_attr_helper(fcpl_id);
+
+
+ /* Close the FCPL */
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+}
+
+/*-------------------------------------------------------------------------
+ * Function: size2_verify_plist1
+ *
+ * Purpose: Verify that the property list passed in is in fact the
+ * same property list used as dcpl1_id in the size2 helper
+ * function. This ensures that the filters can be read.
+ *
+ * Programmer: James Laird
+ * Wednesday, November 22, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static void size2_verify_plist1(hid_t plist)
+{
+ size_t cd_nelmts;
+ unsigned int cd_value;
+ char name[NAME_BUF_SIZE];
+ H5Z_filter_t filter;
+ hid_t dtype1_id;
+ dtype1_struct fill1;
+ dtype1_struct fill1_correct;
+ herr_t ret;
+
+ /* Hardcoded to correspond to dcpl1_id created in size2_helper */
+ /* Check filters */
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 0, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_SHUFFLE, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 1, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 1, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 2, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_SHUFFLE, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 3, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_FLETCHER32, "H5Pget_filter");
+
+
+ /* Check fill value */
+ dtype1_id=make_dtype_1();
+ CHECK_I(dtype1_id, "make_dtype_1");
+ memset(&fill1_correct, '1', sizeof(fill1_correct));
+
+ ret = H5Pget_fill_value(plist, dtype1_id, &fill1);
+ CHECK_I(ret, "H5Pget_fill_value");
+
+ ret = memcmp(&fill1, &fill1_correct, sizeof(fill1_correct));
+    VERIFY(ret, 0, "memcmp");
+}
+
+/*-------------------------------------------------------------------------
+ * Function: size2_verify_plist2
+ *
+ * Purpose: Verify that the property list passed in is in fact the
+ * same property list used as dcpl2_id in the size2 helper
+ * function. This ensures that the filters can be read.
+ *
+ * Programmer: James Laird
+ * Wednesday, November 22, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static void size2_verify_plist2(hid_t plist)
+{
+ size_t cd_nelmts;
+ unsigned int cd_value;
+ char name[NAME_BUF_SIZE];
+ H5Z_filter_t filter;
+ hid_t dtype2_id;
+ char fill2[DTYPE2_SIZE];
+ char fill2_correct[DTYPE2_SIZE];
+ herr_t ret;
+
+    /* Hardcoded to correspond to dcpl2_id created in size2_helper */
+ /* Check filters */
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 0, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 1, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 1, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 2, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 2, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 2, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 3, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 1, "H5Pget_filter");
+
+ cd_nelmts = 1;
+ filter = H5Pget_filter(plist, 4, NULL, &cd_nelmts, &cd_value, NAME_BUF_SIZE, name, NULL);
+ CHECK_I(filter, "H5Pget_filter");
+ VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter");
+ VERIFY(cd_value, 5, "H5Pget_filter");
+
+
+ /* Check fill value */
+ dtype2_id=make_dtype_2();
+ CHECK_I(dtype2_id, "make_dtype_2");
+ memset(&fill2_correct, '2', DTYPE2_SIZE);
+
+ ret = H5Pget_fill_value(plist, dtype2_id, &fill2);
+ CHECK_I(ret, "H5Pget_fill_value");
+
+ ret = memcmp(&fill2, &fill2_correct, DTYPE2_SIZE);
+    VERIFY(ret, 0, "memcmp");
+}
+
+/*-------------------------------------------------------------------------
+ * Function: size2_helper
+ *
+ * Purpose:     A helper function for test_sohm_size2.
+ *
+ * Creates a file using the given fcpl, then creates lots
+ * of different kinds of messages within the file and
+ * returns the size of the file for comparison.
+ *
+ * If test_file_closing is not zero, closes and re-opens
+ * the file after every write.
+ *
+ * Doesn't close the property list. Prints an error message
+ * if there's a failure, but doesn't alter its return value.
+ *
+ * Programmer: James Laird
+ * Friday, November 17, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static size2_helper_struct size2_helper(hid_t fcpl_id, int test_file_closing)
+{
+ hid_t file_id = -1;
+ hid_t dtype1_id=-1;
+ hid_t dtype2_id=-1;
+ hid_t dspace1_id=-1;
+ hid_t dspace2_id=-1;
+ hid_t dcpl1_id=-1;
+ hid_t dcpl2_id=-1;
+ hid_t dset_id=-1;
+ hid_t attr_type_id=-1;
+ hid_t attr_space_id=-1;
+ hid_t attr_id=-1;
+ hid_t group_id=-1;
+ size2_helper_struct ret_val; /* We'll fill in this struct as we go */
+ char attr_string1[NAME_BUF_SIZE];
+ char attr_string2[NAME_BUF_SIZE];
+ char attr_name[NAME_BUF_SIZE];
+ int x;
+ herr_t ret;
+
+ /* Constants used in this function */
+ const int rank1 = SIZE2_RANK1;
+ const int rank2 = SIZE2_RANK2;
+ const hsize_t dims[20] = SIZE2_DIMS;
+ dtype1_struct fill1;
+ char fill2[DTYPE2_SIZE];
+
+ /* Create a file and get its size */
+ /* JAMES: is fixname needed at all? */
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fcreate");
+
+ ret = H5Fclose(file_id);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the file size */
+ ret_val.empty_size = h5_get_file_size(FILENAME);
+
+ /* Re-open the file and set up messages to write */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+
+ /* Create two large datatype messages */
+ dtype1_id=make_dtype_1();
+ CHECK_I(dtype1_id, "make_dtype_1");
+    dtype2_id=make_dtype_2();
+    CHECK_I(dtype2_id, "make_dtype_2");
+
+ /* Create some large dataspaces */
+ dspace1_id=H5Screate_simple(rank1, dims, dims);
+ CHECK_I(dspace1_id, "H5Screate_simple");
+ dspace2_id=H5Screate_simple(rank2, dims, dims);
+ CHECK_I(dspace2_id, "H5Screate_simple");
+
+ /* fill1 and fill2 are fill values for the two datatypes.
+ * We'll set them in the DCPL.
+ */
+ memset(&fill1, '1', sizeof(dtype1_struct));
+ memset(&fill2, '2', DTYPE2_SIZE);
+
+ dcpl1_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK_I(dcpl1_id, "H5Pcreate");
+ H5Pset_fill_value(dcpl1_id, dtype1_id, &fill1);
+
+ dcpl2_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK_I(dcpl2_id, "H5Pcreate");
+ H5Pset_fill_value(dcpl2_id, dtype2_id, &fill2);
+
+ /* Filter messages we'll create by setting them in a DCPL. These
+ * values don't need to make sense, they just need to take up space.
+ */
+ ret = H5Pset_chunk(dcpl1_id, rank1, dims);
+ CHECK_I(ret, "H5Pset_chunk");
+ ret = H5Pset_shuffle(dcpl1_id);
+ CHECK_I(ret, "H5Pset_shuffle");
+ ret = H5Pset_deflate(dcpl1_id, 1);
+ CHECK_I(ret, "H5Pset_deflate");
+ ret = H5Pset_shuffle(dcpl1_id);
+ CHECK_I(ret, "H5Pset_shuffle");
+ ret = H5Pset_fletcher32(dcpl1_id);
+ CHECK_I(ret, "H5Pset_fletcher32");
+ /* Make sure that this property list is what it should be */
+ size2_verify_plist1(dcpl1_id);
+
+ /* Second dcpl */
+ ret = H5Pset_chunk(dcpl2_id, rank2, dims);
+ CHECK_I(ret, "H5Pset_chunk");
+ ret = H5Pset_deflate(dcpl2_id, 1);
+ CHECK_I(ret, "H5Pset_deflate");
+ ret = H5Pset_deflate(dcpl2_id, 2);
+ CHECK_I(ret, "H5Pset_deflate");
+ ret = H5Pset_deflate(dcpl2_id, 2);
+ CHECK_I(ret, "H5Pset_deflate");
+ ret = H5Pset_deflate(dcpl2_id, 1);
+ CHECK_I(ret, "H5Pset_deflate");
+ ret = H5Pset_deflate(dcpl2_id, 5);
+ CHECK_I(ret, "H5Pset_deflate");
+ /* Make sure that this property list is what it should be */
+ size2_verify_plist2(dcpl2_id);
+
+ /* Create a dataset with a big datatype, dataspace, fill value,
+ * and filter pipeline.
+ */
+ dset_id = H5Dcreate(file_id, DSETNAME[0], dtype1_id, dspace1_id, dcpl1_id);
+ CHECK_I(dset_id, "H5Dcreate");
+
+ memset(attr_string1, 0, NAME_BUF_SIZE);
+ memset(attr_string2, 0, NAME_BUF_SIZE);
+ strcpy(attr_string1, LONG_STRING);
+ strcpy(attr_string2, LONG_STRING);
+ attr_string2[1] = '1'; /* The second string starts "01 index..." */
+
+ /* Create an attribute on this dataset with a large string value */
+ attr_type_id = H5Tcopy(H5T_C_S1);
+ CHECK_I(attr_type_id, "H5Tcopy");
+ ret = H5Tset_size(attr_type_id ,NAME_BUF_SIZE);
+ CHECK_I(ret, "H5Tset_size");
+ attr_space_id = H5Screate_simple(1, dims, dims);
+ CHECK_I(attr_space_id, "H5Screate_simple");
+
+ attr_id = H5Acreate(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string1);
+    CHECK_I(ret, "H5Awrite");
+
+ /* Close the file and everything in it. */
+ H5Aclose(attr_id);
+ CHECK_I(attr_id, "H5Aclose");
+ H5Dclose(dset_id);
+ CHECK_I(dset_id, "H5Dclose");
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+
+ /* Get the file's size now */
+ ret_val.first_dset = h5_get_file_size(FILENAME);
+
+ /* Re-open the file and create the same dataset several more times. */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+
+ for(x=1; x<NUM_DATASETS; ++x)
+ {
+ dset_id = H5Dcreate(file_id, DSETNAME[x], dtype1_id, dspace1_id, dcpl1_id);
+ CHECK_I(dset_id, "H5Dcreate");
+
+ attr_id = H5Acreate(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string1);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ if(test_file_closing) {
+ file_id = close_reopen_file(file_id, FILENAME);
+ CHECK_I(file_id, "H5Fopen");
+ }
+ }
+
+ /* Close file and get its size now */
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+ ret_val.dsets1 = h5_get_file_size(FILENAME);
+
+
+ /* Now create a new group filled with datasets that use all different messages */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gcreate(file_id, "group", 0);
+ CHECK_I(group_id, "H5Gcreate");
+
+ /* Create NUM_DATASETS datasets in the new group */
+ for(x=0; x<NUM_DATASETS; ++x)
+ {
+ dset_id = H5Dcreate(group_id, DSETNAME[x], dtype2_id, dspace2_id, dcpl2_id);
+ CHECK_I(dset_id, "H5Dcreate");
+
+ attr_id = H5Acreate(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string2);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ if(test_file_closing) {
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ file_id = close_reopen_file(file_id, FILENAME);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "group");
+ CHECK_I(group_id, "H5Gopen");
+ }
+ }
+
+ /* Close file and get its size now */
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+ ret_val.dsets2 = h5_get_file_size(FILENAME);
+
+
+ /* Create a new group and interleave writes of datasets types 1 and 2. */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gcreate(file_id, "interleaved group", 0);
+ CHECK_I(group_id, "H5Gcreate");
+
+ /* Create NUM_DATASETS datasets in the new group */
+ for(x=0; x<NUM_DATASETS; x+=2)
+ {
+ dset_id = H5Dcreate(group_id, DSETNAME[x], dtype1_id, dspace1_id, dcpl1_id);
+ CHECK_I(dset_id, "H5Dcreate");
+
+ attr_id = H5Acreate(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string1);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ dset_id = H5Dcreate(group_id, DSETNAME[x+1], dtype2_id, dspace2_id, dcpl2_id);
+ CHECK_I(dset_id, "H5Dcreate");
+
+ attr_id = H5Acreate(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string2);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ if(test_file_closing) {
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ file_id = close_reopen_file(file_id, FILENAME);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "interleaved group");
+ CHECK_I(group_id, "H5Gopen");
+ }
+ }
+
+ /* Close file and get its size now */
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+ ret_val.interleaved = h5_get_file_size(FILENAME);
+
+ /* Create lots of new attribute messages on the group
+ * (using different strings for the attribute)
+ */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "group");
+ CHECK_I(group_id, "H5Gopen");
+
+ strcpy(attr_name, "00 index");
+
+ for(x=0; x<NUM_ATTRIBUTES; ++x)
+ {
+ /* Create a unique name and value for each attribute */
+ attr_string1[0] = attr_name[0] = (x / 10) + '0';
+ attr_string1[1] = attr_name[1] = (x % 10) + '0';
+
+ /* Create an attribute on the group */
+ attr_id = H5Acreate(group_id, attr_name, attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string1);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ if(test_file_closing) {
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ file_id = close_reopen_file(file_id, FILENAME);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "group");
+ CHECK_I(group_id, "H5Gopen");
+ }
+ }
+
+ /* Close file and get its size now */
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+ ret_val.attrs1 = h5_get_file_size(FILENAME);
+
+
+ /* Create all of the attributes again on the other group */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "interleaved group");
+ CHECK_I(group_id, "H5Gopen");
+
+ for(x=0; x<NUM_ATTRIBUTES; ++x)
+ {
+ /* Create the same name and value for each attribute as before */
+ attr_string1[0] = attr_name[0] = (x / 10) + '0';
+ attr_string1[1] = attr_name[1] = (x % 10) + '0';
+
+ /* Create an attribute on the group */
+ attr_id = H5Acreate(group_id, attr_name, attr_type_id, attr_space_id, H5P_DEFAULT);
+ CHECK_I(attr_id, "H5Acreate");
+ ret = H5Awrite(attr_id, attr_type_id, attr_string1);
+ CHECK_I(ret, "H5Awrite");
+
+ ret = H5Aclose(attr_id);
+ CHECK_I(ret, "H5Aclose");
+
+ if(test_file_closing) {
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ file_id = close_reopen_file(file_id, FILENAME);
+ CHECK_I(file_id, "H5Fopen");
+ group_id = H5Gopen(file_id, "interleaved group");
+ CHECK_I(group_id, "H5Gopen");
+ }
+ }
+ /* Close file and get its size now */
+ ret = H5Gclose(group_id);
+ CHECK_I(ret, "H5Gclose");
+ H5Fclose(file_id);
+ CHECK_I(file_id, "H5Fclose");
+ ret_val.attrs2 = h5_get_file_size(FILENAME);
+
+
+ /* Close everything */
+ ret = H5Sclose(attr_space_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Tclose(attr_type_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Tclose(dtype1_id);
+ CHECK_I(ret, "H5Tclose");
+ ret = H5Tclose(dtype2_id);
+ CHECK_I(ret, "H5Tclose");
+ ret = H5Sclose(dspace1_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Sclose(dspace2_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Pclose(dcpl1_id);
+ CHECK_I(ret, "H5Pclose");
+ ret = H5Pclose(dcpl2_id);
+ CHECK_I(ret, "H5Pclose");
+
+ return ret_val;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: size2_verify
+ *
+ * Purpose:     A helper function to verify the file created by size2_helper.
+ *
+ * Runs various tests (not exhaustive) to ensure that the
+ * file FILENAME actually has the structure that size2_helper
+ * should have created.
+ *
+ * Programmer: James Laird
+ * Friday, November 17, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+static void size2_verify()
+{
+ hid_t file_id = -1;
+ hid_t dset_id=-1;
+ hid_t plist_id=-1;
+ hid_t space_id=-1;
+ hid_t group1_id, group2_id;
+ hid_t attr1_id, attr2_id;
+ hid_t attr_type_id;
+ int x, y;
+ herr_t ret;
+ char attr_string[NAME_BUF_SIZE];
+ char attr_correct_string[NAME_BUF_SIZE];
+ char attr_name[NAME_BUF_SIZE];
+ int ndims;
+ hsize_t dims[20];
+ hsize_t correct_dims[20] = SIZE2_DIMS;
+
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fopen");
+
+
+ /* Verify property lists and dataspaces */
+
+ /* Get property lists from first batch of datasets */
+ for(x=0; x<NUM_DATASETS; ++x) {
+ dset_id = H5Dopen(file_id, DSETNAME[x]);
+ CHECK_I(dset_id, "H5Dopen");
+ plist_id = H5Dget_create_plist(dset_id);
+ CHECK_I(plist_id, "H5Dget_create_plist");
+ size2_verify_plist1(plist_id);
+ ret = H5Pclose(plist_id);
+ CHECK_I(ret, "H5Pclose");
+
+ space_id = H5Dget_space(dset_id);
+ CHECK_I(space_id, "H5Dget_space");
+ ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ CHECK_I(ndims, "H5Sget_simple_extent_dims");
+ VERIFY(ndims, SIZE2_RANK1, "H5Sget_simple_extent_dims");
+ for(y=0; y<ndims; ++y) {
+ VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
+ }
+ ret = H5Sclose(space_id);
+ CHECK_I(ret, "H5Sclose");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ }
+ /* Get property lists from second batch of datasets */
+ group1_id = H5Gopen(file_id, "group");
+ CHECK_I(group1_id, "H5Gopen");
+ for(x=0; x<NUM_DATASETS; ++x)
+ {
+ dset_id = H5Dopen(group1_id, DSETNAME[x]);
+ CHECK_I(dset_id, "H5Dopen");
+ plist_id = H5Dget_create_plist(dset_id);
+ CHECK_I(plist_id, "H5Dget_create_plist");
+ size2_verify_plist2(plist_id);
+ ret = H5Pclose(plist_id);
+ CHECK_I(ret, "H5Pclose");
+
+ space_id = H5Dget_space(dset_id);
+ CHECK_I(space_id, "H5Dget_space");
+ ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ CHECK_I(ndims, "H5Sget_simple_extent_dims");
+ VERIFY(ndims, SIZE2_RANK2, "H5Sget_simple_extent_dims");
+ for(y=0; y<ndims; ++y) {
+ VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
+ }
+ ret = H5Sclose(space_id);
+ CHECK_I(ret, "H5Sclose");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ }
+ ret = H5Gclose(group1_id);
+ CHECK_I(ret, "H5Gclose");
+
+ /* Get property lists from interleaved group of datasets */
+ group1_id = H5Gopen(file_id, "interleaved group");
+ CHECK_I(group1_id, "H5Gopen");
+ for(x=0; x<NUM_DATASETS; x += 2) {
+ /* First "type 1" dataset */
+ dset_id = H5Dopen(group1_id, DSETNAME[x]);
+ CHECK_I(dset_id, "H5Dopen");
+ plist_id = H5Dget_create_plist(dset_id);
+ CHECK_I(plist_id, "H5Dget_create_plist");
+ size2_verify_plist1(plist_id);
+ ret = H5Pclose(plist_id);
+ CHECK_I(ret, "H5Pclose");
+
+ space_id = H5Dget_space(dset_id);
+ CHECK_I(space_id, "H5Dget_space");
+ ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ CHECK_I(ndims, "H5Sget_simple_extent_dims");
+ VERIFY(ndims, SIZE2_RANK1, "H5Sget_simple_extent_dims");
+ for(y=0; y<ndims; ++y) {
+ VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
+ }
+ ret = H5Sclose(space_id);
+ CHECK_I(ret, "H5Sclose");
+
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+
+ /* Second "type 2" dataset */
+ dset_id = H5Dopen(group1_id, DSETNAME[x+1]);
+ CHECK_I(dset_id, "H5Dopen");
+ plist_id = H5Dget_create_plist(dset_id);
+ CHECK_I(plist_id, "H5Dget_create_plist");
+ size2_verify_plist2(plist_id);
+ ret = H5Pclose(plist_id);
+ CHECK_I(ret, "H5Pclose");
+
+ space_id = H5Dget_space(dset_id);
+ CHECK_I(space_id, "H5Dget_space");
+ ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ CHECK_I(ndims, "H5Sget_simple_extent_dims");
+ VERIFY(ndims, SIZE2_RANK2, "H5Sget_simple_extent_dims");
+ for(y=0; y<ndims; ++y) {
+ VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
+ }
+ ret = H5Sclose(space_id);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Dclose(dset_id);
+ CHECK_I(ret, "H5Dclose");
+ }
+ ret = H5Gclose(group1_id);
+ CHECK_I(ret, "H5Gclose");
+
+
+ /* Verify attributes */
+
+ /* Create attribute data type */
+ attr_type_id = H5Tcopy(H5T_C_S1);
+ CHECK_I(attr_type_id, "H5Tcopy");
+ ret = H5Tset_size(attr_type_id ,NAME_BUF_SIZE);
+ CHECK_I(ret, "H5Tset_size");
+
+ /* Read attributes on both groups and verify that they are correct */
+ group1_id = H5Gopen(file_id, "group");
+ CHECK_I(group1_id, "H5Gopen");
+ group2_id = H5Gopen(file_id, "interleaved group");
+ CHECK_I(group2_id, "H5Gopen");
+
+ memset(attr_string, 0, NAME_BUF_SIZE);
+ memset(attr_correct_string, 0, NAME_BUF_SIZE);
+ strcpy(attr_correct_string, LONG_STRING);
+ strcpy(attr_name, "00 index");
+
+ for(x=0; x<NUM_ATTRIBUTES; ++x)
+ {
+ /* Create the name and correct value for each attribute */
+ attr_correct_string[0] = attr_name[0] = (x / 10) + '0';
+ attr_correct_string[1] = attr_name[1] = (x % 10) + '0';
+
+ attr1_id = H5Aopen_name(group1_id, attr_name);
+ CHECK_I(attr1_id, "H5Aopen_name");
+ attr2_id = H5Aopen_name(group2_id, attr_name);
+ CHECK_I(attr2_id, "H5Aopen_name");
+
+ ret = H5Aread(attr1_id, attr_type_id, attr_string);
+ CHECK_I(ret, "H5Aread");
+ VERIFY_STR(attr_string, attr_correct_string, "H5Aread");
+ ret = H5Aread(attr2_id, attr_type_id, attr_string);
+ CHECK_I(ret, "H5Aread");
+ VERIFY_STR(attr_string, attr_correct_string, "H5Aread");
+
+ ret = H5Aclose(attr1_id);
+ CHECK_I(attr1_id, "H5Aclose");
+ ret = H5Aclose(attr2_id);
+ CHECK_I(attr2_id, "H5Aclose");
+ }
+
+ /* Close everything */
+ ret = H5Tclose(attr_type_id);
+ CHECK_I(ret, "H5Tclose");
+ ret = H5Gclose(group1_id);
+ CHECK_I(ret, "H5Gclose");
+ ret = H5Gclose(group2_id);
+ CHECK_I(ret, "H5Gclose");
+ ret = H5Fclose(file_id);
+ CHECK_I(ret, "H5Fclose");
+}
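+
+/* A minimal sketch (not exercised by these tests) of the FCPL setup pattern
+ * that test_sohm_size2 below repeats with different parameters.  It assumes
+ * only the H5Pset_shared_mesg_* calls already used in this file and follows
+ * their 1-based index numbering; the helper name is made up for illustration.
+ */
+#if 0
+static hid_t
+make_sohm_fcpl_sketch(void)
+{
+    hid_t fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+
+    if(fcpl_id < 0)
+        return -1;
+
+    /* One index that shares every kind of message at least 20 bytes long */
+    if(H5Pset_shared_mesg_nindexes(fcpl_id, 1) < 0)
+        goto error;
+    if(H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_ALL_FLAG, 20) < 0)
+        goto error;
+
+    /* Keep up to 30 shared messages in a list; switch to a B-tree above
+     * that, and convert back to a list if the count drops to 25 or fewer.
+     */
+    if(H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25) < 0)
+        goto error;
+
+    return fcpl_id;
+
+error:
+    H5Pclose(fcpl_id);
+    return -1;
+}
+#endif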
+/*-------------------------------------------------------------------------
+ * Function: test_sohm_size2
+ *
+ * Purpose: Tests shared object header messages using size2_helper to
+ * create different kinds of big messages.
+ *
+ * If close_reopen is set, closes and reopens the HDF5 file
+ * repeatedly while writing.
+ *
+ * This test works by first creating FCPLs with various
+ * parameters, then creating a standard file that includes
+ * every kind of message that can be shared using the helper
+ * function size2_helper. The test measures the size of the
+ * file at various points. Once all of the files have been
+ * generated, the test compares the measured sizes of the files.
+ *
+ *
+ * Programmer: James Laird
+ * Friday, November 17, 2006
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void test_sohm_size2(int close_reopen)
+{
+ hid_t fcpl_id = -1;
+ /* Sizes for file with no shared messages at all */
+ size2_helper_struct norm_sizes;
+ /* Sizes for files with all messages in one index */
+ size2_helper_struct list_index_med, list_index_big;
+ size2_helper_struct btree_index, list_index_small;
+ /* Sizes for files with messages in three different indexes */
+ size2_helper_struct mult_index_med, mult_index_btree;
+ /* Sizes for files that don't share all kinds of messages */
+ size2_helper_struct share_some_med, share_some_btree;
+ /* Sizes for files that share different sizes of messages */
+ size2_helper_struct share_some_toobig_index, share_tiny_index, type_space_index;
+ herr_t ret;
+
+ if(close_reopen == 0) {
+ MESSAGE(5, ("Testing that shared object header messages save space\n"));
+ }
+ else {
+ MESSAGE(5, ("Testing that shared messages save space when file is closed and reopened\n"));
+ }
+
+ /* Create an fcpl with SOHMs disabled */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Find out what size file this makes */
+ norm_sizes = size2_helper(fcpl_id, close_reopen);
+ /* Check that the file was created correctly */
+ size2_verify();
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+
+ /* Create an fcpl with one big index */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_ALL_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Set the indexes to use a medium-sized list */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ /* Find out what size file this makes */
+ list_index_med = size2_helper(fcpl_id, close_reopen);
+ /* Check that the file was created correctly */
+ size2_verify();
+
+
+ /* Try making the list really big */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ /* Find out what size file this makes */
+ list_index_big = size2_helper(fcpl_id, close_reopen);
+ /* Check that the file was created correctly */
+ size2_verify();
+
+
+ /* Use a B-tree instead of a list */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ btree_index = size2_helper(fcpl_id, close_reopen);
+ /* Check that the file was created correctly */
+ size2_verify();
+
+
+ /* Use such a small list that it'll become a B-tree */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ list_index_small = size2_helper(fcpl_id, close_reopen);
+ /* Check that the file was created correctly */
+ size2_verify();
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+
+ /* Create a new property list that puts messages in different indexes. */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK_I(fcpl_id, "H5Pcreate");
+
+ /* JAMES: should be zero-indexed? */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG | H5O_MESG_DTYPE_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_FILL_FLAG | H5O_MESG_PLINE_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 3, H5O_MESG_ATTR_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Use lists that are the same size as the "medium" list on the previous
+ * run.
+ */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ mult_index_med = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+
+ /* Use all B-trees */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ mult_index_btree = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, NUM_DATASETS, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ /* Edit the same property list (this should work) and don't share all messages.
+ * Also create one index that holds no messages, to make sure this doesn't
+ * break anything.
+ */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_PLINE_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+/* JAMES ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_NONE_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+*/
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_DTYPE_FLAG | H5O_MESG_FILL_FLAG, 100000);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 3, H5O_MESG_ATTR_FLAG | H5O_MESG_SDSPACE_FLAG, 20);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Use "normal-sized" lists. */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ share_some_med = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+ /* Use btrees. */
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ share_some_btree = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+
+ /* Change the second index to hold only gigantic messages. Result should
+ * be the same as the previous file.
+ */
+ ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_MESG_DTYPE_FLAG | H5O_MESG_FILL_FLAG, 100000);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ share_some_toobig_index = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+
+ /* Share even tiny dataspace and datatype messages. This should result in
+ * attribute datatypes being shared. Make this one use "really big" lists.
+ * It turns out that attribute dataspaces are just big enough that sharing
+ * them saves some space, while sharing datatypes costs about as much in
+ * overhead as it saves.
+ */
+ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_DTYPE_FLAG | H5O_MESG_SDSPACE_FLAG, 1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ share_tiny_index = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+ /* Create the same file but don't share the really tiny messages */
+ ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_DTYPE_FLAG | H5O_MESG_SDSPACE_FLAG, 100);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
+ CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
+
+ type_space_index = size2_helper(fcpl_id, close_reopen);
+ size2_verify();
+
+ ret = H5Pclose(fcpl_id);
+ CHECK_I(ret, "H5Pclose");
+
+
+
+ /* Check that all sizes make sense. There is lots of room for inexact
+ * results here since so many different factors contribute to file size.
+ */
+
+
+ /* Check sizes of all files created using a single index first */
+
+ /* The empty size of each file with shared messages enabled should be the
+ * same, and should be bigger than the empty size of a normal file.
+ */
+ if(norm_sizes.empty_size > list_index_med.empty_size)
+ VERIFY(norm_sizes.empty_size, 1, "h5_get_file_size");
+ if(list_index_med.empty_size != list_index_big.empty_size)
+ VERIFY(list_index_med.empty_size, list_index_big.empty_size, "h5_get_file_size");
+ if(list_index_med.empty_size != btree_index.empty_size)
+ VERIFY(list_index_med.empty_size, btree_index.empty_size, "h5_get_file_size");
+ if(list_index_med.empty_size != list_index_small.empty_size)
+ VERIFY(list_index_med.empty_size, list_index_small.empty_size, "h5_get_file_size");
+ /* The files with indexes shouldn't be that much bigger than a normal
+ * file when empty.
+ */
+ if(list_index_med.empty_size > norm_sizes.empty_size * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Once one dataset has been created (with one of every kind of message),
+ * the normal file should still be smallest. The very small list (which
+ * will later convert to a B-tree) should be smaller than the B-tree since
+ * it has no extra overhead, and so should the medium list.
+ * The very large list should be much larger than anything else.
+ */
+ if(norm_sizes.first_dset >= list_index_small.first_dset)
+ VERIFY(norm_sizes.first_dset, 1, "h5_get_file_size");
+ if(list_index_small.first_dset >= btree_index.first_dset)
+ VERIFY(list_index_small.first_dset, 1, "h5_get_file_size");
+ if(list_index_med.first_dset >= btree_index.first_dset)
+ VERIFY(btree_index.first_dset, 1, "h5_get_file_size");
+ if(btree_index.first_dset >= list_index_big.first_dset)
+ VERIFY(btree_index.first_dset, 1, "h5_get_file_size");
+
+
+ /* Once a few copies of the same dataset have been created, the
+ * very small list shouldn't have become a B-tree yet, so it should
+ * be the smallest file. A larger list should be next, followed
+ * by a B-tree, followed by a normal file, followed by a
+ * list that is too large.
+ */
+ if(list_index_small.dsets1 >= list_index_med.dsets1)
+ VERIFY(list_index_small.dsets1, 1, "h5_get_file_size");
+ if(list_index_med.dsets1 >= btree_index.dsets1)
+ VERIFY(list_index_med.dsets1, 1, "h5_get_file_size");
+ if(btree_index.dsets1 >= norm_sizes.dsets1)
+ VERIFY(btree_index.dsets1, 1, "h5_get_file_size");
+ if(norm_sizes.dsets1 >= list_index_big.dsets1)
+ VERIFY(list_index_big.dsets1, 1, "h5_get_file_size");
+
+ /* The size gain should have been the same for each of the lists;
+ * their overhead is fixed. The B-tree should have gained at least
+ * as much, and the normal file more than that.
+ */
+ if((list_index_small.dsets1 - list_index_small.first_dset) !=
+ (list_index_med.dsets1 - list_index_med.first_dset))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_med.dsets1 - list_index_med.first_dset) !=
+ (list_index_big.dsets1 - list_index_big.first_dset))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_big.dsets1 - list_index_big.first_dset) >
+ (btree_index.dsets1 - btree_index.first_dset))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((btree_index.dsets1 - btree_index.first_dset) >=
+ (norm_sizes.dsets1 - norm_sizes.first_dset))
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Once another kind of each message has been written, the very small list
+ * should convert into a B-tree. Now the medium list should be smallest,
+ * then the B-trees (although the converted B-tree file may be a little
+ * bigger), then the normal file. The largest list may or may not be
+ * bigger than the normal file.
+ */
+ if(list_index_med.dsets2 >= btree_index.dsets2)
+ VERIFY(list_index_med.dsets2, 1, "h5_get_file_size");
+ if(btree_index.dsets2 > list_index_small.dsets2)
+ VERIFY(btree_index.dsets2, 1, "h5_get_file_size");
+ if(list_index_small.dsets2 >= norm_sizes.dsets2)
+ VERIFY(list_index_small.dsets2, 1, "h5_get_file_size");
+ /* If the small list (now a B-tree) is bigger than the existing B-tree,
+ * it shouldn't be much bigger.
+ * It seems that the small list tends to be pretty big anyway, so allow
+ * it twice as much overhead.
+ */
+ if(list_index_small.dsets2 > btree_index.dsets2 * OVERHEAD_ALLOWED * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ /* The lists should have grown the least since they share messages and
+ * have no extra overhead. The normal file should have grown more than
+ * either the lists or the B-tree. The B-tree may not have grown more
+ * than the lists, depending on whether it needed to split nodes or not.
+ */
+ if((list_index_med.dsets2 - list_index_med.dsets1) !=
+ (list_index_big.dsets2 - list_index_big.dsets1))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_big.dsets2 - list_index_big.dsets1) >
+ (btree_index.dsets2 - btree_index.dsets1))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((btree_index.dsets2 - btree_index.dsets1) >=
+ (norm_sizes.dsets2 - norm_sizes.dsets1))
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Interleaving the writes should have no effect on how the messages are
+ * shared. No new messages should be written to the indexes, so the
+ * SOHM files will only get a little bit bigger.
+ */
+ if(list_index_med.interleaved >= btree_index.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(btree_index.interleaved > list_index_small.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_small.interleaved >= norm_sizes.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+ /* The lists should still have grown the same amount. The converted
+ * B-tree shouldn't have grown more than the index that was originally
+ * a B-tree (although it might have grown less if there was extra free
+ * space within the file).
+ */
+ if((list_index_med.interleaved - list_index_med.dsets2) !=
+ (list_index_big.interleaved - list_index_big.dsets2))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_big.interleaved - list_index_big.dsets2) >
+ (btree_index.interleaved - btree_index.dsets2))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_small.interleaved - list_index_small.dsets2) >
+ (btree_index.interleaved - btree_index.dsets2))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((btree_index.interleaved - btree_index.dsets2) >=
+ (norm_sizes.interleaved - norm_sizes.dsets2))
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* After many attributes have been written, both the small and medium lists
+ * should have become B-trees and be about the same size as the index
+ * that started as a B-tree.
+ * Add in OVERHEAD_ALLOWED as a fudge factor here, since the allocation
+ * of file space can be hard to predict.
+ */
+ if(btree_index.attrs1 > list_index_small.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(btree_index.attrs1 > list_index_med.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_med.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_small.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ /* Neither of the converted lists should be too much bigger than
+ * the index that was originally a B-tree.
+ */
+ if(list_index_small.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_med.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ /* The "normal" file should have had less overhead, so should gain less
+ * size than any of the other indexes since none of these attribute
+ * messages could be shared. The large list should have gained
+ * less overhead than the B-tree indexes.
+ */
+ if((norm_sizes.attrs1 - norm_sizes.interleaved) >=
+ (list_index_big.attrs1 - list_index_big.interleaved))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_big.attrs1 - list_index_big.interleaved) >=
+ (list_index_small.attrs1 - list_index_small.interleaved))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_small.attrs1 - list_index_small.interleaved) >
+ (btree_index.attrs1 - btree_index.interleaved))
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Writing another copy of each attribute shouldn't change the ordering
+ * of sizes. The big list index is still too big to be smaller than a
+ * normal file. The B-tree indexes should all be about the same size.
+ */
+ if(btree_index.attrs2 > list_index_small.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_small.attrs2 > btree_index.attrs2 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(btree_index.attrs2 > list_index_med.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_med.attrs2 > btree_index.attrs2 * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_med.attrs2 >= norm_sizes.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(list_index_big.attrs2 >= norm_sizes.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ /* All of the B-tree indexes should have gained about the same amount
+ * of space; at least as much as the list index and less than a normal
+ * file.
+ */
+ if((list_index_small.attrs2 - list_index_small.attrs1) >
+ (btree_index.attrs2 - btree_index.attrs1))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_med.attrs2 - list_index_med.attrs1) >
+ (btree_index.attrs2 - btree_index.attrs1))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((list_index_big.attrs2 - list_index_big.attrs1) >
+ (list_index_med.attrs2 - list_index_med.attrs1))
+ VERIFY(0, 1, "h5_get_file_size");
+ if((btree_index.attrs2 - btree_index.attrs1) >=
+ (norm_sizes.attrs2 - norm_sizes.attrs1))
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* Done checking the first few files that use a single index. */
+
+
+ /* Start comparing other kinds of files with these "standard"
+ * one-index files
+ */
+
+ /* Check files with multiple indexes. */
+ /* These files should be larger when first created than one-index
+ * files.
+ */
+ if(mult_index_med.empty_size <= list_index_med.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.empty_size != mult_index_med.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* When the first dataset is written, they should grow quite a bit as
+ * many different indexes must be created.
+ */
+ if(mult_index_med.first_dset - mult_index_med.empty_size <=
+ list_index_med.first_dset - list_index_med.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.first_dset - mult_index_btree.empty_size <=
+ btree_index.first_dset - btree_index.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* Once that initial overhead is out of the way and the lists/btrees
+ * have been created, files with more than one index should grow at
+ * the same rate or slightly faster than files with just one index
+ * and one heap.
+ */
+ if(mult_index_med.dsets1 - mult_index_med.first_dset !=
+ list_index_med.dsets1 - list_index_med.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.dsets1 - mult_index_btree.first_dset !=
+ btree_index.dsets1 - btree_index.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(mult_index_med.dsets2 - mult_index_med.dsets1 >
+ (list_index_med.dsets2 - list_index_med.dsets1) * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_med.dsets2 - mult_index_med.dsets1 <
+ list_index_med.dsets2 - list_index_med.dsets1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.dsets2 - mult_index_btree.dsets1 >
+ (btree_index.dsets2 - btree_index.dsets1) * OVERHEAD_ALLOWED)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.dsets2 - mult_index_btree.dsets1 <
+ btree_index.dsets2 - btree_index.dsets1)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(mult_index_med.interleaved - mult_index_med.dsets2 !=
+ list_index_med.interleaved - list_index_med.dsets2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.interleaved - mult_index_btree.dsets2 !=
+ btree_index.interleaved - btree_index.dsets2)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* When all the attributes are added, only the index holding attributes
+ * will become a B-tree. Skip the interleaved to attrs1 interval when
+ * this happens because it's hard to predict exactly how much space this
+ * will take.
+ */
+ if(mult_index_med.attrs2 - mult_index_med.attrs1 !=
+ list_index_med.attrs2 - list_index_med.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.attrs2 - mult_index_btree.attrs1 !=
+ btree_index.attrs2 - btree_index.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* The final file size for both of the multiple index files should be
+ * smaller than a normal file but at least roughly as big as the
+ * one-index files.
+ */
+ if(mult_index_med.attrs2 >= norm_sizes.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.attrs2 >= norm_sizes.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_med.attrs2 * OVERHEAD_ALLOWED < btree_index.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(mult_index_btree.attrs2 * OVERHEAD_ALLOWED < btree_index.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Check files that don't share all messages. */
+ /* These files have three indexes like the files above, so they should be
+ * the same size when created.
+ */
+ if(share_some_med.empty_size != mult_index_med.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_med.empty_size != share_some_btree.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* When the first dataset is created, they should not be quite as big
+ * as equivalent files that share all messages (since shared messages
+ * have a little bit of overhead).
+ */
+ if(share_some_med.first_dset >= mult_index_med.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.first_dset >= mult_index_btree.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ /* The files that share only some messages should have a growth rate
+ * in between that of files that share all messages and that of normal
+ * files.
+ */
+ if(share_some_med.interleaved - share_some_med.first_dset <=
+ mult_index_med.interleaved - mult_index_med.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_med.interleaved - share_some_med.first_dset >=
+ norm_sizes.interleaved - norm_sizes.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.interleaved - share_some_btree.first_dset <=
+ mult_index_btree.interleaved - mult_index_btree.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.interleaved - share_some_btree.first_dset >=
+ norm_sizes.interleaved - norm_sizes.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Check the file that only stored gigantic messages in its second
+ * index. Since no messages were that big, it should be identical
+ * to the file with an empty index.
+ */
+ if(share_some_btree.empty_size != share_some_toobig_index.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.first_dset != share_some_toobig_index.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.dsets1 != share_some_toobig_index.dsets1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.dsets2 != share_some_toobig_index.dsets2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.interleaved != share_some_toobig_index.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.attrs1 != share_some_toobig_index.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_some_btree.attrs2 != share_some_toobig_index.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+
+
+ /* Check the file that shares even very tiny messages. Once messages
+ * are written to it, it should gain a little space from sharing the
+ * messages and lose a little space to overhead so that it's just slightly
+ * smaller than a file that doesn't share tiny messages.
+ * If the overhead increases or the size of messages decreases, these
+ * numbers may be off.
+ */
+ if(share_tiny_index.empty_size != type_space_index.empty_size)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.first_dset >= type_space_index.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.first_dset * OVERHEAD_ALLOWED < type_space_index.first_dset)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.dsets1 >= type_space_index.dsets1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.dsets1 * OVERHEAD_ALLOWED < type_space_index.dsets1)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.dsets2 >= type_space_index.dsets2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.dsets2 * OVERHEAD_ALLOWED < type_space_index.dsets2)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.interleaved >= type_space_index.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.interleaved * OVERHEAD_ALLOWED < type_space_index.interleaved)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.attrs1 >= type_space_index.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.attrs1 * OVERHEAD_ALLOWED < type_space_index.attrs1)
+ VERIFY(0, 1, "h5_get_file_size");
+
+ if(share_tiny_index.attrs2 >= type_space_index.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+ if(share_tiny_index.attrs2 * OVERHEAD_ALLOWED < type_space_index.attrs2)
+ VERIFY(0, 1, "h5_get_file_size");
+}
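+
+/* The size comparisons above use two patterns: a strict ordering between two
+ * measured file sizes, or a "not much bigger" bound using the OVERHEAD_ALLOWED
+ * fudge factor.  A standalone sketch of the combined pattern (the helper name
+ * is made up for illustration and is not called by the tests above):
+ */
+#if 0
+static void
+check_size_within_overhead_sketch(hsize_t smaller, hsize_t larger)
+{
+    /* Strict ordering: the first file must really be smaller */
+    if(smaller >= larger)
+        VERIFY(0, 1, "h5_get_file_size");
+
+    /* Fuzzy bound: the second file may exceed the first only by the
+     * OVERHEAD_ALLOWED fudge factor.
+     */
+    if((double)larger > (double)smaller * OVERHEAD_ALLOWED)
+        VERIFY(0, 1, "h5_get_file_size");
+}
+#endif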
+
+
+
/****************************************************************
-**
+**
** test_sohm(): Main Shared Object Header Message testing routine.
**
****************************************************************/
@@ -256,7 +2442,11 @@ test_sohm(void)
MESSAGE(5, ("Testing Shared Object Header Messages\n"));
test_sohm_fcpl(); /* Test SOHMs and file creation plists */
- /* JAMES: test SOHMs and H5*copy (especially when file SOHM properties differ */
+ test_sohm_size1(); /* Tests the sizes of files with one SOHM */
+ test_sohm_attrs(); /* Tests shared messages in attributes */
+ test_sohm_size2(0); /* Tests the sizes of files with multiple SOHMs */
+ test_sohm_size2(1); /* Tests the sizes of files with multiple SOHMs, closing and reopening the file between writes */
+
} /* test_sohm() */