/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the files COPYING and Copyright.html. COPYING can be found at the root * * of the source code distribution tree; Copyright.html can be found at the * * root level of an installed copy of the electronic HDF5 document set and * * is linked from the top-level documents page. It can also be found at * * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have * * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /*********************************************************** * * Test program: tsohm * * Test Shared Object Header Messages * *************************************************************/ #include "testhdf5.h" /* Default SOHM values */ #define DEF_NUM_INDEXES 0 const unsigned def_type_flags[H5O_SHMESG_MAX_NINDEXES] = {0,0,0,0,0,0}; const unsigned def_minsizes[H5O_SHMESG_MAX_NINDEXES] = {250,250,250,250,250,250}; #define DEF_L2B 50 #define DEF_B2L 40 /* Non-default SOHM values for testing */ #define TEST_NUM_INDEXES 4 const unsigned test_type_flags[H5O_SHMESG_MAX_NINDEXES] = {H5O_MESG_FILL_FLAG, H5O_MESG_DTYPE_FLAG | H5O_MESG_ATTR_FLAG, H5O_MESG_SDSPACE_FLAG, H5O_MESG_PLINE_FLAG, 0, 0}; const unsigned test_minsizes[H5O_SHMESG_MAX_NINDEXES] = {0, 2, 40, 100, 3, 1000}; #define TEST_L2B 65 #define TEST_B2L 64 #define FILENAME "tsohm.h5" #define FILENAME_SRC "tsohm_src.h5" #define FILENAME_DST "tsohm_dst.h5" #define NAME_BUF_SIZE 512 /* How much overhead counts as "not much" when converting B-trees, etc. 
*/ #define OVERHEAD_ALLOWED 1.1 #define NUM_DATASETS 10 #define NUM_ATTRIBUTES 100 typedef struct dtype1_struct { int i1; char str[10]; int i2; int i3; int i4; int i5; int i6; int i7; int i8; float f1; } dtype1_struct; #define DTYPE2_SIZE 1024 const char *DSETNAME[] = { "dataset0", "dataset1", "dataset2", "dataset3", "dataset4", "dataset5", "dataset6", "dataset7", "dataset8", "dataset9", "dataset10", "dataset11", NULL }; const char *EXTRA_DSETNAME[] = { "ex_dataset0", "ex_dataset1", "ex_dataset2", "ex_dataset3", "ex_dataset4", "ex_dataset5", "ex_dataset6", "ex_dataset7", "ex_dataset8", "ex_dataset9", "ex_dataset10", "ex_dataset11", "ex_dataset12", "ex_dataset13", "ex_dataset14", "ex_dataset15", "ex_dataset16", "ex_dataset17", "ex_dataset18", "ex_dataset19", NULL }; #define SOHM_HELPER_NUM_EX_DSETS 20 typedef struct complex_t { double re; double im; } complex_t; #define ENUM_NUM_MEMBS 20 const char *ENUM_NAME[] = { "enum_member0", "enum_member1", "enum_member2", "enum_member3", "enum_member4", "enum_member5", "enum_member6", "enum_member7", "enum_member8", "enum_member9", "enum_member10", "enum_member11", "enum_member12", "enum_member13", "enum_member14", "enum_member15", "enum_member16", "enum_member17", "enum_member18", "enum_member19", NULL }; const int ENUM_VAL[] = { 0, 13, -500, 63, 64, -64, 65, 2048, 1, 2, -1, 7, 130, -5000, 630, 640, -640, 650, 20480, 10, -1001, -10 }; #define SIZE2_RANK1 10 #define SIZE2_RANK2 20 #define SIZE2_DIMS {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} #define LONG_STRING "00 index. A long string used for testing. To create new strings, set the first two characters to be some ASCII number other than 00, such as 01." 
/* Struct returned from size2_helper function */ typedef struct size2_helper_struct { h5_stat_size_t empty_size; h5_stat_size_t first_dset; h5_stat_size_t dsets1; h5_stat_size_t dsets2; h5_stat_size_t interleaved; h5_stat_size_t attrs1; h5_stat_size_t attrs2; } size2_helper_struct; /* Number of distinct messages for the sohm_delete test */ #define DELETE_NUM_MESGS 7 #define HALF_DELETE_NUM_MESGS 3 #define DELETE_DIMS {1,1,1,1,1,1,1} /* Number of dimensions in extend_dset test */ #define EXTEND_NDIMS 2 /* Helper function prototypes */ static hid_t make_dtype_1(void); static hid_t make_dtype_2(void); static hid_t close_reopen_file(hid_t file, const char* filename); static void test_sohm_attrs(void); static void size2_verify(void); static void test_sohm_delete(void); static void test_sohm_delete_revert(void); static void test_sohm_extlink(void); /**************************************************************** ** ** check_fcpl_values(): Helper function for test_sohm_fcpl. ** Verifies that the *_in and *_out parameters are equal. ** ****************************************************************/ static void check_fcpl_values(hid_t fcpl_id, const unsigned nindexes_in, const unsigned *flags_in, const unsigned *minsizes_in, unsigned l2b, unsigned b2l) { unsigned num_indexes; unsigned index_flags, min_mesg_size; unsigned list_size, btree_size; unsigned x; herr_t ret; /* Verify number of indexes is set to default */ ret = H5Pget_shared_mesg_nindexes(fcpl_id, &num_indexes); CHECK_I(ret, "H5Pget_shared_mesg_nindexes"); VERIFY(num_indexes, nindexes_in, "H5Pget_shared_mesg_nindexes"); /* Verify index flags and minsizes are set */ for(x=0; x= norm_oh_size) VERIFY(sohm_oh_size, 1, "H5Fclose"); if(sohm_oh_size != sohm_btree_oh_size) VERIFY(sohm_btree_oh_size, 1, "H5Fclose"); /* Both sohm files should be bigger than a normal file when empty. * It's hard to say whether a B-tree with no nodes allocated should be * smaller than a list with SOHM_HELPER_NUM_DTYPES elements. 
* The sizes here shouldn't really be 1; it's just used to ensure that the * error code triggers. */ if(sohm_empty_filesize <= norm_empty_filesize) VERIFY(sohm_empty_filesize, 1, "H5Fclose"); if(sohm_btree_empty_filesize <= norm_empty_filesize) VERIFY(sohm_btree_empty_filesize, 1, "H5Fclose"); /* When full, the sohm btree file should be smaller than the normal file. * The sohm list file should be at least as small, since it doesn't need the * overhead of a B-tree. */ if(sohm_btree_final_filesize >= norm_final_filesize) VERIFY(sohm_btree_final_filesize, 1, "H5Fclose"); if(sohm_final_filesize > sohm_btree_final_filesize) VERIFY(sohm_final_filesize, 1, "H5Fclose"); /* This shouldn't change even if we open and close the file */ if(sohm_btree_final_filesize2 >= norm_final_filesize2) VERIFY(sohm_btree_final_filesize2, 1, "H5Fclose"); if(sohm_final_filesize2 > sohm_btree_final_filesize2) VERIFY(sohm_final_filesize2, 1, "H5Fclose"); } /*------------------------------------------------------------------------- * Function: sohm_attr_helper * * Purpose: Given an fcpl, tests creating attributes with and without * committed datatypes. 
* * Programmer: James Laird * Thursday, November 30, 2006 * *------------------------------------------------------------------------- */ static void sohm_attr_helper(hid_t fcpl_id) { hid_t file_id; hid_t type_id; hid_t space_id; hid_t group_id; hid_t attr_id; hsize_t dims = 2; int wdata[2] = {7, 42}; int rdata[2]; herr_t ret; hsize_t x; /* Create a file using the fcpl */ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); /* Create a normal datatype and dataset */ type_id = H5Tcopy(H5T_NATIVE_INT); CHECK_I(type_id, "H5Tcopy"); space_id = H5Screate_simple(1, &dims, &dims); CHECK_I(space_id, "H5Screate_simple"); /* Create and verify an attribute on a group */ group_id = H5Gcreate(file_id, "group", 100); CHECK_I(group_id, "H5Gcreate"); attr_id = H5Acreate(group_id, "attribute", type_id, space_id, H5P_DEFAULT); CHECK_I(attr_id, "H5Acreate"); ret = H5Awrite(attr_id, H5T_NATIVE_INT, wdata); CHECK_I(ret, "H5Awrite"); /* Close the datatype and group */ ret = H5Tclose(type_id); CHECK_I(ret, "H5Tclose"); ret = H5Gclose(group_id); CHECK_I(ret, "H5Gclose"); /* Flush the file to force data to be written */ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); CHECK_I(ret, "H5Fflush"); /* Verify */ memset(rdata, 0, sizeof(rdata)); ret = H5Aread(attr_id, H5T_NATIVE_INT, rdata); CHECK_I(ret, "H5Aread"); for(x=0; x list_index_med.empty_size) VERIFY(norm_sizes.empty_size, 1, "h5_get_file_size"); if(list_index_med.empty_size != list_index_big.empty_size) VERIFY(list_index_med.empty_size, list_index_big.empty_size, "h5_get_file_size"); if(list_index_med.empty_size != btree_index.empty_size) VERIFY(list_index_med.empty_size, btree_index.empty_size, "h5_get_file_size"); if(list_index_med.empty_size != list_index_small.empty_size) VERIFY(list_index_med.empty_size, list_index_small.empty_size, "h5_get_file_size"); /* The files with indexes shouldn't be that much bigger than an * empty file. 
*/ if(list_index_med.empty_size > norm_sizes.empty_size * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); /* Once one dataset has been created (with one of every kind of message), * the normal file should still be smallest. The very small list * btree_convert should be smaller than the B-tree since it has no * extra overhead. The small list should also be smaller than the B-tree. * The very large list should be much larger than anything else. */ if(norm_sizes.first_dset >= list_index_small.first_dset) VERIFY(norm_sizes.first_dset, 1, "h5_get_file_size"); if(list_index_small.first_dset >= btree_index.first_dset) VERIFY(list_index_small.first_dset, 1, "h5_get_file_size"); if(list_index_med.first_dset >= btree_index.first_dset) VERIFY(btree_index.first_dset, 1, "h5_get_file_size"); if(btree_index.first_dset >= list_index_big.first_dset) VERIFY(list_index_med.first_dset, 1, "h5_get_file_size"); /* Once a few copies of the same dataset have been created, the * very small list shouldn't have become a B-tree yet, so it should * be the smallest file. A larger list should be next, followed * by a B-tree, followed by a normal file, followed by a * list that is too large. */ if(list_index_small.dsets1 >= list_index_med.dsets1) VERIFY(btree_index.dsets1, 1, "h5_get_file_size"); if(list_index_med.dsets1 >= btree_index.dsets1) VERIFY(list_index_med.dsets1, 1, "h5_get_file_size"); if(btree_index.dsets1 >= norm_sizes.dsets1) VERIFY(btree_index.dsets1, 1, "h5_get_file_size"); if(norm_sizes.dsets1 >= list_index_big.dsets1) VERIFY(list_index_big.dsets1, 1, "h5_get_file_size"); /* The size gain should have been the same for each of the lists; * their overhead is fixed. The B-tree should have gained at least * as much, and the normal file more than that. 
*/ if((list_index_small.dsets1 - list_index_small.first_dset) != (list_index_med.dsets1 - list_index_med.first_dset)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_med.dsets1 - list_index_med.first_dset) != (list_index_big.dsets1 - list_index_big.first_dset)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_big.dsets1 - list_index_big.first_dset) > (btree_index.dsets1 - btree_index.first_dset)) VERIFY(0, 1, "h5_get_file_size"); if((btree_index.dsets1 - btree_index.first_dset) >= (norm_sizes.dsets1 - norm_sizes.first_dset)) VERIFY(0, 1, "h5_get_file_size"); /* Once another kind of each message has been written, the very small list * should convert into a B-tree. Now the list should be smallest, then * the B-trees (although the converted B-tree file may be a little bigger), * then the normal file. The largest list may or may not be bigger than * the normal file. */ if(list_index_med.dsets2 >= btree_index.dsets2) VERIFY(list_index_med.dsets2, 1, "h5_get_file_size"); if(btree_index.dsets2 > list_index_small.dsets2) VERIFY(btree_index.dsets2, 1, "h5_get_file_size"); if(list_index_small.dsets2 >= norm_sizes.dsets2) VERIFY(btree_index.dsets2, 1, "h5_get_file_size"); /* If the small list (now a B-tree) is bigger than the existing B-tree, * it shouldn't be much bigger. * It seems that the small lists tends to be pretty big anyway. Allow * for it to have twice as much overhead. */ if(list_index_small.dsets2 > btree_index.dsets2 * OVERHEAD_ALLOWED * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); /* The lists should have grown the least since they share messages and * have no extra overhead. The normal file should have grown more than * either the lists or the B-tree. The B-tree may not have grown more * than the lists, depending on whether it needed to split nodes or not. 
*/ if((list_index_med.dsets2 - list_index_med.dsets1) != (list_index_big.dsets2 - list_index_big.dsets1)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_big.dsets2 - list_index_big.dsets1) > (btree_index.dsets2 - btree_index.dsets1)) VERIFY(0, 1, "h5_get_file_size"); if((btree_index.dsets2 - btree_index.dsets1) >= (norm_sizes.dsets2 - norm_sizes.dsets1)) VERIFY(0, 1, "h5_get_file_size"); /* Interleaving the writes should have no effect on how the messages are * shared. No new messages should be written to the indexes, so the * sohm files will only get a little bit bigger. */ if(list_index_med.interleaved >= btree_index.interleaved) VERIFY(0, 1, "h5_get_file_size"); if(btree_index.interleaved > list_index_small.interleaved) VERIFY(0, 1, "h5_get_file_size"); if(list_index_small.interleaved >= norm_sizes.interleaved) VERIFY(0, 1, "h5_get_file_size"); /* The lists should still have grown the same amount. The converted * B-tree shouldn't have grown more than the index that was originally * a B-tree (although it might have grown less if there was extra free * space within the file). */ if((list_index_med.interleaved - list_index_med.dsets2) != (list_index_big.interleaved - list_index_big.dsets2)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_big.interleaved - list_index_big.dsets2) > (btree_index.interleaved - btree_index.dsets2)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_small.interleaved - list_index_small.dsets2) > (btree_index.interleaved - btree_index.dsets2)) VERIFY(0, 1, "h5_get_file_size"); if((btree_index.interleaved - btree_index.dsets2) >= (norm_sizes.interleaved - norm_sizes.dsets2)) VERIFY(0, 1, "h5_get_file_size"); /* After many attributes have been written, both the small and medium lists * should have become B-trees and be about the same size as the index * that started as a B-tree. * Add in OVERHEAD_ALLOWED as a fudge factor here, since the allocation * of file space can be hard to predict. 
*/ if(btree_index.attrs1 > list_index_small.attrs1) VERIFY(0, 1, "h5_get_file_size"); if(btree_index.attrs1 > list_index_med.attrs1 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(list_index_med.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(list_index_small.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); /* Neither of the converted lists should be too much bigger than * the index that was originally a B-tree. */ if(list_index_small.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(list_index_med.attrs1 > btree_index.attrs1 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); /* The "normal" file should have had less overhead, so should gain less * size than any of the other indexes since none of these attribute * messages could be shared. The large list should have gained * less overhead than the B-tree indexes. */ if((norm_sizes.attrs1 - norm_sizes.interleaved) >= (list_index_big.attrs1 - list_index_big.interleaved)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_big.attrs1 - list_index_big.interleaved) >= (list_index_small.attrs1 - list_index_small.interleaved)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_small.attrs1 - list_index_small.interleaved) > (btree_index.attrs1 - btree_index.interleaved)) VERIFY(0, 1, "h5_get_file_size"); /* Writing another copy of each attribute shouldn't change the ordering * of sizes. The big list index is still too big to be smaller than a * normal file. The B-tree indexes should all be about the same size. 
*/ if(btree_index.attrs2 > list_index_small.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(list_index_small.attrs2 > btree_index.attrs2 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(btree_index.attrs2 > list_index_med.attrs2 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(list_index_med.attrs2 > btree_index.attrs2 * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(list_index_med.attrs2 >= norm_sizes.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(list_index_big.attrs2 >= norm_sizes.attrs2) VERIFY(0, 1, "h5_get_file_size"); /* All of the B-tree indexes should have gained about the same amount * of space; at least as much as the list index and less than a normal * file. */ if((list_index_small.attrs2 - list_index_small.attrs1) > (btree_index.attrs2 - btree_index.attrs1)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_med.attrs2 - list_index_med.attrs1) > (btree_index.attrs2 - btree_index.attrs1)) VERIFY(0, 1, "h5_get_file_size"); if((list_index_big.attrs2 - list_index_big.attrs1) > (list_index_med.attrs2 - list_index_med.attrs1)) VERIFY(0, 1, "h5_get_file_size"); if((btree_index.attrs2 - btree_index.attrs1) >= (norm_sizes.attrs2 - norm_sizes.attrs1)) VERIFY(0, 1, "h5_get_file_size"); /* Done checking the first few files that use a single index. */ /* Start comparing other kinds of files with these "standard" * one-index files */ /* Check files with multiple indexes. */ /* These files should be larger when first created than one-index * files. */ if(mult_index_med.empty_size <= list_index_med.empty_size) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.empty_size != mult_index_med.empty_size) VERIFY(0, 1, "h5_get_file_size"); /* When the first dataset is written, they should grow quite a bit as * many different indexes must be created. 
*/ if(mult_index_med.first_dset - mult_index_med.empty_size <= list_index_med.first_dset - list_index_med.empty_size) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.first_dset - mult_index_btree.empty_size <= btree_index.first_dset - btree_index.empty_size) VERIFY(0, 1, "h5_get_file_size"); /* Once that initial overhead is out of the way and the lists/btrees * have been created, files with more than one index should grow at * the same rate or slightly faster than files with just one index * and one heap. */ if(mult_index_med.dsets1 - mult_index_med.first_dset != list_index_med.dsets1 - list_index_med.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.dsets1 - mult_index_btree.first_dset != btree_index.dsets1 - btree_index.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_med.dsets2 - mult_index_med.dsets1 > (list_index_med.dsets2 - list_index_med.dsets1) * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_med.dsets2 - mult_index_med.dsets1 < list_index_med.dsets2 - list_index_med.dsets1) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.dsets2 - mult_index_btree.dsets1 > (btree_index.dsets2 - btree_index.dsets1) * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.dsets2 - mult_index_btree.dsets1 < btree_index.dsets2 - btree_index.dsets1) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_med.interleaved - mult_index_med.dsets2 != list_index_med.interleaved - list_index_med.dsets2) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.interleaved - mult_index_btree.dsets2 != btree_index.interleaved - btree_index.dsets2) VERIFY(0, 1, "h5_get_file_size"); /* When all the attributes are added, only the index holding attributes * will become a B-tree. Skip the interleaved to attrs1 interval when * this happens because it's hard to predict exactly how much space this * will take. 
*/ if(mult_index_med.attrs2 - mult_index_med.attrs1 != list_index_med.attrs2 - list_index_med.attrs1) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.attrs2 - mult_index_btree.attrs1 != btree_index.attrs2 - btree_index.attrs1) VERIFY(0, 1, "h5_get_file_size"); /* The final file size for both of the multiple index files should be * smaller than a normal file but bigger than any of the one-index files. */ if(mult_index_med.attrs2 >= norm_sizes.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.attrs2 >= norm_sizes.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_med.attrs2 * OVERHEAD_ALLOWED < btree_index.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(mult_index_btree.attrs2 * OVERHEAD_ALLOWED < btree_index.attrs2) VERIFY(0, 1, "h5_get_file_size"); /* Check files that don't share all messages. */ /* These files have three indexes like the files above, so they should be * the same size when created. */ if(share_some_med.empty_size != mult_index_med.empty_size) VERIFY(0, 1, "h5_get_file_size"); if(share_some_med.empty_size != share_some_btree.empty_size) VERIFY(0, 1, "h5_get_file_size"); /* When the first dataset is created, they should be not quite as big * as equivalent files that share all messages (since shared messages * have a little bit of overhead). 
*/ if(share_some_med.first_dset >= mult_index_med.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.first_dset >= mult_index_btree.first_dset) VERIFY(0, 1, "h5_get_file_size"); /* The files that share some should have a growth rate in between * files that share all messages and normal files */ if(share_some_med.interleaved - share_some_med.first_dset <= mult_index_med.interleaved - mult_index_med.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_some_med.interleaved - share_some_med.first_dset >= norm_sizes.interleaved - norm_sizes.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.interleaved - share_some_btree.first_dset <= mult_index_btree.interleaved - mult_index_btree.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.interleaved - share_some_btree.first_dset >= norm_sizes.interleaved - norm_sizes.first_dset) VERIFY(0, 1, "h5_get_file_size"); /* Check the file that only stored gigantic messages in its second * index. Since no messages were that big, it should be identical * to the file with an empty index. */ if(share_some_btree.empty_size != share_some_toobig_index.empty_size) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.first_dset != share_some_toobig_index.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.dsets1 != share_some_toobig_index.dsets1) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.dsets2 != share_some_toobig_index.dsets2) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.interleaved != share_some_toobig_index.interleaved) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.attrs1 != share_some_toobig_index.attrs1) VERIFY(0, 1, "h5_get_file_size"); if(share_some_btree.attrs2 != share_some_toobig_index.attrs2) VERIFY(0, 1, "h5_get_file_size"); /* Check the file that shares even very tiny messages. 
Once messages * are written to it, it should gain a little space from sharing the * messages and lose a little space to overhead so that it's just slightly * smaller than a file that doesn't share tiny messages. * If the overhead increases or the size of messages decreases, these * numbers may be off. */ if(share_tiny_index.empty_size != type_space_index.empty_size) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.first_dset >= type_space_index.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.first_dset * OVERHEAD_ALLOWED < type_space_index.first_dset) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.dsets1 >= type_space_index.dsets1) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.dsets1 * OVERHEAD_ALLOWED < type_space_index.dsets1) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.dsets2 >= type_space_index.dsets2) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.dsets2 * OVERHEAD_ALLOWED < type_space_index.dsets2) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.interleaved >= type_space_index.interleaved) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.interleaved * OVERHEAD_ALLOWED < type_space_index.interleaved) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.attrs1 >= type_space_index.attrs1) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.attrs1 * OVERHEAD_ALLOWED < type_space_index.attrs1) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.attrs2 >= type_space_index.attrs2) VERIFY(0, 1, "h5_get_file_size"); if(share_tiny_index.attrs2 * OVERHEAD_ALLOWED < type_space_index.attrs2) VERIFY(0, 1, "h5_get_file_size"); } /*------------------------------------------------------------------------- * Function: delete_helper_write * * Purpose: Creates a dataset and attribute in file FILE_ID using value X * in the DSPACE_ID and DCPL_ID arrays. 
* * Programmer: James Laird * Tuesday, December 19, 2006 * * Modifications: * *------------------------------------------------------------------------- */
static void
delete_helper_write(hid_t file_id, hid_t *dspace_id, hid_t *dcpl_id, int x)
{
    hid_t  dset_id = -1;
    hid_t  attr_id = -1;
    char   wdata;
    herr_t ret;

    /* Create the x'th dataset using the matching dataspace and creation
     * property list from the caller-supplied arrays.
     */
    dset_id = H5Dcreate(file_id, DSETNAME[x], H5T_NATIVE_CHAR, dspace_id[x], dcpl_id[x]);
    CHECK_I(dset_id, "H5Dcreate");

    /* Fill the dataset with a single character derived from x so that
     * delete_helper_read can verify it later.
     */
    wdata = x + 'a';
    ret = H5Dwrite(dset_id, H5T_NATIVE_CHAR, dspace_id[x], dspace_id[x], H5P_DEFAULT, &wdata);
    CHECK_I(ret, "H5Dwrite");

    /* Attach an attribute to the dataset holding the same character */
    attr_id = H5Acreate(dset_id, "attr_name", H5T_NATIVE_CHAR, dspace_id[x], H5P_DEFAULT);
    CHECK_I(attr_id, "H5Acreate");
    ret = H5Awrite(attr_id, H5T_NATIVE_CHAR, &wdata);
    CHECK_I(ret, "H5Awrite");

    /* Release the attribute and dataset handles */
    ret = H5Aclose(attr_id);
    CHECK_I(ret, "H5Aclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
}
/*-------------------------------------------------------------------------
 * Function:    delete_helper_read
 *
 * Purpose:     Checks the value of the dataset and attribute created by
 *              delete_helper_write.
* * Programmer: James Laird * Tuesday, December 19, 2006 * * Modifications: * *------------------------------------------------------------------------- */
static void
delete_helper_read(hid_t file_id, hid_t *dspace_id, int x)
{
    hid_t  dset_id = -1;
    hid_t  attr_id = -1;
    char   rdata;
    herr_t ret;

    /* Open the x'th dataset created by delete_helper_write.
     * (Fixed: diagnostic label said "H5Dcreate" for an H5Dopen call.)
     */
    dset_id = H5Dopen(file_id, DSETNAME[x]);
    CHECK_I(dset_id, "H5Dopen");

    /* Read back the dataset value and verify it matches what was written */
    rdata = '\0';
    ret = H5Dread(dset_id, H5T_NATIVE_CHAR, dspace_id[x], dspace_id[x], H5P_DEFAULT, &rdata);
    CHECK_I(ret, "H5Dread");
    VERIFY(rdata, (x + 'a'), "H5Dread");

    /* Open the attribute on the dataset */
    attr_id = H5Aopen_name(dset_id, "attr_name");
    CHECK_I(attr_id, "H5Aopen");

    /* Read back the attribute value and verify it.
     * (Fixed: diagnostic labels said "H5Dread" for H5Aread calls.)
     */
    rdata = '\0';
    ret = H5Aread(attr_id, H5T_NATIVE_CHAR, &rdata);
    CHECK_I(ret, "H5Aread");
    VERIFY(rdata, (x + 'a'), "H5Aread");

    /* Cleanup */
    ret = H5Aclose(attr_id);
    CHECK_I(ret, "H5Aclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
}
/*-------------------------------------------------------------------------
 * Function:    delete_helper
 *
 * Purpose:     Creates some shared messages, deletes them, and creates some
 *              more messages. The second batch of messages should use the
 *              space freed by the first batch, so should be about the same
 *              size as a file that never had the first batch of messages
 *              created.
 *
 *              FCPL_ID is the file creation property list to use.
 *              DSPACE_ID and DCPL_ID are arrays of different dataspaces
 *              and property lists with filter pipelines used to create the
 *              messages.
* * Programmer: James Laird * Tuesday, December 19, 2006 * * Modifications: * *------------------------------------------------------------------------- */ static void delete_helper(hid_t fcpl_id, hid_t *dspace_id, hid_t *dcpl_id) { hid_t file_id=-1; int x; h5_stat_size_t norm_filesize; h5_stat_size_t deleted_filesize; herr_t ret; /* Get the size of a "normal" file with no deleted messages */ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); /* Create batch of messages in the file starting at message 2 */ for(x=HALF_DELETE_NUM_MESGS; x deleted_filesize * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); if(deleted_filesize > norm_filesize * OVERHEAD_ALLOWED) VERIFY(0, 1, "h5_get_file_size"); } /*------------------------------------------------------------------------- * Function: test_sohm_delete * * Purpose: Tests shared object header message deletion. * * Creates lots of shared messages, then ensures that they * can be deleted without corrupting the remaining messages. * Also checks that indexes convert from B-trees back into * lists. * * Programmer: James Laird * Tuesday, December 19, 2006 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_delete(void) { hid_t fcpl_id; /* We'll use dataspaces, filter pipelines, and attributes for this * test. Create a number of distinct messages of each type. */ hid_t dspace_id[DELETE_NUM_MESGS] = {0}; hid_t dcpl_id[DELETE_NUM_MESGS] = {0}; int x; hsize_t dims[] = DELETE_DIMS; herr_t ret; /* Create a number of different dataspaces. * For simplicity, each dataspace has only one element. 
*/ for(x=0; x=0; --x) { ret = H5Sclose(dspace_id[x]); CHECK_I(ret, "H5Sclose"); ret = H5Pclose(dcpl_id[x]); CHECK_I(ret, "H5Pclose"); } } /*------------------------------------------------------------------------- * Function: test_sohm_delete_revert_helper * * Purpose: Tests that shared object header message deletion returns * the file to its previous state using the supplied FCPL. * * Creates shared messages and then deletes them. Ensures * that the file has not grown in size. * * Programmer: James Laird * Wednesday, January 3, 2007 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_delete_revert_helper(hid_t fcpl_id) { hid_t file_id; hid_t dspace_id; hid_t dset_id; hsize_t dims[1] = {1}; h5_stat_size_t initial_filesize, deleted_filesize; herr_t ret; /* Create a dataspace for later */ dspace_id = H5Screate_simple(1, dims, dims); CHECK_I(dspace_id, "H5Screate_simple"); /* Create a file using the FCPL supplied*/ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); /* Close the file and get its size */ ret = H5Fclose(file_id); CHECK_I(ret, "H5Fclose"); initial_filesize = h5_get_file_size(FILENAME); /* Re-create the file and create a dataset in it */ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); dset_id = H5Dcreate(file_id, "dset", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT); CHECK_I(dset_id, "H5Dcreate"); /* Close the dataset and delete it */ ret = H5Dclose(dset_id); CHECK_I(ret, "H5Dclose"); ret = H5Ldelete(file_id, "dset", H5P_DEFAULT); CHECK_I(ret, "H5Ldelete"); /* Close the file and get its size */ ret = H5Fclose(file_id); CHECK_I(ret, "H5Fclose"); deleted_filesize = h5_get_file_size(FILENAME); VERIFY(deleted_filesize, initial_filesize, "h5_get_file_size"); /* Repeat, creating two datasets in the file */ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, 
"H5Fcreate"); /* Create and close the first dataset */ dset_id = H5Dcreate(file_id, "dset", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT); CHECK_I(dset_id, "H5Dcreate"); ret = H5Dclose(dset_id); CHECK_I(ret, "H5Dclose"); /* Create and close the second. These messages should be shared */ dset_id = H5Dcreate(file_id, "dset2", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT); CHECK_I(dset_id, "H5Dcreate"); ret = H5Dclose(dset_id); CHECK_I(ret, "H5Dclose"); /* Delete both datasets */ ret = H5Ldelete(file_id, "dset", H5P_DEFAULT); CHECK_I(ret, "H5Ldelete"); ret = H5Ldelete(file_id, "dset2", H5P_DEFAULT); CHECK_I(ret, "H5Ldelete"); /* Close the file and get its size */ ret = H5Fclose(file_id); CHECK_I(ret, "H5Fclose"); deleted_filesize = h5_get_file_size(FILENAME); VERIFY(deleted_filesize, initial_filesize, "h5_get_file_size"); /* Cleanup */ ret = H5Sclose(dspace_id); CHECK_I(ret, "H5Sclose"); } /*------------------------------------------------------------------------- * Function: test_sohm_delete_revert * * Purpose: Calls test_sohm_delete_revert_helper with different FCPLs. * * Programmer: James Laird * Wednesday, January 3, 2007 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_delete_revert(void) { hid_t fcpl_id; herr_t ret; /* Create an fcpl with messages in two indexes */ fcpl_id = H5Pcreate(H5P_FILE_CREATE); CHECK_I(fcpl_id, "H5Pcreate"); ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2); CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_MESG_DTYPE_FLAG, 10); CHECK_I(ret, "H5Pset_shared_mesg_index"); ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_MESG_SDSPACE_FLAG, 10); CHECK_I(ret, "H5Pset_shared_mesg_index"); /* Call the helper function to test this FCPL. 
*/ test_sohm_delete_revert_helper(fcpl_id); /* Try using B-trees */ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0); CHECK_I(ret, "H5Pset_shared_mesg_phase_change"); test_sohm_delete_revert_helper(fcpl_id); /* Try sharing all messages */ ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1); CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_MESG_ALL_FLAG, 10); CHECK_I(ret, "H5Pset_shared_mesg_index"); ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 5); test_sohm_delete_revert_helper(fcpl_id); /* Try using B-trees */ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0); CHECK_I(ret, "H5Pset_shared_mesg_phase_change"); test_sohm_delete_revert_helper(fcpl_id); /* There should be at least two messages in the test (datatype and * dataspace). Use an index that will transition from a list to * a B-tree and back. */ ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1, 2); CHECK_I(ret, "H5Pset_shared_mesg_phase_change"); test_sohm_delete_revert_helper(fcpl_id); ret = H5Pclose(fcpl_id); CHECK_I(ret, "H5Pclose"); } /*------------------------------------------------------------------------- * Function: test_sohm_extlink_helper * * Purpose: Tests that a dataset created through an external link can * be opened (that shared messages were created or not and * were shared in the right file). 
* * Programmer: James Laird * Friday, December 22, 2006 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_extlink_helper(hid_t src_fcpl_id, hid_t dst_fcpl_id) { hid_t src_file_id = -1; hid_t dst_file_id = -1; hid_t space_id = -1; hid_t dset_id = -1; hsize_t dims[] = {1, 1}; herr_t ret; /* Create files */ src_file_id = H5Fcreate(FILENAME_SRC, H5F_ACC_TRUNC, src_fcpl_id, H5P_DEFAULT); CHECK_I(src_file_id, "H5Fcreate"); dst_file_id = H5Fcreate(FILENAME_DST, H5F_ACC_TRUNC, dst_fcpl_id, H5P_DEFAULT); CHECK_I(dst_file_id, "H5Fcreate"); /* Create an external link from the source file to the destination file */ ret = H5Lcreate_external(FILENAME_DST, "/", src_file_id, "ext_link", H5P_DEFAULT, H5P_DEFAULT); CHECK_I(ret, "H5Lcreate_external"); /* Create a dataset through the external link */ space_id = H5Screate_simple(2, dims, dims); CHECK_I(space_id, "H5Screate_simple"); dset_id = H5Dcreate(src_file_id, "ext_link/dataset", H5T_NATIVE_FLOAT, space_id, H5P_DEFAULT); CHECK_I(dset_id, "H5Dcreate"); /* Close the dataset and both files to make sure everything gets flushed * out of memory */ ret = H5Dclose(dset_id); CHECK_I(ret, "H5Dclose"); ret = H5Fclose(src_file_id); CHECK_I(ret, "H5Fclose"); ret = H5Fclose(dst_file_id); CHECK_I(ret, "H5Fclose"); /* Ensure that the dataset can be opened. If the messages were written in * the wrong file, it'll be impossible to read the dataset's object * header. */ dst_file_id = H5Fopen(FILENAME_DST, H5F_ACC_RDONLY, H5P_DEFAULT); CHECK_I(dst_file_id, "H5Fopen"); dset_id = H5Dopen(dst_file_id, "dataset"); CHECK_I(dset_id, "H5Dopen"); /* Cleanup */ ret = H5Dclose(dset_id); CHECK_I(ret, "H5Dclose"); ret = H5Fclose(dst_file_id); CHECK_I(ret, "H5Fclose"); } /*------------------------------------------------------------------------- * Function: test_sohm_extlink * * Purpose: Test creating SOHMs through external links (to make sure that * they're created in the correct file). 
* * Programmer: James Laird * Friday, December 22, 2006 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_extlink(void) { hid_t fcpl_id = -1; herr_t ret; /* Create fcpl */ fcpl_id = H5Pcreate(H5P_FILE_CREATE); CHECK_I(fcpl_id, "H5Pcreate"); ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1); CHECK_I(ret, "H5Pset_shared_mesg_nindexes"); ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_MESG_ALL_FLAG, 16); CHECK_I(ret, "H5Pset_shared_mesg_index"); /* Test using external links when the source or destination file uses * shared messages */ test_sohm_extlink_helper(fcpl_id, H5P_DEFAULT); test_sohm_extlink_helper(H5P_DEFAULT, fcpl_id); test_sohm_extlink_helper(fcpl_id, fcpl_id); } /*------------------------------------------------------------------------- * Function: test_sohm_extend_dset_helper * * Purpose: Tests extending a dataset's dataspace. * * Programmer: James Laird * Wednesday, January 10, 2007 * * Modifications: * *------------------------------------------------------------------------- */ static void test_sohm_extend_dset_helper(hid_t fcpl_id) { hid_t file_id = -1; hid_t orig_space_id = -1; hid_t space1_id, space2_id; hid_t dcpl_id = -1; hid_t dset1_id, dset2_id; hsize_t dims1[] = {1, 2}; hsize_t max_dims[] = {H5S_UNLIMITED, 2}; hsize_t dims2[] = {5, 2}; hsize_t out_dims[2]; hsize_t out_maxdims[2]; long data[10] = {0}; int x; herr_t ret; /* Create file */ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT); CHECK_I(file_id, "H5Fcreate"); /* Create property list with chunking */ dcpl_id = H5Pcreate(H5P_DATASET_CREATE); CHECK_I(dcpl_id, "H5Pcreate"); ret = H5Pset_chunk(dcpl_id, 2, dims1); CHECK_I(ret, "H5Pset_chunk"); /* Create a dataspace and a dataset*/ orig_space_id = H5Screate_simple(EXTEND_NDIMS, dims1, max_dims); CHECK_I(orig_space_id, "H5Screate_simple"); dset1_id = H5Dcreate(file_id, "dataset", H5T_NATIVE_LONG, orig_space_id, dcpl_id); CHECK_I(dset1_id, "H5Dcreate"); /* Create 
another dataset with the same dataspace */ dset2_id = H5Dcreate(file_id, "dataset2", H5T_NATIVE_LONG, orig_space_id, dcpl_id); CHECK_I(dset2_id, "H5Dcreate"); /* Extend the first dataset */ ret = H5Dextend(dset1_id, dims2); CHECK_I(ret, "H5Dextend"); /* Get the dataspaces from the datasets */ space1_id = H5Dget_space(dset1_id); CHECK_I(space1_id, "H5Dget_space"); space2_id = H5Dget_space(dset2_id); CHECK_I(space2_id, "H5Dget_space"); /* Verify the dataspaces */ ret = H5Sget_simple_extent_dims(space1_id, out_dims, out_maxdims); CHECK_I(ret, "H5Sget_simple_extent_dims"); for(x=0; x