| author | Quincey Koziol <koziol@hdfgroup.org> | 2016-04-02 09:39:32 (GMT) |
|---|---|---|
| committer | Quincey Koziol <koziol@hdfgroup.org> | 2016-04-02 09:39:32 (GMT) |
| commit | 9d2178ab886ae957cfe11b6fe09f9e7f0e9ce369 (patch) | |
| tree | d4256c01e3f5364ca39ec230a6a897d16f51273d /test/dsets.c | |
| parent | e45885dea912a18b9fd6b1450d3ff196dcb749eb (diff) | |
| download | hdf5-9d2178ab886ae957cfe11b6fe09f9e7f0e9ce369.zip, hdf5-9d2178ab886ae957cfe11b6fe09f9e7f0e9ce369.tar.gz, hdf5-9d2178ab886ae957cfe11b6fe09f9e7f0e9ce369.tar.bz2 | |
[svn-r29607] Description:
Bring "don't filter partial edge chunks" capability from revise_chunks
to trunk.
Tested on:
MacOSX/64 10.11.4 (amazon) w/debug, production & parallel
(h5committest forthcoming)
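For context, a minimal sketch (not from this commit; file and dataset names are illustrative and error checks are omitted) of how an application opts out of filtering partial edge chunks with the `H5Pget_chunk_opts`/`H5Pset_chunk_opts` API that the new test below exercises:

```c
/* Hedged sketch: create a chunked dataset whose partial edge chunks
 * bypass the filter pipeline.  Names and extents are illustrative only. */
#include "hdf5.h"

int
main(void)
{
    hsize_t  dim[2]  = {4, 3};  /* 4x3 dataset ...                      */
    hsize_t  cdim[2] = {2, 2};  /* ... with 2x2 chunks: the right-hand
                                 * column of chunks is partial           */
    unsigned opts;
    hid_t    fid, sid, dcpl, did;

    fid  = H5Fcreate("edge.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    sid  = H5Screate_simple(2, dim, NULL);
    dcpl = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 2, cdim);
    H5Pset_shuffle(dcpl);       /* any filter; edge chunks will skip it */

    /* Read-modify-write the chunk options so existing flags survive */
    H5Pget_chunk_opts(dcpl, &opts);
    opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
    H5Pset_chunk_opts(dcpl, opts);

    did = H5Dcreate2(fid, "dset", H5T_NATIVE_CHAR, sid,
                     H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(did);
    H5Pclose(dcpl);
    H5Sclose(sid);
    H5Fclose(fid);
    return 0;
}
```

Reading the options first and OR-ing in the flag preserves any other chunk options already set on the DCPL, which is the same pattern the new test uses.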
Diffstat (limited to 'test/dsets.c')
-rw-r--r-- | test/dsets.c | 227 |
1 file changed, 209 insertions, 18 deletions
```diff
diff --git a/test/dsets.c b/test/dsets.c
index 2cb51d3..b3f11b8 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -44,10 +44,13 @@ const char *FILENAME[] = {
     "huge_chunks",      /* 7 */
     "chunk_cache",      /* 8 */
     "big_chunk",        /* 9 */
-    "chunk_expand",     /* 10 */
-    "copy_dcpl_newfile",/* 11 */
-    "layout_extend",    /* 12 */
-    "zero_chunk",       /* 13 */
+    "chunk_fast",       /* 10 */
+    "chunk_expand",     /* 11 */
+    "chunk_fixed",      /* 12 */
+    "copy_dcpl_newfile",/* 13 */
+    "partial_chunks",   /* 14 */
+    "layout_extend",    /* 15 */
+    "zero_chunk",       /* 16 */
     NULL
 };
 #define FILENAME_BUF_SIZE       1024
@@ -125,6 +128,7 @@ const char *FILENAME[] = {
 #define H5Z_FILTER_DEPREC       309
 #define H5Z_FILTER_EXPAND       310
 #define H5Z_FILTER_CAN_APPLY_TEST2      311
+#define H5Z_FILTER_COUNT        312
 
 /* Flags for testing filters */
 #define DISABLE_FLETCHER32      0
@@ -198,6 +202,8 @@ const char *FILENAME[] = {
 #define DSET_DIM2       200
 int     points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
 double  points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2];
+size_t  count_nbytes_read = 0;
+size_t  count_nbytes_written = 0;
 
 /* Local prototypes for filter functions */
 static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
@@ -212,6 +218,49 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts,
     const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
 static size_t filter_expand(unsigned int flags, size_t cd_nelmts,
     const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+static size_t filter_count(unsigned int flags, size_t cd_nelmts,
+    const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_COUNT[1] = {{
+    H5Z_CLASS_T_VERS,       /* H5Z_class_t version */
+    H5Z_FILTER_COUNT,       /* Filter id number */
+    1, 1,                   /* Encoding and decoding enabled */
+    "count",                /* Filter name for debugging */
+    NULL,                   /* The "can apply" callback */
+    NULL,                   /* The "set local" callback */
+    filter_count,           /* The actual filter function */
+}};
+
+
+/*-------------------------------------------------------------------------
+ * Function:    filter_count
+ *
+ * Purpose:     This filter counts the number of bytes read and written,
+ *              incrementing count_nbytes_read or count_nbytes_written as
+ *              appropriate.
+ *
+ * Return:      Success:        Data chunk size
+ *
+ *              Failure:        0
+ *
+ * Programmer:  Neil Fortner
+ *              Wednesday, March 17, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+filter_count(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
+    const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes,
+    size_t H5_ATTR_UNUSED *buf_size, void H5_ATTR_UNUSED **buf)
+{
+    if(flags & H5Z_FLAG_REVERSE)
+        count_nbytes_read += nbytes;
+    else
+        count_nbytes_written += nbytes;
+
+    return nbytes;
+}
 
 
 /*-------------------------------------------------------------------------
@@ -905,7 +954,7 @@ test_layout_extend(hid_t fapl)
     TESTING("extendible dataset with various layout");
 
     /* Create a file */
-    h5_fixname(FILENAME[12], fapl, filename, sizeof filename);
+    h5_fixname(FILENAME[15], fapl, filename, sizeof filename);
     if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
         FAIL_STACK_ERROR
@@ -2798,7 +2847,7 @@ test_nbit_int(hid_t file)
     mask = ~((unsigned)~0 << (precision + offset)) & ((unsigned)~0 << offset);
     for(i=0; i<(size_t)size[0]; i++) {
         for(j=0; j<(size_t)size[1]; j++) {
-            if((new_data[i][j] & mask) != (orig_data[i][j] & mask)) {
+            if(((unsigned)new_data[i][j] & mask) != ((unsigned)orig_data[i][j] & mask)) {
                 H5_FAILED();
                 printf("    Read different values than written.\n");
                 printf("    At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
@@ -3337,9 +3386,9 @@ test_nbit_compound(hid_t file)
     s_mask = ~((unsigned)~0 << (precision[2] + offset[2])) & ((unsigned)~0 << offset[2]);
     for(i=0; i<size[0]; i++) {
         for(j=0; j<size[1]; j++) {
-            if((new_data[i][j].i & i_mask) != (orig_data[i][j].i & i_mask) ||
-               (new_data[i][j].c & c_mask) != (orig_data[i][j].c & c_mask) ||
-               (new_data[i][j].s & s_mask) != (orig_data[i][j].s & s_mask) ||
+            if(((unsigned)new_data[i][j].i & i_mask) != ((unsigned)orig_data[i][j].i & i_mask) ||
+               ((unsigned)new_data[i][j].c & c_mask) != ((unsigned)orig_data[i][j].c & c_mask) ||
+               ((unsigned)new_data[i][j].s & s_mask) != ((unsigned)orig_data[i][j].s & s_mask) ||
                (orig_data[i][j].f==orig_data[i][j].f && new_data[i][j].f != orig_data[i][j].f)) {
                 H5_FAILED();
@@ -3595,16 +3644,16 @@ test_nbit_compound_2(hid_t file)
           for(m = 0; m < (size_t)array_dims[0]; m++)
              for(n = 0; n < (size_t)array_dims[1]; n++)
-                if((new_data[i][j].b[m][n]&b_mask)!=(orig_data[i][j].b[m][n]&b_mask)) {
+                if(((unsigned)new_data[i][j].b[m][n] & b_mask)!=((unsigned)orig_data[i][j].b[m][n] & b_mask)) {
                     b_failed = 1;
                     goto out;
                 }
 
           for(m = 0; m < (size_t)array_dims[0]; m++)
              for(n = 0; n < (size_t)array_dims[1]; n++)
-                if((new_data[i][j].d[m][n].i & i_mask)!=(orig_data[i][j].d[m][n].i & i_mask)||
-                   (new_data[i][j].d[m][n].c & c_mask)!=(orig_data[i][j].d[m][n].c & c_mask)||
-                   (new_data[i][j].d[m][n].s & s_mask)!=(orig_data[i][j].d[m][n].s & s_mask)||
+                if(((unsigned)new_data[i][j].d[m][n].i & i_mask) != ((unsigned)orig_data[i][j].d[m][n].i & i_mask)||
+                   ((unsigned)new_data[i][j].d[m][n].c & c_mask) != ((unsigned)orig_data[i][j].d[m][n].c & c_mask)||
+                   ((unsigned)new_data[i][j].d[m][n].s & s_mask) != ((unsigned)orig_data[i][j].d[m][n].s & s_mask)||
                    (new_data[i][j].d[m][n].f==new_data[i][j].d[m][n].f &&
                    new_data[i][j].d[m][n].f != new_data[i][j].d[m][n].f)) {
                     d_failed = 1;
@@ -3612,9 +3661,9 @@ test_nbit_compound_2(hid_t file)
            }
 out:
-           if((new_data[i][j].a.i & i_mask)!=(orig_data[i][j].a.i & i_mask)||
-              (new_data[i][j].a.c & c_mask)!=(orig_data[i][j].a.c & c_mask)||
-              (new_data[i][j].a.s & s_mask)!=(orig_data[i][j].a.s & s_mask)||
+           if(((unsigned)new_data[i][j].a.i & i_mask) != ((unsigned)orig_data[i][j].a.i & i_mask)||
+              ((unsigned)new_data[i][j].a.c & c_mask) != ((unsigned)orig_data[i][j].a.c & c_mask)||
+              ((unsigned)new_data[i][j].a.s & s_mask) != ((unsigned)orig_data[i][j].a.s & s_mask)||
               (new_data[i][j].a.f==new_data[i][j].a.f &&
               new_data[i][j].a.f != new_data[i][j].a.f)||
               new_data[i][j].v != orig_data[i][j].v || b_failed || d_failed) {
@@ -6067,7 +6116,7 @@ test_copy_dcpl(hid_t file, hid_t fapl)
     /* Create a second file and create 2 datasets with the copies of the DCPLs in the first
      * file.  Test whether the copies of DCPLs work. */
-    h5_fixname(FILENAME[11], fapl, filename, sizeof filename);
+    h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
     if((new_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
         TEST_ERROR
@@ -7849,6 +7898,147 @@ error:
 
 
 /*-------------------------------------------------------------------------
+ *
+ * test_unfiltered_edge_chunks():
+ *      Tests that partial edge chunks aren't filtered when the
+ *      H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS option is set.
+ *
+ * Programmer: Neil Fortner; 17th March, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_unfiltered_edge_chunks(hid_t fapl)
+{
+    hid_t       fid = -1;           /* File id */
+    hid_t       did = -1;           /* Dataset id */
+    hid_t       sid = -1;           /* Dataspace id */
+    hid_t       dcpl = -1;          /* DCPL id */
+    hsize_t     dim[2] = {4, 3};    /* Dataset dimensions */
+    hsize_t     cdim[2] = {2, 2};   /* Chunk dimension */
+    char        wbuf[4][3];         /* Write buffer */
+    char        rbuf[4][3];         /* Read buffer */
+    char        filename[FILENAME_BUF_SIZE] = ""; /* old test file name */
+    unsigned    opts;               /* Chunk options */
+    unsigned    i, j;               /* Local index variables */
+
+    /* Output message about test being performed */
+    TESTING("disabled partial chunk filters");
+
+    h5_fixname(FILENAME[14], fapl, filename, sizeof filename);
+
+    /* Create the file */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        TEST_ERROR
+
+    /* Register byte-counting filter */
+    if(H5Zregister(H5Z_COUNT) < 0)
+        TEST_ERROR
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(2, dim, NULL)) < 0)
+        TEST_ERROR
+
+    /* Create DCPL */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        TEST_ERROR
+
+    /* Set chunk dimensions */
+    if(H5Pset_chunk(dcpl, 2, cdim) < 0)
+        TEST_ERROR
+
+    /* Add "count" filter */
+    if(H5Pset_filter(dcpl, H5Z_FILTER_COUNT, 0u, (size_t)0, NULL) < 0)
+        TEST_ERROR
+
+    /* Disable filters on partial chunks */
+    if(H5Pget_chunk_opts(dcpl, &opts) < 0)
+        TEST_ERROR
+    opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+    if(H5Pset_chunk_opts(dcpl, opts) < 0)
+        TEST_ERROR
+
+    /* Initialize write buffer */
+    for(i=0; i<dim[0]; i++)
+        for(j=0; j<dim[1]; j++)
+            wbuf[i][j] = (char)(2 * i) - (char)j;
+
+    /* Reset byte counts */
+    count_nbytes_read = (size_t)0;
+    count_nbytes_written = (size_t)0;
+
+    /* Create dataset */
+    if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_CHAR, sid,
+            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Nothing should have been written, as we are not using early allocation */
+    if(count_nbytes_read != (size_t)0)
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)0)
+        TEST_ERROR
+
+    /* Write data */
+    if(H5Dwrite(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+        TEST_ERROR
+
+    /* Close dataset */
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+
+    /* Make sure only 2 of the 4 chunks were written through the filter (4 bytes
+     * each) */
+    if(count_nbytes_read != (size_t)0)
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+
+    /* Reopen the dataset */
+    if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Read the dataset */
+    if(H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+        TEST_ERROR
+
+    /* Verify that data read == data written */
+    for(i=0; i<dim[0]; i++)
+        for(j=0; j<dim[1]; j++)
+            if(rbuf[i][j] != wbuf[i][j])
+                TEST_ERROR
+
+    /* Make sure only 2 of the 4 chunks were read through the filter (4 bytes
+     * each) */
+    if(count_nbytes_read != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+
+    /* Close IDs */
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+    if(H5Pclose(dcpl) < 0)
+        TEST_ERROR
+    if(H5Sclose(sid) < 0)
+        TEST_ERROR
+    if(H5Fclose(fid) < 0)
+        TEST_ERROR
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Dclose(did);
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* test_unfiltered_edge_chunks */
+
+
+/*-------------------------------------------------------------------------
  * Function: test_large_chunk_shrink
  *
  * Purpose: Tests support for shrinking a chunk larger than 1 MB by a
@@ -7990,7 +8180,7 @@ test_zero_dim_dset(hid_t fapl)
 
     TESTING("shrinking large chunk");
 
-    h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
+    h5_fixname(FILENAME[16], fapl, filename, sizeof filename);
 
     /* Create file */
     if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
         FAIL_STACK_ERROR
@@ -9185,6 +9375,7 @@ main(void)
     nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
     nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
     nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
+    nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
     nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
     nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
```
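Why the test expects exactly 2 of the 4 chunks to pass through the filter: with a 4x3 dataset and 2x2 chunks the chunk grid is 2x2, and the two chunks in the second chunk column extend past `dim[1] == 3`, so they are partial edge chunks and bypass the filter once `H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS` is set. A small standalone sketch of that arithmetic (plain C, no HDF5 calls; names mirror the test's variables):

```c
/* Standalone arithmetic sketch behind the test's expected byte counts. */
#include <stdio.h>

int
main(void)
{
    unsigned long dim[2]  = {4, 3};     /* dataset extent */
    unsigned long cdim[2] = {2, 2};     /* chunk extent   */

    /* Chunk grid, rounding up: partial edge chunks count too */
    unsigned long nchunks = ((dim[0] + cdim[0] - 1) / cdim[0])
                          * ((dim[1] + cdim[1] - 1) / cdim[1]);    /* 4 */

    /* Only chunks lying entirely inside the dataset get filtered */
    unsigned long nfull = (dim[0] / cdim[0]) * (dim[1] / cdim[1]); /* 2 */

    /* One-byte elements, so bytes through the filter per direction */
    unsigned long nbytes = nfull * cdim[0] * cdim[1];              /* 8 */

    printf("%lu of %lu chunks filtered, %lu bytes\n", nfull, nchunks, nbytes);
    return 0;
}
```

This matches the test's checks: `count_nbytes_written == 2 * cdim[0] * cdim[1]` (8 bytes of `char` data) after the dataset is closed, and the same count for `count_nbytes_read` after the read-back.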