author | Raymond Lu <songyulu@hdfgroup.org> | 2009-03-03 19:35:34 (GMT) |
---|---|---|
committer | Raymond Lu <songyulu@hdfgroup.org> | 2009-03-03 19:35:34 (GMT) |
commit | db81699a135a888338cf8f6445bd620ad2c982cb (patch) | |
tree | a5e57f2f91af8640f2784f1784420057dc97b649 | |
parent | 19105560d05e18a2793ef330bac1234aed2e8914 (diff) | |
download | hdf5-db81699a135a888338cf8f6445bd620ad2c982cb.zip hdf5-db81699a135a888338cf8f6445bd620ad2c982cb.tar.gz hdf5-db81699a135a888338cf8f6445bd620ad2c982cb.tar.bz2 |
[svn-r16538] Added more test cases for bypassing the cache. In test_big_chunks_bypass_cache,
test the correctness of the data both when the fill value is defined and when it is not.
The library should let chunks bypass the cache depending on the size of the chunks
and on whether a fill value must be written to them.
Tested on jam - simple change.
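For readers unfamiliar with this code path, the rule the new test exercises can be summarized as the sketch below. It is only an illustration of the behavior described in the message above, not the library's internal implementation; the function name `chunk_may_bypass_cache` and its parameters are hypothetical.

```c
#include <stdbool.h>
#include <stddef.h>

/* Illustrative sketch (not HDF5 internals): a chunked write or read may go
 * straight to the file only if the chunk cannot fit in the chunk cache and
 * the library does not first have to initialize the chunk with a fill value. */
static bool
chunk_may_bypass_cache(size_t chunk_nbytes,     /* size of one chunk in bytes      */
                       size_t cache_nbytes,     /* raw-data chunk cache byte limit */
                       bool   chunk_on_disk,    /* chunk already allocated on disk */
                       bool   fill_is_written)  /* fill value would be written     */
{
    /* If the chunk is not yet on disk and a fill value must be written, the
     * chunk has to pass through the cache so it can be filled first. */
    bool must_fill = fill_is_written && !chunk_on_disk;

    return (chunk_nbytes > cache_nbytes) && !must_fill;
}
```

This is why the first dataset below (fill time H5D_FILL_TIME_IFSET, fill value defined) writes through the cache, while the second dataset (fill time H5D_FILL_TIME_NEVER) is expected to bypass it.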
-rw-r--r-- | test/dsets.c | 66 |
1 file changed, 49 insertions, 17 deletions
diff --git a/test/dsets.c b/test/dsets.c
index 3d1dfd3..e95e8af 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -165,7 +165,8 @@ const char *FILENAME[] = {
 #define TOO_HUGE_CHUNK_DIM2_2   ((hsize_t)1024)
 
 /* Parameters for testing bypassing chunk cache */
-#define BYPASS_DATASET          "Dset"
+#define BYPASS_DATASET1         "Dset1"
+#define BYPASS_DATASET2         "Dset2"
 #define BYPASS_DIM              1000
 #define BYPASS_CHUNK_DIM        500
 #define BYPASS_FILL_VALUE       7
@@ -6697,7 +6698,7 @@ error:
  *              bypasses the cache.
  *
  * Note: This test is not very conclusive - it doesn't actually check
- *              is the chunks bypass the cache... :-(  -QAK
+ *              if the chunks bypass the cache... :-(  -QAK
  *
  * Return:      Success:        0
  *              Failure:        -1
@@ -6720,7 +6721,8 @@ test_big_chunks_bypass_cache(hid_t fapl)
     size_t rdcc_nelmts, rdcc_nbytes;
     int fvalue = BYPASS_FILL_VALUE;
     hsize_t count, stride, offset, block;
-    static int wdata[BYPASS_CHUNK_DIM], rdata[BYPASS_DIM];
+    static int wdata[BYPASS_CHUNK_DIM/2], rdata1[BYPASS_DIM],
+        rdata2[BYPASS_CHUNK_DIM/2];
     int i, j;
     herr_t ret;         /* Generic return value */
 
@@ -6755,51 +6757,81 @@ test_big_chunks_bypass_cache(hid_t fapl)
     if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0) FAIL_STACK_ERROR
     if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR) < 0) FAIL_STACK_ERROR
 
-    /* Try to create dataset */
-    if((dsid = H5Dcreate2(fid, BYPASS_DATASET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+    /* Create a first dataset */
+    if((dsid = H5Dcreate2(fid, BYPASS_DATASET1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
         FAIL_STACK_ERROR
 
     /* Select first chunk to write the data */
     offset = 0;
     count = 1;
     stride = 1;
-    block = BYPASS_CHUNK_DIM;
+    block = BYPASS_CHUNK_DIM / 2;
     if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &offset, &stride, &count, &block) < 0)
         FAIL_STACK_ERROR
 
     /* Initialize data to write */
-    for(i = 0; i < BYPASS_CHUNK_DIM; i++)
+    for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
         wdata[i] = i;
 
-    /* This write should bypass the cache because the chunk is bigger than the cache size
-     * and it's not allocated on disk. */
+    /* This write should go through the cache because fill value is used. */
    if(H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, wdata) < 0)
         FAIL_STACK_ERROR
 
     if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
 
     /* Reopen the dataset */
-    if((dsid = H5Dopen2(fid, BYPASS_DATASET, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+    if((dsid = H5Dopen2(fid, BYPASS_DATASET1, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
 
     /* Reads both 2 chunks.  Reading the second chunk should bypass the cache because the
      * chunk is bigger than the cache size and it isn't allocated on disk. */
-    if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0)
+    if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata1) < 0)
         FAIL_STACK_ERROR
 
-    for(i = 0; i < BYPASS_CHUNK_DIM; i++)
-        if(rdata[i] != i) {
+    for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
+        if(rdata1[i] != i) {
             printf("    Read different values than written in the 1st chunk.\n");
-            printf("    At line %d and index %d, rdata = %d. It should be %d.\n", __LINE__, i, rdata[i], i);
+            printf("    At line %d and index %d, rdata1 = %d. It should be %d.\n", __LINE__, i, rdata1[i], i);
             TEST_ERROR
         } /* end if */
 
-    for(j = BYPASS_CHUNK_DIM; j < BYPASS_DIM; j++)
-        if(rdata[j] != fvalue) {
+    for(j = BYPASS_CHUNK_DIM / 2; j < BYPASS_DIM; j++)
+        if(rdata1[j] != fvalue) {
             printf("    Read different values than written in the 2nd chunk.\n");
-            printf("    At line %d and index %d, rdata = %d. It should be %d.\n", __LINE__, i, rdata[i], fvalue);
+            printf("    At line %d and index %d, rdata1 = %d. It should be %d.\n", __LINE__, i, rdata1[i], fvalue);
             TEST_ERROR
         } /* end if */
 
+
+    /* Close the first dataset */
+    if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+    /* Create a second dataset without fill value.  This time, both write
+     * and read should bypass the cache because the chunk is bigger than the
+     * cache size and it's not allocated on disk. */
+    if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER) < 0) FAIL_STACK_ERROR
+
+    if((dsid = H5Dcreate2(fid, BYPASS_DATASET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    if(H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, wdata) < 0)
+        FAIL_STACK_ERROR
+
+    if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+
+    /* Reopen the dataset */
+    if((dsid = H5Dopen2(fid, BYPASS_DATASET2, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+    /* Read back only the part that was written to the file.  Reading the
+     * half chunk should bypass the cache because the chunk is bigger than
+     * the cache size. */
+    if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, rdata2) < 0)
+
+    for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
+        if(rdata2[i] != i) {
+            printf("    Read different values than written in the chunk.\n");
+            printf("    At line %d and index %d, rdata2 = %d. It should be %d.\n", __LINE__, i, rdata2[i], i);
+            TEST_ERROR
+        } /* end if */
+
     /* Close IDs */
     if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
     if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
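The hunks above do not show the earlier part of test_big_chunks_bypass_cache() that shrinks the raw-data chunk cache below the size of one chunk (the declared rdcc_nelmts and rdcc_nbytes are used there). The sketch below shows what that kind of setup typically looks like with the public H5Pget_cache/H5Pset_cache calls; the helper name `shrink_chunk_cache` and the target byte count are assumptions for illustration, not values taken from this patch.

```c
#include "hdf5.h"

/* Hedged sketch: make the raw-data chunk cache smaller than a single
 * chunk of native ints so that whole-chunk I/O can bypass it.
 * The chosen byte limit is an illustrative assumption, not from the patch. */
static herr_t
shrink_chunk_cache(hid_t fapl, hsize_t chunk_dim)
{
    int    mdc_nelmts;                 /* metadata cache size (left unchanged) */
    size_t rdcc_nelmts, rdcc_nbytes;   /* chunk cache slots and byte limit     */
    double rdcc_w0;                    /* chunk cache preemption policy        */

    /* Query the current settings so only the byte limit is changed. */
    if(H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
        return -1;

    /* Lower the byte limit below the size of one chunk of native ints. */
    rdcc_nbytes = (size_t)(chunk_dim * sizeof(int)) / 2;

    return H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
}
```

A dataset created with chunks of chunk_dim elements on a file opened with this access property list would then be a candidate for the bypass path the test exercises.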