path: root/test/dsets.c
Diffstat (limited to 'test/dsets.c')
-rw-r--r--  test/dsets.c  203
1 file changed, 172 insertions(+), 31 deletions(-)
diff --git a/test/dsets.c b/test/dsets.c
index 5ea5774..2df76cd 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -51,6 +51,7 @@ const char *FILENAME[] = {
"random_chunks",
"huge_chunks",
"chunk_cache",
+ "big_chunk",
"chunk_fast",
NULL
};
@@ -173,6 +174,12 @@ const char *FILENAME[] = {
#define TOO_HUGE_CHUNK_DIM2_1 ((hsize_t)1024)
#define TOO_HUGE_CHUNK_DIM2_2 ((hsize_t)1024)
+/* Parameters for testing bypassing chunk cache */
+#define BYPASS_DATASET "Dset"
+#define BYPASS_DIM 1000
+#define BYPASS_CHUNK_DIM 500
+#define BYPASS_FILL_VALUE 7
+
/* Shared global arrays */
#define DSET_DIM1 100
#define DSET_DIM2 200
@@ -2607,11 +2614,11 @@ test_nbit_int(hid_t file)
if((dataset = H5Dcreate2(file, DSET_NBIT_INT_NAME, datatype,
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
- /* Initialize data, assuming size of long_long >= size of int */
+ /* Initialize data, assuming size of long long >= size of int */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[i][j] = (int)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision - 1))) << offset);
+ orig_data[i][j] = (int)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision - 1))) << offset);
/* even-numbered values are negative */
if((i*size[1]+j+1)%2 == 0)
@@ -2917,7 +2924,7 @@ test_nbit_double(hid_t file)
goto error;
/* Check that the values read are the same as the values written
- * Assume size of long_long = size of double
+ * Assume size of long long = size of double
*/
for(i=0; i<(size_t)size[0]; i++) {
for(j=0; j<(size_t)size[1]; j++) {
@@ -3019,13 +3026,13 @@ test_nbit_array(hid_t file)
if((dataset = H5Dcreate2(file, DSET_NBIT_ARRAY_NAME, array_datatype,
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
- /* Initialize data, assuming size of long_long >= size of unsigned int */
+ /* Initialize data, assuming size of long long >= size of unsigned int */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++)
for(m = 0; m < (size_t)adims[0]; m++)
for(n = 0; n < (size_t)adims[1]; n++)
- orig_data[i][j][m][n] = (unsigned int)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)precision)) << offset);
+ orig_data[i][j][m][n] = (unsigned int)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)precision)) << offset);
PASSED();
#else
SKIPPED();
@@ -3201,15 +3208,15 @@ test_nbit_compound(hid_t file)
if((dataset = H5Dcreate2(file, DSET_NBIT_COMPOUND_NAME, cmpd_tid,
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
- /* Initialize data, assuming size of long_long >= size of member datatypes */
+ /* Initialize data, assuming size of long long >= size of member datatypes */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[i][j].i = (int)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
- orig_data[i][j].c = (char)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
- orig_data[i][j].s = (short)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ orig_data[i][j].i = (int)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ orig_data[i][j].c = (char)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ orig_data[i][j].s = (short)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].f = float_val[i][j];
/* some even-numbered integer values are negative */
@@ -3448,33 +3455,33 @@ test_nbit_compound_2(hid_t file)
if((dataset = H5Dcreate2(file, DSET_NBIT_COMPOUND_NAME_2, cmpd_tid2,
space, H5P_DEFAULT, dc, H5P_DEFAULT)) < 0) goto error;
- /* Initialize data, assuming size of long_long >= size of member datatypes */
+ /* Initialize data, assuming size of long long >= size of member datatypes */
for(i= 0;i< (size_t)size[0]; i++)
for(j = 0; j < (size_t)size[1]; j++) {
- orig_data[i][j].a.i = (int)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
- orig_data[i][j].a.c = (char)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
- orig_data[i][j].a.s = (short)(-((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ orig_data[i][j].a.i = (int)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ orig_data[i][j].a.c = (char)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ orig_data[i][j].a.s = (short)(-((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].a.f = float_val[i][j];
- orig_data[i][j].v = (unsigned int)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)precision[3])) << offset[3]);
+ orig_data[i][j].v = (unsigned int)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)precision[3])) << offset[3]);
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++)
- orig_data[i][j].b[m][n] = (char)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[4]-1))) << offset[4]);
+ orig_data[i][j].b[m][n] = (char)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[4]-1))) << offset[4]);
for(m = 0; m < (size_t)array_dims[0]; m++)
for(n = 0; n < (size_t)array_dims[1]; n++) {
- orig_data[i][j].d[m][n].i = (int)(-((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
- orig_data[i][j].d[m][n].c = (char)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
- orig_data[i][j].d[m][n].s = (short)(((long_long)HDrandom() %
- (long_long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
+ orig_data[i][j].d[m][n].i = (int)(-((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[0]-1))) << offset[0]);
+ orig_data[i][j].d[m][n].c = (char)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[1]-1))) << offset[1]);
+ orig_data[i][j].d[m][n].s = (short)(((long long)HDrandom() %
+ (long long)HDpow(2.0, (double)(precision[2]-1))) << offset[2]);
orig_data[i][j].d[m][n].f = float_val[i][j];
}
}
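
The initialization pattern repeated in these hunks builds test values that fit exactly inside the bit field the N-Bit filter is asked to preserve: a random value is reduced modulo 2^(precision-1) so its magnitude fits in precision-1 bits (unsigned members use the full precision), then shifted left by offset to position the field within the word. Below is a minimal standalone sketch of the same idea, using the standard rand() and pow() in place of the HDrandom()/HDpow() portability macros; the precision and offset values are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>

    int
    main(void)
    {
        unsigned precision = 17;    /* significant bits the filter keeps */
        unsigned offset    = 4;     /* bit position where the field starts */
        int      i;

        for(i = 0; i < 4; i++) {
            /* Magnitude fits in (precision - 1) bits; the shift moves the
             * field up so it starts at bit 'offset'. */
            int val = (int)(((long long)rand() %
                    (long long)pow(2.0, (double)(precision - 1))) << offset);
            printf("%d occupies bits [%u, %u)\n", val, offset, offset + precision);
        }
        return 0;
    }
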
@@ -6690,6 +6697,139 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_big_chunks_bypass_cache
+ *
+ * Purpose: Verifies that the library bypasses the chunk cache when the
+ * chunk size is bigger than the cache size and the chunk
+ * isn't allocated on disk.
+ *
+ * Note: This test is not very conclusive - it doesn't actually check
+ * whether the chunks bypass the cache... :-( -QAK
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Raymond Lu
+ * 11 Feb 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_big_chunks_bypass_cache(hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1; /* File ID */
+ hid_t fapl_local = -1; /* File access property list ID */
+ hid_t dcpl = -1; /* Dataset creation property list ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dsid = -1; /* Dataset ID */
+ hsize_t dim, chunk_dim; /* Dataset and chunk dimensions */
+ size_t rdcc_nelmts, rdcc_nbytes;
+ int fvalue = BYPASS_FILL_VALUE;
+ hsize_t count, stride, offset, block;
+ static int wdata[BYPASS_CHUNK_DIM], rdata[BYPASS_DIM];
+ int i, j;
+ herr_t ret; /* Generic return value */
+
+ TESTING("big chunks bypassing the cache");
+
+ h5_fixname(FILENAME[9], fapl, filename, sizeof filename);
+
+ /* Copy fapl passed to this function (as we will be modifying it) */
+ if((fapl_local = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Define cache size to be smaller than chunk size */
+ rdcc_nelmts = BYPASS_CHUNK_DIM/5;
+ rdcc_nbytes = sizeof(int)*BYPASS_CHUNK_DIM/5;
+ if(H5Pset_cache(fapl_local, 0, rdcc_nelmts, rdcc_nbytes, 0) < 0) FAIL_STACK_ERROR
+
+ /* Create file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_local)) < 0) FAIL_STACK_ERROR
+
+ /* Create 1-D dataspace */
+ dim = BYPASS_DIM;
+ if((sid = H5Screate_simple(1, &dim, NULL)) < 0) FAIL_STACK_ERROR
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+ /* Define chunk size. There will be only 2 chunks in the dataset. */
+ chunk_dim = BYPASS_CHUNK_DIM;
+ if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR
+
+ /* Define fill value, fill time, and chunk allocation time */
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fvalue) < 0) FAIL_STACK_ERROR
+ if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0) FAIL_STACK_ERROR
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR) < 0) FAIL_STACK_ERROR
+
+ /* Try to create dataset */
+ if((dsid = H5Dcreate2(fid, BYPASS_DATASET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Select first chunk to write the data */
+ offset = 0;
+ count = 1;
+ stride = 1;
+ block = BYPASS_CHUNK_DIM;
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &offset, &stride, &count, &block) < 0)
+ FAIL_STACK_ERROR
+
+ /* Initialize data to write */
+ for(i = 0; i < BYPASS_CHUNK_DIM; i++)
+ wdata[i] = i;
+
+ /* This write should bypass the cache because the chunk is bigger than the cache size
+ * and it's not allocated on disk. */
+ if(H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, wdata) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+
+ /* Reopen the dataset */
+ if((dsid = H5Dopen2(fid, BYPASS_DATASET, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Read both chunks. Reading the second chunk should bypass the cache because the
+ * chunk is bigger than the cache size and it isn't allocated on disk. */
+ if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0)
+ FAIL_STACK_ERROR
+
+ for(i = 0; i < BYPASS_CHUNK_DIM; i++)
+ if(rdata[i] != i) {
+ printf(" Read different values than written in the 1st chunk.\n");
+ printf(" At line %d and index %d, rdata = %d. It should be %d.\n", __LINE__, i, rdata[i], i);
+ TEST_ERROR
+ } /* end if */
+
+ for(j = BYPASS_CHUNK_DIM; j < BYPASS_DIM; j++)
+ if(rdata[j] != fvalue) {
+ printf(" Read different values than written in the 2nd chunk.\n");
+ printf(" At line %d and index %d, rdata = %d. It should be %d.\n", __LINE__, i, rdata[i], fvalue);
+ TEST_ERROR
+ } /* end if */
+
+ /* Close IDs */
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(fapl_local) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Pclose(fapl_local);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_big_chunks_bypass_cache() */
+
+
+/*-------------------------------------------------------------------------
* Function: test_chunk_fast
*
* Purpose: Tests support for extensible arrays as chunk index.
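
The new test relies on a raw-data chunk cache that is deliberately smaller than a single chunk, configured through H5Pset_cache() on the file access property list: with a chunk that cannot fit in the cache, the library can transfer it directly between memory and disk. Here is a condensed standalone sketch of that configuration; the sizes, the file name bypass_demo.h5, and the omitted error checking are illustrative choices, not the test's. As the test's own comments note, the bypass applies only while a chunk has not yet been allocated on disk.

    #include "hdf5.h"

    /* Error checks omitted for brevity. */
    int
    main(void)
    {
        hid_t   fapl, fid, sid, dcpl, dsid;
        hsize_t dim = 1000, chunk_dim = 500;        /* two chunks in the dataset */
        size_t  rdcc_nslots = 101;                  /* cache hash-table slots */
        size_t  rdcc_nbytes = sizeof(int) * 100;    /* cache smaller than one chunk */
        static int data[1000];

        /* Shrink the raw-data chunk cache below the size of a single chunk */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_cache(fapl, 0, rdcc_nslots, rdcc_nbytes, 0.75);

        fid  = H5Fcreate("bypass_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        sid  = H5Screate_simple(1, &dim, NULL);
        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 1, &chunk_dim);
        dsid = H5Dcreate2(fid, "Dset", H5T_NATIVE_INT, sid,
                          H5P_DEFAULT, dcpl, H5P_DEFAULT);

        /* Each 500-element chunk (2000 bytes) exceeds the 400-byte cache,
         * so the library can write the chunks straight to disk. */
        H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

        H5Dclose(dsid);
        H5Pclose(dcpl);
        H5Sclose(sid);
        H5Fclose(fid);
        H5Pclose(fapl);
        return 0;
    }
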
@@ -7050,6 +7190,7 @@ main(void)
#endif /* H5_NO_DEPRECATED_SYMBOLS */
nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_fast(my_fapl) < 0 ? 1 : 0);
if(H5Fclose(file) < 0)