From 54dff331778a660824961fdf6359e4eae998a13c Mon Sep 17 00:00:00 2001
From: Vailin Choi
Date: Mon, 15 May 2017 10:25:15 -0500
Subject: Incorporate additional code changes for the H5DOread_chunk patch from
 GE Healthcare (HDFFV-9934)

This is a similar set of changes to the one checked into the 1.8 branch:

Incorporate the code changes that were missing from the original patch:
(1) Additional tests in hl/test/test_dset_opt.c
(2) Fix in src/H5Dchunk.c for direct access when an entry is in the chunk
    cache but not dirty

Tested on platypus, ostrich, kituo, mayll, osx1010test, moohan, quail, emu.
---
 hl/test/test_dset_opt.c | 1188 +++++++++++++++++++++++++++++++++--------------
 src/H5Dchunk.c          |   43 +-
 2 files changed, 859 insertions(+), 372 deletions(-)

diff --git a/hl/test/test_dset_opt.c b/hl/test/test_dset_opt.c
index b868a2a..4b5fa19 100644
--- a/hl/test/test_dset_opt.c
+++ b/hl/test/test_dset_opt.c
@@ -26,6 +26,7 @@
 
 #define FILE_NAME "test_dectris.h5"
 
+/* Datasets for Direct Write tests */
 #define DATASETNAME1   "direct_write"
 #define DATASETNAME2   "skip_one_filter"
 #define DATASETNAME3   "skip_two_filters"
@@ -33,13 +34,20 @@
 #define DATASETNAME5   "contiguous_dset"
 #define DATASETNAME6   "invalid_argue"
 #define DATASETNAME7   "overwrite_chunk"
+/* Datasets for Direct Read tests */
+#define DATASETNAME8   "disabled_chunk_cache"
+#define DATASETNAME9   "flush_chunk_cache"
+#define DATASETNAME10  "read_w_valid_cache"
+#define DATASETNAME11  "unallocated_chunk"
+#define DATASETNAME12  "unfiltered_data"
+
 #define RANK      2
 #define NX        16
 #define NY        16
 #define CHUNK_NX  4
 #define CHUNK_NY  4
 
-#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*(double)1.001F)+12)
+#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001)+12)
 
 /* Temporary filter IDs used for testing */
 #define H5Z_FILTER_BOGUS1 305
@@ -84,10 +92,10 @@ const H5Z_class2_t H5Z_BOGUS2[1] = {{
 /*-------------------------------------------------------------------------
  * Function:    test_direct_chunk_write
  *
- * Purpose:     Test the basic functionality of H5DOwrite_chunk/H5DOread_chunk
+ * Purpose:     Test the basic functionality of H5DOwrite_chunk
  *
- * Return:      Success:        0
- *              Failure:        1
+ * Return:      Success:        0
+ *              Failure:        1
  *
  * Programmer:  Raymond Lu
  *              30 November 2012
@@ -110,31 +118,24 @@ test_direct_chunk_write (hid_t file)
     int          i, j, n;
 
     unsigned     filter_mask = 0;
-    unsigned     read_filter_mask = 0;
     int          direct_buf[CHUNK_NX][CHUNK_NY];
     int          check_chunk[CHUNK_NX][CHUNK_NY];
     hsize_t      offset[2] = {0, 0};
     size_t       buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
 
     const Bytef *z_src = (const Bytef*)(direct_buf);
-    Bytef       *z_dst;         /*destination buffer        */
+    Bytef       *z_dst = NULL;  /*destination buffer        */
     uLongf       z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
     uLong        z_src_nbytes = (uLong)buf_size;
-    int          aggression = 9;    /* Compression aggression setting */
-    void        *outbuf = NULL;     /* Pointer to new buffer */
-
-    /* For H5DOread_chunk() */
-    void        *readbuf = NULL;                    /* Buffer for reading data */
-    const Bytef *pt_readbuf;                        /* Point to the buffer for data read */
-    hsize_t      read_chunk_nbytes;                 /* Size of chunk on disk */
-    int          read_dst_buf[CHUNK_NX][CHUNK_NY];  /* Buffer to hold un-compressed data */
+    int          aggression = 9;    /* Compression aggression setting */
+    void        *outbuf = NULL;     /* Pointer to new buffer */
 
     hsize_t      start[2];  /* Start of hyperslab */
     hsize_t      stride[2]; /* Stride of hyperslab */
     hsize_t      count[2];  /* Block count */
     hsize_t      block[2];  /* Block sizes */
 
-    TESTING("basic functionality of H5DOwrite_chunk/H5DOread_chunk");
+    TESTING("basic functionality of H5DOwrite_chunk");
 
     /*
      * Create the data space with unlimited dimensions.
@@ -165,76 +166,26 @@ test_direct_chunk_write (hid_t file)
                                 cparms, H5P_DEFAULT)) < 0)
         goto error;
 
+    /* Initialize the dataset */
+    for(i = n = 0; i < NX; i++)
+        for(j = 0; j < NY; j++)
+            data[i][j] = n++;
+
     if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
         goto error;
 
-    HDmemset(data, 0, sizeof(data));
-
-    /* Initialize data for the first chunk */
-    for(i = n = 0; i < CHUNK_NX; i++)
-        for(j = 0; j < CHUNK_NY; j++)
-            data[i][j] = n++;
-
     /*
-     * Write the data for the dataset. */
+     * Write the data for the dataset.  It should stay in the chunk cache.
+     * It will be evicted from the cache by the H5DOwrite_chunk calls.
+     */
     if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, data)) < 0)
         goto error;
 
-    if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
-        goto error;
-
-    if(H5Dclose(dataset) < 0)
-        goto error;
-
-    if((dataset = H5Dopen2(file, DATASETNAME1, H5P_DEFAULT)) < 0)
-        goto error;
-
-    offset[0] = offset[1] = 0;
-
-    /* Get the size of the compressed chunk */
-    ret = H5Dget_chunk_storage_size(dataset, offset, &read_chunk_nbytes);
-
-    readbuf = HDmalloc(read_chunk_nbytes);
-    pt_readbuf = (const Bytef *)readbuf;
-
-    /* Test to use H5DOread_chunk() to read the chunk back */
-    if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, readbuf)) < 0)
-        goto error;
-
-    /* uncompress(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen) */
-    ret = uncompress((Bytef *)read_dst_buf, (uLongf *)&buf_size, pt_readbuf, (uLong)read_chunk_nbytes);
-
-    /* Check for various zlib errors */
-    if(Z_BUF_ERROR == ret) {
-        HDfprintf(stderr, "error: not enough room in output buffer");
-        goto error;
-    } else if(Z_MEM_ERROR == ret) {
-        HDfprintf(stderr, "error: not enough memory");
-        goto error;
-    } else if(Z_OK != ret) {
-        HDfprintf(stderr, "error: corrupted input data");
-        goto error;
-    }
-
-    /* Check that the values read are the same as the values written */
-    for(i = 0; i < CHUNK_NX; i++) {
-        for(j = 0; j < CHUNK_NY; j++) {
-            if(data[i][j] != read_dst_buf[i][j]) {
-                printf("    1. Read different values than written.");
-                printf("    At index %d,%d\n", i, j);
-                printf("    data=%d, read_dst_buf=%d\n", data[i][j], read_dst_buf[i][j]);
-                goto error;
-            }
-        }
-    }
-
-    if(readbuf)
-        HDfree(readbuf);
-
     /* Initialize data for one chunk */
     for(i = n = 0; i < CHUNK_NX; i++)
         for(j = 0; j < CHUNK_NY; j++)
-           direct_buf[i][j] = n++;
+            direct_buf[i][j] = n++;
 
     /* Allocate output (compressed) buffer */
     outbuf = HDmalloc(z_dst_nbytes);
@@ -255,8 +206,8 @@ test_direct_chunk_write (hid_t file)
         goto error;
     }
 
-    /* Write the compressed chunk data repeatedly to cover all the chunks in the
-     * dataset, using the direct writing function. */
+    /* Write the compressed chunk data repeatedly to cover all the chunks in the
+     * dataset, using the direct writing function. */
     for(i=0; i<NX/CHUNK_NX; i++) {
[...]

diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
[...]
         H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint];
+        hbool_t flush;
 
         /* Sanity checks  */
         HDassert(udata.idx_hint < rdcc->nslots);
         HDassert(rdcc->slot[udata.idx_hint]);
 
-        /* If the cached chunk is dirty, it must be flushed to get accurate size */
-        if( ent->dirty == TRUE ) {
+        flush = (ent->dirty == TRUE) ? TRUE : FALSE;
 
-            /* Fill the DXPL cache values for later use */
-            if(H5D__get_dxpl_cache(io_info.raw_dxpl_id, &dxpl_cache) < 0)
+        /* Fill the DXPL cache values for later use */
+        if(H5D__get_dxpl_cache(io_info.raw_dxpl_id, &dxpl_cache) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
 
-            /* Flush the chunk to disk and clear the cache entry */
-            if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], TRUE) < 0)
-                HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+        /* Flush the chunk to disk and clear the cache entry */
+        if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], flush) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
 
-            /* Reset fields about the chunk we are looking for */
-            udata.filter_mask = 0;
-            udata.chunk_block.offset = HADDR_UNDEF;
-            udata.chunk_block.length = 0;
-            udata.idx_hint = UINT_MAX;
+        /* Reset fields about the chunk we are looking for */
+        udata.filter_mask = 0;
+        udata.chunk_block.offset = HADDR_UNDEF;
+        udata.chunk_block.length = 0;
+        udata.idx_hint = UINT_MAX;
 
-            /* Get the new file address / chunk size after flushing */
-            if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
-                HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
-        }
+        /* Get the new file address / chunk size after flushing */
+        if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
     }
 
     /* Make sure the address of the chunk is returned. */
@@ -648,6 +649,8 @@ H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, h
     HDassert(offset);
     HDassert(storage_size);
 
+    *storage_size = 0;
+
     io_info.dset = dset;
     io_info.raw_dxpl_id = dxpl_id;
     io_info.md_dxpl_id = dxpl_id;
@@ -660,10 +663,8 @@ H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, h
 #endif /* H5_DEBUG_BUILD */
 
     /* Allocate dataspace and initialize it if it hasn't been. */
-    if(!(*layout->ops->is_space_alloc)(&layout->storage)) {
-        *storage_size = 0;
+    if(!(*layout->ops->is_space_alloc)(&layout->storage))
         HGOTO_DONE(SUCCEED)
-    }
 
     /* Calculate the index of this chunk */
     H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
@@ -2955,7 +2956,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
     /* Check for chunk in cache */
     if(dset->shared->cache.chunk.nslots > 0) {
         /* Determine the chunk's location in the hash table */
-	idx = H5D__chunk_hash_val(dset->shared, scaled);
+        idx = H5D__chunk_hash_val(dset->shared, scaled);
 
         /* Get the chunk cache entry for that location */
         ent = dset->shared->cache.chunk.slot[idx];
@@ -2979,7 +2980,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
             udata->idx_hint = idx;
             udata->chunk_block.offset = ent->chunk_block.offset;
             udata->chunk_block.length = ent->chunk_block.length;;
-	    udata->chunk_idx = ent->chunk_idx;
+            udata->chunk_idx = ent->chunk_idx;
         } /* end if */
         else {
             /* Invalidate idx_hint, to signal that the chunk is not in cache */
-- 
cgit v0.12
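
For reference, the round trip these tests exercise looks roughly like the
sketch below. It is not part of the patch: a minimal standalone program,
assuming an HDF5 1.10 build with the high-level (hl) library and zlib. The
file name "direct_read_sketch.h5", the dataset name "dset", and the CNX/CNY
sizes are illustrative only, and error-path cleanup of HDF5 ids is omitted
for brevity. A regular H5Dwrite followed by H5Fflush leaves the chunk
resident in the chunk cache in a clean (not dirty) state, which is the case
the src/H5Dchunk.c fix above addresses; H5DOread_chunk then returns the raw,
still-compressed chunk bytes, which the caller undoes with zlib's
uncompress(), just as the tests do.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <zlib.h>
#include "hdf5.h"
#include "hdf5_hl.h"

#define CNX 4   /* chunk (and dataset) dimensions, illustrative */
#define CNY 4

int
main(void)
{
    hid_t    file = -1, space = -1, dcpl = -1, dset = -1;
    hsize_t  dims[2]       = {CNX, CNY};
    hsize_t  chunk_dims[2] = {CNX, CNY};
    hsize_t  offset[2]     = {0, 0};    /* logical position of the chunk   */
    hsize_t  chunk_nbytes  = 0;         /* filtered size of chunk on disk  */
    uint32_t filter_mask   = 0;         /* filters applied to raw bytes    */
    int      wdata[CNX][CNY], rdata[CNX][CNY];
    void    *raw = NULL;
    uLongf   dst_nbytes = (uLongf)sizeof(rdata);
    int      i, j, n = 0;

    for(i = 0; i < CNX; i++)
        for(j = 0; j < CNY; j++)
            wdata[i][j] = n++;

    if((file = H5Fcreate("direct_read_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;
    if((space = H5Screate_simple(2, dims, NULL)) < 0)
        goto error;
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto error;
    if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0 || H5Pset_deflate(dcpl, 9) < 0)
        goto error;
    if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
        goto error;

    /* Regular write + flush: the chunk is written to disk, but its cache
     * entry stays resident and clean -- the case fixed in H5Dchunk.c */
    if(H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata) < 0)
        goto error;
    if(H5Fflush(dset, H5F_SCOPE_LOCAL) < 0)
        goto error;

    /* Get the on-disk (compressed) chunk size, then read the raw bytes */
    if(H5Dget_chunk_storage_size(dset, offset, &chunk_nbytes) < 0)
        goto error;
    if(NULL == (raw = malloc((size_t)chunk_nbytes)))
        goto error;
    if(H5DOread_chunk(dset, H5P_DEFAULT, offset, &filter_mask, raw) < 0)
        goto error;

    /* The library did not undo the deflate filter, so undo it here */
    if(Z_OK != uncompress((Bytef *)rdata, &dst_nbytes, (const Bytef *)raw, (uLong)chunk_nbytes))
        goto error;

    /* Verify the round trip */
    for(i = 0; i < CNX; i++)
        for(j = 0; j < CNY; j++)
            if(wdata[i][j] != rdata[i][j])
                goto error;

    printf("direct chunk read round trip OK, filter_mask = 0x%x\n", (unsigned)filter_mask);
    free(raw);
    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;

error:
    free(raw);
    return 1;
}

Note how this depends on the H5Dchunk.c hunks above: the eviction in
H5D__chunk_direct_read becomes unconditional, but the cache entry is only
flushed when it is actually dirty, so a clean cached chunk like the one in
this sketch yields a valid size and valid raw bytes instead of stale data.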