From cb8e82a53d91067b16deba7ebc76307a336b0f02 Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Tue, 25 Apr 2017 18:08:53 -0500 Subject: Incorporate patch from GE Healthcare (HDFFV-9934) This is the same patch that had already been integrated to 1.8 branch (pull request #387). Tested on moohan, ostrich, platypus, emu, osx1010test, quail, kituo, mayll. --- hl/src/H5DO.c | 81 ++++++++- hl/src/H5DOpublic.h | 6 + hl/test/test_dset_opt.c | 461 +++++++++++++++++++++++++++++++++++++++--------- src/H5D.c | 40 +++++ src/H5Dchunk.c | 282 ++++++++++++++++++++++++++--- src/H5Dio.c | 149 +++++++++++----- src/H5Dpkg.h | 3 + src/H5Dpublic.h | 6 + src/H5Pdxpl.c | 28 +++ 9 files changed, 898 insertions(+), 158 deletions(-) diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c index 8db6768..22d40fc 100644 --- a/hl/src/H5DO.c +++ b/hl/src/H5DO.c @@ -32,7 +32,7 @@ * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu - * 30 July 2012 + * 30 July 2012 * *------------------------------------------------------------------------- */ @@ -60,7 +60,7 @@ H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *o /* If the user passed in a default DXPL, create one to pass to H5Dwrite() */ if(H5P_DEFAULT == dxpl_id) { - if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) goto done; created_dxpl = TRUE; } /* end if */ @@ -87,16 +87,87 @@ done: if(H5Pclose(dxpl_id) < 0) ret_value = FAIL; } /* end if */ - else + else { /* Reset the direct write flag on user DXPL */ + do_direct_write = FALSE; if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0) ret_value = FAIL; + } return(ret_value); } /* end H5DOwrite_chunk() */ -/* +/*------------------------------------------------------------------------- + * Function: H5DOread_chunk + * + * Purpose: Reads an entire chunk from the file directly. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Matthew Strong (GE Healthcare) + * 14 February 2016 + * + *--------------------------------------------------------------------------- + */ +herr_t +H5DOread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, uint32_t *filters, + void *buf) +{ + hbool_t created_dxpl = FALSE; /* Whether we created a DXPL */ + hbool_t do_direct_read = TRUE; /* Flag for direct reads */ + herr_t ret_value = FAIL; /* Return value */ + + /* Check arguments */ + if(dset_id < 0) + goto done; + if(!buf) + goto done; + if(!offset) + goto done; + if(!filters) + goto done; + + /* If the user passed in a default DXPL, create one to pass to H5Dread() */ + if(H5P_DEFAULT == dxpl_id) { + if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + goto done; + created_dxpl = TRUE; + } /* end if */ + + /* Set direct read parameters */ + if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0) + goto done; + if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &offset) < 0) + goto done; + + /* Read chunk */ + if(H5Dread(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0) + goto done; + /* Get the filter mask */ + if(H5Pget(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, filters) < 0) + goto done; + + /* Indicate success */ + ret_value = SUCCEED; + +done: + if(created_dxpl) { + if(H5Pclose(dxpl_id) < 0) + ret_value = FAIL; + } /* end if */ + else { + /* Reset the direct read flag on user DXPL */ + do_direct_read = FALSE; + if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0) + ret_value = FAIL; + } + + return(ret_value); +} /* end H5DOread_chunk() */ + + +/*------------------------------------------------------------------------- * Function: H5DOappend() * * Purpose: To append elements to a dataset.
@@ -122,6 +193,8 @@ done: * ret_value = FAIL; * goto done; * } + * + *------------------------------------------------------------------------- */ herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension, diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h index ce0d32e..d5c8de4 100644 --- a/hl/src/H5DOpublic.h +++ b/hl/src/H5DOpublic.h @@ -28,6 +28,12 @@ extern "C" { H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset, size_t data_size, const void *buf); +H5_HLDLL herr_t H5DOread_chunk(hid_t dset_id, /*in*/ + hid_t dxpl_id, /*in*/ + const hsize_t *offset, /*in*/ + uint32_t *filters, /*out*/ + void *buf); /*out*/ + H5_HLDLL herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension, hid_t memtype, const void *buf); diff --git a/hl/test/test_dset_opt.c b/hl/test/test_dset_opt.c index 03c467a..d3a6b5c 100644 --- a/hl/test/test_dset_opt.c +++ b/hl/test/test_dset_opt.c @@ -76,11 +76,10 @@ const H5Z_class2_t H5Z_BOGUS2[1] = {{ /*------------------------------------------------------------------------- * Function: test_direct_chunk_write * - * Purpose: Test the basic functionality of H5DOwrite_chunk + * Purpose: Test the basic functionality of H5DOwrite_chunk/H5DOread_chunk * * Return: Success: 0 - * - * Failure: 1 + * Failure: 1 * * Programmer: Raymond Lu * 30 November 2012 @@ -103,24 +102,31 @@ test_direct_chunk_write (hid_t file) int i, j, n; unsigned filter_mask = 0; + unsigned read_filter_mask = 0; int direct_buf[CHUNK_NX][CHUNK_NY]; int check_chunk[CHUNK_NX][CHUNK_NY]; hsize_t offset[2] = {0, 0}; size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int); const Bytef *z_src = (const Bytef*)(direct_buf); - Bytef *z_dst; /*destination buffer */ + Bytef *z_dst; /*destination buffer */ uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size); uLong z_src_nbytes = (uLong)buf_size; - int aggression = 9; /* Compression aggression setting */ - void *outbuf = NULL; /* Pointer to new buffer */ + int aggression = 9; /* Compression aggression setting */ + void *outbuf = NULL; /* Pointer to new buffer */ + + /* For H5DOread_chunk() */ + void *readbuf = NULL; /* Buffer for reading data */ + const Bytef *pt_readbuf; /* Point to the buffer for data read */ + hsize_t read_chunk_nbytes; /* Size of chunk on disk */ + int read_dst_buf[CHUNK_NX][CHUNK_NY]; /* Buffer to hold un-compressed data */ hsize_t start[2]; /* Start of hyperslab */ hsize_t stride[2]; /* Stride of hyperslab */ hsize_t count[2]; /* Block count */ hsize_t block[2]; /* Block sizes */ - TESTING("basic functionality of H5DOwrite_chunk"); + TESTING("basic functionality of H5DOwrite_chunk/H5DOread_chunk"); /* * Create the data space with unlimited dimensions. @@ -151,26 +157,76 @@ test_direct_chunk_write (hid_t file) cparms, H5P_DEFAULT)) < 0) goto error; - /* Initialize the dataset */ - for(i = n = 0; i < NX; i++) - for(j = 0; j < NY; j++) - data[i][j] = n++; - if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) goto error; + HDmemset(data, 0, sizeof(data)); + /* Initialize data for the first chunk */ + for(i = n = 0; i < CHUNK_NX; i++) + for(j = 0; j < CHUNK_NY; j++) + data[i][j] = n++; + /* - * Write the data for the dataset. It should stay in the chunk cache. - * It will be evicted from the cache by the H5DOwrite_chunk calls. - */ + * Write the data for the dataset. 
*/ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, data)) < 0) goto error; + if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0) + goto error; + + if(H5Dclose(dataset) < 0) + goto error; + + if((dataset = H5Dopen2(file, DATASETNAME1, H5P_DEFAULT)) < 0) + goto error; + + offset[0] = offset[1] = 0; + + /* Get the size of the compressed chunk */ + if((ret = H5Dget_chunk_storage_size(dataset, offset, &read_chunk_nbytes)) < 0) + goto error; + + if(NULL == (readbuf = HDmalloc((size_t)read_chunk_nbytes))) + goto error; + pt_readbuf = (const Bytef *)readbuf; + + /* Test to use H5DOread_chunk() to read the chunk back */ + if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, readbuf)) < 0) + goto error; + + /* uncompress(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen) */ + ret = uncompress((Bytef *)read_dst_buf, (uLongf *)&buf_size, pt_readbuf, (uLong)read_chunk_nbytes); + + /* Check for various zlib errors */ + if(Z_BUF_ERROR == ret) { + HDfprintf(stderr, "error: not enough room in output buffer"); + goto error; + } else if(Z_MEM_ERROR == ret) { + HDfprintf(stderr, "error: not enough memory"); + goto error; + } else if(Z_OK != ret) { + HDfprintf(stderr, "error: corrupted input data"); + goto error; + } + + /* Check that the values read are the same as the values written */ + for(i = 0; i < CHUNK_NX; i++) { + for(j = 0; j < CHUNK_NY; j++) { + if(data[i][j] != read_dst_buf[i][j]) { + printf(" 1. Read different values than written."); + printf(" At index %d,%d\n", i, j); + printf(" data=%d, read_dst_buf=%d\n", data[i][j], read_dst_buf[i][j]); + goto error; + } + } + } + + if(readbuf) + HDfree(readbuf); + /* Initialize data for one chunk */ for(i = n = 0; i < CHUNK_NX; i++) for(j = 0; j < CHUNK_NY; j++) - direct_buf[i][j] = n++; + direct_buf[i][j] = n++; /* Allocate output (compressed) buffer */ outbuf = HDmalloc(z_dst_nbytes); @@ -181,18 +237,18 @@ test_direct_chunk_write (hid_t file) /* Check for various zlib errors */ if(Z_BUF_ERROR == ret) { - fprintf(stderr, "overflow"); + HDfprintf(stderr, "overflow"); goto error; } else if(Z_MEM_ERROR == ret) { - fprintf(stderr, "deflate memory error"); + HDfprintf(stderr, "deflate memory error"); goto error; } else if(Z_OK != ret) { - fprintf(stderr, "other deflate error"); + HDfprintf(stderr, "other deflate error"); goto error; } - /* Write the compressed chunk data repeatedly to cover all the chunks in the - * dataset, using the direct writing function. */ + /* Write the compressed chunk data repeatedly to cover all the chunks in the + * dataset, using the direct writing function.
*/ for(i=0; ishared->layout.type) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") + + /* Call private function */ + if(H5D__get_chunk_storage_size(dset, H5P_DATASET_XFER_DEFAULT, offset, chunk_nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get storage size of chunk") + +done: + FUNC_LEAVE_API(ret_value); +} diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index ce684a0..4905a70 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -303,6 +303,9 @@ static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id, H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf); #endif /* H5_HAVE_PARALLEL */ +static int +H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata); + /*********************/ /* Package Variables */ /*********************/ @@ -369,8 +372,7 @@ H5FL_EXTERN(H5S_sel_iter_t); /*------------------------------------------------------------------------- * Function: H5D__chunk_direct_write * - * Purpose: Internal routine to write a chunk - * directly into the file. + * Purpose: Internal routine to write a chunk directly into the file. * * Return: Non-negative on success/Negative on failure * @@ -408,7 +410,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, /* Allocate dataspace and initialize it if it hasn't been. */ if(!(*layout->ops->is_space_alloc)(&layout->storage)) { - /* Allocate storage */ + /* Allocate storage */ if(H5D__alloc_storage(&io_info, H5D_ALLOC_WRITE, FALSE, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") } @@ -465,7 +467,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], FALSE) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") + HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") } /* end if */ /* Write the data to the file */ @@ -491,6 +493,244 @@ done: /*------------------------------------------------------------------------- + * Function: H5D__chunk_direct_read + * + * Purpose: Internal routine to read a chunk directly from the file. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Matthew Strong (GE Healthcare) + * 14 February 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__chunk_direct_read(const H5D_t *dset, hid_t dxpl_id, hsize_t *offset, + uint32_t* filters, void *buf) +{ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */ + H5D_chunk_ud_t udata; /* User data for querying chunk info */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + H5D_io_info_t io_info; /* to hold the dset and two dxpls (meta and raw data) */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + hbool_t md_dxpl_generated = FALSE; /* bool to indicate whether we should free the md_dxpl_id at exit */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL) + + /* Check args */ + HDassert(dset && H5D_CHUNKED == layout->type); + HDassert(offset); + HDassert(filters); + HDassert(buf); + + io_info.dset = dset; + io_info.raw_dxpl_id = dxpl_id; + io_info.md_dxpl_id = dxpl_id; + + /* set the dxpl IO type for sanity checking at the FD layer */ +#ifdef H5_DEBUG_BUILD + if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls") + md_dxpl_generated = TRUE; +#endif /* H5_DEBUG_BUILD */ + + /* Allocate dataspace and initialize it if it hasn't been. */ + if(!(*layout->ops->is_space_alloc)(&layout->storage)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "storage is not initialized") + + /* Calculate the index of this chunk */ + H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled); + scaled[dset->shared->ndims] = 0; + + /* Reset fields about the chunk we are looking for */ + udata.filter_mask = 0; + udata.chunk_block.offset = HADDR_UNDEF; + udata.chunk_block.length = 0; + udata.idx_hint = UINT_MAX; + + /* Find out the file address of the chunk */ + if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* Check if the requested chunk exists in the chunk cache */ + if(UINT_MAX != udata.idx_hint) { + H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint]; + + /* Sanity checks */ + HDassert(udata.idx_hint < rdcc->nslots); + HDassert(rdcc->slot[udata.idx_hint]); + + /* If the cached chunk is dirty, it must be flushed to get accurate size */ + if( ent->dirty == TRUE ) { + + /* Fill the DXPL cache values for later use */ + if(H5D__get_dxpl_cache(io_info.raw_dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Flush the chunk to disk and clear the cache entry */ + if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], TRUE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") + + /* Reset fields about the chunk we are looking for */ + udata.filter_mask = 0; + udata.chunk_block.offset = HADDR_UNDEF; + udata.chunk_block.length = 0; + udata.idx_hint = UINT_MAX; + + /* Get the new file address / chunk size 
after flushing */ + if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + } + } + + /* Make sure the address of the chunk is returned. */ + if(!H5F_addr_defined(udata.chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") + + /* Read the chunk data into the supplied buffer */ + if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, udata.chunk_block.length, io_info.raw_dxpl_id, buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") + + /* Return the filter mask */ + *filters = udata.filter_mask; + +done: +#ifdef H5_DEBUG_BUILD + if(md_dxpl_generated && H5I_dec_ref(io_info.md_dxpl_id) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't close metadata dxpl") +#endif /* H5_DEBUG_BUILD */ + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__chunk_direct_read() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__get_chunk_storage_size + * + * Purpose: Internal routine to read the storage size of a chunk on disk. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Matthew Strong (GE Healthcare) + * 20 October 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, hsize_t *storage_size) +{ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */ + hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */ + H5D_io_info_t io_info; /* to hold the dset and two dxpls (meta and raw data) */ + H5D_chunk_ud_t udata; /* User data for querying chunk info */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + hbool_t md_dxpl_generated = FALSE; /* bool to indicate whether we should free the md_dxpl_id at exit */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL) + + /* Check args */ + HDassert(dset && H5D_CHUNKED == layout->type); + HDassert(offset); + HDassert(storage_size); + + io_info.dset = dset; + io_info.raw_dxpl_id = dxpl_id; + io_info.md_dxpl_id = dxpl_id; + + /* set the dxpl IO type for sanity checking at the FD layer */ +#ifdef H5_DEBUG_BUILD + if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls") + md_dxpl_generated = TRUE; +#endif /* H5_DEBUG_BUILD */ + + /* Allocate dataspace and initialize it if it hasn't been. 
*/ + if(!(*layout->ops->is_space_alloc)(&layout->storage)) { + *storage_size = 0; + HGOTO_DONE(SUCCEED) + } + + /* Calculate the index of this chunk */ + H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled); + scaled[dset->shared->ndims] = 0; + + /* Reset fields about the chunk we are looking for */ + udata.chunk_block.offset = HADDR_UNDEF; + udata.chunk_block.length = 0; + udata.idx_hint = UINT_MAX; + + /* Find out the file address of the chunk */ + if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* The requested chunk is not in cache or on disk */ + if(!H5F_addr_defined(udata.chunk_block.offset) && UINT_MAX == udata.idx_hint) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk storage is not allocated") + + /* Check if there are filters registered to the dataset */ + if( dset->shared->dcpl_cache.pline.nused > 0 ) { + /* Check if the requested chunk exists in the chunk cache */ + if(UINT_MAX != udata.idx_hint) { + /* Sanity checks */ + HDassert(udata.idx_hint < rdcc->nslots); + HDassert(rdcc->slot[udata.idx_hint]); + H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint]; + + /* If the cached chunk is dirty, it must be flushed to get accurate size */ + if( ent->dirty == TRUE ) { + /* Fill the DXPL cache values for later use */ + if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Flush the chunk to disk and clear the cache entry */ + if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], TRUE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk") + + /* Reset fields about the chunk we are looking for */ + udata.chunk_block.offset = HADDR_UNDEF; + udata.chunk_block.length = 0; + udata.idx_hint = UINT_MAX; + + /* Get the new file address / chunk size after flushing */ + if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + } + } + + /* Make sure the address of the chunk is returned. 
*/ + if(!H5F_addr_defined(udata.chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") + + /* Return the chunk size on disk */ + *storage_size = udata.chunk_block.length; + } + /* There are no filters registered, return the chunk size from the storage layout */ + else + *storage_size = dset->shared->layout.u.chunk.size; + +done: +#ifdef H5_DEBUG_BUILD + if(md_dxpl_generated && H5I_dec_ref(io_info.md_dxpl_id) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't close metadata dxpl") +#endif /* H5_DEBUG_BUILD */ + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__get_chunk_storage_size */ + + +/*------------------------------------------------------------------------- * Function: H5D__chunk_set_info_real * * Purpose: Internal routine to set the information about chunks for a dataset @@ -519,22 +759,22 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, /* Compute the # of chunks in dataset dimensions */ for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) { /* Round up to the next integer # of chunks, to accomodate partial chunks */ - layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; + layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; if(H5S_UNLIMITED == max_dims[u]) layout->max_chunks[u] = H5S_UNLIMITED; else layout->max_chunks[u] = ((max_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; /* Accumulate the # of chunks */ - layout->nchunks *= layout->chunks[u]; - layout->max_nchunks *= layout->max_chunks[u]; + layout->nchunks *= layout->chunks[u]; + layout->max_nchunks *= layout->max_chunks[u]; } /* end for */ /* Get the "down" sizes for each dimension */ if(H5VM_array_down(ndims, layout->chunks, layout->down_chunks) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value") if(H5VM_array_down(ndims, layout->max_chunks, layout->max_down_chunks) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value") + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value") done: FUNC_LEAVE_NOAPI(ret_value) @@ -2383,14 +2623,14 @@ H5D__chunk_dest(H5D_t *dset, hid_t dxpl_id) /* Flush all the cached chunks */ for(ent = rdcc->head; ent; ent = next) { - next = ent->next; - if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) - nerrors++; + next = ent->next; + if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) + nerrors++; } /* end for */ /* Continue even if there are failures. 
*/ if(nerrors) - HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") + HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks") /* Release cache structures */ if(rdcc->slot) @@ -2997,27 +3237,27 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t HDassert(ent->idx < rdcc->nslots); if(flush) { - /* Flush */ - if(H5D__chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) - HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") + /* Flush */ + if(H5D__chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0) + HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") } /* end if */ else { /* Don't flush, just free chunk */ - if(ent->chunk != NULL) - ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, + if(ent->chunk != NULL) + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL : &(dset->shared->dcpl_cache.pline))); } /* end else */ /* Unlink from list */ if(ent->prev) - ent->prev->next = ent->next; + ent->prev->next = ent->next; else - rdcc->head = ent->next; + rdcc->head = ent->next; if(ent->next) - ent->next->prev = ent->prev; + ent->next->prev = ent->prev; else - rdcc->tail = ent->prev; + rdcc->tail = ent->prev; ent->prev = ent->next = NULL; /* Unlink from temporary list */ diff --git a/src/H5Dio.c b/src/H5Dio.c index dfd3f17..572e6cf 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -125,9 +125,13 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf/*out*/) { H5D_t *dset = NULL; - const H5S_t *mem_space = NULL; - const H5S_t *file_space = NULL; - herr_t ret_value = SUCCEED; /* Return value */ + const H5S_t *mem_space = NULL; + const H5S_t *file_space = NULL; + H5P_genplist_t *plist; /* Property list pointer */ + hsize_t *direct_offset = NULL; + hbool_t direct_read = FALSE; + uint32_t direct_filters = 0; + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE6("e", "iiiiix", dset_id, mem_type_id, mem_space_id, file_space_id, @@ -135,28 +139,29 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, /* check arguments */ if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if(NULL == dset->oloc.file) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if(mem_space_id < 0 || file_space_id < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") if(H5S_ALL != mem_space_id) { - if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") + if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") - /* Check for valid selection */ - if(H5S_SELECT_VALID(mem_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") + /* Check for valid selection */ + if(H5S_SELECT_VALID(mem_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } /* end if */ + if(H5S_ALL != file_space_id) { - if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, 
"not a data space") + if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") - /* Check for valid selection */ - if(H5S_SELECT_VALID(file_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") + /* Check for valid selection */ + if(H5S_SELECT_VALID(file_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } /* end if */ /* Get the default dataset transfer property list if the user didn't provide one */ @@ -166,9 +171,55 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, if(TRUE != H5P_isa_class(plist_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") - /* read raw data */ - if(H5D__read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + /* Get the dataset transfer property list */ + if(NULL == (plist = (H5P_genplist_t *)H5I_object(plist_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") + + /* Retrieve the 'direct read' flag */ + if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &direct_read) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting flag for direct chunk read") + + if(direct_read) { + unsigned u; + hsize_t dims[H5O_LAYOUT_NDIMS]; + hsize_t internal_offset[H5O_LAYOUT_NDIMS]; + + if(H5D_CHUNKED != dset->shared->layout.type) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") + + /* Get the direct chunk offset property */ + if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &direct_offset) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting direct offset from xfer properties") + + /* The library's chunking code requires the offset terminates with a zero. 
So transfer the + * offset array to an internal offset array */ + for(u = 0; u < dset->shared->ndims; u++) { + /* Make sure the offset doesn't exceed the dataset's dimensions */ + if(direct_offset[u] > dset->shared->curr_dims[u]) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset") + + /* Make sure the offset fall right on a chunk's boundary */ + if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u]) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary") + + internal_offset[u] = direct_offset[u]; + } /* end for */ + + /* Terminate the offset with a zero */ + internal_offset[dset->shared->ndims] = 0; + + /* Read the raw chunk */ + if(H5D__chunk_direct_read(dset, plist_id, internal_offset, &direct_filters, buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read chunk directly") + /* Set the chunk filter mask property */ + if(H5P_set(plist, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, &direct_filters) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error setting filter mask xfer property") + } + else { + /* read raw data */ + if(H5D__read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + } done: FUNC_LEAVE_API(ret_value) @@ -245,28 +296,28 @@ H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, /* Check dataspace selections if this is not a direct write */ if(!direct_write) { if(mem_space_id < 0 || file_space_id < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - if(H5S_ALL != mem_space_id) { - if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - /* Check for valid selection */ - if(H5S_SELECT_VALID(mem_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent") - } /* end if */ - if(H5S_ALL != file_space_id) { - if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - /* Check for valid selection */ - if(H5S_SELECT_VALID(file_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent") - } /* end if */ + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + if(H5S_ALL != mem_space_id) { + if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + /* Check for valid selection */ + if(H5S_SELECT_VALID(mem_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent") + } /* end if */ + if(H5S_ALL != file_space_id) { + if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + /* Check for valid selection */ + if(H5S_SELECT_VALID(file_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent") + } /* end if */ } if(H5D__pre_write(dset, direct_write, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data") + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data") done: FUNC_LEAVE_API(ret_value) @@ -300,15 +351,15 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, uint32_t direct_filters; 
hsize_t *direct_offset; uint32_t direct_datasize; - hsize_t internal_offset[H5O_LAYOUT_NDIMS]; - unsigned u; /* Local index variable */ + hsize_t internal_offset[H5O_LAYOUT_NDIMS]; + unsigned u; /* Local index variable */ /* Get the dataset transfer property list */ if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") if(H5D_CHUNKED != dset->shared->layout.type) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") /* Retrieve parameters for direct chunk write */ if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &direct_filters) < 0) @@ -318,19 +369,19 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &direct_datasize) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting data size for direct chunk write") - /* The library's chunking code requires the offset terminates with a zero. So transfer the + /* The library's chunking code requires the offset terminates with a zero. So transfer the * offset array to an internal offset array */ - for(u = 0; u < dset->shared->ndims; u++) { - /* Make sure the offset doesn't exceed the dataset's dimensions */ + for(u = 0; u < dset->shared->ndims; u++) { + /* Make sure the offset doesn't exceed the dataset's dimensions */ if(direct_offset[u] > dset->shared->curr_dims[u]) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset") + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset") /* Make sure the offset fall right on a chunk's boundary */ - if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u]) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary") + if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u]) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary") - internal_offset[u] = direct_offset[u]; - } /* end for */ + internal_offset[u] = direct_offset[u]; + } /* end for */ /* Terminate the offset with a zero */ internal_offset[dset->shared->ndims] = 0; @@ -342,7 +393,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, else { /* Normal write */ /* write raw data */ if(H5D__write(dset, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") } /* end else */ done: diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 8a4711f..b123b81 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -578,6 +578,7 @@ H5_DLL herr_t H5D__get_space_status(H5D_t *dset, H5D_space_status_t *allocation, H5_DLL herr_t H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc, hbool_t full_overwrite, hsize_t old_dim[]); H5_DLL herr_t H5D__get_storage_size(H5D_t *dset, hid_t dxpl_id, hsize_t *storage_size); +H5_DLL herr_t H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, hsize_t *storage_size); H5_DLL haddr_t H5D__get_offset(const H5D_t *dset); H5_DLL void *H5D__vlen_get_buf_size_alloc(size_t size, void *info); H5_DLL herr_t H5D__vlen_get_buf_size(void *elem, hid_t type_id, unsigned ndim, @@ -685,6 +686,8 @@ H5_DLL herr_t H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_storage_t *store); H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t 
filters, hsize_t *offset, uint32_t data_size, const void *buf); +H5_DLL herr_t H5D__chunk_direct_read(const H5D_t *dset, hid_t dxpl_id, hsize_t *offset, + uint32_t *filters, void *buf); #ifdef H5D_CHUNK_DEBUG H5_DLL herr_t H5D__chunk_stats(const H5D_t *dset, hbool_t headers); #endif /* H5D_CHUNK_DEBUG */ diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index fe408b2..baa844a 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -40,6 +40,11 @@ #define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME "direct_chunk_filters" #define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME "direct_chunk_offset" #define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME "direct_chunk_datasize" + +/* Property names for H5LTDdirect_chunk_read */ +#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME "direct_chunk_read_flag" +#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME "direct_chunk_read_offset" +#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME "direct_chunk_read_filters" /*******************/ /* Public Typedefs */ @@ -146,6 +151,7 @@ H5_DLL hid_t H5Dget_type(hid_t dset_id); H5_DLL hid_t H5Dget_create_plist(hid_t dset_id); H5_DLL hid_t H5Dget_access_plist(hid_t dset_id); H5_DLL hsize_t H5Dget_storage_size(hid_t dset_id); +H5_DLL herr_t H5Dget_chunk_storage_size(hid_t dset_id, const hsize_t *offset, hsize_t *chunk_bytes); H5_DLL haddr_t H5Dget_offset(hid_t dset_id); H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf/*out*/); diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index fdb402f..3c53c15 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -169,6 +169,13 @@ #define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF NULL #define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_SIZE sizeof(uint32_t) #define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF 0 +/* Definitions for properties of direct chunk read */ +#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_SIZE sizeof(hbool_t) +#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_DEF FALSE +#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_SIZE sizeof(uint32_t) +#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_DEF 0 +#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_SIZE sizeof(hsize_t *) +#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_DEF NULL /* Ring type - private property */ #define H5AC_XFER_RING_SIZE sizeof(unsigned) #define H5AC_XFER_RING_DEF H5AC_RING_USER @@ -293,6 +300,9 @@ static const hbool_t H5D_def_direct_chunk_flag_g = H5D_XFER_DIRECT_CHUNK_WRITE_F static const uint32_t H5D_def_direct_chunk_filters_g = H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_DEF; /* Default value for the filters of direct chunk write */ static const hsize_t *H5D_def_direct_chunk_offset_g = H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF; /* Default value for the offset of direct chunk write */ static const uint32_t H5D_def_direct_chunk_datasize_g = H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF; /* Default value for the datasize of direct chunk write */ +static const hbool_t direct_chunk_read_flag = H5D_XFER_DIRECT_CHUNK_READ_FLAG_DEF; /* Default value for the flag of direct chunk read */ +static const hsize_t *direct_chunk_read_offset = H5D_XFER_DIRECT_CHUNK_READ_OFFSET_DEF; /* Default value for the offset of direct chunk read */ +static const uint32_t direct_chunk_read_filters = H5D_XFER_DIRECT_CHUNK_READ_FILTERS_DEF; /* Default value for the filters of direct chunk read */ static const H5AC_ring_t H5D_ring_g = H5AC_XFER_RING_DEF; /* Default value for the cache entry ring type */ #ifdef H5_DEBUG_BUILD static const H5FD_dxpl_type_t H5D_dxpl_type_g = H5FD_NOIO_DXPL; /* Default value for the dxpl type */ @@ -497,6 +507,24 @@ 
H5P__dxfr_reg_prop(H5P_genclass_t *pclass) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + /* Register the property of flag for direct chunk read */ + /* (Note: this property should not have an encode/decode callback) */ + if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, H5D_XFER_DIRECT_CHUNK_READ_FLAG_SIZE, &direct_chunk_read_flag, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the property of filter for direct chunk read */ + /* (Note: this property should not have an encode/decode callback) */ + if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_SIZE, &direct_chunk_read_filters, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the property of offset for direct chunk read */ + /* (Note: this property should not have an encode/decode callback) */ + if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_SIZE, &direct_chunk_read_offset, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + /* Register the ring property (private) */ if(H5P_register_real(pclass, H5AC_RING_NAME, H5AC_XFER_RING_SIZE, &H5D_ring_g, NULL, NULL, NULL, H5AC_XFER_RING_ENC, H5AC_XFER_RING_DEC, -- cgit v0.12 From bacb45b38ae8513024baf2dfe08c8918625489e2 Mon Sep 17 00:00:00 2001 From: Vailin Choi Date: Wed, 26 Apr 2017 14:00:34 -0500 Subject: Modifications based on pull request review. Made couple changes based on the review comments. --- hl/src/H5DO.c | 4 ++-- src/H5D.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c index 22d40fc..99cf2f7 100644 --- a/hl/src/H5DO.c +++ b/hl/src/H5DO.c @@ -94,7 +94,7 @@ done: ret_value = FAIL; } - return(ret_value); + return ret_value; } /* end H5DOwrite_chunk() */ @@ -163,7 +163,7 @@ done: ret_value = FAIL; } - return(ret_value); + return ret_value; } /* end H5DOread_chunk() */ diff --git a/src/H5D.c b/src/H5D.c index a6de952..fc2024a 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -1126,4 +1126,4 @@ H5Dget_chunk_storage_size(hid_t dset_id, const hsize_t *offset, hsize_t *chunk_n done: FUNC_LEAVE_API(ret_value); -} +} /* H5Dget_chunk_storage_size() */ -- cgit v0.12
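
A minimal usage sketch for the new APIs added by these patches: H5DOread_chunk() in the high-level library and H5Dget_chunk_storage_size() in the main library. This sketch is not part of the patches; the file name "file.h5" and dataset name "dset" are illustrative assumptions, the dataset is assumed to be chunked and deflate-compressed, and decompressing the returned raw chunk (for example with zlib's uncompress(), as hl/test/test_dset_opt.c does) is left to the caller.

/* Sketch only: error handling kept minimal on purpose. */
#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"
#include "hdf5_hl.h"

int
main(void)
{
    hid_t    file_id = -1, dset_id = -1;
    hsize_t  offset[2] = {0, 0};     /* logical offset of the chunk to read */
    hsize_t  chunk_nbytes = 0;       /* size of that chunk on disk */
    uint32_t filters = 0;            /* filter mask returned by the read */
    void    *chunk_buf = NULL;       /* raw (still filtered) chunk data */

    if((file_id = H5Fopen("file.h5", H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        goto error;
    if((dset_id = H5Dopen2(file_id, "dset", H5P_DEFAULT)) < 0)
        goto error;

    /* How many bytes does the chunk occupy in the file? */
    if(H5Dget_chunk_storage_size(dset_id, offset, &chunk_nbytes) < 0)
        goto error;
    if(NULL == (chunk_buf = malloc((size_t)chunk_nbytes)))
        goto error;

    /* Read the chunk exactly as stored, bypassing the filter pipeline */
    if(H5DOread_chunk(dset_id, H5P_DEFAULT, offset, &filters, chunk_buf) < 0)
        goto error;

    printf("chunk is %llu bytes on disk, filter mask 0x%x\n",
           (unsigned long long)chunk_nbytes, (unsigned)filters);

    /* ... decompress chunk_buf here if the deflate filter was applied ... */

    free(chunk_buf);
    H5Dclose(dset_id);
    H5Fclose(file_id);
    return 0;

error:
    free(chunk_buf);
    if(dset_id >= 0) H5Dclose(dset_id);
    if(file_id >= 0) H5Fclose(file_id);
    return 1;
}

As with H5DOwrite_chunk(), the offset is given in dataset element coordinates and must fall on a chunk boundary; the filter mask returned by H5DOread_chunk() corresponds to the mask recorded when the chunk was written.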