author    | Quincey Koziol <koziol@koziol.gov> | 2019-08-21 01:38:10 (GMT)
committer | Quincey Koziol <koziol@koziol.gov> | 2019-08-21 01:38:10 (GMT)
commit    | 2ef7eb51b99a3e015efdecedd64c0be1ad77615c (patch)
tree      | cc87cf7b023218e646e5b3097358eb9f652cee67
parent    | 67d7d28c038fb027feed570d6e6b25fdab4865dc (diff)
Progress toward moving the dataset routines to using the 'shared' file pointer
instead of the 'top' file pointer.
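
The change is mechanical at each call site: raw-data I/O that went through the dataset's 'top' H5F_t pointer (dset->oloc.file) now goes through the shared file object obtained with H5F_SHARED(), using the H5F_shared_block_read / H5F_shared_block_write variants, and routines that already carry an H5D_io_info_t take the shared pointer from its f_sh field. A minimal before/after sketch of the pattern (illustrative only, not code from this commit; chunk_addr, chunk_size, and buf are hypothetical locals standing in for whatever each call site already has):

    /* Before: block I/O through the 'top' file pointer (H5F_t *) */
    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_size, buf) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")

    /* After: the same write through the shared file object */
    if(H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, chunk_addr, chunk_size, buf) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")

    /* After, when an H5D_io_info_t is at hand: cache and reuse its f_sh field */
    io_info.f_sh = H5F_SHARED(dset->oloc.file);
    if(H5F_shared_block_read(io_info.f_sh, H5FD_MEM_DRAW, chunk_addr, chunk_size, buf) < 0)
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
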
-rw-r--r-- | src/H5Dchunk.c | 25
-rw-r--r-- | src/H5Dint.c   |  2
-rw-r--r-- | src/H5Dmpio.c  |  6
3 files changed, 18 insertions, 15 deletions
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index ec8ea4d..082aac3 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -393,7 +393,6 @@ H5D__chunk_direct_write(const H5D_t *dset, uint32_t filters, hsize_t *offset,
     H5D_chk_idx_info_t idx_info;        /* Chunked index info */
     hsize_t scaled[H5S_MAX_RANK];       /* Scaled coordinates for this chunk */
     hbool_t need_insert = FALSE;        /* Whether the chunk needs to be inserted into the index */
-    H5D_io_info_t io_info;              /* to hold the dset info */
     herr_t ret_value = SUCCEED;         /* Return value */
 
     FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
@@ -401,13 +400,17 @@ H5D__chunk_direct_write(const H5D_t *dset, uint32_t filters, hsize_t *offset,
     /* Sanity checks */
     HDassert(layout->type == H5D_CHUNKED);
 
-    io_info.dset = dset;
-
     /* Allocate dataspace and initialize it if it hasn't been. */
-    if(!H5D__chunk_is_space_alloc(&layout->storage))
+    if(!H5D__chunk_is_space_alloc(&layout->storage)) {
+        H5D_io_info_t io_info;          /* to hold the dset info */
+
+        io_info.dset = dset;
+        io_info.f_sh = H5F_SHARED(dset->oloc.file);
+
         /* Allocate storage */
         if(H5D__alloc_storage(&io_info, H5D_ALLOC_WRITE, FALSE, NULL) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage")
+    } /* end if */
 
     /* Calculate the index of this chunk */
     H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
@@ -467,7 +470,7 @@ H5D__chunk_direct_write(const H5D_t *dset, uint32_t filters, hsize_t *offset,
     } /* end if */
 
     /* Write the data to the file */
-    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, data_size, buf) < 0)
+    if(H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset, data_size, buf) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 
     /* Insert the chunk record into the index */
@@ -570,9 +573,9 @@ H5D__chunk_direct_read(const H5D_t *dset, hsize_t *offset, uint32_t* filters,
         HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
 
     /* Read the chunk data into the supplied buffer */
-    if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, udata.chunk_block.length, buf) < 0)
+    if(H5F_shared_block_read(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset, udata.chunk_block.length, buf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
-
+
     /* Return the filter mask */
     *filters = udata.filter_mask;
 
@@ -3332,7 +3335,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset)
         /* Write the data to the file */
         HDassert(H5F_addr_defined(udata.chunk_block.offset));
         H5_CHECK_OVERFLOW(udata.chunk_block.length, hsize_t, size_t);
-        if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, (size_t)udata.chunk_block.length, buf) < 0)
+        if(H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset, (size_t)udata.chunk_block.length, buf) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 
     /* Insert the chunk record into the index */
@@ -3803,7 +3806,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
                  * size in memory, so allocate memory big enough. */
                 if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, (udata->new_unfilt_chunk ? old_pline : pline))))
                     HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
-                if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, chunk) < 0)
+                if(H5F_shared_block_read(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, chunk) < 0)
                     HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
 
                 if(old_pline && old_pline->nused) {
@@ -4533,7 +4536,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_
                 } /* end if */
                 else {
 #endif /* H5_HAVE_PARALLEL */
-                    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, *fill_buf) < 0)
+                    if(H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, *fill_buf) < 0)
                         HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 #ifdef H5_HAVE_PARALLEL
                 } /* end else */
@@ -4918,7 +4921,7 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
         HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set transfer mode")
 
     /* Low-level write (collective) */
-    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, (haddr_t)0, (blocks) ? (size_t)1 : (size_t)0, fill_buf) < 0)
+    if(H5F_shared_block_write(H5F_SHARED(dset->oloc.file), H5FD_MEM_DRAW, (haddr_t)0, (blocks) ? (size_t)1 : (size_t)0, fill_buf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 
     /* Barrier so processes don't race ahead */
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 1e9c6ff..ada542e 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -2932,7 +2932,7 @@ H5D__flush_sieve_buf(H5D_t *dataset)
     HDassert(dataset->shared->layout.type != H5D_COMPACT);   /* We should never have a sieve buffer for compact storage */
 
     /* Write dirty data sieve buffer to file */
-    if(H5F_block_write(dataset->oloc.file, H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc,
+    if(H5F_shared_block_write(H5F_SHARED(dataset->oloc.file), H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc,
             dataset->shared->cache.contig.sieve_size, dataset->shared->cache.contig.sieve_buf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed")
 
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 467b1b7..0dbbe9f 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -457,7 +457,7 @@ H5D__mpio_select_read(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATT
     FUNC_ENTER_PACKAGE
 
     H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
-    if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.rbuf) < 0)
+    if(H5F_shared_block_read(io_info->f_sh, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.rbuf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read")
 
 done:
@@ -487,7 +487,7 @@ H5D__mpio_select_write(const H5D_io_info_t *io_info, const H5D_type_info_t H5_AT
     /*OKAY: CAST DISCARDS CONST QUALIFIER*/
     H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
-    if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.wbuf) < 0)
+    if(H5F_shared_block_write(io_info->f_sh, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.wbuf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't finish collective parallel write")
 
 done:
@@ -3153,7 +3153,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
             if(H5CX_set_io_xfer_mode(H5FD_MPIO_INDEPENDENT) < 0)
                 HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode")
 
-            if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, chunk_entry->chunk_states.chunk_current.offset,
+            if(H5F_shared_block_read(io_info->f_sh, H5FD_MEM_DRAW, chunk_entry->chunk_states.chunk_current.offset,
                     chunk_entry->chunk_states.new_chunk.length, chunk_entry->buf) < 0)
                 HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data chunk")