author    | Quincey Koziol <koziol@hdfgroup.org> | 2004-09-30 03:46:58 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2004-09-30 03:46:58 (GMT)
commit    | 7d457ba693ea827a55277563525a4fbdb04ce2fd
tree      | 7f1bddb78fa55c5c2edac730545ac259591500a1 /src
parent    | e0c4a752e6522a2d6bf50cef42bccf165264a89c
[svn-r9342] Purpose:
Bug fix/code cleanup
Description:
Clean up the raw data I/O code by bundling the I/O parameters (dataset, DXPL ID,
etc.) into a single struct that is passed through the dataset I/O routines,
since these parameters always travel together until very near the bottom of the
I/O stack.
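
For orientation, below is a minimal sketch of the bundled parameter struct and a plausible expansion of the H5D_BUILD_IO_INFO() helper used throughout the patch. The field names follow the usage visible in the diff (io_info->dset, io_info->dxpl_cache, io_info->dxpl_id, io_info->store); the exact types and macro body live in src/H5Dpkg.h and may differ.

/* Sketch only -- not the literal H5Dpkg.h definitions. */
typedef struct H5D_io_info_t {
    H5D_t                  *dset;       /* Dataset being accessed           */
    const H5D_dxpl_cache_t *dxpl_cache; /* Cached data transfer properties  */
    hid_t                   dxpl_id;    /* Data transfer property list ID   */
    const H5D_storage_t    *store;      /* Layout-specific storage info     */
} H5D_io_info_t;

/* Hypothetical expansion of the convenience macro that fills the struct
 * in one step, matching calls such as
 *     H5D_BUILD_IO_INFO(&io_info, dset, dxpl_cache, dxpl_id, &store);
 * seen in the diff below. */
#define H5D_BUILD_IO_INFO(io_info, ds, cache, id, str) \
    do {                                               \
        (io_info)->dset       = (ds);                  \
        (io_info)->dxpl_cache = (cache);               \
        (io_info)->dxpl_id    = (id);                  \
        (io_info)->store      = (str);                 \
    } while (0)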
Platforms tested:
FreeBSD 4.10 (sleipnir) w/parallel
Solaris 2.7 (arabica)
IRIX64 6.5 (modi4)
h5committest
Diffstat (limited to 'src')
-rw-r--r-- | src/H5Bprivate.h | 2
-rw-r--r-- | src/H5D.c        | 74
-rw-r--r-- | src/H5Dcompact.c | 14
-rw-r--r-- | src/H5Dcontig.c  | 271
-rw-r--r-- | src/H5Dio.c      | 226
-rw-r--r-- | src/H5Distore.c  | 384
-rw-r--r-- | src/H5Dmpio.c    | 53
-rw-r--r-- | src/H5Dpkg.h     | 36
-rw-r--r-- | src/H5Dprivate.h | 63
-rw-r--r-- | src/H5Dseq.c     | 268
-rw-r--r-- | src/H5Gnode.c    | 4
-rw-r--r-- | src/H5Oefl.c     | 23
-rw-r--r-- | src/H5Oprivate.h | 18
-rw-r--r-- | src/H5Smpio.c    | 303
-rw-r--r-- | src/H5Sprivate.h | 24
-rw-r--r-- | src/H5Sselect.c  | 257
-rw-r--r-- | src/H5T.c        | 4
-rw-r--r-- | src/Makefile.in  | 2
18 files changed, 661 insertions(+), 1365 deletions(-)
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index 961c7d9..87e57f3 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -97,7 +97,7 @@ typedef struct H5B_shared_t { typedef struct H5B_class_t { H5B_subid_t id; /*id as found in file*/ size_t sizeof_nkey; /*size of native (memory) key*/ - size_t (*get_sizeof_rkey)(H5F_t*, const void*); /*raw key size */ + size_t (*get_sizeof_rkey)(const H5F_t*, const void*); /*raw key size */ H5RC_t * (*get_shared)(H5F_t*, const void*); /*shared info for node */ herr_t (*new_node)(H5F_t*, hid_t, H5B_ins_t, void*, void*, void*, haddr_t*); int (*cmp2)(H5F_t*, hid_t, void*, void*, void*); /*compare 2 keys */ @@ -2271,9 +2271,19 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space } else if (max_points * H5T_get_size (type) > max_storage) { HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size") } - } else if (ndims>0 && max_dim[0]>dim[0]) { - HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset") - } + + /* Set the I/O functions for this layout type */ + new_dset->shared->layout.readvv=H5O_efl_readvv; + new_dset->shared->layout.writevv=H5O_efl_writevv; + } /* end if */ + else { + if (ndims>0 && max_dim[0]>dim[0]) + HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset") + + /* Set the I/O functions for this layout type */ + new_dset->shared->layout.readvv=H5D_contig_readvv; + new_dset->shared->layout.writevv=H5D_contig_writevv; + } /* end else */ /* Compute the total size of a chunk */ tmp_size = H5S_GET_EXTENT_NPOINTS(new_dset->shared->space) * @@ -2321,6 +2331,10 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space for (u=1, new_dset->shared->layout.u.chunk.size=new_dset->shared->layout.u.chunk.dim[0]; u<new_dset->shared->layout.u.chunk.ndims; u++) new_dset->shared->layout.u.chunk.size *= new_dset->shared->layout.u.chunk.dim[u]; + /* Set the I/O functions for this layout type */ + new_dset->shared->layout.readvv=H5D_istore_readvv; + new_dset->shared->layout.writevv=H5D_istore_writevv; + /* Initialize the chunk cache for the dataset */ if(H5D_istore_init(file,new_dset)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize chunk cache") @@ -2349,6 +2363,10 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space comp_data_size=H5O_MAX_SIZE-H5O_layout_meta_size(file, &(new_dset->shared->layout)); if(new_dset->shared->layout.u.compact.size > comp_data_size) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size") + + /* Set the I/O functions for this layout type */ + new_dset->shared->layout.readvv=H5D_compact_readvv; + new_dset->shared->layout.writevv=H5D_compact_writevv; } /* end case */ break; @@ -2385,7 +2403,7 @@ done: if (!ret_value && new_dset && new_dset->shared) { if( new_dset->shared) { if(new_dset->shared->layout.type==H5D_CHUNKED && chunk_init) { - if(H5D_istore_dest(new_dset->ent.file,H5AC_dxpl_id,new_dset)<0) + if(H5D_istore_dest(new_dset,H5AC_dxpl_id)<0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "unable to destroy chunk cache") } /* end if */ if (new_dset->shared->space) { @@ -2651,6 +2669,10 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id) H5T_get_size(dataset->shared->type); H5_ASSIGN_OVERFLOW(dataset->shared->layout.u.contig.size,tmp_size,hssize_t,hsize_t); } /* end if */ + + /* Set the I/O functions for this layout type */ + 
dataset->shared->layout.readvv=H5D_contig_readvv; + dataset->shared->layout.writevv=H5D_contig_writevv; break; case H5D_CHUNKED: @@ -2673,9 +2695,16 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id) if(H5D_istore_init(dataset->ent.file,dataset)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize chunk cache") } + + /* Set the I/O functions for this layout type */ + dataset->shared->layout.readvv=H5D_istore_readvv; + dataset->shared->layout.writevv=H5D_istore_writevv; break; case H5D_COMPACT: + /* Set the I/O functions for this layout type */ + dataset->shared->layout.readvv=H5D_compact_readvv; + dataset->shared->layout.writevv=H5D_compact_writevv; break; default: @@ -2748,10 +2777,15 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id) if((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) { HDmemset(&dataset->shared->efl,0,sizeof(H5O_efl_t)); - if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->shared->efl, dxpl_id)) + if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->shared->efl, dxpl_id)) { if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->shared->efl) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set external file list") - } + + /* Override the I/O functions for this layout type */ + dataset->shared->layout.readvv=H5O_efl_readvv; + dataset->shared->layout.writevv=H5O_efl_writevv; + } /* end if */ + } /* end if */ /* * Make sure all storage is properly initialized. @@ -2854,7 +2888,7 @@ H5D_close(H5D_t *dataset) case H5D_CHUNKED: /* Flush and destroy chunks in the cache */ - if(H5D_istore_dest(dataset->ent.file,H5AC_dxpl_id,dataset)<0) + if(H5D_istore_dest(dataset,H5AC_dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache") break; @@ -3000,7 +3034,7 @@ H5D_extend (H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) /* Update the index values for the cached chunks for this dataset */ if(H5D_CHUNKED == dataset->shared->layout.type) - if(H5D_istore_update_cache(dataset->ent.file, dxpl_id, dataset) < 0) + if(H5D_istore_update_cache(dataset, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") /* Allocate space for the new parts of the dataset, if appropriate */ @@ -3144,7 +3178,7 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo case H5D_CONTIGUOUS: if(layout->u.contig.addr==HADDR_UNDEF) { /* Reserve space in the file for the entire array */ - if (H5D_contig_create (f, dxpl_id, dset/*out*/)<0) + if (H5D_contig_create (f, dxpl_id, layout/*out*/)<0) HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage") /* Indicate that we set the storage addr */ @@ -3305,7 +3339,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) /* Don't write default fill values to external files */ /* If we will be immediately overwriting the values, don't bother to clear them */ if((dset->shared->efl.nused==0 || dset->shared->fill.buf) && !full_overwrite) { - if (H5D_contig_fill(dset->ent.file, dxpl_id, dset)<0) + if (H5D_contig_fill(dset, dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset") } /* end if */ break; @@ -3315,7 +3349,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) * Allocate file space * for all chunks now and initialize each chunk with the fill 
value. */ - if (H5D_istore_allocate(dset->ent.file, dxpl_id, dset, full_overwrite)<0) + if (H5D_istore_allocate(dset, dxpl_id, full_overwrite)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset") break; @@ -3401,7 +3435,7 @@ H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id) if(dset->shared->layout.u.chunk.addr == HADDR_UNDEF) ret_value=0; else - ret_value = H5D_istore_allocated(dset->ent.file, dxpl_id, dset); + ret_value = H5D_istore_allocated(dset, dxpl_id); break; case H5D_CONTIGUOUS: @@ -3917,7 +3951,7 @@ done: * Function: H5D_set_extent * * Purpose: Based in H5D_extend, allows change to a lower dimension, - * calls H5S_set_extent and H5F_istore_prune_by_extent instead + * calls H5S_set_extent and H5D_istore_prune_by_extent instead * * Return: Success: SUCCEED, Failure: FAIL * @@ -3989,7 +4023,7 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) /* Update the index values for the cached chunks for this dataset */ if(H5D_CHUNKED == dset->shared->layout.type) - if(H5D_istore_update_cache(dset->ent.file, dxpl_id, dset) < 0) + if(H5D_istore_update_cache(dset, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") /* Allocate space for the new parts of the dataset, if appropriate */ @@ -4004,6 +4038,7 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) *------------------------------------------------------------------------- */ if(shrink && H5D_CHUNKED == dset->shared->layout.type) { + H5D_io_info_t io_info; /* Dataset I/O info */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ @@ -4011,12 +4046,15 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL); + /* Remove excess chunks */ - if(H5D_istore_prune_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0) + if(H5D_istore_prune_by_extent(&io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks ") /* Reset the elements outsize the new dimensions, but in existing chunks */ - if(H5D_istore_initialize_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0) + if(H5D_istore_initialize_by_extent(&io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to initialize chunks ") } /* end if */ } /* end if */ @@ -4076,7 +4114,7 @@ H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags) /* Flush the raw data buffer, if we have a dirty one */ if (dataset->shared->cache.contig.sieve_buf && dataset->shared->cache.contig.sieve_dirty) { - assert(dataset->shared->layout.type!=H5D_COMPACT); + assert(dataset->shared->layout.type!=H5D_COMPACT); /* We should never have a sieve buffer for compact storage */ /* Write dirty data sieve buffer to file */ if (H5F_block_write(f, H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc, @@ -4094,7 +4132,7 @@ H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags) case H5D_CHUNKED: /* Flush the raw data cache */ - if (H5D_istore_flush(f, dxpl_id, dataset, flags & (H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLEAR_ONLY)) < 0) + if (H5D_istore_flush(dataset, dxpl_id, flags & (H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLEAR_ONLY)) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush raw data cache") break; diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 
318af13..658aae0 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -57,7 +57,7 @@ *------------------------------------------------------------------------- */ ssize_t -H5D_compact_readvv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const H5D_t *dset, +H5D_compact_readvv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[], void *buf) @@ -66,10 +66,10 @@ H5D_compact_readvv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const H5D_t *dset, FUNC_ENTER_NOAPI(H5D_compact_readvv, FAIL); - assert(dset); + assert(io_info->dset); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) + if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,io_info->dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); done: @@ -101,7 +101,7 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5D_compact_writevv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5D_t *dset, +H5D_compact_writevv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[], const void *buf) @@ -110,13 +110,13 @@ H5D_compact_writevv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5D_t *dset, FUNC_ENTER_NOAPI(H5D_compact_writevv, FAIL); - assert(dset); + assert(io_info->dset); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) + if((ret_value=H5V_memcpyvv(io_info->dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); - dset->shared->layout.u.compact.dirty = TRUE; + io_info->dset->shared->layout.u.compact.dirty = TRUE; done: FUNC_LEAVE_NOAPI(ret_value); diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index a69ab57..6cb3b39 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -41,8 +41,8 @@ #include "H5Vprivate.h" /* Vector and array functions */ /* Private prototypes */ -static herr_t H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset, - hsize_t offset, size_t size, const void *buf); +static herr_t H5D_contig_write(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, + hid_t dxpl_id, const H5D_storage_t *store, hsize_t offset, size_t size, const void *buf); /* Declare a PQ free list to manage the sieve buffer information */ H5FL_BLK_DEFINE(sieve_buf); @@ -69,7 +69,7 @@ H5FL_BLK_DEFINE_STATIC(zero_fill); *------------------------------------------------------------------------- */ herr_t -H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset) +H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) { herr_t ret_value=SUCCEED; /* Return value */ @@ -77,10 +77,10 @@ H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset) /* check args */ assert(f); - assert(dset); + assert(layout); /* Allocate space for the 
contiguous data */ - if (HADDR_UNDEF==(dset->shared->layout.u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, dset->shared->layout.u.contig.size))) + if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size))) HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space"); done: @@ -106,8 +106,11 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset) +H5D_contig_fill(H5D_t *dset, hid_t dxpl_id) { + H5D_storage_t store; /* Union of storage info for dataset */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ hssize_t snpoints; /* Number of points in space (for error checking) */ size_t npoints; /* Number of points in space */ size_t ptsperbuf; /* Maximum # of points which fit in the buffer */ @@ -129,7 +132,6 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset) FUNC_ENTER_NOAPI(H5D_contig_fill, FAIL); /* Check args */ - assert(f); assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(dset && H5D_CONTIGUOUS==dset->shared->layout.type); assert(H5F_addr_defined(dset->shared->layout.u.contig.addr)); @@ -138,19 +140,34 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset) #ifdef H5_HAVE_PARALLEL /* Retrieve MPI parameters */ - if(IS_H5FD_MPI(f)) { + if(IS_H5FD_MPI(dset->ent.file)) { /* Get the MPI communicator */ - if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(f))) + if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(dset->ent.file))) HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator"); /* Get the MPI rank */ - if ((mpi_rank=H5F_mpi_get_rank(f))<0) + if ((mpi_rank=H5F_mpi_get_rank(dset->ent.file))<0) HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank"); /* Set the MPI-capable file driver flag */ using_mpi=1; + + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(H5AC_ind_dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") } /* end if */ + else { #endif /* H5_HAVE_PARALLEL */ + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") +#ifdef H5_HAVE_PARALLEL + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + + /* Initialize storage info for this dataset */ + store.contig.dset_addr=dset->shared->layout.u.contig.addr; + store.contig.dset_size=dset->shared->layout.u.contig.size; /* Get size of elements */ elmt_size=H5T_get_size(dset->shared->type); @@ -216,7 +233,7 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset) /* Write the chunks out from only one process */ /* !! Use the internal "independent" DXPL!! 
-QAK */ if(H5_PAR_META_WRITE==mpi_rank) { - if (H5D_contig_write(f, H5AC_ind_dxpl_id, dset, offset, size, buf)<0) + if (H5D_contig_write(dset, dxpl_cache, H5AC_ind_dxpl_id, &store, offset, size, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset"); } /* end if */ @@ -226,7 +243,7 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset) else { #endif /* H5_HAVE_PARALLEL */ H5_CHECK_OVERFLOW(size,size_t,hsize_t); - if (H5D_contig_write(f, dxpl_id, dset, offset, size, buf)<0) + if (H5D_contig_write(dset, dxpl_cache, dxpl_id, &store, offset, size, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset"); #ifdef H5_HAVE_PARALLEL } /* end else */ @@ -342,9 +359,11 @@ H5D_contig_get_addr(const H5D_t *dset) *------------------------------------------------------------------------- */ static herr_t -H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset, +H5D_contig_write(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, + hid_t dxpl_id, const H5D_storage_t *store, hsize_t offset, size_t size, const void *buf) { + H5D_io_info_t io_info; /* Dataset I/O info */ hsize_t dset_off=offset; /* Offset in dataset */ size_t dset_len=size; /* Length in dataset */ size_t dset_curr_seq=0; /* "Current sequence" in dataset */ @@ -355,11 +374,13 @@ H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset, FUNC_ENTER_NOAPI(H5D_contig_write, FAIL); - assert (f); assert (dset); + assert (dxpl_cache); + assert (store); assert (buf); - if (H5D_contig_writevv(f, dxpl_id, dset, dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size, + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,store); + if (H5D_contig_writevv(&io_info, 1, &dset_curr_seq, &dset_len, &dset_off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vector write failed"); @@ -389,13 +410,15 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, - haddr_t dset_addr, hsize_t dset_size, +H5D_contig_readvv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *_buf) { - unsigned char *buf=(unsigned char *)_buf; /* Pointer to buffer to fill */ + H5F_t *file=io_info->dset->ent.file; /* File for dataset */ + H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */ + const H5D_contig_storage_t *store_contig=&(io_info->store->contig); /* Contiguous storage info for this I/O operation */ + unsigned char *buf=(unsigned char *)_buf; /* Pointer to buffer to fill */ haddr_t addr; /* Actual address to read */ size_t size; /* Size of sequence in bytes */ size_t u; /* Counting variable */ @@ -405,15 +428,16 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, FUNC_ENTER_NOAPI(H5D_contig_readvv, FAIL); /* Check args */ - assert(f); - assert(dset); + assert(io_info); + assert(io_info->dset); + assert(io_info->store); assert(buf); /* Check if data sieving is enabled */ - if(H5F_HAS_FEATURE(f,H5FD_FEAT_DATA_SIEVE)) { - haddr_t sieve_start, sieve_end; /* Start & end locations of sieve buffer */ + if(H5F_HAS_FEATURE(file,H5FD_FEAT_DATA_SIEVE)) { + haddr_t sieve_start=HADDR_UNDEF, sieve_end=HADDR_UNDEF; /* Start & end locations of sieve buffer */ haddr_t contig_end; /* End locations of block to write */ - size_t sieve_size; /* size of 
sieve buffer */ + size_t sieve_size=(size_t)-1; /* size of sieve buffer */ haddr_t abs_eoa; /* Absolute end of file address */ haddr_t rel_eoa; /* Relative end of file address */ hsize_t max_data; /* Actual maximum size of data to cache */ @@ -423,9 +447,9 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, v=*mem_curr_seq; /* Stash local copies of these value */ - if(dset->shared->cache.contig.sieve_buf!=NULL) { - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + if(dset_contig->sieve_buf!=NULL) { + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; } /* end if */ @@ -438,52 +462,52 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, size=dset_len_arr[u]; /* Compute offset on disk */ - addr=dset_addr+dset_offset_arr[u]; + addr=store_contig->dset_addr+dset_offset_arr[u]; /* Compute offset in memory */ buf = (unsigned char *)_buf + mem_offset_arr[v]; /* Check if the sieve buffer is allocated yet */ - if(dset->shared->cache.contig.sieve_buf==NULL) { + if(dset_contig->sieve_buf==NULL) { /* Check if we can actually hold the I/O request in the sieve buffer */ - if(size>dset->shared->cache.contig.sieve_buf_size) { - if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if(size>dset_contig->sieve_buf_size) { + if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); } /* end if */ else { /* Allocate room for the data sieve buffer */ - if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size))) + if (NULL==(dset_contig->sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset_contig->sieve_buf_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); /* Determine the new sieve buffer size & location */ - dset->shared->cache.contig.sieve_loc=addr; + dset_contig->sieve_loc=addr; /* Make certain we don't read off the end of the file */ - if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f))) + if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size"); /* Adjust absolute EOA address to relative EOA address */ - rel_eoa=abs_eoa-H5F_get_base_addr(f); + rel_eoa=abs_eoa-H5F_get_base_addr(file); /* Set up the buffer parameters */ - max_data=dset_size-dset_offset_arr[u]; + max_data=store_contig->dset_size-dset_offset_arr[u]; /* Compute the size of the sieve buffer */ - H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t); + H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t); /* Read the new sieve buffer */ - if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); /* Grab the data out of the buffer (must be first piece of data in buffer ) */ - HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size); + HDmemcpy(buf,dset_contig->sieve_buf,size); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; /* Stash local 
copies of these value */ - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; } /* end else */ } /* end if */ @@ -493,7 +517,7 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, /* If entire read is within the sieve buffer, read it from the buffer */ if(addr>=sieve_start && contig_end<sieve_end) { - unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start); + unsigned char *base_sieve_buf=dset_contig->sieve_buf+(addr-sieve_start); /* Grab the data out of the buffer */ HDmemcpy(buf,base_sieve_buf,size); @@ -501,68 +525,68 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, /* Entire request is not within this data sieve buffer */ else { /* Check if we can actually hold the I/O request in the sieve buffer */ - if(size>dset->shared->cache.contig.sieve_buf_size) { + if(size>dset_contig->sieve_buf_size) { /* Check for any overlap with the current sieve buffer */ if((sieve_start>=addr && sieve_start<(contig_end+1)) || ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) { /* Flush the sieve buffer, if it's dirty */ - if(dset->shared->cache.contig.sieve_dirty) { + if(dset_contig->sieve_dirty) { /* Write to file */ - if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; } /* end if */ } /* end if */ /* Read directly into the user's buffer */ - if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); } /* end if */ /* Element size fits within the buffer size */ else { /* Flush the sieve buffer if it's dirty */ - if(dset->shared->cache.contig.sieve_dirty) { + if(dset_contig->sieve_dirty) { /* Write to file */ - if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; } /* end if */ /* Determine the new sieve buffer size & location */ - dset->shared->cache.contig.sieve_loc=addr; + dset_contig->sieve_loc=addr; /* Make certain we don't read off the end of the file */ - if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f))) + if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size"); /* Adjust absolute EOA address to relative EOA address */ - rel_eoa=abs_eoa-H5F_get_base_addr(f); + rel_eoa=abs_eoa-H5F_get_base_addr(file); /* Only need this when resizing sieve buffer */ - max_data=dset_size-dset_offset_arr[u]; + max_data=store_contig->dset_size-dset_offset_arr[u]; /* Compute the size of the sieve buffer */ /* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */ - 
H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t); + H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t); /* Update local copies of sieve information */ - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; /* Read the new sieve buffer */ - if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); /* Grab the data out of the buffer (must be first piece of data in buffer ) */ - HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size); + HDmemcpy(buf,dset_contig->sieve_buf,size); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; } /* end else */ } /* end else */ } /* end else */ @@ -593,13 +617,13 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, size=dset_len_arr[u]; /* Compute offset on disk */ - addr=dset_addr+dset_offset_arr[u]; + addr=store_contig->dset_addr+dset_offset_arr[u]; /* Compute offset in memory */ buf = (unsigned char *)_buf + mem_offset_arr[v]; /* Write data */ - if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Update memory information */ @@ -649,12 +673,14 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, - haddr_t dset_addr, hsize_t dset_size, +H5D_contig_writevv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *_buf) { + H5F_t *file=io_info->dset->ent.file; /* File for dataset */ + H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */ + const H5D_contig_storage_t *store_contig=&(io_info->store->contig); /* Contiguous storage info for this I/O operation */ const unsigned char *buf=_buf; /* Pointer to buffer to fill */ haddr_t addr; /* Actual address to read */ size_t size; /* Size of sequence in bytes */ @@ -665,15 +691,16 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, FUNC_ENTER_NOAPI(H5D_contig_writevv, FAIL); /* Check args */ - assert(f); - assert(dset); + assert(io_info); + assert(io_info->dset); + assert(io_info->store); assert(buf); /* Check if data sieving is enabled */ - if(H5F_HAS_FEATURE(f,H5FD_FEAT_DATA_SIEVE)) { - haddr_t sieve_start, sieve_end; /* Start & end locations of sieve buffer */ + if(H5F_HAS_FEATURE(file,H5FD_FEAT_DATA_SIEVE)) { + haddr_t sieve_start=HADDR_UNDEF, sieve_end=HADDR_UNDEF; /* Start & end locations of sieve buffer */ haddr_t contig_end; /* End locations of block to write */ - size_t sieve_size; /* size of sieve buffer */ + size_t sieve_size=(size_t)-1; /* size of sieve buffer */ haddr_t abs_eoa; /* Absolute end 
of file address */ haddr_t rel_eoa; /* Relative end of file address */ hsize_t max_data; /* Actual maximum size of data to cache */ @@ -683,9 +710,9 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, v=*mem_curr_seq; /* Stash local copies of these values */ - if(dset->shared->cache.contig.sieve_buf!=NULL) { - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + if(dset_contig->sieve_buf!=NULL) { + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; } /* end if */ @@ -698,55 +725,55 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, size=dset_len_arr[u]; /* Compute offset on disk */ - addr=dset_addr+dset_offset_arr[u]; + addr=store_contig->dset_addr+dset_offset_arr[u]; /* Compute offset in memory */ buf = (const unsigned char *)_buf + mem_offset_arr[v]; /* No data sieve buffer yet, go allocate one */ - if(dset->shared->cache.contig.sieve_buf==NULL) { + if(dset_contig->sieve_buf==NULL) { /* Check if we can actually hold the I/O request in the sieve buffer */ - if(size>dset->shared->cache.contig.sieve_buf_size) { - if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if(size>dset_contig->sieve_buf_size) { + if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); } /* end if */ else { /* Allocate room for the data sieve buffer */ - if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size))) + if (NULL==(dset_contig->sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset_contig->sieve_buf_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed"); /* Determine the new sieve buffer size & location */ - dset->shared->cache.contig.sieve_loc=addr; + dset_contig->sieve_loc=addr; /* Make certain we don't read off the end of the file */ - if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f))) + if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size"); /* Adjust absolute EOA address to relative EOA address */ - rel_eoa=abs_eoa-H5F_get_base_addr(f); + rel_eoa=abs_eoa-H5F_get_base_addr(file); /* Set up the buffer parameters */ - max_data=dset_size-dset_offset_arr[u]; + max_data=store_contig->dset_size-dset_offset_arr[u]; /* Compute the size of the sieve buffer */ - H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t); + H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t); /* Check if there is any point in reading the data from the file */ - if(dset->shared->cache.contig.sieve_size>size) { + if(dset_contig->sieve_size>size) { /* Read the new sieve buffer */ - if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); } /* end if */ /* Grab the data out of the buffer (must be first piece of data in buffer ) */ - HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size); + HDmemcpy(dset_contig->sieve_buf,buf,size); /* Set sieve buffer dirty flag */ - 
dset->shared->cache.contig.sieve_dirty=1; + dset_contig->sieve_dirty=1; /* Stash local copies of these values */ - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; } /* end else */ } /* end if */ @@ -756,120 +783,120 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, /* If entire write is within the sieve buffer, write it to the buffer */ if(addr>=sieve_start && contig_end<sieve_end) { - unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start); + unsigned char *base_sieve_buf=dset_contig->sieve_buf+(addr-sieve_start); /* Put the data into the sieve buffer */ HDmemcpy(base_sieve_buf,buf,size); /* Set sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=1; + dset_contig->sieve_dirty=1; } /* end if */ /* Entire request is not within this data sieve buffer */ else { /* Check if we can actually hold the I/O request in the sieve buffer */ - if(size>dset->shared->cache.contig.sieve_buf_size) { + if(size>dset_contig->sieve_buf_size) { /* Check for any overlap with the current sieve buffer */ if((sieve_start>=addr && sieve_start<(contig_end+1)) || ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) { /* Flush the sieve buffer, if it's dirty */ - if(dset->shared->cache.contig.sieve_dirty) { + if(dset_contig->sieve_dirty) { /* Write to file */ - if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; } /* end if */ /* Force the sieve buffer to be re-read the next time */ - dset->shared->cache.contig.sieve_loc=HADDR_UNDEF; - dset->shared->cache.contig.sieve_size=0; + dset_contig->sieve_loc=HADDR_UNDEF; + dset_contig->sieve_size=0; } /* end if */ /* Write directly from the user's buffer */ - if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); } /* end if */ /* Element size fits within the buffer size */ else { /* Check if it is possible to (exactly) prepend or append to existing (dirty) sieve buffer */ if(((addr+size)==sieve_start || addr==sieve_end) && - (size+sieve_size)<=dset->shared->cache.contig.sieve_buf_size && - dset->shared->cache.contig.sieve_dirty) { + (size+sieve_size)<=dset_contig->sieve_buf_size && + dset_contig->sieve_dirty) { /* Prepend to existing sieve buffer */ if((addr+size)==sieve_start) { /* Move existing sieve information to correct location */ - HDmemmove(dset->shared->cache.contig.sieve_buf+size,dset->shared->cache.contig.sieve_buf,sieve_size); + HDmemmove(dset_contig->sieve_buf+size,dset_contig->sieve_buf,sieve_size); /* Copy in new information (must be first in sieve buffer) */ - HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size); + HDmemcpy(dset_contig->sieve_buf,buf,size); /* Adjust sieve location */ - dset->shared->cache.contig.sieve_loc=addr; + dset_contig->sieve_loc=addr; } /* end if */ /* Append to existing sieve buffer */ else { /* Copy in new information */ - HDmemcpy(dset->shared->cache.contig.sieve_buf+sieve_size,buf,size); + 
HDmemcpy(dset_contig->sieve_buf+sieve_size,buf,size); } /* end else */ /* Adjust sieve size */ - dset->shared->cache.contig.sieve_size += size; + dset_contig->sieve_size += size; /* Update local copies of sieve information */ - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; } /* end if */ /* Can't add the new data onto the existing sieve buffer */ else { /* Flush the sieve buffer if it's dirty */ - if(dset->shared->cache.contig.sieve_dirty) { + if(dset_contig->sieve_dirty) { /* Write to file */ - if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Reset sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=0; + dset_contig->sieve_dirty=0; } /* end if */ /* Determine the new sieve buffer size & location */ - dset->shared->cache.contig.sieve_loc=addr; + dset_contig->sieve_loc=addr; /* Make certain we don't read off the end of the file */ - if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f))) + if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size"); /* Adjust absolute EOA address to relative EOA address */ - rel_eoa=abs_eoa-H5F_get_base_addr(f); + rel_eoa=abs_eoa-H5F_get_base_addr(file); /* Only need this when resizing sieve buffer */ - max_data=dset_size-dset_offset_arr[u]; + max_data=store_contig->dset_size-dset_offset_arr[u]; /* Compute the size of the sieve buffer */ /* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */ - H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t); + H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t); /* Update local copies of sieve information */ - sieve_start=dset->shared->cache.contig.sieve_loc; - sieve_size=dset->shared->cache.contig.sieve_size; + sieve_start=dset_contig->sieve_loc; + sieve_size=dset_contig->sieve_size; sieve_end=sieve_start+sieve_size; /* Check if there is any point in reading the data from the file */ - if(dset->shared->cache.contig.sieve_size>size) { + if(dset_contig->sieve_size>size) { /* Read the new sieve buffer */ - if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0) + if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); } /* end if */ /* Grab the data out of the buffer (must be first piece of data in buffer ) */ - HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size); + HDmemcpy(dset_contig->sieve_buf,buf,size); /* Set sieve buffer dirty flag */ - dset->shared->cache.contig.sieve_dirty=1; + dset_contig->sieve_dirty=1; } /* end else */ } /* end else */ @@ -902,13 +929,13 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, size=dset_len_arr[u]; /* Compute offset on disk */ - addr=dset_addr+dset_offset_arr[u]; + 
addr=store_contig->dset_addr+dset_offset_arr[u]; /* Compute offset in memory */ buf = (const unsigned char *)_buf + mem_offset_arr[v]; /* Write data */ - if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0) + if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); /* Update memory information */ diff --git a/src/H5Dio.c b/src/H5Dio.c index ece6a1b..95c4c37 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -1119,10 +1119,23 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, uint8_t *bkg_buf = NULL; /*background buffer */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ + H5D_storage_t store; /*union of storage info for dataset */ + H5D_io_info_t io_info; /* Dataset I/O info */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_contig_read) + /* Initialize storage info for this dataset */ + if (dataset->shared->efl.nused>0) + HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t)); + else { + store.contig.dset_addr=dataset->shared->layout.u.contig.addr; + store.contig.dset_size=dataset->shared->layout.u.contig.size; + } /* end if */ + + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store); + /* * If there is no type conversion then read directly into the * application's buffer. This saves at least one mem-to-mem copy. @@ -1137,8 +1150,7 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, || dataset->shared->efl.nused>0 || 0 == nelmts || dataset->shared->layout.type==H5D_COMPACT); H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t); - status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, (H5D_storage_t *)&(dataset->shared->efl), + status = (sconv->read)(&io_info, dataset->shared->layout.readvv, (size_t)nelmts, H5T_get_size(dataset->shared->type), file_space, mem_space, buf/*out*/); @@ -1240,12 +1252,7 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, H5_timer_begin(&timer); #endif /* Sanity check that space is allocated, then read data from it */ - assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) - || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) - || dataset->shared->efl.nused>0 || 0 == nelmts - || dataset->shared->layout.type==H5D_COMPACT); - n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, (H5D_storage_t *)&(dataset->shared->efl), + n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv, file_space, &file_iter, smine_nelmts, tconv_buf/*out*/); @@ -1370,10 +1377,23 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, uint8_t *bkg_buf = NULL; /*background buffer */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ + H5D_storage_t store; /*union of storage info for dataset */ + H5D_io_info_t io_info; /* Dataset I/O info */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_contig_write) + /* Initialize storage info for this dataset */ + if (dataset->shared->efl.nused>0) + HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t)); + else { + store.contig.dset_addr=dataset->shared->layout.u.contig.addr; + store.contig.dset_size=dataset->shared->layout.u.contig.size; + } /* end if */ + + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store); + /* * If there is no type conversion then write directly from the * 
application's buffer. This saves at least one mem-to-mem copy. @@ -1383,8 +1403,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, H5_timer_begin(&timer); #endif H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t); - status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, (H5D_storage_t *)&(dataset->shared->efl), + status = (sconv->write)(&io_info, dataset->shared->layout.writevv, (size_t)nelmts, H5T_get_size(dataset->shared->type), file_space, mem_space, buf); @@ -1503,8 +1522,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif - n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, (H5D_storage_t *)&(dataset->shared->efl), + n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv, file_space, &bkg_iter, smine_nelmts, bkg_buf/*out*/); @@ -1534,8 +1552,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif - status = H5S_select_fscat(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, (H5D_storage_t *)&(dataset->shared->efl), + status = H5S_select_fscat(&io_info, dataset->shared->layout.writevv, file_space, &file_iter, smine_nelmts, tconv_buf); #ifdef H5S_DEBUG @@ -1619,6 +1636,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset, uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ H5D_storage_t store; /*union of EFL and chunk pointer in file space */ + H5D_io_info_t io_info; /* Dataset I/O info */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read) @@ -1627,6 +1645,9 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset, if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping") + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store); + /* * If there is no type conversion then read directly into the * application's buffer. This saves at least one mem-to-mem copy. 
@@ -1656,8 +1677,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset, store.chunk.index = chunk_info->index; /* Perform the actual read operation */ - status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, &store, + status = (sconv->read)(&io_info,dataset->shared->layout.readvv, chunk_info->chunk_points, H5T_get_size(dataset->shared->type), chunk_info->fspace, chunk_info->mspace, buf); @@ -1782,8 +1802,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset, assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) || dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT); - n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, &store, + n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv, chunk_info->fspace, &file_iter, smine_nelmts, tconv_buf/*out*/); @@ -1936,32 +1955,18 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ H5D_storage_t store; /*union of EFL and chunk pointer in file space */ + H5D_io_info_t io_info; /* Dataset I/O info */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write) -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ /* Map elements between file and memory for each chunk*/ if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping") -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - After creating chunk map, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store); + /* * If there is no type conversion then write directly from the * application's buffer. This saves at least one mem-to-mem copy. 
@@ -1970,15 +1975,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Performing optimized I/O, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ /* Get first node in chunk tree */ chunk_node=H5TB_first(fm.fsel->root); @@ -1994,8 +1990,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, store.chunk.index = chunk_info->index; /* Perform the actual write operation */ - status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, &store, + status = (sconv->write)(&io_info, dataset->shared->layout.writevv, chunk_info->chunk_points, H5T_get_size(dataset->shared->type), chunk_info->fspace, chunk_info->mspace, buf); @@ -2007,15 +2002,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, /* Get the next chunk node in the tree */ chunk_node=H5TB_next(chunk_node); } /* end while */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Done performing optimized I/O, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ #ifdef H5S_DEBUG H5_timer_end(&(sconv->stats[0].write_timer), &timer); @@ -2026,13 +2012,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, /* direct xfer accomplished successfully */ HGOTO_DONE(SUCCEED) } /* end if */ -#ifdef QAK -{ - int mpi_rank; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - HDfprintf(stderr,"%s: rank=%d - Performing NON-optimized I/O\n",FUNC,mpi_rank); -} -#endif /* QAK */ /* * This is the general case (type conversion, usually). @@ -2151,8 +2130,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif - n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, &store, + n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv, chunk_info->fspace, &bkg_iter, smine_nelmts, bkg_buf/*out*/); @@ -2183,8 +2161,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset, #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif - status = H5S_select_fscat(dataset->ent.file, dxpl_cache, dxpl_id, - dataset, &store, + status = H5S_select_fscat(&io_info, dataset->shared->layout.writevv, chunk_info->fspace, &file_iter, smine_nelmts, tconv_buf); @@ -2377,15 +2354,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_map) -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ /* Get layout for dataset */ fm->layout = &(dataset->shared->layout); @@ -2468,28 +2436,9 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp fm->last_chunk_info=NULL; } /* end if */ else { -#ifdef QAK - { - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Before creating chunk selections, time=%f\n",FUNC,mpi_rank,time); - } -#endif /* QAK */ /* Build the file selection for each chunk */ if(H5D_create_chunk_file_map_hyper(fm)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections") -#ifdef QAK - { - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - After creating file chunk 
selections, time=%f\n",FUNC,mpi_rank,time); - HDfprintf(stderr,"%s: rank=%d - H5S_select_shape_same=%d\n",FUNC,mpi_rank,H5S_select_shape_same(file_space,equiv_mspace)); - } -#endif /* QAK */ /* Clean file chunks' hyperslab span "scratch" information */ curr_node=H5TB_first(fm->fsel->root); @@ -2572,16 +2521,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp } /* end if */ } /* end else */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - After creating chunk selections, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ - done: /* Release the [potentially partially built] chunk mapping information if an error occurs */ if(ret_value<0) { @@ -2611,15 +2550,6 @@ done: HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") } /* end if */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Leaving, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_create_chunk_map() */ @@ -2905,18 +2835,6 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm) herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_mem_map_hyper) -#ifdef QAK -{ - hsize_t mem_dims[H5O_LAYOUT_NDIMS]; /* Dimensions of memory space */ - - if(H5S_get_simple_extent_dims(fm->mem_space, mem_dims, NULL)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality") - - HDfprintf(stderr,"%s: mem_dims={",FUNC); - for(u=0; u<fm->m_ndims; u++) - HDfprintf(stderr,"%Hd%s",mem_dims[u],(u<(fm->m_ndims-1) ? ", " : "}\n")); -} -#endif /* QAK */ /* Sanity check */ assert(fm->f_ndims>0); @@ -2960,22 +2878,6 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm) assert(fm->m_ndims==fm->f_ndims); for(u=0; u<fm->f_ndims; u++) adjust[u]=file_sel_start[u]-mem_sel_start[u]; -#ifdef QAK - { - int mpi_rank; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - if(mpi_rank==1) { - HDfprintf(stderr,"%s: rank=%d - adjust={",FUNC,mpi_rank); - for(u=0; u<fm->f_ndims; u++) - HDfprintf(stderr,"%Hd%s",adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n")); - } /* end if */ - } -#endif /* QAK */ -#ifdef QAK - HDfprintf(stderr,"%s: adjust={",FUNC); - for(u=0; u<fm->f_ndims; u++) - HDfprintf(stderr,"%Hd%s",adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n")); -#endif /* QAK */ /* Iterate over each chunk in the chunk list */ curr_node=H5TB_first(fm->fsel->root); @@ -3003,49 +2905,11 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm) /* Compensate for the chunk offset */ for(u=0; u<fm->f_ndims; u++) chunk_adjust[u]=adjust[u]-chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */ -#ifdef QAK - { - int mpi_rank; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - if(mpi_rank==1) { - HDfprintf(stderr,"%s: rank=%d - Before adjusting memory selection\n",FUNC,mpi_rank); - HDfprintf(stderr,"%s: rank=%d - chunk_adjust={",FUNC,mpi_rank); - for(u=0; u<fm->f_ndims; u++) - HDfprintf(stderr,"%Hd%s",chunk_adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n")); - } /* end if */ - } -#endif /* QAK */ -#ifdef QAK - HDfprintf(stderr,"%s: Before adjusting memory selection\n",FUNC); - HDfprintf(stderr,"%s: chunk_adjust={",FUNC); - for(u=0; u<fm->f_ndims; u++) - HDfprintf(stderr,"%Hd%s",chunk_adjust[u],(u<(fm->f_ndims-1) ? 
", " : "}\n")); -#endif /* QAK */ + /* Adjust the selection */ if(H5S_hyper_adjust(chunk_info->mspace,chunk_adjust)<0) /*lint !e772 The chunk_adjust array will always be initialized */ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") -#ifdef QAK - { - int mpi_rank; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - if(mpi_rank==1) - HDfprintf(stderr,"%s: rank=%d - After adjusting memory selection\n",FUNC,mpi_rank); - } -#endif /* QAK */ -#ifdef QAK - HDfprintf(stderr,"%s: After adjusting memory selection\n",FUNC); - { - hsize_t mem_dims[H5O_LAYOUT_NDIMS]; /* Dimensions of memory space */ - - if(H5S_get_simple_extent_dims(chunk_info->mspace, mem_dims, NULL)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality") - - HDfprintf(stderr,"%s: mem_dims={",FUNC); - for(u=0; u<fm->m_ndims; u++) - HDfprintf(stderr,"%Hd%s",mem_dims[u],(u<(fm->m_ndims-1) ? ", " : "}\n")); - } -#endif /* QAK */ /* Get the next chunk node in the TBBT */ curr_node=H5TB_next(curr_node); } /* end while */ diff --git a/src/H5Distore.c b/src/H5Distore.c index b98b620..e84488f 100644 --- a/src/H5Distore.c +++ b/src/H5Distore.c @@ -144,7 +144,7 @@ typedef struct H5D_istore_ud1_t { /* Private prototypes */ static void *H5D_istore_chunk_alloc(size_t size, const H5O_pline_t *pline); static void *H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline); -static herr_t H5D_istore_shared_create (H5F_t *f, H5O_layout_t *layout); +static herr_t H5D_istore_shared_create (const H5F_t *f, H5O_layout_t *layout); static herr_t H5D_istore_shared_free (void *page); /* B-tree iterator callbacks */ @@ -156,7 +156,7 @@ static int H5D_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr void *_rt_key, void *_udata); /* B-tree callbacks */ -static size_t H5D_istore_sizeof_rkey(H5F_t *f, const void *_udata); +static size_t H5D_istore_sizeof_rkey(const H5F_t *f, const void *_udata); static H5RC_t *H5D_istore_get_shared(H5F_t *f, const void *_udata); static herr_t H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t, void *_lt_key, void *_udata, void *_rt_key, @@ -241,7 +241,7 @@ H5FL_BLK_DEFINE_STATIC(chunk_page); *------------------------------------------------------------------------- */ static size_t -H5D_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata) +H5D_istore_sizeof_rkey(const H5F_t UNUSED *f, const void *_udata) { const H5D_istore_ud1_t *udata = (const H5D_istore_ud1_t *) _udata; size_t nbytes; @@ -926,11 +926,8 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd *------------------------------------------------------------------------- */ herr_t -H5D_istore_init (H5F_t *f, H5D_t *dset) +H5D_istore_init (const H5F_t *f, H5D_t *dset) { - H5D_istore_ud1_t udata; - H5B_shared_t *shared; /* Shared B-tree node info */ - size_t u; /* Local index variable */ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); herr_t ret_value=SUCCEED; /* Return value */ @@ -970,8 +967,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) +H5D_istore_flush_entry(H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_t reset) { herr_t ret_value=SUCCEED; /*return value */ unsigned u; /*counters */ @@ -981,7 +977,8 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, FUNC_ENTER_NOAPI_NOINIT(H5D_istore_flush_entry); - assert(f); + assert(io_info); + 
assert(io_info->dset); assert(ent); assert(!ent->locked); @@ -989,16 +986,16 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, if (ent->dirty) { H5D_istore_ud1_t udata; /*pass through B-tree */ - udata.mesg = &dset->shared->layout; + udata.mesg = &io_info->dset->shared->layout; udata.key.filter_mask = 0; udata.addr = HADDR_UNDEF; udata.key.nbytes = ent->chunk_size; - for (u=0; u<dset->shared->layout.u.chunk.ndims; u++) + for (u=0; u<io_info->dset->shared->layout.u.chunk.ndims; u++) udata.key.offset[u] = ent->offset[u]; alloc = ent->alloc_size; /* Should the chunk be filtered before writing it to disk? */ - if (dset->shared->dcpl_cache.pline.nused) { + if (io_info->dset->shared->dcpl_cache.pline.nused) { if (!reset) { /* * Copy the chunk to a new buffer before running it through @@ -1020,8 +1017,8 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, point_of_no_return = TRUE; ent->chunk = NULL; } - if (H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.key.filter_mask), dxpl_cache->err_detect, - dxpl_cache->filter_cb, &(udata.key.nbytes), &alloc, &buf)<0) + if (H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.key.filter_mask), io_info->dxpl_cache->err_detect, + io_info->dxpl_cache->filter_cb, &(udata.key.nbytes), &alloc, &buf)<0) HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") } @@ -1029,14 +1026,16 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, * Create the chunk it if it doesn't exist, or reallocate the chunk if * its size changed. Then write the data into the file. */ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0) + if (H5B_insert(io_info->dset->ent.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); - if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0) + if (H5F_block_write(io_info->dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, io_info->dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); /* Mark cache entry as clean */ ent->dirty = FALSE; - dset->shared->cache.chunk.nflushes++; +#ifdef H5D_ISTORE_DEBUG + io_info->dset->shared->cache.chunk.nflushes++; +#endif /* H5D_ISTORE_DEBUG */ } /* end if */ /* Reset, but do not free or removed from list */ @@ -1045,7 +1044,7 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, if(buf==ent->chunk) buf = NULL; if(ent->chunk!=NULL) - ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline)); + ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline)); } /* end if */ done: @@ -1061,7 +1060,7 @@ done: */ if (ret_value<0 && point_of_no_return) { if(ent->chunk) - ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline)); + ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline)); } /* end if */ FUNC_LEAVE_NOAPI(ret_value); @@ -1087,28 +1086,27 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_istore_preempt(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, H5D_rdcc_ent_t * ent, hbool_t flush) +H5D_istore_preempt(H5D_io_info_t *io_info, H5D_rdcc_ent_t * ent, hbool_t flush) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); herr_t 
ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_istore_preempt); - assert(f); + assert(io_info); assert(ent); assert(!ent->locked); assert(ent->idx < rdcc->nslots); if(flush) { /* Flush */ - if(H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, TRUE) < 0) + if(H5D_istore_flush_entry(io_info, ent, TRUE) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer"); } else { /* Don't flush, just free chunk */ if(ent->chunk != NULL) - ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline)); + ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline)); } /* Unlink from list */ @@ -1154,8 +1152,9 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags) +H5D_istore_flush (H5D_t *dset, hid_t dxpl_id, unsigned flags) { + H5D_io_info_t io_info; /* Temporary I/O info object */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); @@ -1169,6 +1168,9 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags) if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL); + for (ent=rdcc->head; ent; ent=next) { next = ent->next; if ((flags&H5F_FLUSH_CLEAR_ONLY)) { @@ -1176,10 +1178,10 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags) ent->dirty = FALSE; } /* end if */ else if ((flags&H5F_FLUSH_INVALIDATE)) { - if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0) + if (H5D_istore_preempt(&io_info, ent, TRUE )<0) nerrors++; } else { - if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0) + if (H5D_istore_flush_entry(&io_info, ent, FALSE)<0) nerrors++; } } /* end for */ @@ -1210,8 +1212,9 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset) +H5D_istore_dest (H5D_t *dset, hid_t dxpl_id) { + H5D_io_info_t io_info; /* Temporary I/O info object */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); @@ -1221,10 +1224,15 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset) FUNC_ENTER_NOAPI(H5D_istore_dest, FAIL); + assert(dset); + /* Fill the DXPL cache values for later use */ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL); + /* Flush all the cached chunks */ for (ent=rdcc->head; ent; ent=next) { #ifdef H5D_ISTORE_DEBUG @@ -1232,7 +1240,7 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset) HDfflush(stderr); #endif next = ent->next; - if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0) + if (H5D_istore_preempt(&io_info, ent, TRUE )<0) nerrors++; } if (nerrors) @@ -1265,7 +1273,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_istore_shared_create (H5F_t *f, H5O_layout_t *layout) +H5D_istore_shared_create (const H5F_t *f, H5O_layout_t *layout) { 
H5D_istore_ud1_t udata; H5B_shared_t *shared; /* Shared B-tree node info */ @@ -1362,11 +1370,10 @@ H5D_istore_shared_free (void *_shared) *------------------------------------------------------------------------- */ static herr_t -H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset, - size_t size) +H5D_istore_prune (H5D_io_info_t *io_info, size_t size) { int i, j, nerrors=0; - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); size_t total = rdcc->nbytes; const int nmeth=2; /*number of methods */ int w[1]; /*weighting as an interval */ @@ -1386,7 +1393,7 @@ H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H * begins. The pointers participating in the list traversal are each * given a chance at preemption before any of the pointers are advanced. */ - w[0] = (int)(rdcc->nused * H5F_RDCC_W0(f)); + w[0] = (int)(rdcc->nused * H5F_RDCC_W0(io_info->dset->ent.file)); p[0] = rdcc->head; p[1] = NULL; @@ -1442,7 +1449,7 @@ H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H if (n[j]==cur) n[j] = cur->next; } - if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, cur, TRUE)<0) + if (H5D_istore_preempt(io_info, cur, TRUE)<0) nerrors++; } } @@ -1497,18 +1504,18 @@ done: *------------------------------------------------------------------------- */ static void * -H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5D_istore_lock(H5D_io_info_t *io_info, H5D_istore_ud1_t *udata, hbool_t relax, unsigned *idx_hint/*in,out*/) { - unsigned idx=0; /*hash index number */ - hbool_t found = FALSE; /*already in cache? */ + H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */ const H5O_pline_t *pline=&(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ const H5O_layout_t *layout=&(dset->shared->layout); /* Dataset layout */ const H5O_fill_t *fill=&(dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_time_t fill_time=dset->shared->dcpl_cache.fill_time; /* Fill time */ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/ H5D_rdcc_ent_t *ent = NULL; /*cache entry */ + unsigned idx=0; /*hash index number */ + hbool_t found = FALSE; /*already in cache? 
*/ unsigned u; /*counters */ size_t chunk_size=0; /*size of a chunk */ void *chunk=NULL; /*the file chunk */ @@ -1516,11 +1523,11 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI_NOINIT(H5D_istore_lock); - assert(f); + assert(io_info); assert(dset); - assert(store); - assert(dxpl_cache); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); + assert(io_info->dxpl_cache); + assert(io_info->store); + assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER)); /* Get the chunk's size */ assert(layout->u.chunk.size>0); @@ -1528,12 +1535,12 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* Search for the chunk in the cache */ if (rdcc->nslots>0) { - idx=H5D_HASH(dset->shared,store->chunk.index); + idx=H5D_HASH(dset->shared,io_info->store->chunk.index); ent = rdcc->slot[idx]; if (ent) { - for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) { - if (store->chunk.offset[u]!=ent->offset[u]) { + for (u=0, found=TRUE; u<layout->u.chunk.ndims; u++) { + if (io_info->store->chunk.offset[u]!=ent->offset[u]) { found = FALSE; break; } /* end if */ @@ -1545,7 +1552,9 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* * Already in the cache. Count a hit. */ +#ifdef H5D_ISTORE_DEBUG rdcc->nhits++; +#endif /* H5D_ISTORE_DEBUG */ } else if (!found && relax) { /* @@ -1557,8 +1566,8 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, #ifdef H5D_ISTORE_DEBUG HDputc('w', stderr); HDfflush(stderr); -#endif rdcc->nhits++; +#endif if (NULL==(chunk=H5D_istore_chunk_alloc (chunk_size,pline))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk"); @@ -1576,7 +1585,7 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, * Not in the cache. Read it from the file and count this as a miss * if it's in the file or an init if it isn't. 
*/ - chunk_addr = H5D_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata); + chunk_addr = H5D_istore_get_addr(io_info, udata); } /* end else */ if (H5F_addr_defined(chunk_addr)) { @@ -1590,15 +1599,17 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, chunk_alloc = udata->key.nbytes; if (NULL==(chunk = H5D_istore_chunk_alloc (chunk_alloc,pline))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk"); - if (H5F_block_read(f, H5FD_MEM_DRAW, chunk_addr, udata->key.nbytes, dxpl_id, chunk)<0) + if (H5F_block_read(dset->ent.file, H5FD_MEM_DRAW, chunk_addr, udata->key.nbytes, io_info->dxpl_id, chunk)<0) HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk"); if (pline->nused) - if (H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->key.filter_mask), dxpl_cache->err_detect, - dxpl_cache->filter_cb, &(udata->key.nbytes), &chunk_alloc, &chunk)<0) { + if (H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->key.filter_mask), io_info->dxpl_cache->err_detect, + io_info->dxpl_cache->filter_cb, &(udata->key.nbytes), &chunk_alloc, &chunk)<0) { HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL, "data pipeline read failed"); } +#ifdef H5D_ISTORE_DEBUG rdcc->nmisses++; +#endif /* H5D_ISTORE_DEBUG */ } else { H5D_fill_value_t fill_status; @@ -1636,7 +1647,9 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, else HDmemset(chunk,0,chunk_size); #endif /* H5_USING_PURIFY */ +#ifdef H5D_ISTORE_DEBUG rdcc->ninits++; +#endif /* H5D_ISTORE_DEBUG */ } /* end else */ } assert (found || chunk_size>0); @@ -1652,10 +1665,10 @@ else HDputc('#', stderr); HDfflush(stderr); #endif - if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE)<0) + if (H5D_istore_preempt(io_info, ent, TRUE)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache"); } - if (H5D_istore_prune(f, dxpl_cache, dxpl_id, dset, chunk_size)<0) + if (H5D_istore_prune(io_info, chunk_size)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache"); /* Create a new entry */ @@ -1665,7 +1678,7 @@ else ent->chunk_size = chunk_size; ent->alloc_size = chunk_size; for (u=0; u<layout->u.chunk.ndims; u++) - ent->offset[u] = store->chunk.offset[u]; + ent->offset[u] = io_info->store->chunk.offset[u]; ent->rd_count = chunk_size; ent->wr_count = chunk_size; ent->chunk = chunk; @@ -1767,17 +1780,19 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5D_istore_unlock(H5D_io_info_t *io_info, hbool_t dirty, unsigned idx_hint, uint8_t *chunk, size_t naccessed) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); + const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */ + const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); H5D_rdcc_ent_t *ent = NULL; int found = -1; unsigned u; FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_unlock); + assert(io_info); + if (UINT_MAX==idx_hint) { /*not in cache*/ } else { @@ -1799,17 +1814,17 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, HDmemset (&x, 0, sizeof x); x.dirty = TRUE; - for (u=0; u<dset->shared->layout.u.chunk.ndims; u++) - x.offset[u] = store->chunk.offset[u]; - assert(dset->shared->layout.u.chunk.size>0); - H5_ASSIGN_OVERFLOW(x.chunk_size,dset->shared->layout.u.chunk.size,hsize_t,size_t); + for (u=0; u<layout->u.chunk.ndims; 
u++) + x.offset[u] = io_info->store->chunk.offset[u]; + assert(layout->u.chunk.size>0); + H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t); x.alloc_size = x.chunk_size; x.chunk = chunk; - H5D_istore_flush_entry (f, dxpl_cache, dxpl_id, dset, &x, TRUE); + H5D_istore_flush_entry (io_info, &x, TRUE); } else { if(chunk) - H5D_istore_chunk_xfree (chunk,&(dset->shared->dcpl_cache.pline)); + H5D_istore_chunk_xfree (chunk,&(io_info->dset->shared->dcpl_cache.pline)); } } else { /* @@ -1846,12 +1861,12 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, *------------------------------------------------------------------------- */ ssize_t -H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5D_istore_readvv(H5D_io_info_t *io_info, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf) { + H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */ H5D_istore_ud1_t udata; /*B-tree pass-through */ haddr_t chunk_addr; /* Chunk address on disk */ size_t u; /* Local index variables */ @@ -1860,11 +1875,11 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp FUNC_ENTER_NOAPI(H5D_istore_readvv, FAIL); /* Check args */ - assert(f); - assert(dxpl_cache); + assert(io_info); assert(dset && H5D_CHUNKED==dset->shared->layout.type); assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS); - assert(store); + assert(io_info->dxpl_cache); + assert(io_info->store); assert(chunk_len_arr); assert(chunk_offset_arr); assert(mem_len_arr); @@ -1873,22 +1888,23 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp #ifndef NDEBUG for (u=0; u<dset->shared->layout.u.chunk.ndims; u++) - assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ + assert(io_info->store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK -HDfprintf(stderr,"%s: store->chunk.offset={",FUNC); +HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC); for(u=0; u<dset->shared->layout.u.chunk.ndims; u++) - HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n")); + HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ - chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata); + chunk_addr=H5D_istore_get_addr(io_info, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]); HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_arr[*mem_curr_seq]); +HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf); #endif /* QAK */ /* @@ -1903,13 +1919,25 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * read-through of only the elements requested. 
*/ if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF) - || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) { + || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) { + H5D_io_info_t chk_io_info; /* Temporary I/O info object */ + H5D_storage_t chk_store; /* Chunk storage information */ + #ifdef H5_HAVE_PARALLEL /* Additional sanity check when operating in parallel */ if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk"); #endif /* H5_HAVE_PARALLEL */ - if ((ret_value=H5D_contig_readvv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) + + /* Set up the storage information for the chunk */ + chk_store.contig.dset_addr=chunk_addr; + chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size; + + /* Set up new dataset I/O info */ + H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store); + + /* Do I/O directly on chunk without reading it into the cache */ + if ((ret_value=H5D_contig_readvv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file"); } /* end if */ else { @@ -1934,13 +1962,13 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a /* Check if the chunk is in the cache (but hasn't been written to disk yet) */ if (rdcc->nslots>0) { - unsigned idx=H5D_HASH(dset->shared,store->chunk.index); /* Cache entry index */ + unsigned idx=H5D_HASH(dset->shared,io_info->store->chunk.index); /* Cache entry index */ H5D_rdcc_ent_t *ent = rdcc->slot[idx]; /* Cache entry */ /* Potential match... */ if (ent) { for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) { - if (store->chunk.offset[u]!=ent->offset[u]) { + if (io_info->store->chunk.offset[u]!=ent->offset[u]) { found = FALSE; break; } /* end if */ @@ -2001,8 +2029,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * Lock the chunk, copy from application to chunk, then unlock the * chunk. 
*/ - if (NULL==(chunk=H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, store, - &udata, FALSE, &idx_hint))) + if (NULL==(chunk=H5D_istore_lock(io_info, &udata, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -2010,8 +2037,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "vectorized memcpy failed"); H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); - if (H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, store, - FALSE, idx_hint, chunk, (size_t)naccessed)<0) + if (H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk"); /* Set return value */ @@ -2039,12 +2065,12 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store, +H5D_istore_writevv(H5D_io_info_t *io_info, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf) { + H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */ H5D_istore_ud1_t udata; /*B-tree pass-through */ haddr_t chunk_addr; /* Chunk address on disk */ size_t u; /* Local index variables */ @@ -2053,11 +2079,11 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, FUNC_ENTER_NOAPI(H5D_istore_writevv, FAIL); /* Check args */ - assert(f); - assert(dxpl_cache); + assert(io_info); assert(dset && H5D_CHUNKED==dset->shared->layout.type); assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS); - assert(store); + assert(io_info->dxpl_cache); + assert(io_info->store); assert(chunk_len_arr); assert(chunk_offset_arr); assert(mem_len_arr); @@ -2066,16 +2092,16 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, #ifndef NDEBUG for (u=0; u<dset->shared->layout.u.chunk.ndims; u++) - assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ + assert(io_info->store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK -HDfprintf(stderr,"%s: store->chunk.offset={",FUNC); +HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC); for(u=0; u<dset->shared->layout.u.chunk.ndims; u++) - HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n")); + HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ - chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata); + chunk_addr=H5D_istore_get_addr(io_info, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); @@ -2096,13 +2122,25 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * write-through of only the elements requested. 
*/ if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF) - || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) { + || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) { + H5D_io_info_t chk_io_info; /* Temporary I/O info object */ + H5D_storage_t chk_store; /* Chunk storage information */ + #ifdef H5_HAVE_PARALLEL /* Additional sanity check when operating in parallel */ if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk"); #endif /* H5_HAVE_PARALLEL */ - if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) + + /* Set up the storage information for the chunk */ + chk_store.contig.dset_addr=chunk_addr; + chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size; + + /* Set up new dataset I/O info */ + H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store); + + /* Do I/O directly on chunk without reading it into the cache */ + if ((ret_value=H5D_contig_writevv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); } /* end if */ else { @@ -2142,8 +2180,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a } /* end if */ #endif /* OLD_WAY */ - if (NULL==(chunk=H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, store, - &udata, relax, &idx_hint))) + if (NULL==(chunk=H5D_istore_lock(io_info, &udata, relax, &idx_hint))) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -2151,8 +2188,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); - if (H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, store, - TRUE, idx_hint, chunk, (size_t)naccessed)<0) + if (H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk"); /* Set return value */ @@ -2235,9 +2271,10 @@ done: *------------------------------------------------------------------------- */ hsize_t -H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset) +H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + H5D_io_info_t io_info; /* Temporary I/O info object */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ H5D_rdcc_ent_t *ent; /*cache entry */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ @@ -2246,20 +2283,25 @@ H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset) FUNC_ENTER_NOAPI(H5D_istore_allocated, 0); + assert(dset); + /* Fill the DXPL cache values for later use */ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache") + /* Construct dataset I/O info */ + 
H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL); + /* Search for cached chunks that haven't been written out */ for(ent = rdcc->head; ent; ent = ent->next) { /* Flush the chunk out to disk, to make certain the size is correct later */ - if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0) + if (H5D_istore_flush_entry(&io_info, ent, FALSE)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer"); } /* end for */ HDmemset(&udata, 0, sizeof udata); udata.mesg = &dset->shared->layout; - if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata)<0) + if (H5B_iterate(dset->ent.file, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree"); /* Set return value */ @@ -2289,8 +2331,7 @@ done: *------------------------------------------------------------------------- */ haddr_t -H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, - const hssize_t offset[], H5D_istore_ud1_t *_udata) +H5D_istore_get_addr(H5D_io_info_t *io_info, H5D_istore_ud1_t *_udata) { H5D_istore_ud1_t tmp_udata; /* Information about a chunk */ H5D_istore_ud1_t *udata; /* Pointer to information about a chunk */ @@ -2299,21 +2340,22 @@ H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_get_addr); - assert(f); - assert(layout && (layout->u.chunk.ndims > 0)); - assert(offset); + assert(io_info); + assert(io_info->dset); + assert(io_info->dset->shared->layout.u.chunk.ndims > 0); + assert(io_info->store->chunk.offset); /* Check for udata struct to return */ udata = (_udata!=NULL ? _udata : &tmp_udata); /* Initialize the information about the chunk we are looking for */ - for (u=0; u<layout->u.chunk.ndims; u++) - udata->key.offset[u] = offset[u]; - udata->mesg = layout; + for (u=0; u<io_info->dset->shared->layout.u.chunk.ndims; u++) + udata->key.offset[u] = io_info->store->chunk.offset[u]; + udata->mesg = &(io_info->dset->shared->layout); udata->addr = HADDR_UNDEF; /* Go get the chunk information */ - if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) { + if (H5B_find (io_info->dset->ent.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, udata)<0) { /* Note: don't push error on stack, leave that to next higher level, * since many times the B-tree is searched in order to determine * if a chunk exists in the B-tree or not. 
-QAK @@ -2448,9 +2490,10 @@ H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline) *------------------------------------------------------------------------- */ herr_t -H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, - hbool_t full_overwrite) +H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite) { + H5D_io_info_t io_info; /* Dataset I/O info */ + H5D_storage_t store; /* Dataset storage information */ hssize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ hsize_t chunk_size; /* Size of chunk in bytes */ unsigned filter_mask=0; /* Filter mask for chunks that have them */ @@ -2461,7 +2504,8 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, unsigned should_fill=0; /* Whether fill values should be written */ H5D_istore_ud1_t udata; /* B-tree pass-through for creating chunk */ void *chunk=NULL; /* Chunk buffer for writing fill values */ - H5P_genplist_t *dx_plist; /* Data xfer property list */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ #ifdef H5_HAVE_PARALLEL MPI_Comm mpi_comm=MPI_COMM_NULL; /* MPI communicator for file */ int mpi_rank=(-1); /* This process's rank */ @@ -2473,8 +2517,6 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, unsigned chunk_exists; /* Flag to indicate whether a chunk exists already */ int i; /* Local index variable */ unsigned u; /* Local index variable */ - H5Z_EDC_t edc; /* Decide whether to enable EDC for read */ - H5Z_cb_t cb_struct; H5P_genplist_t *dc_plist; /* Property list */ int space_ndims; /* Dataset's space rank */ hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */ @@ -2483,11 +2525,10 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, FUNC_ENTER_NOAPI(H5D_istore_allocate, FAIL); /* Check args */ - assert(f); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(dset && H5D_CHUNKED==dset->shared->layout.type); assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS); assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr)); + assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Get dataset's creation property list */ if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id))) @@ -2506,23 +2547,19 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, if(H5P_get(dc_plist, H5D_CRT_FILL_TIME_NAME, &fill_time) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve fill time"); - /* Get necessary properties from dataset transfer property list */ - if (NULL == (dx_plist = H5P_object_verify(dxpl_id,H5P_DATASET_XFER))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list"); - if(H5P_get(dx_plist,H5D_XFER_EDC_NAME,&edc)<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get edc information"); - if(H5P_get(dx_plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get filter callback struct"); + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") #ifdef H5_HAVE_PARALLEL /* Retrieve MPI parameters */ - if(IS_H5FD_MPI(f)) { + if(IS_H5FD_MPI(dset->ent.file)) { /* Get the MPI communicator */ - if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(f))) + if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(dset->ent.file))) HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve 
MPI communicator"); /* Get the MPI rank */ - if ((mpi_rank=H5F_mpi_get_rank(f))<0) + if ((mpi_rank=H5F_mpi_get_rank(dset->ent.file))<0) HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank"); /* Set the MPI-capable file driver flag */ @@ -2579,7 +2616,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, size_t nbytes=(size_t)chunk_size; /* Push the chunk through the filters */ - if (H5Z_pipeline(&pline, 0, &filter_mask, edc, cb_struct, &nbytes, &buf_size, &chunk)<0) + if (H5Z_pipeline(&pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &chunk)<0) HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed"); /* Keep the number of bytes the chunk turned in to */ @@ -2587,12 +2624,16 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, } /* end if */ } /* end if */ + /* Set up dataset I/O info */ + store.chunk.offset=chunk_offset; + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,&store); + /* Loop over all chunks */ carry=0; while (carry==0) { /* Check if the chunk exists yet on disk */ chunk_exists=1; - if(H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),chunk_offset, NULL)==HADDR_UNDEF) { + if(H5D_istore_get_addr(&io_info,NULL)==HADDR_UNDEF) { const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ H5D_rdcc_ent_t *ent = NULL; /*cache entry */ @@ -2621,7 +2662,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, udata.key.offset[u] = chunk_offset[u]; /* Allocate the chunk with all processes */ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0) + if (H5B_insert(dset->ent.file, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); /* Check if fill values should be written to blocks */ @@ -2632,7 +2673,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, /* Write the chunks out from only one process */ /* !! Use the internal "independent" DXPL!! 
-QAK */ if(H5_PAR_META_WRITE==mpi_rank) { - if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5AC_ind_dxpl_id, chunk)<0) + if (H5F_block_write(dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5AC_ind_dxpl_id, chunk)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); } /* end if */ @@ -2641,7 +2682,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, } /* end if */ else { #endif /* H5_HAVE_PARALLEL */ - if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, chunk)<0) + if (H5F_block_write(dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, chunk)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); #ifdef H5_HAVE_PARALLEL } /* end else */ @@ -2782,10 +2823,10 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset) +H5D_istore_prune_by_extent(H5D_io_info_t *io_info) { - H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ + H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */ + const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ H5D_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */ unsigned u; /*counters */ int found; /*remove this entry */ @@ -2796,8 +2837,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, FUNC_ENTER_NOAPI(H5D_istore_prune_by_extent, FAIL); /* Check args */ - assert(f); - assert(dxpl_cache); + assert(io_info); assert(dset && H5D_CHUNKED == dset->shared->layout.type); assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr)); @@ -2831,7 +2871,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, #endif /* Preempt the entry from the cache, but do not flush it to disk */ - if(H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, FALSE) < 0) + if(H5D_istore_preempt(io_info, ent, FALSE) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk"); found=0; @@ -2848,7 +2888,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, udata.mesg = &dset->shared->layout; udata.dims = curr_dims; - if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->shared->layout.u.chunk.addr, &udata) < 0) + if(H5B_iterate(dset->ent.file, io_info->dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->shared->layout.u.chunk.addr, &udata) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree"); done: @@ -2979,9 +3019,9 @@ H5D_istore_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out *------------------------------------------------------------------------- */ herr_t -H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset) +H5D_istore_initialize_by_extent(H5D_io_info_t *io_info) { + const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */ uint8_t *chunk = NULL; /*the file chunk */ unsigned idx_hint = 0; /*input value for H5F_istore_lock */ hssize_t chunk_offset[H5O_LAYOUT_NDIMS]; /*logical location of the chunks */ @@ -3013,14 +3053,13 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca FUNC_ENTER_NOAPI(H5D_istore_initialize_by_extent, FAIL); /* Check args */ - assert(f); - assert(dxpl_cache); - 
assert(dset && H5D_CHUNKED == dset->shared->layout.type); - assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr)); + assert(io_info); + assert(io_info->dset && H5D_CHUNKED == layout->type); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); /* Get dataset's creation property list */ - if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id))) + if (NULL == (dc_plist = H5I_object(io_info->dset->shared->dcpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list") /* Get necessary properties from property list */ @@ -3036,7 +3075,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca HDmemset(count, 0, sizeof(count)); /* Go get the rank & dimensions */ - if((srank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0) + if((srank = H5S_get_simple_extent_dims(io_info->dset->shared->space, curr_dims, NULL)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions"); H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned); @@ -3045,9 +3084,9 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca size[u] = curr_dims[u]; /* Round up to the next integer # of chunks, to accomodate partial chunks */ - chunks[u] = ((curr_dims[u]+dset->shared->layout.u.chunk.dim[u])-1) / dset->shared->layout.u.chunk.dim[u]; + chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u]; } /* end for */ - size[u] = dset->shared->layout.u.chunk.dim[u]; + size[u] = layout->u.chunk.dim[u]; /* Get the "down" sizes for each dimension */ if(H5V_array_down(rank,chunks,down_chunks)<0) @@ -3055,7 +3094,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca /* Create a data space for a chunk & set the extent */ for(u = 0; u < rank; u++) - chunk_dims[u] = dset->shared->layout.u.chunk.dim[u]; + chunk_dims[u] = layout->u.chunk.dim[u]; if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace"); @@ -3064,18 +3103,22 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * loop through the chunks copying each chunk from the application to the * chunk cache. 
*/ - for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++) { - idx_max[u] = (size[u] - 1) / dset->shared->layout.u.chunk.dim[u] + 1; + for(u = 0; u < layout->u.chunk.ndims; u++) { + idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1; idx_cur[u] = 0; } /* end for */ + /* Point to local dataset storage info */ + assert(io_info->store==NULL); /* Make certain we aren't blowing anything away */ + io_info->store=&store; + /* Loop over all chunks */ carry=0; while(carry==0) { - for(u = 0, naccessed = 1; u < dset->shared->layout.u.chunk.ndims; u++) { + for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) { /* The location and size of the chunk being accessed */ - chunk_offset[u] = idx_cur[u] * (hssize_t)(dset->shared->layout.u.chunk.dim[u]); - sub_size[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u], + chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]); + sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u]) - chunk_offset[u]; naccessed *= sub_size[u]; } /* end for */ @@ -3084,8 +3127,8 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * Figure out what chunks have to be initialized. These are the chunks where the dataspace * extent boundary is within the chunk */ - for(u = 0, found = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) { - end_chunk = chunk_offset[u] + dset->shared->layout.u.chunk.dim[u]; + for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) { + end_chunk = chunk_offset[u] + layout->u.chunk.dim[u]; if(end_chunk > size[u]) { found = 1; break; @@ -3095,27 +3138,26 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca if(found) { /* Calculate the index of this chunk */ - if(H5V_chunk_index(rank,chunk_offset,dset->shared->layout.u.chunk.dim,down_chunks,&store.chunk.index)<0) + if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") store.chunk.offset=chunk_offset; - if(NULL == (chunk = H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, - &store, NULL, FALSE, &idx_hint))) + if(NULL == (chunk = H5D_istore_lock(io_info, NULL, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); if(H5S_select_all(space_chunk,1) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space"); for(u = 0; u < rank; u++) - count[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u], size[u] - chunk_offset[u]); + count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]); #ifdef H5D_ISTORE_DEBUG HDfputs("cache:initialize:offset:[", stdout); - for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); HDfputs("]", stdout); HDfputs(":count:[", stdout); - for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? 
", " : "", count[u]); HDfputs("]\n", stdout); #endif @@ -3132,13 +3174,12 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca if(H5S_select_fill(fill.buf, (size_t)size[rank], space_chunk, chunk) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed"); - if(H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, &store, - TRUE, idx_hint, chunk, (size_t)naccessed) < 0) + if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)naccessed) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk"); } /*found */ /* Increment indices */ - for(i = dset->shared->layout.u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) { + for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) { if(++idx_cur[i] >= idx_max[i]) idx_cur[i] = 0; else @@ -3220,8 +3261,9 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset) +H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id) { + H5D_io_info_t io_info; /* Temporary I/O info object */ H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */ H5D_rdcc_ent_t *ent, *next; /*cache entry */ H5D_rdcc_ent_t *old_ent; /* Old cache entry */ @@ -3240,7 +3282,6 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset) FUNC_ENTER_NOAPI(H5D_istore_update_cache, FAIL); /* Check args */ - assert(f); assert(dset && H5D_CHUNKED == dset->shared->layout.type); assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS); @@ -3261,6 +3302,9 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset) if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Construct dataset I/O info */ + H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL); + /* Recompute the index for each cached chunk that is in a dataset */ for(ent = rdcc->head; ent; ent = next) { next=ent->next; @@ -3284,7 +3328,7 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset) next=old_ent->next; /* Remove the old entry from the cache */ - if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, old_ent, TRUE )<0) + if (H5D_istore_preempt(&io_info, old_ent, TRUE )<0) HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); } /* end if */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 5a2559e..c5f227d 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -40,10 +40,9 @@ #ifdef H5_HAVE_PARALLEL static herr_t -H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size, +H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, - hid_t dxpl_id, void *buf/*out*/, - const H5D_storage_t *store, + void *buf/*out*/, hbool_t do_write); @@ -91,11 +90,9 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size, *------------------------------------------------------------------------- */ static herr_t -H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size, +H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, - hid_t dxpl_id, void *_buf /*out*/, - const H5D_storage_t *store, - hbool_t do_write ) + void *_buf /*out*/, hbool_t do_write ) { haddr_t addr; /* Address of dataset (or selection) within file */ size_t mpi_buf_count, mpi_file_count; /* Number of "objects" to transfer */ @@ -111,14 +108,14 @@ H5D_mpio_spaces_xfer(H5F_t *f, const 
H5D_t *dset, size_t elmt_size, FUNC_ENTER_NOAPI_NOINIT(H5D_mpio_spaces_xfer); /* Check args */ - assert (f); - assert (dset); + assert (io_info); + assert (io_info->dset); assert (file_space); assert (mem_space); assert (buf); - assert (IS_H5FD_MPIO(f)); + assert (IS_H5FD_MPIO(io_info->dset->ent.file)); /* Make certain we have the correct type of property list */ - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); + assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER)); /* create the MPI buffer type */ if (H5S_mpio_space_type( mem_space, elmt_size, @@ -139,21 +136,21 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size, HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI file type"); /* Get the base address of the contiguous dataset or the chunk */ - if(dset->shared->layout.type == H5D_CONTIGUOUS) - addr = H5D_contig_get_addr(dset) + mpi_file_offset; + if(io_info->dset->shared->layout.type == H5D_CONTIGUOUS) + addr = H5D_contig_get_addr(io_info->dset) + mpi_file_offset; else { haddr_t chunk_addr; /* for collective chunk IO */ - assert(dset->shared->layout.type == H5D_CHUNKED); - chunk_addr=H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),store->chunk.offset,NULL); - addr = H5F_BASE_ADDR(f) + chunk_addr + mpi_file_offset; + assert(io_info->dset->shared->layout.type == H5D_CHUNKED); + chunk_addr=H5D_istore_get_addr(io_info,NULL); + addr = H5F_BASE_ADDR(io_info->dset->ent.file) + chunk_addr + mpi_file_offset; } /* * Pass buf type, file type to the file driver. Request an MPI type * transfer (instead of an elementary byteblock transfer). */ - if(H5FD_mpi_setup_collective(dxpl_id, mpi_buf_type, mpi_file_type)<0) + if(H5FD_mpi_setup_collective(io_info->dxpl_id, mpi_buf_type, mpi_file_type)<0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties"); plist_is_setup=1; @@ -162,17 +159,17 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size, /* transfer the data */ if (do_write) { - if (H5F_block_write(f, H5FD_MEM_DRAW, addr, mpi_buf_count, dxpl_id, buf) <0) + if (H5F_block_write(io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) <0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL,"MPI write failed"); } else { - if (H5F_block_read (f, H5FD_MEM_DRAW, addr, mpi_buf_count, dxpl_id, buf) <0) + if (H5F_block_read (io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) <0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,"MPI read failed"); } done: /* Reset the dxpl settings */ if(plist_is_setup) { - if(H5FD_mpi_teardown_collective(dxpl_id)<0) + if(H5FD_mpi_teardown_collective(io_info->dxpl_id)<0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "unable to reset dxpl values"); } /* end if */ @@ -211,8 +208,8 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5D_mpio_spaces_read(H5D_io_info_t *io_info, + H5O_layout_readvv_func_t UNUSED op, size_t UNUSED nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, void *buf/*out*/) @@ -221,8 +218,8 @@ H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t FUNC_ENTER_NOAPI_NOFUNC(H5D_mpio_spaces_read); - ret_value = H5D_mpio_spaces_xfer(f, dset, elmt_size, file_space, - mem_space, dxpl_id, buf, store, 0/*read*/); + ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space, + mem_space, buf, 0/*read*/); 
FUNC_LEAVE_NOAPI(ret_value); } /* end H5D_mpio_spaces_read() */ @@ -249,8 +246,8 @@ H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t *------------------------------------------------------------------------- */ herr_t -H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5D_mpio_spaces_write(H5D_io_info_t *io_info, + H5O_layout_writevv_func_t UNUSED op, size_t UNUSED nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, const void *buf) @@ -260,8 +257,8 @@ H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t FUNC_ENTER_NOAPI_NOFUNC(H5D_mpio_spaces_write); /*OKAY: CAST DISCARDS CONST QUALIFIER*/ - ret_value = H5D_mpio_spaces_xfer(f, dset, elmt_size, file_space, - mem_space, dxpl_id, (void*)buf, store, 1/*write*/); + ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space, + mem_space, (void*)buf, 1/*write*/); FUNC_LEAVE_NOAPI(ret_value); } /* end H5D_mpio_spaces_write() */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 293d869..51ea060 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -46,16 +46,25 @@ /* Set the minimum object header size to create objects with */ #define H5D_MINHDR_SIZE 256 +/* [Simple] Macro to construct an H5D_io_info_t from its components */ +#define H5D_BUILD_IO_INFO(io_info,ds,dxpl_c,dxpl_i,str) \ + (io_info)->dset=ds; \ + (io_info)->dxpl_cache=dxpl_c; \ + (io_info)->dxpl_id=dxpl_i; \ + (io_info)->store=str + /****************************/ /* Package Private Typedefs */ /****************************/ /* The raw data chunk cache */ typedef struct H5D_rdcc_t { +#ifdef H5D_ISTORE_DEBUG unsigned ninits; /* Number of chunk creations */ unsigned nhits; /* Number of cache hits */ unsigned nmisses;/* Number of cache misses */ unsigned nflushes;/* Number of cache flushes */ +#endif /* H5D_ISTORE_DEBUG */ size_t nbytes; /* Current cached raw data in bytes */ size_t nslots; /* Number of chunk slots allocated */ struct H5D_rdcc_ent_t *head; /* Head of doubly linked list */ @@ -132,23 +141,20 @@ H5_DLL herr_t H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_ hbool_t update_time, hbool_t full_overwrite); /* Functions that operate on contiguous storage */ -H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset); -H5_DLL herr_t H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset); +H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); +H5_DLL herr_t H5D_contig_fill(H5D_t *dset, hid_t dxpl_id); /* Functions that operate on indexed storage */ -H5_DLL herr_t H5D_istore_init (H5F_t *f, H5D_t *dset); -H5_DLL herr_t H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags); -H5_DLL herr_t H5D_istore_create(H5F_t *f, hid_t dxpl_id, - H5O_layout_t *layout/*in,out*/); -H5_DLL herr_t H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset); -H5_DLL herr_t H5D_istore_allocate (H5F_t *f, hid_t dxpl_id, - const H5D_t *dset, hbool_t full_overwrite); -H5_DLL hsize_t H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset); -H5_DLL herr_t H5D_istore_prune_by_extent( H5F_t *f, - const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset); -H5_DLL herr_t H5D_istore_initialize_by_extent( H5F_t *f, - const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset); -H5_DLL herr_t H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset); +H5_DLL herr_t H5D_istore_init (const H5F_t *f, H5D_t *dset); +H5_DLL herr_t H5D_istore_flush (H5D_t *dset, hid_t dxpl_id, 
unsigned flags); +H5_DLL herr_t H5D_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout); +H5_DLL herr_t H5D_istore_dest (H5D_t *dset, hid_t dxpl_id); +H5_DLL herr_t H5D_istore_allocate (H5D_t *dset, hid_t dxpl_id, + hbool_t full_overwrite); +H5_DLL hsize_t H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id); +H5_DLL herr_t H5D_istore_prune_by_extent(H5D_io_info_t *io_info); +H5_DLL herr_t H5D_istore_initialize_by_extent(H5D_io_info_t *io_info); +H5_DLL herr_t H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id); H5_DLL herr_t H5D_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, haddr_t addr); #ifdef H5D_ISTORE_DEBUG diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 610dab6..a933204 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -168,12 +168,20 @@ typedef struct H5D_t H5D_t; /* Typedef for dataset storage information */ +typedef struct { + hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */ + hssize_t *offset; /* Chunk's coordinates in elements */ +} H5D_chunk_storage_t; + +typedef struct { + haddr_t dset_addr; /* Address of dataset in file */ + hsize_t dset_size; /* Total size of dataset in file */ +} H5D_contig_storage_t; + typedef union H5D_storage_t { H5O_efl_t efl; /* External file list information for dataset */ - struct { - hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */ - hssize_t *offset; /* Chunk's coordinates in elements */ - } chunk; + H5D_chunk_storage_t chunk; /* Chunk information for dataset */ + H5D_contig_storage_t contig; /* Contiguous information for dataset */ } H5D_storage_t; /* Typedef for cached dataset transfer property list information */ @@ -199,6 +207,14 @@ typedef struct H5D_dcpl_cache_t { H5D_fill_time_t fill_time; /* Fill time (H5D_CRT_FILL_TIME_NAME) */ } H5D_dcpl_cache_t; +/* Typedef for common raw data I/O operation info */ +typedef struct H5D_io_info_t { + H5D_t *dset; /* Pointer to dataset being operated on */ + const H5D_dxpl_cache_t *dxpl_cache; /* Pointer to cache DXPL info */ + hid_t dxpl_id; /* Original DXPL ID */ + const H5D_storage_t *store; /* Dataset storage info */ +} H5D_io_info_t; + /* Library-private functions defined in H5D package */ H5_DLL herr_t H5D_init(void); H5_DLL H5D_t *H5D_open(H5G_entry_t *ent, hid_t dxpl_id); @@ -217,39 +233,25 @@ H5_DLL herr_t H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags); H5_DLL herr_t H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache); H5_DLL herr_t H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache); -/* Functions that operate on byte sequences in memory and on disk */ -H5_DLL ssize_t H5D_seq_readvv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store, - size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], - size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], - void *buf); -H5_DLL ssize_t H5D_seq_writevv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store, - size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], - size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], - const void *buf); - /* Functions that operate on contiguous storage */ H5_DLL herr_t H5D_contig_delete(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout); H5_DLL haddr_t H5D_contig_get_addr(const H5D_t *dset); -H5_DLL ssize_t H5D_contig_readvv(H5F_t *f, 
hid_t dxpl_id, H5D_t *dset, - haddr_t dset_addr, hsize_t dset_size, +H5_DLL ssize_t H5D_contig_readvv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf); -H5_DLL ssize_t H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, - haddr_t dset_addr, hsize_t dset_size, +H5_DLL ssize_t H5D_contig_writevv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf); /* Functions that operate on compact dataset storage */ -H5_DLL ssize_t H5D_compact_readvv(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, +H5_DLL ssize_t H5D_compact_readvv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[], void *buf); -H5_DLL ssize_t H5D_compact_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset, +H5_DLL ssize_t H5D_compact_writevv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[], const void *buf); @@ -260,18 +262,15 @@ struct H5D_istore_ud1_t; /*define at H5Distore.c*/ /* Functions that operate on indexed storage */ H5_DLL herr_t H5D_istore_delete(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout); -H5_DLL ssize_t H5D_istore_readvv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store, +H5_DLL ssize_t H5D_istore_readvv(H5D_io_info_t *io_info, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf); -H5_DLL ssize_t H5D_istore_writevv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store, +H5_DLL ssize_t H5D_istore_writevv(H5D_io_info_t *io_info, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf); -H5_DLL haddr_t H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id, - const H5O_layout_t *layout, const hssize_t offset[], +H5_DLL haddr_t H5D_istore_get_addr(H5D_io_info_t *io_info, struct H5D_istore_ud1_t *_udata); H5_DLL herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int fwidth, int ndims); @@ -281,15 +280,15 @@ H5_DLL herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * str struct H5S_t; /* MPI-IO function to read directly from app buffer to file rky980813 */ -H5_DLL herr_t H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL herr_t H5D_mpio_spaces_read(H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, size_t nelmts, size_t elmt_size, const struct H5S_t *file_space, const struct H5S_t *mem_space, void *buf/*out*/); /* MPI-IO function to write directly from app buffer to file rky980813 */ -H5_DLL herr_t H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL herr_t 
H5D_mpio_spaces_write(H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, size_t nelmts, size_t elmt_size, const struct H5S_t *file_space, const struct H5S_t *mem_space, const void *buf); diff --git a/src/H5Dseq.c b/src/H5Dseq.c deleted file mode 100644 index 044bc47..0000000 --- a/src/H5Dseq.c +++ /dev/null @@ -1,268 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by the Board of Trustees of the University of Illinois. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu> - * Thursday, September 28, 2000 - * - * Purpose: Provides I/O facilities for sequences of bytes stored with various - * layout policies. These routines are similar to the H5Farray.c routines, - * these deal in terms of byte offsets and lengths, not coordinates and - * hyperslab sizes. - * - */ - -#define H5D_PACKAGE /*suppress error about including H5Dpkg */ - -/* Pablo information */ -/* (Put before include files to avoid problems with inline functions) */ -#define PABLO_MASK H5Dseq_mask - -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Datasets */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* Files */ -#include "H5FDprivate.h" /* File drivers */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MFprivate.h" /* File space management */ -#include "H5MMprivate.h" /* Memory management */ -#include "H5Oprivate.h" /* Object headers */ -#include "H5Pprivate.h" /* Property lists */ -#include "H5Vprivate.h" /* Vector and array functions */ - - -/*------------------------------------------------------------------------- - * Function: H5D_seq_readvv - * - * Purpose: Reads in a vector of byte sequences from a file dataset into a - * buffer in in memory. The data is read from file F and the array's size - * and storage information is in LAYOUT. External files are described - * according to the external file list, EFL. The vector of byte sequences - * offsets is in the DSET_OFFSET array into the dataset (offsets are in - * terms of bytes) and the size of each sequence is in the SEQ_LEN array. - * The total size of the file array is implied in the LAYOUT argument. - * Bytes read into BUF are sequentially stored in the buffer, each sequence - * from the vector stored directly after the previous. The number of - * sequences is NSEQ. - * Purpose: Reads a vector of byte sequences from a vector of byte - * sequences in a file dataset into a buffer in memory. The data is - * read from file F and the array's size and storage information is in - * LAYOUT. External files and chunks are described according to the - * storage information, STORE. 
The vector of byte sequences offsets for - * the file is in the DSET_OFFSET_ARR array into the dataset (offsets are - * in terms of bytes) and the size of each sequence is in the DSET_LEN_ARR - * array. The vector of byte sequences offsets for memory is in the - * MEM_OFFSET_ARR array into the dataset (offsets are in terms of bytes) - * and the size of each sequence is in the MEM_LEN_ARR array. The total - * size of the file array is implied in the LAYOUT argument. The maximum - * number of sequences in the file dataset and the memory buffer are - * DSET_MAX_NSEQ & MEM_MAX_NSEQ respectively. The current sequence being - * operated on in the file dataset and the memory buffer are DSET_CURR_SEQ - * & MEM_CURR_SEQ respectively. The current sequence being operated on - * will be updated as a result of the operation, as will the offsets and - * lengths of the file dataset and memory buffer sequences. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Wednesday, May 7, 2003 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ -ssize_t -H5D_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, - size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], - size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], - void *buf/*out*/) -{ - ssize_t ret_value; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_seq_readvv, FAIL); - - /* Check args */ - assert(f); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Make certain we have the correct type of property list */ - assert(dset); - assert(dset_curr_seq); - assert(*dset_curr_seq<dset_max_nseq); - assert(dset_len_arr); - assert(dset_offset_arr); - assert(mem_curr_seq); - assert(*mem_curr_seq<mem_max_nseq); - assert(mem_len_arr); - assert(mem_offset_arr); - assert(buf); - - switch (dset->shared->layout.type) { - case H5D_CONTIGUOUS: - /* Read directly from file if the dataset is in an external file */ - if (store && store->efl.nused>0) { - /* Note: We can't use data sieve buffers for datasets in external files - * because the 'addr' of all external files is set to 0 (above) and - * all datasets in external files would alias to the same set of - * file offsets, totally mixing up the data sieve buffer information. 
-QAK - */ - if((ret_value=H5O_efl_readvv(&(store->efl), - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed"); - } else { - /* Pass along the vector of sequences to read */ - if((ret_value=H5D_contig_readvv(f, dxpl_id, dset, - dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed"); - } /* end else */ - break; - - case H5D_CHUNKED: - assert(store); - if((ret_value=H5D_istore_readvv(f, dxpl_cache, dxpl_id, dset, store, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "istore read failed"); - break; - - case H5D_COMPACT: - /* Pass along the vector of sequences to read */ - if((ret_value=H5D_compact_readvv(f, dxpl_id, dset, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "compact read failed"); - break; - - default: - assert("not implemented yet" && 0); - HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout"); - } /* end switch() */ - -done: - FUNC_LEAVE_NOAPI(ret_value); -} /* H5D_seq_readvv() */ - - -/*------------------------------------------------------------------------- - * Function: H5D_seq_writevv - * - * Purpose: Writes a vector of byte sequences from a buffer in memory into - * a vector of byte sequences in a file dataset. The data is written to - * file F and the array's size and storage information is in LAYOUT. - * External files and chunks are described according to the storage - * information, STORE. The vector of byte sequences offsets for the file - * is in the DSET_OFFSET_ARR array into the dataset (offsets are in - * terms of bytes) and the size of each sequence is in the DSET_LEN_ARR - * array. The vector of byte sequences offsets for memory is in the - * MEM_OFFSET_ARR array into the dataset (offsets are in terms of bytes) - * and the size of each sequence is in the MEM_LEN_ARR array. The total - * size of the file array is implied in the LAYOUT argument. The maximum - * number of sequences in the file dataset and the memory buffer are - * DSET_MAX_NSEQ & MEM_MAX_NSEQ respectively. The current sequence being - * operated on in the file dataset and the memory buffer are DSET_CURR_SEQ - * & MEM_CURR_SEQ respectively. The current sequence being operated on - * will be updated as a result of the operation, as will the offsets and - * lengths of the file dataset and memory buffer sequences. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Friday, May 2, 2003 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ -ssize_t -H5D_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, struct H5D_t *dset, const H5D_storage_t *store, - size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], - size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], - const void *buf) -{ - ssize_t ret_value; /* Return value */ - - FUNC_ENTER_NOAPI(H5D_seq_writevv, FAIL); - - /* Check args */ - assert(f); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Make certain we have the correct type of property list */ - assert(dset); - assert(dset_curr_seq); - assert(*dset_curr_seq<dset_max_nseq); - assert(dset_len_arr); - assert(dset_offset_arr); - assert(mem_curr_seq); - assert(*mem_curr_seq<mem_max_nseq); - assert(mem_len_arr); - assert(mem_offset_arr); - assert(buf); - - switch (dset->shared->layout.type) { - case H5D_CONTIGUOUS: - /* Write directly to file if the dataset is in an external file */ - if (store && store->efl.nused>0) { - /* Note: We can't use data sieve buffers for datasets in external files - * because the 'addr' of all external files is set to 0 (above) and - * all datasets in external files would alias to the same set of - * file offsets, totally mixing up the data sieve buffer information. -QAK - */ - if ((ret_value=H5O_efl_writevv(&(store->efl), - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed"); - } else { - /* Pass along the vector of sequences to write */ - if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset, - dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed"); - } /* end else */ - break; - - case H5D_CHUNKED: - assert(store); - if((ret_value=H5D_istore_writevv(f, dxpl_cache, dxpl_id, dset, store, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "istore write failed"); - break; - - case H5D_COMPACT: - /* Pass along the vector of sequences to write */ - if((ret_value=H5D_compact_writevv(f, dxpl_id, dset, - dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, - mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, - buf))<0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "compact write failed"); - break; - - default: - assert("not implemented yet" && 0); - HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout"); - } /* end switch() */ - -done: - FUNC_LEAVE_NOAPI(ret_value); -} /* H5D_seq_writevv() */ diff --git a/src/H5Gnode.c b/src/H5Gnode.c index 1b3bbb7..bdb4ae6 100644 --- a/src/H5Gnode.c +++ b/src/H5Gnode.c @@ -76,7 +76,7 @@ static herr_t H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy); static herr_t H5G_compute_size(H5F_t *f, H5G_node_t *sym, size_t *size_ptr); /* B-tree callbacks */ -static size_t H5G_node_sizeof_rkey(H5F_t *f, const void *_udata); +static size_t H5G_node_sizeof_rkey(const H5F_t *f, const void *_udata); static H5RC_t 
*H5G_node_get_shared(H5F_t *f, const void *_udata); static herr_t H5G_node_create(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, void *_lt_key, void *_udata, void *_rt_key, @@ -170,7 +170,7 @@ H5FL_BLK_DEFINE_STATIC(grp_page); *------------------------------------------------------------------------- */ static size_t -H5G_node_sizeof_rkey(H5F_t *f, const void UNUSED * udata) +H5G_node_sizeof_rkey(const H5F_t *f, const void UNUSED * udata) { /* Use FUNC_ENTER_NOAPI_NOINIT_NOFUNC here to avoid performance issues */ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5G_node_sizeof_rkey); diff --git a/src/H5Oefl.c b/src/H5Oefl.c index bc3b293..7e753ed 100644 --- a/src/H5Oefl.c +++ b/src/H5Oefl.c @@ -20,15 +20,18 @@ #define H5F_PACKAGE /*suppress error about including H5Fpkg */ #define H5O_PACKAGE /*suppress error about including H5Opkg */ -#include "H5private.h" -#include "H5Eprivate.h" -#include "H5Fpkg.h" -#include "H5HLprivate.h" -#include "H5MMprivate.h" -#include "H5Opkg.h" /* Object header functions */ - +/* Pablo information */ +/* (Put before include files to avoid problems with inline functions) */ #define PABLO_MASK H5O_efl_mask +#include "H5private.h" /* Generic Functions */ +#include "H5Dprivate.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fpkg.h" /* File access */ +#include "H5HLprivate.h" /* Local Heaps */ +#include "H5MMprivate.h" /* Memory management */ +#include "H5Opkg.h" /* Object headers */ + /* PRIVATE PROTOTYPES */ static void *H5O_efl_decode(H5F_t *f, hid_t dxpl_id, const uint8_t *p, H5O_shared_t *sh); static herr_t H5O_efl_encode(H5F_t *f, uint8_t *p, const void *_mesg); @@ -613,11 +616,12 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5O_efl_readvv(const H5O_efl_t *efl, +H5O_efl_readvv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *_buf) { + const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */ unsigned char *buf; /* Pointer to buffer to write */ haddr_t addr; /* Actual address to read */ size_t size; /* Size of sequence in bytes */ @@ -692,11 +696,12 @@ done: *------------------------------------------------------------------------- */ ssize_t -H5O_efl_writevv(const H5O_efl_t *efl, +H5O_efl_writevv(H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *_buf) { + const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */ const unsigned char *buf; /* Pointer to buffer to write */ haddr_t addr; /* Actual address to read */ size_t size; /* Size of sequence in bytes */ diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 31017a4..347c1c3 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -143,6 +143,18 @@ typedef struct H5O_layout_compact_t { void *buf; /* Buffer for compact dataset */ } H5O_layout_compact_t; +/* Function pointers for I/O on particular types of dataset layouts */ +/* (Forward declare some structs/unions to avoid #include problems) */ +struct H5D_io_info_t; +typedef ssize_t (*H5O_layout_readvv_func_t)(struct H5D_io_info_t *io_info, + size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], + size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], + void *buf); 
+typedef ssize_t (*H5O_layout_writevv_func_t)(struct H5D_io_info_t *io_info, + size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], + size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], + const void *buf); + typedef struct H5O_layout_t { H5D_layout_t type; /* Type of layout */ unsigned version; /* Version of message */ @@ -151,6 +163,8 @@ typedef struct H5O_layout_t { H5O_layout_chunk_t chunk; /* Information for chunked layout */ H5O_layout_compact_t compact; /* Information for compact layout */ } u; + H5O_layout_readvv_func_t readvv; /* I/O routine for reading data */ + H5O_layout_writevv_func_t writevv; /* I/O routine for writing data */ } H5O_layout_t; /* Enable reading/writing "bogus" messages */ @@ -265,11 +279,11 @@ H5_DLL size_t H5O_layout_meta_size(H5F_t *f, const void *_mesg); /* EFL operators */ H5_DLL hsize_t H5O_efl_total_size(H5O_efl_t *efl); -H5_DLL ssize_t H5O_efl_readvv(const H5O_efl_t *efl, +H5_DLL ssize_t H5O_efl_readvv(struct H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf); -H5_DLL ssize_t H5O_efl_writevv(const H5O_efl_t *efl, +H5_DLL ssize_t H5O_efl_writevv(struct H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf); diff --git a/src/H5Smpio.c b/src/H5Smpio.c index b46d347..0a9a7bf 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -191,7 +191,6 @@ H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size, * *------------------------------------------------------------------------- */ -#ifndef AKC_OLD static herr_t H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size, /* out: */ @@ -453,308 +452,6 @@ done: #endif FUNC_LEAVE_NOAPI(ret_value); } -#else -/* keep this old code for now. 
*/ -static herr_t -H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size, - /* out: */ - MPI_Datatype *new_type, - size_t *count, - hsize_t *extra_offset, - hbool_t *is_derived_type ) -{ - H5S_sel_iter_t sel_iter; /* Selection iteration info */ - hbool_t sel_iter_init=0; /* Selection iteration info has been initialized */ - - struct dim { /* less hassle than malloc/free & ilk */ - hssize_t start; - hsize_t strid; - hsize_t block; - hsize_t xtent; - hsize_t count; - } d[H5S_MAX_RANK]; - - int i; - int offset[H5S_MAX_RANK]; - int max_xtent[H5S_MAX_RANK]; - H5S_hyper_dim_t *diminfo; /* [rank] */ - int rank; - int block_length[2]; - MPI_Datatype inner_type, outer_type, old_type[2]; - MPI_Aint extent_len, displacement[2]; - int mpi_code; /* MPI return code */ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type); - - /* Check args */ - assert (space); - assert(sizeof(MPI_Aint) >= sizeof(elmt_size)); - if (0==elmt_size) - goto empty; - - /* Initialize selection iterator */ - if (H5S_select_iter_init(&sel_iter, space, elmt_size)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator"); - sel_iter_init=1; /* Selection iteration info has been initialized */ - - /* Abbreviate args */ - diminfo=sel_iter.u.hyp.diminfo; - assert (diminfo); - - /* make a local copy of the dimension info so we can operate with them */ - - /* Check if this is a "flattened" regular hyperslab selection */ - if(sel_iter.u.hyp.iter_rank!=0 && sel_iter.u.hyp.iter_rank<space->extent.rank) { - /* Flattened selection */ - rank=sel_iter.u.hyp.iter_rank; - assert (rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */ - if (0==rank) - goto empty; - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: Flattened selection\n",FUNC); -#endif - for ( i=0; i<rank; ++i) { - d[i].start = diminfo[i].start+sel_iter.u.hyp.sel_off[i]; - d[i].strid = diminfo[i].stride; - d[i].block = diminfo[i].block; - d[i].count = diminfo[i].count; - d[i].xtent = sel_iter.u.hyp.size[i]; -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu", - FUNC, d[i].start, d[i].strid, d[i].count, d[i].block, d[i].xtent ); - if (i==0) - HDfprintf(stderr, " rank=%d\n", rank ); - else - HDfprintf(stderr, "\n" ); -#endif - if (0==d[i].block) - goto empty; - if (0==d[i].count) - goto empty; - if (0==d[i].xtent) - goto empty; - } - } /* end if */ - else { - /* Non-flattened selection */ - rank = space->extent.rank; - assert (rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */ - if (0==rank) - goto empty; - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: Non-flattened selection\n",FUNC); -#endif - for ( i=0; i<rank; ++i) { - d[i].start = diminfo[i].start+space->select.offset[i]; - d[i].strid = diminfo[i].stride; - d[i].block = diminfo[i].block; - d[i].count = diminfo[i].count; - d[i].xtent = space->extent.size[i]; -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu", - FUNC, d[i].start, d[i].strid, d[i].count, d[i].block, d[i].xtent ); - if (i==0) - HDfprintf(stderr, " rank=%d\n", rank ); - else - HDfprintf(stderr, "\n" ); -#endif - if (0==d[i].block) - goto empty; - if (0==d[i].count) - goto empty; - if (0==d[i].xtent) - goto empty; - } - } /* end else */ - -/********************************************************************** - Compute array "offset[rank]" which gives the offsets for a multi- - dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1). 
-**********************************************************************/ - offset[rank-1] = 1; - max_xtent[rank-1] = d[rank-1].xtent; -#ifdef H5Smpi_DEBUG - i=rank-1; - HDfprintf(stderr, " offset[%2d]=%d; max_xtent[%2d]=%d\n", - i, offset[i], i, max_xtent[i]); -#endif - for (i=rank-2; i>=0; --i) { - offset[i] = offset[i+1]*d[i+1].xtent; - max_xtent[i] = max_xtent[i+1]*d[i].xtent; -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, " offset[%2d]=%d; max_xtent[%2d]=%d\n", - i, offset[i], i, max_xtent[i]); -#endif - } - - /* Create a type covering the selected hyperslab. - * Multidimensional dataspaces are stored in row-major order. - * The type is built from the inside out, going from the - * fastest-changing (i.e., inner) dimension * to the slowest (outer). */ - -/******************************************************* -* Construct contig type for inner contig dims: -*******************************************************/ -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: Making contig type %d MPI_BYTEs\n", FUNC,elmt_size ); - for (i=rank-1; i>=0; --i) - HDfprintf(stderr, "d[%d].xtent=%Hu \n", i, d[i].xtent); -#endif - if (MPI_SUCCESS != (mpi_code= MPI_Type_contiguous( (int)elmt_size, MPI_BYTE, &inner_type ))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code); - -/******************************************************* -* Construct the type by walking the hyperslab dims -* from the inside out: -*******************************************************/ - for ( i=rank-1; i>=0; --i) { -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: Dimension i=%d \n" - "count=%Hu block=%Hu stride=%Hu\n", - FUNC, i, d[i].count, d[i].block, d[i].strid ); -#endif - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: i=%d Making vector-type \n", FUNC,i); -#endif - /**************************************** - * Build vector in current dimension: - ****************************************/ - mpi_code =MPI_Type_vector((int)(d[i].count), /* count */ - (int)(d[i].block), /* blocklength */ - (int)(d[i].strid), /* stride */ - inner_type, /* old type */ - &outer_type ); /* new type */ - - MPI_Type_free( &inner_type ); - if (mpi_code!=MPI_SUCCESS) - HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code); - - displacement[1] = (MPI_Aint)elmt_size * max_xtent[i]; - if(MPI_SUCCESS != (mpi_code = MPI_Type_extent(outer_type, &extent_len))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code); - - /************************************************* - * Restructure this datatype ("outer_type") - * so that it still starts at 0, but its extent - * is the full extent in this dimension. 
- *************************************************/ - if ((int)extent_len < displacement[1]) { - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: i=%d Extending struct type\n" - "***displacements: 0, %d\n", FUNC, i, displacement[1]); -#endif - -#ifdef H5_HAVE_MPI2 /* have MPI-2 (this function is not included in MPICH) */ - mpi_code = MPI_Type_create_resized - ( outer_type, /* old type */ - 0, /* blocklengths */ - displacement[1], /* displacements */ - &inner_type); /* new type */ -#else /* do not have MPI-2 */ - block_length[0] = 1; - block_length[1] = 1; - - displacement[0] = 0; - - old_type[0] = outer_type; - old_type[1] = MPI_UB; - mpi_code = MPI_Type_struct ( 2, /* count */ - block_length, /* blocklengths */ - displacement, /* displacements */ - old_type, /* old types */ - &inner_type); /* new type */ -#endif - - MPI_Type_free (&outer_type); - if (mpi_code!=MPI_SUCCESS) - HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code); - } - else { - inner_type = outer_type; - } - } /* end for */ -/*************************** -* End of loop, walking -* thru dimensions. -***************************/ - - - /* At this point inner_type is actually the outermost type, even for 0-trip loop */ - -/*************************************************************** -* Final task: create a struct which is a "clone" of the -* current struct, but displaced according to the d[i].start -* values given in the hyperslab description: -***************************************************************/ - displacement[0] = 0; - for (i=rank-1; i>=0; i--) - displacement[0] += d[i].start * offset[i]; - -printf("dumping MPI_BYTE\n"); -printdatatype(MPI_INT); -printdatatype(MPI_BYTE); - if (displacement[0] > 0) { - displacement[0] *= elmt_size; - block_length[0] = 1; - old_type[0] = inner_type; - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "%s: Making final struct\n***count=1:\n", FUNC); - HDfprintf(stderr, "\tblocklength[0]=%d; displacement[0]=%d\n", - block_length[0], displacement[0]); -#endif - - - if (MPI_SUCCESS != (mpi_code= MPI_Type_struct( 1, /* count */ - block_length, /* blocklengths */ - displacement, /* displacements */ - old_type, /* old type */ - new_type )) /* new type */ - ) - HMPI_GOTO_ERROR(FAIL, "couldn't create MPI struct type", mpi_code); - - if (MPI_SUCCESS != (mpi_code= MPI_Type_free (&old_type[0]))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code); - } - else { - *new_type = inner_type; - } - - if (MPI_SUCCESS != (mpi_code= MPI_Type_commit( new_type ))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code); - - /* fill in the remaining return values */ - *count = 1; /* only have to move one of these suckers! 
*/ - *extra_offset = 0; - *is_derived_type = 1; - HGOTO_DONE(SUCCEED); - -empty: - /* special case: empty hyperslab */ - *new_type = MPI_BYTE; - *count = 0; - *extra_offset = 0; - *is_derived_type = 0; - -done: - /* Release selection iterator */ - if(sel_iter_init) { - if (H5S_SELECT_ITER_RELEASE(&sel_iter)<0) - HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator"); - } /* end if */ - -#ifdef H5Smpi_DEBUG - HDfprintf(stderr, "Leave %s, count=%ld is_derived_type=%d\n", - FUNC, *count, *is_derived_type ); -#endif - FUNC_LEAVE_NOAPI(ret_value); -} -#endif /*------------------------------------------------------------------------- diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index efa9b30..4879bfd 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -117,16 +117,16 @@ typedef struct H5S_conv_t { */ /* Read from file to application w/o intermediate scratch buffer */ - herr_t (*read)(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, + herr_t (*read)(H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, void *buf/*out*/); /* Write directly from app buffer to file */ - herr_t (*write)(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, + herr_t (*write)(H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, const void *buf); @@ -234,12 +234,12 @@ H5_DLL herr_t H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t op, void *operator_data); H5_DLL herr_t H5S_select_fill(void *fill, size_t fill_size, const H5S_t *space, void *buf); -H5_DLL herr_t H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL herr_t H5S_select_fscat (H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts, const void *_buf); -H5_DLL size_t H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL size_t H5S_select_fgath (H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts, void *buf); H5_DLL herr_t H5S_select_mscat (const void *_tscat_buf, @@ -248,13 +248,13 @@ H5_DLL herr_t H5S_select_mscat (const void *_tscat_buf, H5_DLL size_t H5S_select_mgath (const void *_buf, const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts, const H5D_dxpl_cache_t *dxpl_cache, void *_tgath_buf/*out*/); -H5_DLL herr_t H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL herr_t H5S_select_read(H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, void *buf/*out*/); -H5_DLL herr_t H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5_DLL herr_t H5S_select_write(H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, const void *buf/*out*/); diff --git a/src/H5Sselect.c b/src/H5Sselect.c index e9fc5a3..af3b7c6 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -24,13 +24,14 @@ /* (Put before include files to avoid problems with 
inline functions) */ #define PABLO_MASK H5S_select_mask -#include "H5private.h" /* Generic Functions */ -#include "H5Dprivate.h" /* Datasets (for their properties) */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* ID Functions */ -#include "H5Spkg.h" /* Dataspace functions */ -#include "H5Vprivate.h" /* Vector functions */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dprivate.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5Oprivate.h" /* Object headers */ +#include "H5Spkg.h" /* Dataspaces */ +#include "H5Vprivate.h" /* Vector and array functions */ /* Local functions */ #ifdef LATER @@ -1279,9 +1280,6 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2) htri_t ret_value=TRUE; /* return value */ FUNC_ENTER_NOAPI(H5S_select_shape_same, FAIL); -#ifdef QAK -HDfprintf(stderr,"%s: Entering\n",FUNC); -#endif /* QAK */ /* Check args */ assert(space1); @@ -1291,28 +1289,15 @@ HDfprintf(stderr,"%s: Entering\n",FUNC); if (space1->extent.rank!=space2->extent.rank) HGOTO_DONE(FALSE); -#ifdef QAK -HDfprintf(stderr,"%s: Check 0.5\n",FUNC); -HDfprintf(stderr,"%s: space1 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space1)); -HDfprintf(stderr,"%s: space1->select.num_elem=%Hd\n",FUNC,space1->select.num_elem); -HDfprintf(stderr,"%s: space2 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space2)); -HDfprintf(stderr,"%s: space2->select.num_elem=%Hd\n",FUNC,space2->select.num_elem); -#endif /* QAK */ /* Check for different number of elements selected */ if(H5S_GET_SELECT_NPOINTS(space1)!=H5S_GET_SELECT_NPOINTS(space2)) HGOTO_DONE(FALSE); -#ifdef QAK -HDfprintf(stderr,"%s: Check 1.0\n",FUNC); -#endif /* QAK */ /* Check for "easy" cases before getting into generalized block iteration code */ if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_ALL && H5S_GET_SELECT_TYPE(space2)==H5S_SEL_ALL) { hsize_t dims1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */ hsize_t dims2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */ -#ifdef QAK -HDfprintf(stderr,"%s: Check 2.0\n",FUNC); -#endif /* QAK */ if(H5S_get_simple_extent_dims(space1, dims1, NULL)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality"); if(H5S_get_simple_extent_dims(space2, dims2, NULL)<0) @@ -1324,17 +1309,11 @@ HDfprintf(stderr,"%s: Check 2.0\n",FUNC); HGOTO_DONE(FALSE); } /* end if */ else if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_NONE || H5S_GET_SELECT_TYPE(space2)==H5S_SEL_NONE) { -#ifdef QAK -HDfprintf(stderr,"%s: Check 3.0\n",FUNC); -#endif /* QAK */ HGOTO_DONE(TRUE); } /* end if */ else if((H5S_GET_SELECT_TYPE(space1)==H5S_SEL_HYPERSLABS && space1->select.sel_info.hslab->diminfo_valid) && (H5S_GET_SELECT_TYPE(space2)==H5S_SEL_HYPERSLABS && space2->select.sel_info.hslab->diminfo_valid)) { -#ifdef QAK -HDfprintf(stderr,"%s: Check 4.0\n",FUNC); -#endif /* QAK */ /* Check that the shapes are the same */ for (u=0; u<space1->extent.rank; u++) { if(space1->select.sel_info.hslab->opt_diminfo[u].stride!=space2->select.sel_info.hslab->opt_diminfo[u].stride) @@ -1355,27 +1334,6 @@ HDfprintf(stderr,"%s: Check 4.0\n",FUNC); hssize_t off2[H5O_LAYOUT_NDIMS]; /* Offset of selection #2 blocks */ htri_t status1,status2; /* Status from next block checks */ unsigned first_block=1; /* Flag to indicate the first block */ -#ifdef QAK -HDfprintf(stderr,"%s: Check 10.0\n",FUNC); -HDfprintf(stderr,"%s: 
space1 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space1)); -if(space1->select.sel_info.hslab.span_lst) { - HDfprintf(stderr,"%s: Dumping space1 span list\n",FUNC); - H5S_hyper_print_spans(stderr,space1->select.sel_info.hslab.span_lst); -} /* end if */ -else { - HDfprintf(stderr,"%s: Dumping space1 diminfo\n",FUNC); - H5S_hyper_print_diminfo(stderr,space1); -} /* end else */ -HDfprintf(stderr,"%s: space2 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space2)); -if(space2->select.sel_info.hslab.span_lst) { - HDfprintf(stderr,"%s: Dumping space2 span list\n",FUNC); - H5S_hyper_print_spans(stderr,space2->select.sel_info.hslab.span_lst); -} /* end if */ -else { - HDfprintf(stderr,"%s: Dumping space2 diminfo\n",FUNC); - H5S_hyper_print_diminfo(stderr,space2); -} /* end else */ -#endif /* QAK */ /* Initialize iterator for each dataspace selection * Use '0' for element size instead of actual element size to indicate @@ -1394,28 +1352,8 @@ else { /* Get the current block for each selection iterator */ if(H5S_SELECT_ITER_BLOCK(&iter1,start1,end1)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block"); -#ifdef QAK -{ - HDfprintf(stderr,"%s: iter1 start={",FUNC); - for(u=0; u<space1->extent.rank; u++) - HDfprintf(stderr,"%Hd%s",start1[u],(u<(space1->extent.rank-1) ? ", " : "}\n")); - HDfprintf(stderr,"%s: iter1 end={",FUNC); - for(u=0; u<space1->extent.rank; u++) - HDfprintf(stderr,"%Hd%s",end1[u],(u<(space1->extent.rank-1) ? ", " : "}\n")); -} -#endif /* QAK */ if(H5S_SELECT_ITER_BLOCK(&iter2,start2,end2)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block"); -#ifdef QAK -{ - HDfprintf(stderr,"%s: iter2 start={",FUNC); - for(u=0; u<space1->extent.rank; u++) - HDfprintf(stderr,"%Hd%s",start2[u],(u<(space1->extent.rank-1) ? ", " : "}\n")); - HDfprintf(stderr,"%s: iter2 end={",FUNC); - for(u=0; u<space1->extent.rank; u++) - HDfprintf(stderr,"%Hd%s",end2[u],(u<(space1->extent.rank-1) ? ", " : "}\n")); -} -#endif /* QAK */ /* The first block only compares the sizes and sets the relative offsets for later blocks */ if(first_block) { @@ -1450,9 +1388,6 @@ else { HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block"); if((status2=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter2))<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block"); -#ifdef QAK -HDfprintf(stderr,"%s: status1=%d, status2=%d\n",FUNC,(int)status1,(int)status2); -#endif /* QAK */ /* Did we run out of blocks at the same time? 
*/ if(status1==FALSE && status2==FALSE) @@ -1479,9 +1414,7 @@ done: if (H5S_SELECT_ITER_RELEASE(&iter2)<0) HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator"); } /* end if */ -#ifdef QAK -HDfprintf(stderr,"%s: Leaving, ret_value=%d\n",FUNC,ret_value); -#endif /* QAK */ + FUNC_LEAVE_NOAPI(ret_value); } /* H5S_select_shape_same() */ @@ -1605,8 +1538,8 @@ done: *------------------------------------------------------------------------- */ herr_t -H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5S_select_fscat (H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts, const void *_buf) { @@ -1626,20 +1559,19 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI(H5S_select_fscat, FAIL); /* Check args */ - assert (f); - assert (dset); - assert (store); + assert (io_info); + assert (op); assert (space); assert (iter); assert (nelmts>0); assert (_buf); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); + assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER)); /* Allocate the vector I/O arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { - if((len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if((len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array"); - if((off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array"); } /* end if */ else { @@ -1650,7 +1582,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* Loop until all elements are written */ while(nelmts>0) { /* Get list of sequences for selection to write */ - if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0) + if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,io_info->dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed"); /* Reset the current sequence information */ @@ -1659,7 +1591,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, mem_off=0; /* Write sequence list out */ - if (H5D_seq_writevv(f, dxpl_cache, dxpl_id, dset, store, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0) + if ((*op)(io_info, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0) HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error"); /* Update buffer */ @@ -1670,7 +1602,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, } /* end while */ done: - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { if(len!=NULL) H5FL_SEQ_FREE(size_t,len); if(off!=NULL) @@ -1704,8 +1636,8 @@ done: *------------------------------------------------------------------------- */ size_t -H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5S_select_fgath (H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, const H5S_t *space, H5S_sel_iter_t *iter, 
size_t nelmts, void *_buf/*out*/) { @@ -1725,19 +1657,19 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI(H5S_select_fgath, 0); /* Check args */ - assert (f); - assert (dset); - assert (store); + assert (io_info); + assert (io_info->dset); + assert (io_info->store); assert (space); assert (iter); assert (nelmts>0); assert (_buf); /* Allocate the vector I/O arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { - if((len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if((len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "can't allocate I/O length vector array"); - if((off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "can't allocate I/O offset vector array"); } /* end if */ else { @@ -1745,10 +1677,10 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, off=_off; } /* end else */ - /* Loop until all elements are written */ + /* Loop until all elements are read */ while(nelmts>0) { - /* Get list of sequences for selection to write */ - if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0) + /* Get list of sequences for selection to read */ + if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,io_info->dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, 0, "sequence length generation failed"); /* Reset the current sequence information */ @@ -1757,7 +1689,7 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, mem_off=0; /* Read sequence list in */ - if (H5D_seq_readvv(f, dxpl_cache, dxpl_id, dset, store, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0) + if ((*op)(io_info, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0) HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, 0, "read error"); /* Update buffer */ @@ -1768,7 +1700,7 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, } /* end while */ done: - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { if(len!=NULL) H5FL_SEQ_FREE(size_t,len); if(off!=NULL) @@ -1969,8 +1901,8 @@ done: *------------------------------------------------------------------------- */ herr_t -H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5S_select_read(H5D_io_info_t *io_info, + H5O_layout_readvv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, void *buf/*out*/) @@ -1999,10 +1931,12 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI(H5S_select_read, FAIL); /* Check args */ - assert(f); - assert(store); + assert(io_info); + assert(io_info->dset); + assert(io_info->dxpl_cache); + assert(io_info->store); assert(buf); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); + assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER)); /* Initialize file iterator */ if (H5S_select_iter_init(&file_iter, file_space, elmt_size)<0) @@ -2015,14 +1949,14 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, mem_iter_init=1; /* Memory 
selection iteration info has been initialized */ /* Allocate the vector I/O arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { - if((mem_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if((mem_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array"); - if((mem_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((mem_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array"); - if((file_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if((file_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array"); - if((file_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((file_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array"); } /* end if */ else { @@ -2041,7 +1975,7 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* Check if more file sequences are needed */ if(curr_file_seq>=file_nseq) { /* Get sequences for file selection */ - if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0) + if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,io_info->dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed"); /* Start at the beginning of the sequences again */ @@ -2051,21 +1985,15 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* Check if more memory sequences are needed */ if(curr_mem_seq>=mem_nseq) { /* Get sequences for memory selection */ - if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0) + if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,io_info->dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed"); /* Start at the beginning of the sequences again */ curr_mem_seq=0; } /* end if */ -#ifdef QAK -HDfprintf(stderr,"%s: curr_file_seq=%Zu, file_nseq=%Zu\n",FUNC,curr_file_seq,file_nseq); -HDfprintf(stderr,"%s: curr_mem_seq=%Zu, mem_nseq=%Zu\n",FUNC,curr_mem_seq,mem_nseq); -HDfprintf(stderr,"%s: file_off[%Zu]=%Hu, file_len[%Zu]=%Zu\n",FUNC,curr_file_seq,file_off[curr_file_seq],curr_file_seq,file_len[curr_file_seq]); -HDfprintf(stderr,"%s: mem_off[%Zu]=%Hu, mem_len[%Zu]=%Zu\n",FUNC,curr_mem_seq,mem_off[curr_mem_seq],curr_mem_seq,mem_len[curr_mem_seq]); -#endif /* QAK */ /* Read file sequences into current memory sequence */ - if ((tmp_file_len=H5D_seq_readvv(f, dxpl_cache, dxpl_id, dset, store, + if ((tmp_file_len=(*op)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off, buf))<0) @@ -2090,7 +2018,7 @@ done: } /* end if */ /* Free vector arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { if(file_len!=NULL) H5FL_SEQ_FREE(size_t,file_len); if(file_off!=NULL) @@ -2119,8 +2047,8 @@ done: 
*------------------------------------------------------------------------- */ herr_t -H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - H5D_t *dset, const H5D_storage_t *store, +H5S_select_write(H5D_io_info_t *io_info, + H5O_layout_writevv_func_t op, size_t nelmts, size_t elmt_size, const H5S_t *file_space, const H5S_t *mem_space, const void *buf/*out*/) @@ -2147,31 +2075,23 @@ H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5S_select_write, FAIL); -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ /* Check args */ - assert(f); - assert(store); + assert(io_info); + assert(io_info->dset); + assert(io_info->store); + assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER)); assert(buf); - assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Allocate the vector I/O arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { - if((mem_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if((mem_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array"); - if((mem_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((mem_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array"); - if((file_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL) + if((file_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array"); - if((file_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL) + if((file_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array"); } /* end if */ else { @@ -2198,66 +2118,27 @@ H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, /* Loop, until all bytes are processed */ while(nelmts>0) { /* Check if more file sequences are needed */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Before file sequence time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ if(curr_file_seq>=file_nseq) { /* Get sequences for file selection */ - if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0) + if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,io_info->dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed"); /* Start at the beginning of the sequences again */ curr_file_seq=0; } /* end if */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - After file sequence time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ /* Check if more memory sequences are needed */ if(curr_mem_seq>=mem_nseq) { /* Get sequences for memory selection */ - 
if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0) + if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,io_info->dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed"); /* Start at the beginning of the sequences again */ curr_mem_seq=0; } /* end if */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - After memory sequence time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ -#ifdef QAK -{ - unsigned u; - -HDfprintf(stderr,"%s: curr_file_seq=%Zu, file_nseq=%Zu\n",FUNC,curr_file_seq,file_nseq); -HDfprintf(stderr,"%s: curr_mem_seq=%Zu, mem_nseq=%Zu\n",FUNC,curr_mem_seq,mem_nseq); -for(u=curr_file_seq; u<file_nseq; u++) - HDfprintf(stderr,"%s: file_off[%u]=%Hu, file_len[%u]=%Zu\n",FUNC,u,file_off[u],u,file_len[u]); -for(u=curr_mem_seq; u<mem_nseq; u++) - HDfprintf(stderr,"%s: mem_off[%u]=%Hu, mem_len[%u]=%Zu\n",FUNC,u,mem_off[u],u,mem_len[u]); -} -#endif /* QAK */ /* Write memory sequences into file sequences */ - if ((tmp_file_len=H5D_seq_writevv(f, dxpl_cache, dxpl_id, dset, store, + if ((tmp_file_len=(*op)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off, buf))<0) @@ -2282,7 +2163,7 @@ done: } /* end if */ /* Free vector arrays */ - if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { + if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) { if(file_len!=NULL) H5FL_SEQ_FREE(size_t,file_len); if(file_off!=NULL) @@ -2292,15 +2173,7 @@ done: if(mem_off!=NULL) H5FL_SEQ_FREE(hsize_t,mem_off); } /* end if */ -#ifdef QAK -{ - int mpi_rank; - double time; - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - time = MPI_Wtime(); - HDfprintf(stderr,"%s: rank=%d - Leaving, time=%f\n",FUNC,mpi_rank,time); -} -#endif /* QAK */ + FUNC_LEAVE_NOAPI(ret_value); } /* end H5S_select_write() */ @@ -2929,8 +2929,8 @@ done: H5T_t* H5T_open (H5G_entry_t *ent, hid_t dxpl_id) { - H5T_shared_t *shared_fo; - H5T_t *dt; + H5T_shared_t *shared_fo=NULL; + H5T_t *dt=NULL; H5T_t *ret_value; FUNC_ENTER_NOAPI(H5T_open, NULL); diff --git a/src/Makefile.in b/src/Makefile.in index 56a8ca4..98c8927 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -31,7 +31,7 @@ DISTCLEAN=libhdf5.settings ## Source and object files for the library (lexicographically)... LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5C.c H5D.c H5Dcontig.c H5Dcompact.c H5Dio.c \ - H5Distore.c H5Dmpio.c H5Dseq.c H5Dtest.c H5E.c H5F.c H5Fdbg.c H5FD.c \ + H5Distore.c H5Dmpio.c H5Dtest.c H5E.c H5F.c H5Fdbg.c H5FD.c \ H5FDcore.c H5FDfamily.c H5FDfphdf5.c H5FDgass.c H5FDlog.c H5FDmpi.c \ H5FDmpio.c H5FDmpiposix.c H5FDmulti.c H5FDsec2.c H5FDsrb.c \ H5FDstdio.c H5FDstream.c H5FL.c H5FO.c H5FP.c H5FPclient.c \ |
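The heart of this patch is the `H5D_io_info_t` bundle plus per-layout I/O callbacks: the dataset pointer, cached DXPL info, DXPL ID, and storage info now travel together, and each layout installs its own `readvv`/`writevv` function pointer in `H5O_layout_t`, so callers dispatch through the pointer instead of the layout-type switch that lived in the now-deleted H5Dseq.c. Below is a minimal, self-contained sketch of that pattern. The struct/callback shape mirrors the patch, but all `toy_*` names are hypothetical stand-ins for illustration only, not the real HDF5 internals.

```c
/* Illustrative sketch (assumes a POSIX ssize_t): mirrors the io_info +
 * function-pointer dispatch introduced by this patch.  The toy_* names
 * are hypothetical; the real code uses H5D_io_info_t and the
 * readvv/writevv members added to H5O_layout_t. */
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>          /* ssize_t */

struct toy_io_info;             /* forward declaration, like struct H5D_io_info_t */

/* Layout-specific read callback, analogous to H5O_layout_readvv_func_t */
typedef ssize_t (*toy_readvv_func_t)(struct toy_io_info *io_info,
                                     size_t nseq, const size_t len[],
                                     void *buf);

/* Bundle of the parameters that used to be passed individually
 * (dataset, cached DXPL info, DXPL ID, storage info). */
typedef struct toy_io_info {
    const char *dset_name;      /* stands in for H5D_t *dset */
    int         dxpl_id;        /* stands in for hid_t dxpl_id */
    const void *store;          /* stands in for const H5D_storage_t *store */
} toy_io_info_t;

/* Layout descriptor carrying its own I/O entry point, analogous to
 * the readvv/writevv fields added to H5O_layout_t. */
typedef struct {
    toy_readvv_func_t readvv;
} toy_layout_t;

/* One concrete "layout" implementation (think: contiguous storage). */
static ssize_t
toy_contig_readvv(toy_io_info_t *io_info, size_t nseq, const size_t len[],
                  void *buf)
{
    size_t u, total = 0;

    (void)buf;                  /* a real implementation would fill buf */
    for (u = 0; u < nseq; u++)
        total += len[u];
    printf("read %zu bytes from '%s' (dxpl %d)\n",
           total, io_info->dset_name, io_info->dxpl_id);
    return (ssize_t)total;
}

int
main(void)
{
    toy_io_info_t io_info;
    toy_layout_t  layout = { toy_contig_readvv };   /* chosen at create/open time */
    size_t        len[2] = { 64, 128 };
    char          buf[192];

    /* Equivalent of the H5D_BUILD_IO_INFO macro: fill the bundle once. */
    io_info.dset_name = "example_dataset";
    io_info.dxpl_id   = 42;
    io_info.store     = NULL;

    /* Upper layers call through the layout's function pointer instead of
     * switching on the layout type, as the deleted H5D_seq_readvv did. */
    if (layout.readvv(&io_info, 2, len, buf) < 0)
        return 1;
    return 0;
}
```

The usage follows the diff: the callback is installed once when the dataset is created or opened (the `layout.readvv=...` assignments in H5D_create/H5D_open_oid), and routines such as H5S_select_fgath/fscat then invoke `(*op)(io_info, ...)` without knowing whether the data is contiguous, chunked, compact, or external.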