author     Quincey Koziol <koziol@hdfgroup.org>    2004-09-30 03:47:13 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2004-09-30 03:47:13 (GMT)
commit     3ae90362b70ef459aca8cc6025b98f476d8c683f (patch)
tree       2ada1083843a720a04a0e0becd9232917ce8adb5 /src
parent     383300afc95489a2b991e6640da750ace0e0758b (diff)
[svn-r9343] Purpose:
    Bug fix/code cleanup

Description:
    Clean up raw data I/O code to bundle the I/O parameters (dataset, DXPL ID,
    etc) into a single struct to pass around through the dataset I/O routines,
    since they are always passed together, until very near the bottom of the
    I/O stack.

Platforms tested:
    FreeBSD 4.10 (sleipnir) w/parallel
    Solaris 2.7 (arabica)
    IRIX64 6.5 (modi4)
    h5committest
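The hunks below all follow the same pattern: routines that previously took the file pointer, DXPL ID, dataset pointer and per-layout storage information as separate arguments now receive a single H5D_io_info_t bundle, built once via the H5D_BUILD_IO_INFO() macro, and each layout installs readvv/writevv callbacks on the layout message. The standalone C sketch that follows illustrates that calling pattern only; every type, field and macro name in it is a simplified stand-in for illustration, not the actual HDF5 definitions (those live in the changed headers such as src/H5Dpkg.h and src/H5Oprivate.h and differ in detail).

#include <stdio.h>
#include <sys/types.h>

/* Simplified stand-ins for H5D_t, H5D_dxpl_cache_t and H5D_storage_t */
typedef struct dataset_t    { const char *name; } dataset_t;
typedef struct dxpl_cache_t { size_t vector_size; } dxpl_cache_t;
typedef struct storage_t    { unsigned long addr, size; } storage_t;

/* The bundle: parameters the lower I/O layers used to receive separately
 * (dataset, cached DXPL info, DXPL ID, storage info) travel in one struct. */
typedef struct io_info_t {
    dataset_t          *dset;
    const dxpl_cache_t *dxpl_cache;
    int                 dxpl_id;
    const storage_t    *store;
} io_info_t;

/* Mirrors the spirit of H5D_BUILD_IO_INFO(): fill the bundle in one place. */
#define BUILD_IO_INFO(io, ds, cache, id, st)                      \
    do { (io)->dset = (ds); (io)->dxpl_cache = (cache);           \
         (io)->dxpl_id = (id); (io)->store = (st); } while (0)

/* A per-layout "readvv" callback now takes just the bundle plus the
 * sequence arrays, instead of (file, dxpl_id, dset, addr, size, ...). */
typedef ssize_t (*readvv_t)(io_info_t *io_info, size_t nseq,
                            const size_t len[], const unsigned long off[], void *buf);

static ssize_t demo_contig_readvv(io_info_t *io_info, size_t nseq,
                                  const size_t len[], const unsigned long off[], void *buf)
{
    size_t u, total = 0;

    /* The callback reaches the dataset and storage info through the bundle */
    for (u = 0; u < nseq; u++) {
        printf("read %lu bytes of '%s' at file offset %lu (dxpl %d)\n",
               (unsigned long)len[u], io_info->dset->name,
               io_info->store->addr + off[u], io_info->dxpl_id);
        total += len[u];
    }
    (void)buf;
    return (ssize_t)total;
}

int main(void)
{
    dataset_t     dset  = { "example" };
    dxpl_cache_t  cache = { 1024 };
    storage_t     store = { 4096, 65536 };
    io_info_t     io_info;
    readvv_t      readvv = demo_contig_readvv;   /* installed per layout type */
    size_t        len[2] = { 512, 256 };
    unsigned long off[2] = { 0, 1024 };
    unsigned char buf[768];

    /* One construction site, one argument to pass down the I/O stack */
    BUILD_IO_INFO(&io_info, &dset, &cache, 42, &store);
    return readvv(&io_info, 2, len, off, buf) < 0;
}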
Diffstat (limited to 'src')
-rw-r--r--  src/H5Bprivate.h  |    2
-rw-r--r--  src/H5D.c         |   78
-rw-r--r--  src/H5Dcompact.c  |   14
-rw-r--r--  src/H5Dcontig.c   |  281
-rw-r--r--  src/H5Dio.c       |  226
-rw-r--r--  src/H5Distore.c   |  385
-rw-r--r--  src/H5Dmpio.c     |   53
-rw-r--r--  src/H5Dpkg.h      |   36
-rw-r--r--  src/H5Dprivate.h  |   63
-rw-r--r--  src/H5Dseq.c      |  272
-rw-r--r--  src/H5Gnode.c     |    6
-rw-r--r--  src/H5Oefl.c      |   23
-rw-r--r--  src/H5Oprivate.h  |   18
-rw-r--r--  src/H5Smpio.c     |  300
-rw-r--r--  src/H5Sprivate.h  |   24
-rw-r--r--  src/H5Sselect.c   |  261
-rw-r--r--  src/H5T.c         |    4
-rw-r--r--  src/Makefile.in   |    2
18 files changed, 674 insertions(+), 1374 deletions(-)
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 961c7d9..87e57f3 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -97,7 +97,7 @@ typedef struct H5B_shared_t {
typedef struct H5B_class_t {
H5B_subid_t id; /*id as found in file*/
size_t sizeof_nkey; /*size of native (memory) key*/
- size_t (*get_sizeof_rkey)(H5F_t*, const void*); /*raw key size */
+ size_t (*get_sizeof_rkey)(const H5F_t*, const void*); /*raw key size */
H5RC_t * (*get_shared)(H5F_t*, const void*); /*shared info for node */
herr_t (*new_node)(H5F_t*, hid_t, H5B_ins_t, void*, void*, void*, haddr_t*);
int (*cmp2)(H5F_t*, hid_t, void*, void*, void*); /*compare 2 keys */
diff --git a/src/H5D.c b/src/H5D.c
index 701619b..87d7491 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -2166,9 +2166,19 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
} else if (max_points * H5T_get_size (type) > max_storage) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size")
}
- } else if (ndims>0 && max_dim[0]>new_dset->shared->layout.unused.dim[0]) {
- HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset")
- }
+
+ /* Set the I/O functions for this layout type */
+ new_dset->shared->layout.readvv=H5O_efl_readvv;
+ new_dset->shared->layout.writevv=H5O_efl_writevv;
+ } /* end if */
+ else {
+ if (ndims>0 && max_dim[0]>new_dset->shared->layout.unused.dim[0])
+ HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset")
+
+ /* Set the I/O functions for this layout type */
+ new_dset->shared->layout.readvv=H5D_contig_readvv;
+ new_dset->shared->layout.writevv=H5D_contig_writevv;
+ } /* end else */
/* Compute the total size of a chunk */
tmp_size = H5S_GET_EXTENT_NPOINTS(new_dset->shared->space) *
@@ -2216,6 +2226,10 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
for (u=1, new_dset->shared->layout.u.chunk.size=new_dset->shared->layout.u.chunk.dim[0]; u<new_dset->shared->layout.u.chunk.ndims; u++)
new_dset->shared->layout.u.chunk.size *= new_dset->shared->layout.u.chunk.dim[u];
+ /* Set the I/O functions for this layout type */
+ new_dset->shared->layout.readvv=H5D_istore_readvv;
+ new_dset->shared->layout.writevv=H5D_istore_writevv;
+
/* Initialize the chunk cache for the dataset */
if(H5D_istore_init(file,new_dset)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize chunk cache")
@@ -2244,6 +2258,10 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
comp_data_size=H5O_MAX_SIZE-H5O_layout_meta_size(file, &(new_dset->shared->layout));
if(new_dset->shared->layout.u.compact.size > comp_data_size)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size")
+
+ /* Set the I/O functions for this layout type */
+ new_dset->shared->layout.readvv=H5D_compact_readvv;
+ new_dset->shared->layout.writevv=H5D_compact_writevv;
} /* end case */
break;
@@ -2280,7 +2298,7 @@ done:
if (!ret_value && new_dset && new_dset->shared) {
if( new_dset->shared) {
if(new_dset->shared->layout.type==H5D_CHUNKED && chunk_init) {
- if(H5D_istore_dest(new_dset->ent.file,H5AC_dxpl_id,new_dset)<0)
+ if(H5D_istore_dest(new_dset,H5AC_dxpl_id)<0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "unable to destroy chunk cache")
} /* end if */
if (new_dset->shared->space) {
@@ -2547,6 +2565,10 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
H5T_get_size(dataset->shared->type);
H5_ASSIGN_OVERFLOW(dataset->shared->layout.u.contig.size,tmp_size,hssize_t,hsize_t);
} /* end if */
+
+ /* Set the I/O functions for this layout type */
+ dataset->shared->layout.readvv=H5D_contig_readvv;
+ dataset->shared->layout.writevv=H5D_contig_writevv;
break;
case H5D_CHUNKED:
@@ -2569,9 +2591,16 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
if(H5D_istore_init(dataset->ent.file,dataset)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize chunk cache")
}
+
+ /* Set the I/O functions for this layout type */
+ dataset->shared->layout.readvv=H5D_istore_readvv;
+ dataset->shared->layout.writevv=H5D_istore_writevv;
break;
case H5D_COMPACT:
+ /* Set the I/O functions for this layout type */
+ dataset->shared->layout.readvv=H5D_compact_readvv;
+ dataset->shared->layout.writevv=H5D_compact_writevv;
break;
default:
@@ -2644,10 +2673,15 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
if((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) {
HDmemset(&dataset->shared->efl,0,sizeof(H5O_efl_t));
- if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->shared->efl, dxpl_id))
+ if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->shared->efl, dxpl_id)) {
if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->shared->efl) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set external file list")
- }
+
+ /* Override the I/O functions for this layout type */
+ dataset->shared->layout.readvv=H5O_efl_readvv;
+ dataset->shared->layout.writevv=H5O_efl_writevv;
+ } /* end if */
+ } /* end if */
/*
* Make sure all storage is properly initialized.
@@ -2750,7 +2784,7 @@ H5D_close(H5D_t *dataset)
case H5D_CHUNKED:
/* Flush and destroy chunks in the cache */
- if(H5D_istore_dest(dataset->ent.file,H5AC_dxpl_id,dataset)<0)
+ if(H5D_istore_dest(dataset,H5AC_dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy chunk cache")
break;
@@ -2768,9 +2802,9 @@ H5D_close(H5D_t *dataset)
default:
assert ("not implemented yet" && 0);
- #ifdef NDEBUG
+#ifdef NDEBUG
HGOTO_ERROR (H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
- #endif /* NDEBUG */
+#endif /* NDEBUG */
} /* end switch */
/*
@@ -2896,7 +2930,7 @@ H5D_extend (H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dataset->shared->layout.type)
- if(H5D_istore_update_cache(dataset->ent.file, dxpl_id, dataset) < 0)
+ if(H5D_istore_update_cache(dataset, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
/* Allocate space for the new parts of the dataset, if appropriate */
@@ -3040,7 +3074,7 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo
case H5D_CONTIGUOUS:
if(layout->u.contig.addr==HADDR_UNDEF) {
/* Reserve space in the file for the entire array */
- if (H5D_contig_create (f, dxpl_id, dset/*out*/)<0)
+ if (H5D_contig_create (f, dxpl_id, layout/*out*/)<0)
HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
/* Indicate that we set the storage addr */
@@ -3201,7 +3235,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id)
/* Don't write default fill values to external files */
/* If we will be immediately overwriting the values, don't bother to clear them */
if((dset->shared->efl.nused==0 || dset->shared->fill.buf) && !full_overwrite) {
- if (H5D_contig_fill(dset->ent.file, dxpl_id, dset)<0)
+ if (H5D_contig_fill(dset, dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
} /* end if */
break;
@@ -3211,7 +3245,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id)
* Allocate file space
* for all chunks now and initialize each chunk with the fill value.
*/
- if (H5D_istore_allocate(dset->ent.file, dxpl_id, dset, full_overwrite)<0)
+ if (H5D_istore_allocate(dset, dxpl_id, full_overwrite)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
break;
@@ -3297,7 +3331,7 @@ H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id)
if(dset->shared->layout.u.chunk.addr == HADDR_UNDEF)
ret_value=0;
else
- ret_value = H5D_istore_allocated(dset->ent.file, dxpl_id, dset);
+ ret_value = H5D_istore_allocated(dset, dxpl_id);
break;
case H5D_CONTIGUOUS:
@@ -3813,7 +3847,7 @@ done:
* Function: H5D_set_extent
*
* Purpose: Based in H5D_extend, allows change to a lower dimension,
- * calls H5S_set_extent and H5F_istore_prune_by_extent instead
+ * calls H5S_set_extent and H5D_istore_prune_by_extent instead
*
* Return: Success: SUCCEED, Failure: FAIL
*
@@ -3885,7 +3919,7 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
/* Update the index values for the cached chunks for this dataset */
if(H5D_CHUNKED == dset->shared->layout.type)
- if(H5D_istore_update_cache(dset->ent.file, dxpl_id, dset) < 0)
+ if(H5D_istore_update_cache(dset, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
/* Allocate space for the new parts of the dataset, if appropriate */
@@ -3900,6 +3934,7 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
*-------------------------------------------------------------------------
*/
if(shrink && H5D_CHUNKED == dset->shared->layout.type) {
+ H5D_io_info_t io_info; /* Dataset I/O info */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -3907,12 +3942,15 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL);
+
/* Remove excess chunks */
- if(H5D_istore_prune_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0)
+ if(H5D_istore_prune_by_extent(&io_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks ")
/* Reset the elements outsize the new dimensions, but in existing chunks */
- if(H5D_istore_initialize_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0)
+ if(H5D_istore_initialize_by_extent(&io_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to initialize chunks ")
} /* end if */
} /* end if */
@@ -3970,7 +4008,7 @@ H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(NULL==(dataset=H5I_object_verify(id_list[j], H5I_DATASET)))
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to get dataset object")
- /* flush the raw data buffer, if we have a dirty one */
+ /* Flush the raw data buffer, if we have a dirty one */
if (dataset->shared->cache.contig.sieve_buf && dataset->shared->cache.contig.sieve_dirty) {
assert(dataset->shared->layout.type!=H5D_COMPACT); /* We should never have a sieve buffer for compact storage */
@@ -3990,7 +4028,7 @@ H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags)
case H5D_CHUNKED:
/* Flush the raw data cache */
- if (H5D_istore_flush(f, dxpl_id, dataset, flags & (H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLEAR_ONLY)) < 0)
+ if (H5D_istore_flush(dataset, dxpl_id, flags & (H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLEAR_ONLY)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush raw data cache")
break;
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 76f900d..bf56acc 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -61,7 +61,7 @@ static int interface_initialize_g = 0;
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_compact_readvv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const H5D_t *dset,
+H5D_compact_readvv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
void *buf)
@@ -70,10 +70,10 @@ H5D_compact_readvv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_compact_readvv, FAIL);
- assert(dset);
+ assert(io_info->dset);
/* Use the vectorized memory copy routine to do actual work */
- if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
+ if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,io_info->dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
done:
@@ -105,7 +105,7 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_compact_writevv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5D_t *dset,
+H5D_compact_writevv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
const void *buf)
@@ -114,13 +114,13 @@ H5D_compact_writevv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_compact_writevv, FAIL);
- assert(dset);
+ assert(io_info->dset);
/* Use the vectorized memory copy routine to do actual work */
- if((ret_value=H5V_memcpyvv(dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
+ if((ret_value=H5V_memcpyvv(io_info->dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
- dset->shared->layout.u.compact.dirty = TRUE;
+ io_info->dset->shared->layout.u.compact.dirty = TRUE;
done:
FUNC_LEAVE_NOAPI(ret_value);
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index f4d6f76..be2c45b 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -41,8 +41,8 @@
#include "H5Vprivate.h" /* Vector and array functions */
/* Private prototypes */
-static herr_t H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
- hsize_t offset, size_t size, const void *buf);
+static herr_t H5D_contig_write(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache,
+ hid_t dxpl_id, const H5D_storage_t *store, hsize_t offset, size_t size, const void *buf);
/* Interface initialization */
static int interface_initialize_g = 0;
@@ -73,7 +73,7 @@ H5FL_BLK_DEFINE_STATIC(zero_fill);
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
+H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ )
{
herr_t ret_value=SUCCEED; /* Return value */
@@ -81,10 +81,10 @@ H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
/* check args */
assert(f);
- assert(dset);
+ assert(layout);
/* Allocate space for the contiguous data */
- if (HADDR_UNDEF==(dset->shared->layout.u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, dset->shared->layout.u.contig.size)))
+ if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size)))
HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space");
done:
@@ -110,8 +110,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
+H5D_contig_fill(H5D_t *dset, hid_t dxpl_id)
{
+ H5D_storage_t store; /* Union of storage info for dataset */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
hssize_t snpoints; /* Number of points in space (for error checking) */
size_t npoints; /* Number of points in space */
size_t ptsperbuf; /* Maximum # of points which fit in the buffer */
@@ -133,7 +136,6 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
FUNC_ENTER_NOAPI(H5D_contig_fill, FAIL);
/* Check args */
- assert(f);
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(dset && H5D_CONTIGUOUS==dset->shared->layout.type);
assert(H5F_addr_defined(dset->shared->layout.u.contig.addr));
@@ -142,20 +144,35 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
#ifdef H5_HAVE_PARALLEL
/* Retrieve MPI parameters */
- if(IS_H5FD_MPI(f)) {
+ if(IS_H5FD_MPI(dset->ent.file)) {
/* Get the MPI communicator */
- if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(f)))
+ if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(dset->ent.file)))
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator");
/* Get the MPI rank */
- if ((mpi_rank=H5F_mpi_get_rank(f))<0)
+ if ((mpi_rank=H5F_mpi_get_rank(dset->ent.file))<0)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank");
/* Set the MPI-capable file driver flag */
using_mpi=1;
+
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(H5AC_ind_dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
} /* end if */
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
#endif /* H5_HAVE_PARALLEL */
+ /* Initialize storage info for this dataset */
+ store.contig.dset_addr=dset->shared->layout.u.contig.addr;
+ store.contig.dset_size=dset->shared->layout.u.contig.size;
+
/* Get size of elements */
elmt_size=H5T_get_size(dset->shared->type);
assert(elmt_size>0);
@@ -220,7 +237,7 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE==mpi_rank) {
- if (H5D_contig_write(f, H5AC_ind_dxpl_id, dset, offset, size, buf)<0)
+ if (H5D_contig_write(dset, dxpl_cache, H5AC_ind_dxpl_id, &store, offset, size, buf)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset");
} /* end if */
@@ -230,7 +247,7 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
else {
#endif /* H5_HAVE_PARALLEL */
H5_CHECK_OVERFLOW(size,size_t,hsize_t);
- if (H5D_contig_write(f, dxpl_id, dset, offset, size, buf)<0)
+ if (H5D_contig_write(dset, dxpl_cache, dxpl_id, &store, offset, size, buf)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset");
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -352,9 +369,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
+H5D_contig_write(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache,
+ hid_t dxpl_id, const H5D_storage_t *store,
hsize_t offset, size_t size, const void *buf)
{
+ H5D_io_info_t io_info; /* Dataset I/O info */
hsize_t dset_off=offset; /* Offset in dataset */
size_t dset_len=size; /* Length in dataset */
size_t dset_curr_seq=0; /* "Current sequence" in dataset */
@@ -365,11 +384,13 @@ H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_contig_write, FAIL);
- assert (f);
assert (dset);
+ assert (dxpl_cache);
+ assert (store);
assert (buf);
- if (H5D_contig_writevv(f, dxpl_id, dset, dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,store);
+ if (H5D_contig_writevv(&io_info,
1, &dset_curr_seq, &dset_len, &dset_off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vector write failed");
@@ -399,13 +420,15 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
- haddr_t dset_addr, hsize_t dset_size,
+H5D_contig_readvv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *_buf)
{
- unsigned char *buf=(unsigned char *)_buf; /* Pointer to buffer to fill */
+ H5F_t *file=io_info->dset->ent.file; /* File for dataset */
+ H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */
+ const H5D_contig_storage_t *store_contig=&(io_info->store->contig); /* Contiguous storage info for this I/O operation */
+ unsigned char *buf=(unsigned char *)_buf; /* Pointer to buffer to fill */
haddr_t addr; /* Actual address to read */
size_t size; /* Size of sequence in bytes */
size_t u; /* Counting variable */
@@ -415,15 +438,16 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_contig_readvv, FAIL);
/* Check args */
- assert(f);
- assert(dset);
+ assert(io_info);
+ assert(io_info->dset);
+ assert(io_info->store);
assert(buf);
/* Check if data sieving is enabled */
- if(H5F_HAS_FEATURE(f,H5FD_FEAT_DATA_SIEVE)) {
- haddr_t sieve_start, sieve_end; /* Start & end locations of sieve buffer */
+ if(H5F_HAS_FEATURE(file,H5FD_FEAT_DATA_SIEVE)) {
+ haddr_t sieve_start=HADDR_UNDEF, sieve_end=HADDR_UNDEF; /* Start & end locations of sieve buffer */
haddr_t contig_end; /* End locations of block to write */
- size_t sieve_size; /* size of sieve buffer */
+ size_t sieve_size=(size_t)-1; /* size of sieve buffer */
haddr_t abs_eoa; /* Absolute end of file address */
haddr_t rel_eoa; /* Relative end of file address */
hsize_t max_data; /* Actual maximum size of data to cache */
@@ -433,9 +457,9 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
v=*mem_curr_seq;
/* Stash local copies of these value */
- if(dset->shared->cache.contig.sieve_buf!=NULL) {
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ if(dset_contig->sieve_buf!=NULL) {
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
} /* end if */
@@ -448,55 +472,52 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
size=dset_len_arr[u];
/* Compute offset on disk */
- addr=dset_addr+dset_offset_arr[u];
+ addr=store_contig->dset_addr+dset_offset_arr[u];
/* Compute offset in memory */
buf = (unsigned char *)_buf + mem_offset_arr[v];
- /* No data sieve buffer yet, go allocate one */
- if(dset->shared->cache.contig.sieve_buf==NULL) {
-
+ /* Check if the sieve buffer is allocated yet */
+ if(dset_contig->sieve_buf==NULL) {
/* Check if we can actually hold the I/O request in the sieve buffer */
-
- if(size>dset->shared->cache.contig.sieve_buf_size) {
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if(size>dset_contig->sieve_buf_size) {
+ if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
} /* end if */
else {
/* Allocate room for the data sieve buffer */
- if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size)))
+ if (NULL==(dset_contig->sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset_contig->sieve_buf_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
/* Determine the new sieve buffer size & location */
- dset->shared->cache.contig.sieve_loc=addr;
+ dset_contig->sieve_loc=addr;
/* Make certain we don't read off the end of the file */
- if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
+ if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size");
/* Adjust absolute EOA address to relative EOA address */
- rel_eoa=abs_eoa-H5F_get_base_addr(f);
+ rel_eoa=abs_eoa-H5F_get_base_addr(file);
/* Set up the buffer parameters */
- max_data=dset_size-dset_offset_arr[u];
+ max_data=store_contig->dset_size-dset_offset_arr[u];
/* Compute the size of the sieve buffer */
- H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,
- max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
+ H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t);
/* Read the new sieve buffer */
- if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
- HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size);
+ HDmemcpy(buf,dset_contig->sieve_buf,size);
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
/* Stash local copies of these value */
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
} /* end else */
} /* end if */
@@ -506,7 +527,7 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
/* If entire read is within the sieve buffer, read it from the buffer */
if(addr>=sieve_start && contig_end<sieve_end) {
- unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start);
+ unsigned char *base_sieve_buf=dset_contig->sieve_buf+(addr-sieve_start);
/* Grab the data out of the buffer */
HDmemcpy(buf,base_sieve_buf,size);
@@ -514,68 +535,68 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
/* Entire request is not within this data sieve buffer */
else {
/* Check if we can actually hold the I/O request in the sieve buffer */
- if(size>dset->shared->cache.contig.sieve_buf_size) {
+ if(size>dset_contig->sieve_buf_size) {
/* Check for any overlap with the current sieve buffer */
if((sieve_start>=addr && sieve_start<(contig_end+1))
|| ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) {
/* Flush the sieve buffer, if it's dirty */
- if(dset->shared->cache.contig.sieve_dirty) {
+ if(dset_contig->sieve_dirty) {
/* Write to file */
- if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
} /* end if */
} /* end if */
/* Read directly into the user's buffer */
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
} /* end if */
/* Element size fits within the buffer size */
else {
/* Flush the sieve buffer if it's dirty */
- if(dset->shared->cache.contig.sieve_dirty) {
+ if(dset_contig->sieve_dirty) {
/* Write to file */
- if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
} /* end if */
/* Determine the new sieve buffer size & location */
- dset->shared->cache.contig.sieve_loc=addr;
+ dset_contig->sieve_loc=addr;
/* Make certain we don't read off the end of the file */
- if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
+ if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size");
/* Adjust absolute EOA address to relative EOA address */
- rel_eoa=abs_eoa-H5F_get_base_addr(f);
+ rel_eoa=abs_eoa-H5F_get_base_addr(file);
/* Only need this when resizing sieve buffer */
- max_data=dset_size-dset_offset_arr[u];
+ max_data=store_contig->dset_size-dset_offset_arr[u];
/* Compute the size of the sieve buffer */
/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
- H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
+ H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t);
/* Update local copies of sieve information */
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
/* Read the new sieve buffer */
- if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
- HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size);
+ HDmemcpy(buf,dset_contig->sieve_buf,size);
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
} /* end else */
} /* end else */
} /* end else */
@@ -606,13 +627,13 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
size=dset_len_arr[u];
/* Compute offset on disk */
- addr=dset_addr+dset_offset_arr[u];
+ addr=store_contig->dset_addr+dset_offset_arr[u];
/* Compute offset in memory */
buf = (unsigned char *)_buf + mem_offset_arr[v];
/* Write data */
- if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Update memory information */
@@ -662,12 +683,14 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
- haddr_t dset_addr, hsize_t dset_size,
+H5D_contig_writevv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *_buf)
{
+ H5F_t *file=io_info->dset->ent.file; /* File for dataset */
+ H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */
+ const H5D_contig_storage_t *store_contig=&(io_info->store->contig); /* Contiguous storage info for this I/O operation */
const unsigned char *buf=_buf; /* Pointer to buffer to fill */
haddr_t addr; /* Actual address to read */
size_t size; /* Size of sequence in bytes */
@@ -678,15 +701,16 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_contig_writevv, FAIL);
/* Check args */
- assert(f);
- assert(dset);
+ assert(io_info);
+ assert(io_info->dset);
+ assert(io_info->store);
assert(buf);
/* Check if data sieving is enabled */
- if(H5F_HAS_FEATURE(f,H5FD_FEAT_DATA_SIEVE)) {
- haddr_t sieve_start, sieve_end; /* Start & end locations of sieve buffer */
+ if(H5F_HAS_FEATURE(file,H5FD_FEAT_DATA_SIEVE)) {
+ haddr_t sieve_start=HADDR_UNDEF, sieve_end=HADDR_UNDEF; /* Start & end locations of sieve buffer */
haddr_t contig_end; /* End locations of block to write */
- size_t sieve_size; /* size of sieve buffer */
+ size_t sieve_size=(size_t)-1; /* size of sieve buffer */
haddr_t abs_eoa; /* Absolute end of file address */
haddr_t rel_eoa; /* Relative end of file address */
hsize_t max_data; /* Actual maximum size of data to cache */
@@ -695,10 +719,10 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
u=*dset_curr_seq;
v=*mem_curr_seq;
- /* Stash local copies of these value */
- if(dset->shared->cache.contig.sieve_buf!=NULL) {
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ /* Stash local copies of these values */
+ if(dset_contig->sieve_buf!=NULL) {
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
} /* end if */
@@ -711,55 +735,55 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
size=dset_len_arr[u];
/* Compute offset on disk */
- addr=dset_addr+dset_offset_arr[u];
+ addr=store_contig->dset_addr+dset_offset_arr[u];
/* Compute offset in memory */
buf = (const unsigned char *)_buf + mem_offset_arr[v];
/* No data sieve buffer yet, go allocate one */
- if(dset->shared->cache.contig.sieve_buf==NULL) {
+ if(dset_contig->sieve_buf==NULL) {
/* Check if we can actually hold the I/O request in the sieve buffer */
- if(size>dset->shared->cache.contig.sieve_buf_size) {
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if(size>dset_contig->sieve_buf_size) {
+ if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
} /* end if */
else {
/* Allocate room for the data sieve buffer */
- if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size)))
+ if (NULL==(dset_contig->sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset_contig->sieve_buf_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
/* Determine the new sieve buffer size & location */
- dset->shared->cache.contig.sieve_loc=addr;
+ dset_contig->sieve_loc=addr;
/* Make certain we don't read off the end of the file */
- if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
+ if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size");
/* Adjust absolute EOA address to relative EOA address */
- rel_eoa=abs_eoa-H5F_get_base_addr(f);
+ rel_eoa=abs_eoa-H5F_get_base_addr(file);
/* Set up the buffer parameters */
- max_data=dset_size-dset_offset_arr[u];
+ max_data=store_contig->dset_size-dset_offset_arr[u];
/* Compute the size of the sieve buffer */
- H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
+ H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t);
/* Check if there is any point in reading the data from the file */
- if(dset->shared->cache.contig.sieve_size>size) {
+ if(dset_contig->sieve_size>size) {
/* Read the new sieve buffer */
- if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
} /* end if */
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
- HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
+ HDmemcpy(dset_contig->sieve_buf,buf,size);
/* Set sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=1;
+ dset_contig->sieve_dirty=1;
- /* Stash local copies of these value */
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ /* Stash local copies of these values */
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
} /* end else */
} /* end if */
@@ -769,118 +793,119 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
/* If entire write is within the sieve buffer, write it to the buffer */
if(addr>=sieve_start && contig_end<sieve_end) {
- unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start);
+ unsigned char *base_sieve_buf=dset_contig->sieve_buf+(addr-sieve_start);
/* Put the data into the sieve buffer */
HDmemcpy(base_sieve_buf,buf,size);
+
/* Set sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=1;
+ dset_contig->sieve_dirty=1;
} /* end if */
/* Entire request is not within this data sieve buffer */
else {
/* Check if we can actually hold the I/O request in the sieve buffer */
- if(size>dset->shared->cache.contig.sieve_buf_size) {
+ if(size>dset_contig->sieve_buf_size) {
/* Check for any overlap with the current sieve buffer */
if((sieve_start>=addr && sieve_start<(contig_end+1))
|| ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) {
/* Flush the sieve buffer, if it's dirty */
- if(dset->shared->cache.contig.sieve_dirty) {
+ if(dset_contig->sieve_dirty) {
/* Write to file */
- if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
} /* end if */
/* Force the sieve buffer to be re-read the next time */
- dset->shared->cache.contig.sieve_loc=HADDR_UNDEF;
- dset->shared->cache.contig.sieve_size=0;
+ dset_contig->sieve_loc=HADDR_UNDEF;
+ dset_contig->sieve_size=0;
} /* end if */
/* Write directly from the user's buffer */
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
} /* end if */
/* Element size fits within the buffer size */
else {
/* Check if it is possible to (exactly) prepend or append to existing (dirty) sieve buffer */
if(((addr+size)==sieve_start || addr==sieve_end) &&
- (size+sieve_size)<=dset->shared->cache.contig.sieve_buf_size &&
- dset->shared->cache.contig.sieve_dirty) {
+ (size+sieve_size)<=dset_contig->sieve_buf_size &&
+ dset_contig->sieve_dirty) {
/* Prepend to existing sieve buffer */
if((addr+size)==sieve_start) {
/* Move existing sieve information to correct location */
- HDmemmove(dset->shared->cache.contig.sieve_buf+size,dset->shared->cache.contig.sieve_buf,sieve_size);
+ HDmemmove(dset_contig->sieve_buf+size,dset_contig->sieve_buf,sieve_size);
/* Copy in new information (must be first in sieve buffer) */
- HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
+ HDmemcpy(dset_contig->sieve_buf,buf,size);
/* Adjust sieve location */
- dset->shared->cache.contig.sieve_loc=addr;
+ dset_contig->sieve_loc=addr;
} /* end if */
/* Append to existing sieve buffer */
else {
/* Copy in new information */
- HDmemcpy(dset->shared->cache.contig.sieve_buf+sieve_size,buf,size);
+ HDmemcpy(dset_contig->sieve_buf+sieve_size,buf,size);
} /* end else */
/* Adjust sieve size */
- dset->shared->cache.contig.sieve_size += size;
+ dset_contig->sieve_size += size;
/* Update local copies of sieve information */
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
} /* end if */
/* Can't add the new data onto the existing sieve buffer */
else {
/* Flush the sieve buffer if it's dirty */
- if(dset->shared->cache.contig.sieve_dirty) {
+ if(dset_contig->sieve_dirty) {
/* Write to file */
- if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, sieve_start, sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Reset sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=0;
+ dset_contig->sieve_dirty=0;
} /* end if */
/* Determine the new sieve buffer size & location */
- dset->shared->cache.contig.sieve_loc=addr;
+ dset_contig->sieve_loc=addr;
/* Make certain we don't read off the end of the file */
- if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
+ if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(file)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size");
/* Adjust absolute EOA address to relative EOA address */
- rel_eoa=abs_eoa-H5F_get_base_addr(f);
+ rel_eoa=abs_eoa-H5F_get_base_addr(file);
/* Only need this when resizing sieve buffer */
- max_data=dset_size-dset_offset_arr[u];
+ max_data=store_contig->dset_size-dset_offset_arr[u];
/* Compute the size of the sieve buffer */
/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
- H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
+ H5_ASSIGN_OVERFLOW(dset_contig->sieve_size,MIN3(rel_eoa-dset_contig->sieve_loc,max_data,dset_contig->sieve_buf_size),hsize_t,size_t);
/* Update local copies of sieve information */
- sieve_start=dset->shared->cache.contig.sieve_loc;
- sieve_size=dset->shared->cache.contig.sieve_size;
+ sieve_start=dset_contig->sieve_loc;
+ sieve_size=dset_contig->sieve_size;
sieve_end=sieve_start+sieve_size;
/* Check if there is any point in reading the data from the file */
- if(dset->shared->cache.contig.sieve_size>size) {
+ if(dset_contig->sieve_size>size) {
/* Read the new sieve buffer */
- if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
+ if (H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, io_info->dxpl_id, dset_contig->sieve_buf)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
} /* end if */
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
- HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
+ HDmemcpy(dset_contig->sieve_buf,buf,size);
/* Set sieve buffer dirty flag */
- dset->shared->cache.contig.sieve_dirty=1;
+ dset_contig->sieve_dirty=1;
} /* end else */
} /* end else */
} /* end else */
@@ -912,13 +937,13 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
size=dset_len_arr[u];
/* Compute offset on disk */
- addr=dset_addr+dset_offset_arr[u];
+ addr=store_contig->dset_addr+dset_offset_arr[u];
/* Compute offset in memory */
buf = (const unsigned char *)_buf + mem_offset_arr[v];
/* Write data */
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
+ if (H5F_block_write(file, H5FD_MEM_DRAW, addr, size, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
/* Update memory information */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index bf8ecaa..8130c2d 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -643,8 +643,10 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines with be parallel */
#ifdef H5_HAVE_PARALLEL
hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
int prop_value,new_value;
htri_t check_prop;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
#endif /*H5_HAVE_PARALLEL*/
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -870,8 +872,10 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines with be parallel */
#ifdef H5_HAVE_PARALLEL
hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
int prop_value,new_value;
htri_t check_prop;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
#endif /*H5_HAVE_PARALLEL*/
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -1117,10 +1121,23 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
uint8_t *bkg_buf = NULL; /*background buffer */
hsize_t smine_start; /*strip mine start loc */
size_t n, smine_nelmts; /*elements per strip */
+ H5D_storage_t store; /*union of storage info for dataset */
+ H5D_io_info_t io_info; /* Dataset I/O info */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_contig_read)
+ /* Initialize storage info for this dataset */
+ if (dataset->shared->efl.nused>0)
+ HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t));
+ else {
+ store.contig.dset_addr=dataset->shared->layout.u.contig.addr;
+ store.contig.dset_size=dataset->shared->layout.u.contig.size;
+ } /* end if */
+
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store);
+
/*
* If there is no type conversion then read directly into the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1135,8 +1152,7 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
|| dataset->shared->efl.nused>0 || 0 == nelmts
|| dataset->shared->layout.type==H5D_COMPACT);
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
- status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, (H5D_storage_t *)&(dataset->shared->efl),
+ status = (sconv->read)(&io_info, dataset->shared->layout.readvv,
(size_t)nelmts, H5T_get_size(dataset->shared->type),
file_space, mem_space,
buf/*out*/);
@@ -1242,8 +1258,7 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|| dataset->shared->efl.nused>0 ||
dataset->shared->layout.type==H5D_COMPACT);
- n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, (H5D_storage_t *)&(dataset->shared->efl),
+ n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv,
file_space, &file_iter, smine_nelmts,
tconv_buf/*out*/);
@@ -1361,10 +1376,23 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
uint8_t *bkg_buf = NULL; /*background buffer */
hsize_t smine_start; /*strip mine start loc */
size_t n, smine_nelmts; /*elements per strip */
+ H5D_storage_t store; /*union of storage info for dataset */
+ H5D_io_info_t io_info; /* Dataset I/O info */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_contig_write)
+ /* Initialize storage info for this dataset */
+ if (dataset->shared->efl.nused>0)
+ HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t));
+ else {
+ store.contig.dset_addr=dataset->shared->layout.u.contig.addr;
+ store.contig.dset_size=dataset->shared->layout.u.contig.size;
+ } /* end if */
+
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store);
+
/*
* If there is no type conversion then write directly from the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1374,8 +1402,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
H5_timer_begin(&timer);
#endif
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
- status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, (H5D_storage_t *)&(dataset->shared->efl),
+ status = (sconv->write)(&io_info, dataset->shared->layout.writevv,
(size_t)nelmts, H5T_get_size(dataset->shared->type),
file_space, mem_space,
buf);
@@ -1494,8 +1521,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, (H5D_storage_t *)&(dataset->shared->efl),
+ n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv,
file_space, &bkg_iter, smine_nelmts,
bkg_buf/*out*/);
@@ -1520,8 +1546,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- status = H5S_select_fscat(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, (H5D_storage_t *)&(dataset->shared->efl),
+ status = H5S_select_fscat(&io_info, dataset->shared->layout.writevv,
file_space, &file_iter, smine_nelmts,
tconv_buf);
#ifdef H5S_DEBUG
@@ -1603,6 +1628,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
uint8_t *tconv_buf = NULL; /*data type conv buffer */
uint8_t *bkg_buf = NULL; /*background buffer */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
+ H5D_io_info_t io_info; /* Dataset I/O info */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read)
@@ -1611,6 +1637,9 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store);
+
/*
* If there is no type conversion then read directly into the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1640,8 +1669,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
store.chunk.index = chunk_info->index;
/* Perform the actual read operation */
- status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, &store,
+ status = (sconv->read)(&io_info,dataset->shared->layout.readvv,
chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
chunk_info->fspace, chunk_info->mspace,
buf);
@@ -1766,8 +1794,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|| dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT);
- n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, &store,
+ n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv,
chunk_info->fspace, &file_iter, smine_nelmts,
tconv_buf/*out*/);
@@ -1913,32 +1940,18 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
uint8_t *tconv_buf = NULL; /*data type conv buffer */
uint8_t *bkg_buf = NULL; /*background buffer */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
+ H5D_io_info_t io_info; /* Dataset I/O info */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write)
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
/* Map elements between file and memory for each chunk*/
if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping")
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - After creating chunk map, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dataset,dxpl_cache,dxpl_id,&store);
+
/*
* If there is no type conversion then write directly from the
* application's buffer. This saves at least one mem-to-mem copy.
@@ -1947,15 +1960,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Performing optimized I/O, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
/* Get first node in chunk tree */
chunk_node=H5TB_first(fm.fsel->root);
@@ -1971,8 +1975,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
store.chunk.index = chunk_info->index;
/* Perform the actual write operation */
- status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, &store,
+ status = (sconv->write)(&io_info, dataset->shared->layout.writevv,
chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
chunk_info->fspace, chunk_info->mspace,
buf);
@@ -1984,15 +1987,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
/* Get the next chunk node in the tree */
chunk_node=H5TB_next(chunk_node);
} /* end while */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Done performing optimized I/O, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
#ifdef H5S_DEBUG
H5_timer_end(&(sconv->stats[0].write_timer), &timer);
@@ -2003,13 +1997,6 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
/* direct xfer accomplished successfully */
HGOTO_DONE(SUCCEED)
} /* end if */
-#ifdef QAK
-{
- int mpi_rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- HDfprintf(stderr,"%s: rank=%d - Performing NON-optimized I/O\n",FUNC,mpi_rank);
-}
-#endif /* QAK */
/*
* This is the general case (type conversion, usually).
@@ -2128,8 +2115,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, &store,
+ n = H5S_select_fgath(&io_info, dataset->shared->layout.readvv,
chunk_info->fspace, &bkg_iter, smine_nelmts,
bkg_buf/*out*/);
@@ -2155,8 +2141,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- status = H5S_select_fscat(dataset->ent.file, dxpl_cache, dxpl_id,
- dataset, &store,
+ status = H5S_select_fscat(&io_info, dataset->shared->layout.writevv,
chunk_info->fspace, &file_iter, smine_nelmts,
tconv_buf);
@@ -2349,15 +2334,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_map)
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
/* Get layout for dataset */
fm->layout = &(dataset->shared->layout);
@@ -2440,28 +2416,9 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
fm->last_chunk_info=NULL;
} /* end if */
else {
-#ifdef QAK
- {
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Before creating chunk selections, time=%f\n",FUNC,mpi_rank,time);
- }
-#endif /* QAK */
/* Build the file selection for each chunk */
if(H5D_create_chunk_file_map_hyper(fm)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections")
-#ifdef QAK
- {
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - After creating file chunk selections, time=%f\n",FUNC,mpi_rank,time);
- HDfprintf(stderr,"%s: rank=%d - H5S_select_shape_same=%d\n",FUNC,mpi_rank,H5S_select_shape_same(file_space,equiv_mspace));
- }
-#endif /* QAK */
/* Clean file chunks' hyperslab span "scratch" information */
curr_node=H5TB_first(fm->fsel->root);
@@ -2544,16 +2501,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
} /* end if */
} /* end else */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - After creating chunk selections, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
-
done:
/* Release the [potentially partially built] chunk mapping information if an error occurs */
if(ret_value<0) {
@@ -2583,15 +2530,6 @@ done:
HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID")
} /* end if */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Leaving, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_create_chunk_map() */
@@ -2877,18 +2815,6 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_mem_map_hyper)
-#ifdef QAK
-{
- hsize_t mem_dims[H5O_LAYOUT_NDIMS]; /* Dimensions of memory space */
-
- if(H5S_get_simple_extent_dims(fm->mem_space, mem_dims, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
-
- HDfprintf(stderr,"%s: mem_dims={",FUNC);
- for(u=0; u<fm->m_ndims; u++)
- HDfprintf(stderr,"%Hd%s",mem_dims[u],(u<(fm->m_ndims-1) ? ", " : "}\n"));
-}
-#endif /* QAK */
/* Sanity check */
assert(fm->f_ndims>0);
@@ -2932,22 +2858,6 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
assert(fm->m_ndims==fm->f_ndims);
for(u=0; u<fm->f_ndims; u++)
adjust[u]=file_sel_start[u]-mem_sel_start[u];
-#ifdef QAK
- {
- int mpi_rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- if(mpi_rank==1) {
- HDfprintf(stderr,"%s: rank=%d - adjust={",FUNC,mpi_rank);
- for(u=0; u<fm->f_ndims; u++)
- HDfprintf(stderr,"%Hd%s",adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n"));
- } /* end if */
- }
-#endif /* QAK */
-#ifdef QAK
- HDfprintf(stderr,"%s: adjust={",FUNC);
- for(u=0; u<fm->f_ndims; u++)
- HDfprintf(stderr,"%Hd%s",adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n"));
-#endif /* QAK */
/* Iterate over each chunk in the chunk list */
curr_node=H5TB_first(fm->fsel->root);
@@ -2975,49 +2885,11 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
/* Compensate for the chunk offset */
for(u=0; u<fm->f_ndims; u++)
chunk_adjust[u]=adjust[u]-chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */
-#ifdef QAK
- {
- int mpi_rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- if(mpi_rank==1) {
- HDfprintf(stderr,"%s: rank=%d - Before adjusting memory selection\n",FUNC,mpi_rank);
- HDfprintf(stderr,"%s: rank=%d - chunk_adjust={",FUNC,mpi_rank);
- for(u=0; u<fm->f_ndims; u++)
- HDfprintf(stderr,"%Hd%s",chunk_adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n"));
- } /* end if */
- }
-#endif /* QAK */
-#ifdef QAK
- HDfprintf(stderr,"%s: Before adjusting memory selection\n",FUNC);
- HDfprintf(stderr,"%s: chunk_adjust={",FUNC);
- for(u=0; u<fm->f_ndims; u++)
- HDfprintf(stderr,"%Hd%s",chunk_adjust[u],(u<(fm->f_ndims-1) ? ", " : "}\n"));
-#endif /* QAK */
+
/* Adjust the selection */
if(H5S_hyper_adjust(chunk_info->mspace,chunk_adjust)<0) /*lint !e772 The chunk_adjust array will always be initialized */
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection")
-#ifdef QAK
- {
- int mpi_rank;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- if(mpi_rank==1)
- HDfprintf(stderr,"%s: rank=%d - After adjusting memory selection\n",FUNC,mpi_rank);
- }
-#endif /* QAK */
-#ifdef QAK
- HDfprintf(stderr,"%s: After adjusting memory selection\n",FUNC);
- {
- hsize_t mem_dims[H5O_LAYOUT_NDIMS]; /* Dimensions of memory space */
-
- if(H5S_get_simple_extent_dims(chunk_info->mspace, mem_dims, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
-
- HDfprintf(stderr,"%s: mem_dims={",FUNC);
- for(u=0; u<fm->m_ndims; u++)
- HDfprintf(stderr,"%Hd%s",mem_dims[u],(u<(fm->m_ndims-1) ? ", " : "}\n"));
- }
-#endif /* QAK */
/* Get the next chunk node in the TBBT */
curr_node=H5TB_next(curr_node);
} /* end while */
diff --git a/src/H5Distore.c b/src/H5Distore.c
index e670ef7..9d991d3 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -160,7 +160,7 @@ static int H5D_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr
void *_rt_key, void *_udata);
/* B-tree callbacks */
-static size_t H5D_istore_sizeof_rkey(H5F_t *f, const void *_udata);
+static size_t H5D_istore_sizeof_rkey(const H5F_t *f, const void *_udata);
static H5RC_t *H5D_istore_get_shared(H5F_t *f, const void *_udata);
static herr_t H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t, void *_lt_key,
void *_udata, void *_rt_key,
@@ -245,7 +245,7 @@ H5FL_BLK_DEFINE_STATIC(chunk_page);
*-------------------------------------------------------------------------
*/
static size_t
-H5D_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata)
+H5D_istore_sizeof_rkey(const H5F_t UNUSED *f, const void *_udata)
{
const H5D_istore_ud1_t *udata = (const H5D_istore_ud1_t *) _udata;
size_t nbytes;
@@ -930,7 +930,7 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_init (H5F_t *f, H5D_t *dset)
+H5D_istore_init (const H5F_t *f, H5D_t *dset)
{
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
herr_t ret_value=SUCCEED; /* Return value */
@@ -971,8 +971,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset)
+H5D_istore_flush_entry(H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_t reset)
{
herr_t ret_value=SUCCEED; /*return value */
unsigned u; /*counters */
@@ -982,7 +981,8 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_flush_entry);
- assert(f);
+ assert(io_info);
+ assert(io_info->dset);
assert(ent);
assert(!ent->locked);
@@ -990,16 +990,16 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
if (ent->dirty) {
H5D_istore_ud1_t udata; /*pass through B-tree */
- udata.mesg = &dset->shared->layout;
+ udata.mesg = &io_info->dset->shared->layout;
udata.key.filter_mask = 0;
udata.addr = HADDR_UNDEF;
udata.key.nbytes = ent->chunk_size;
- for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
+ for (u=0; u<io_info->dset->shared->layout.u.chunk.ndims; u++)
udata.key.offset[u] = ent->offset[u];
alloc = ent->alloc_size;
/* Should the chunk be filtered before writing it to disk? */
- if (dset->shared->dcpl_cache.pline.nused) {
+ if (io_info->dset->shared->dcpl_cache.pline.nused) {
if (!reset) {
/*
* Copy the chunk to a new buffer before running it through
@@ -1021,8 +1021,8 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
point_of_no_return = TRUE;
ent->chunk = NULL;
}
- if (H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.key.filter_mask), dxpl_cache->err_detect,
- dxpl_cache->filter_cb, &(udata.key.nbytes), &alloc, &buf)<0)
+ if (H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.key.filter_mask), io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb, &(udata.key.nbytes), &alloc, &buf)<0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
}
@@ -1030,14 +1030,16 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
* Create the chunk if it doesn't exist, or reallocate the chunk if
* its size changed. Then write the data into the file.
*/
- if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
+ if (H5B_insert(io_info->dset->ent.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
- if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0)
+ if (H5F_block_write(io_info->dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, io_info->dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
/* Mark cache entry as clean */
ent->dirty = FALSE;
- dset->shared->cache.chunk.nflushes++;
+#ifdef H5D_ISTORE_DEBUG
+ io_info->dset->shared->cache.chunk.nflushes++;
+#endif /* H5D_ISTORE_DEBUG */
} /* end if */
/* Reset, but do not free or removed from list */
@@ -1046,7 +1048,7 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
if(buf==ent->chunk)
buf = NULL;
if(ent->chunk!=NULL)
- ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
+ ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline));
} /* end if */
done:
@@ -1062,7 +1064,7 @@ done:
*/
if (ret_value<0 && point_of_no_return) {
if(ent->chunk)
- ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
+ ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline));
} /* end if */
FUNC_LEAVE_NOAPI(ret_value);
@@ -1088,28 +1090,27 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_istore_preempt(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, H5D_rdcc_ent_t * ent, hbool_t flush)
+H5D_istore_preempt(H5D_io_info_t *io_info, H5D_rdcc_ent_t * ent, hbool_t flush)
{
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
+ H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_preempt);
- assert(f);
+ assert(io_info);
assert(ent);
assert(!ent->locked);
assert(ent->idx < rdcc->nslots);
if(flush) {
/* Flush */
- if(H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, TRUE) < 0)
+ if(H5D_istore_flush_entry(io_info, ent, TRUE) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer");
}
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
+ ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(io_info->dset->shared->dcpl_cache.pline));
}
/* Unlink from list */
@@ -1155,8 +1156,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
+H5D_istore_flush (H5D_t *dset, hid_t dxpl_id, unsigned flags)
{
+ H5D_io_info_t io_info; /* Temporary I/O info object */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
@@ -1170,6 +1172,9 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL);
+
for (ent=rdcc->head; ent; ent=next) {
next = ent->next;
if ((flags&H5F_FLUSH_CLEAR_ONLY)) {
@@ -1177,10 +1182,10 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
ent->dirty = FALSE;
} /* end if */
else if ((flags&H5F_FLUSH_INVALIDATE)) {
- if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
+ if (H5D_istore_preempt(&io_info, ent, TRUE )<0)
nerrors++;
} else {
- if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
+ if (H5D_istore_flush_entry(&io_info, ent, FALSE)<0)
nerrors++;
}
} /* end for */
@@ -1211,8 +1216,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
+H5D_istore_dest (H5D_t *dset, hid_t dxpl_id)
{
+ H5D_io_info_t io_info; /* Temporary I/O info object */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
@@ -1222,10 +1228,15 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
FUNC_ENTER_NOAPI(H5D_istore_dest, FAIL);
+ assert(dset);
+
/* Fill the DXPL cache values for later use */
if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL);
+
/* Flush all the cached chunks */
for (ent=rdcc->head; ent; ent=next) {
#ifdef H5D_ISTORE_DEBUG
@@ -1233,7 +1244,7 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
HDfflush(stderr);
#endif
next = ent->next;
- if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
+ if (H5D_istore_preempt(&io_info, ent, TRUE )<0)
nerrors++;
}
if (nerrors)
@@ -1363,11 +1374,10 @@ H5D_istore_shared_free (void *_shared)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset,
- size_t size)
+H5D_istore_prune (H5D_io_info_t *io_info, size_t size)
{
int i, j, nerrors=0;
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
+ const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
size_t total = rdcc->nbytes;
const int nmeth=2; /*number of methods */
int w[1]; /*weighting as an interval */
@@ -1387,7 +1397,7 @@ H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H
* begins. The pointers participating in the list traversal are each
* given a chance at preemption before any of the pointers are advanced.
*/
- w[0] = (int)(rdcc->nused * H5F_RDCC_W0(f));
+ w[0] = (int)(rdcc->nused * H5F_RDCC_W0(io_info->dset->ent.file));
p[0] = rdcc->head;
p[1] = NULL;
@@ -1443,7 +1453,7 @@ H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H
if (n[j]==cur)
n[j] = cur->next;
}
- if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, cur, TRUE)<0)
+ if (H5D_istore_preempt(io_info, cur, TRUE)<0)
nerrors++;
}
}
@@ -1498,18 +1508,18 @@ done:
*-------------------------------------------------------------------------
*/
static void *
-H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5D_istore_lock(H5D_io_info_t *io_info,
H5D_istore_ud1_t *udata, hbool_t relax, unsigned *idx_hint/*in,out*/)
{
- unsigned idx=0; /*hash index number */
- hbool_t found = FALSE; /*already in cache? */
+ H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline=&(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
const H5O_layout_t *layout=&(dset->shared->layout); /* Dataset layout */
const H5O_fill_t *fill=&(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_time_t fill_time=dset->shared->dcpl_cache.fill_time; /* Fill time */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
+ unsigned idx=0; /*hash index number */
+ hbool_t found = FALSE; /*already in cache? */
unsigned u; /*counters */
size_t chunk_size=0; /*size of a chunk */
void *chunk=NULL; /*the file chunk */
@@ -1517,11 +1527,11 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_lock);
- assert(f);
+ assert(io_info);
assert(dset);
- assert(store);
- assert(dxpl_cache);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ assert(io_info->dxpl_cache);
+ assert(io_info->store);
+ assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));
/* Get the chunk's size */
assert(layout->u.chunk.size>0);
@@ -1529,12 +1539,12 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Search for the chunk in the cache */
if (rdcc->nslots>0) {
- idx=H5D_HASH(dset->shared,store->chunk.index);
+ idx=H5D_HASH(dset->shared,io_info->store->chunk.index);
ent = rdcc->slot[idx];
if (ent) {
- for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) {
- if (store->chunk.offset[u]!=ent->offset[u]) {
+ for (u=0, found=TRUE; u<layout->u.chunk.ndims; u++) {
+ if (io_info->store->chunk.offset[u]!=ent->offset[u]) {
found = FALSE;
break;
} /* end if */
@@ -1546,7 +1556,9 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/*
* Already in the cache. Count a hit.
*/
+#ifdef H5D_ISTORE_DEBUG
rdcc->nhits++;
+#endif /* H5D_ISTORE_DEBUG */
} else if (!found && relax) {
/*
@@ -1558,8 +1570,8 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
#ifdef H5D_ISTORE_DEBUG
HDputc('w', stderr);
HDfflush(stderr);
-#endif
rdcc->nhits++;
+#endif
if (NULL==(chunk=H5D_istore_chunk_alloc (chunk_size,pline)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
@@ -1577,7 +1589,7 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
* Not in the cache. Read it from the file and count this as a miss
* if it's in the file or an init if it isn't.
*/
- chunk_addr = H5D_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata);
+ chunk_addr = H5D_istore_get_addr(io_info, udata);
} /* end else */
if (H5F_addr_defined(chunk_addr)) {
@@ -1591,15 +1603,17 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
chunk_alloc = udata->key.nbytes;
if (NULL==(chunk = H5D_istore_chunk_alloc (chunk_alloc,pline)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
- if (H5F_block_read(f, H5FD_MEM_DRAW, chunk_addr, udata->key.nbytes, dxpl_id, chunk)<0)
+ if (H5F_block_read(dset->ent.file, H5FD_MEM_DRAW, chunk_addr, udata->key.nbytes, io_info->dxpl_id, chunk)<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk");
if (pline->nused)
- if (H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->key.filter_mask), dxpl_cache->err_detect,
- dxpl_cache->filter_cb, &(udata->key.nbytes), &chunk_alloc, &chunk)<0) {
+ if (H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->key.filter_mask), io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb, &(udata->key.nbytes), &chunk_alloc, &chunk)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL, "data pipeline read failed");
}
+#ifdef H5D_ISTORE_DEBUG
rdcc->nmisses++;
+#endif /* H5D_ISTORE_DEBUG */
} else {
H5D_fill_value_t fill_status;
@@ -1637,7 +1651,9 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
else
HDmemset(chunk,0,chunk_size);
#endif /* H5_USING_PURIFY */
+#ifdef H5D_ISTORE_DEBUG
rdcc->ninits++;
+#endif /* H5D_ISTORE_DEBUG */
} /* end else */
}
assert (found || chunk_size>0);
@@ -1653,10 +1669,10 @@ else
HDputc('#', stderr);
HDfflush(stderr);
#endif
- if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE)<0)
+ if (H5D_istore_preempt(io_info, ent, TRUE)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache");
}
- if (H5D_istore_prune(f, dxpl_cache, dxpl_id, dset, chunk_size)<0)
+ if (H5D_istore_prune(io_info, chunk_size)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache");
/* Create a new entry */
@@ -1666,7 +1682,7 @@ else
ent->chunk_size = chunk_size;
ent->alloc_size = chunk_size;
for (u=0; u<layout->u.chunk.ndims; u++)
- ent->offset[u] = store->chunk.offset[u];
+ ent->offset[u] = io_info->store->chunk.offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
ent->chunk = chunk;
@@ -1768,17 +1784,19 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5D_istore_unlock(H5D_io_info_t *io_info,
hbool_t dirty, unsigned idx_hint, uint8_t *chunk, size_t naccessed)
{
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
+ const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */
+ const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
H5D_rdcc_ent_t *ent = NULL;
int found = -1;
unsigned u;
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_unlock);
+ assert(io_info);
+
if (UINT_MAX==idx_hint) {
/*not in cache*/
} else {
@@ -1800,17 +1818,17 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
HDmemset (&x, 0, sizeof x);
x.dirty = TRUE;
- for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
- x.offset[u] = store->chunk.offset[u];
- assert(dset->shared->layout.u.chunk.size>0);
- H5_ASSIGN_OVERFLOW(x.chunk_size,dset->shared->layout.u.chunk.size,hsize_t,size_t);
+ for (u=0; u<layout->u.chunk.ndims; u++)
+ x.offset[u] = io_info->store->chunk.offset[u];
+ assert(layout->u.chunk.size>0);
+ H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
x.chunk = chunk;
- H5D_istore_flush_entry (f, dxpl_cache, dxpl_id, dset, &x, TRUE);
+ H5D_istore_flush_entry (io_info, &x, TRUE);
} else {
if(chunk)
- H5D_istore_chunk_xfree (chunk,&(dset->shared->dcpl_cache.pline));
+ H5D_istore_chunk_xfree (chunk,&(io_info->dset->shared->dcpl_cache.pline));
}
} else {
/*
@@ -1847,12 +1865,12 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5D_istore_readvv(H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf)
{
+ H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */
H5D_istore_ud1_t udata; /*B-tree pass-through */
haddr_t chunk_addr; /* Chunk address on disk */
size_t u; /* Local index variables */
@@ -1861,11 +1879,11 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
FUNC_ENTER_NOAPI(H5D_istore_readvv, FAIL);
/* Check args */
- assert(f);
- assert(dxpl_cache);
+ assert(io_info);
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
- assert(store);
+ assert(io_info->dxpl_cache);
+ assert(io_info->store);
assert(chunk_len_arr);
assert(chunk_offset_arr);
assert(mem_len_arr);
@@ -1874,22 +1892,23 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
#ifndef NDEBUG
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
- assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
+ assert(io_info->store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
-HDfprintf(stderr,"%s: store->chunk.offset={",FUNC);
+HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC);
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
- HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
+ HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata);
+ chunk_addr=H5D_istore_get_addr(io_info, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_arr[*mem_curr_seq]);
+HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf);
#endif /* QAK */
/*
@@ -1903,17 +1922,26 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* read-through of only the elements requested.
*/
- if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes
- && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
- || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) {
+ if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
+ || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) {
+ H5D_io_info_t chk_io_info; /* Temporary I/O info object */
+ H5D_storage_t chk_store; /* Chunk storage information */
+
#ifdef H5_HAVE_PARALLEL
/* Additional sanity check when operating in parallel */
if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
#endif /* H5_HAVE_PARALLEL */
- if ((ret_value=H5D_contig_readvv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size,
- chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq,
- mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
+
+ /* Set up the storage information for the chunk */
+ chk_store.contig.dset_addr=chunk_addr;
+ chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size;
+
+ /* Set up new dataset I/O info */
+ H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store);
+
+ /* Do I/O directly on chunk without reading it into the cache */
+ if ((ret_value=H5D_contig_readvv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data from file");
} /* end if */
else {
@@ -1938,13 +1966,13 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
/* Check if the chunk is in the cache (but hasn't been written to disk yet) */
if (rdcc->nslots>0) {
- unsigned idx=H5D_HASH(dset->shared,store->chunk.index); /* Cache entry index */
+ unsigned idx=H5D_HASH(dset->shared,io_info->store->chunk.index); /* Cache entry index */
H5D_rdcc_ent_t *ent = rdcc->slot[idx]; /* Cache entry */
/* Potential match... */
if (ent) {
for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) {
- if (store->chunk.offset[u]!=ent->offset[u]) {
+ if (io_info->store->chunk.offset[u]!=ent->offset[u]) {
found = FALSE;
break;
} /* end if */
@@ -2005,8 +2033,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
- if (NULL==(chunk=H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, store,
- &udata, FALSE, &idx_hint)))
+ if (NULL==(chunk=H5D_istore_lock(io_info, &udata, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -2014,8 +2041,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "vectorized memcpy failed");
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
- if (H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, store,
- FALSE, idx_hint, chunk, (size_t)naccessed)<0)
+ if (H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk");
/* Set return value */
@@ -2043,12 +2069,12 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store,
+H5D_istore_writevv(H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf)
{
+ H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */
H5D_istore_ud1_t udata; /*B-tree pass-through */
haddr_t chunk_addr; /* Chunk address on disk */
size_t u; /* Local index variables */
@@ -2057,11 +2083,11 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
FUNC_ENTER_NOAPI(H5D_istore_writevv, FAIL);
/* Check args */
- assert(f);
- assert(dxpl_cache);
+ assert(io_info);
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
- assert(store);
+ assert(io_info->dxpl_cache);
+ assert(io_info->store);
assert(chunk_len_arr);
assert(chunk_offset_arr);
assert(mem_len_arr);
@@ -2070,16 +2096,16 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
#ifndef NDEBUG
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
- assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
+ assert(io_info->store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
-HDfprintf(stderr,"%s: store->chunk.offset={",FUNC);
+HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC);
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
- HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
+ HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
- chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata);
+ chunk_addr=H5D_istore_get_addr(io_info, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
@@ -2099,15 +2125,26 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* write-through of only the elements requested.
*/
- if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes
- && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
- || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) {
+ if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
+ || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) {
+ H5D_io_info_t chk_io_info; /* Temporary I/O info object */
+ H5D_storage_t chk_store; /* Chunk storage information */
+
#ifdef H5_HAVE_PARALLEL
/* Additional sanity check when operating in parallel */
if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
#endif /* H5_HAVE_PARALLEL */
- if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
+
+ /* Set up the storage information for the chunk */
+ chk_store.contig.dset_addr=chunk_addr;
+ chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size;
+
+ /* Set up new dataset I/O info */
+ H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store);
+
+ /* Do I/O directly on chunk without reading it into the cache */
+ if ((ret_value=H5D_contig_writevv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
} /* end if */
else {
@@ -2147,8 +2184,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
} /* end if */
#endif /* OLD_WAY */
- if (NULL==(chunk=H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, store,
- &udata, relax, &idx_hint)))
+ if (NULL==(chunk=H5D_istore_lock(io_info, &udata, relax, &idx_hint)))
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
/* Use the vectorized memory copy routine to do actual work */
@@ -2156,8 +2192,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
- if (H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, store,
- TRUE, idx_hint, chunk, (size_t)naccessed)<0)
+ if (H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk");
/* Set return value */
@@ -2240,9 +2275,10 @@ done:
*-------------------------------------------------------------------------
*/
hsize_t
-H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
+H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id)
{
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
+ H5D_io_info_t io_info; /* Temporary I/O info object */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent; /*cache entry */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
@@ -2251,20 +2287,25 @@ H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
FUNC_ENTER_NOAPI(H5D_istore_allocated, 0);
+ assert(dset);
+
/* Fill the DXPL cache values for later use */
if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL);
+
/* Search for cached chunks that haven't been written out */
for(ent = rdcc->head; ent; ent = ent->next) {
/* Flush the chunk out to disk, to make certain the size is correct later */
- if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
+ if (H5D_istore_flush_entry(&io_info, ent, FALSE)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer");
} /* end for */
HDmemset(&udata, 0, sizeof udata);
udata.mesg = &dset->shared->layout;
- if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata)<0)
+ if (H5B_iterate(dset->ent.file, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
/* Set return value */
@@ -2294,8 +2335,7 @@ done:
*-------------------------------------------------------------------------
*/
haddr_t
-H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
- const hssize_t offset[], H5D_istore_ud1_t *_udata)
+H5D_istore_get_addr(H5D_io_info_t *io_info, H5D_istore_ud1_t *_udata)
{
H5D_istore_ud1_t tmp_udata; /* Information about a chunk */
H5D_istore_ud1_t *udata; /* Pointer to information about a chunk */
@@ -2304,21 +2344,22 @@ H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_istore_get_addr);
- assert(f);
- assert(layout && (layout->u.chunk.ndims > 0));
- assert(offset);
+ assert(io_info);
+ assert(io_info->dset);
+ assert(io_info->dset->shared->layout.u.chunk.ndims > 0);
+ assert(io_info->store->chunk.offset);
/* Check for udata struct to return */
udata = (_udata!=NULL ? _udata : &tmp_udata);
/* Initialize the information about the chunk we are looking for */
- for (u=0; u<layout->u.chunk.ndims; u++)
- udata->key.offset[u] = offset[u];
- udata->mesg = layout;
+ for (u=0; u<io_info->dset->shared->layout.u.chunk.ndims; u++)
+ udata->key.offset[u] = io_info->store->chunk.offset[u];
+ udata->mesg = &(io_info->dset->shared->layout);
udata->addr = HADDR_UNDEF;
/* Go get the chunk information */
- if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) {
+ if (H5B_find (io_info->dset->ent.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, udata)<0) {
/* Note: don't push error on stack, leave that to next higher level,
* since many times the B-tree is searched in order to determine
* if a chunk exists in the B-tree or not. -QAK
@@ -2453,9 +2494,10 @@ H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline)
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
- hbool_t full_overwrite)
+H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
{
+ H5D_io_info_t io_info; /* Dataset I/O info */
+ H5D_storage_t store; /* Dataset storage information */
hssize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
hsize_t chunk_size; /* Size of chunk in bytes */
unsigned filter_mask=0; /* Filter mask for chunks that have them */
@@ -2466,7 +2508,8 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
unsigned should_fill=0; /* Whether fill values should be written */
H5D_istore_ud1_t udata; /* B-tree pass-through for creating chunk */
void *chunk=NULL; /* Chunk buffer for writing fill values */
- H5P_genplist_t *dx_plist; /* Data xfer property list */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
MPI_Comm mpi_comm=MPI_COMM_NULL; /* MPI communicator for file */
int mpi_rank=(-1); /* This process's rank */
@@ -2478,8 +2521,6 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
unsigned chunk_exists; /* Flag to indicate whether a chunk exists already */
int i; /* Local index variable */
unsigned u; /* Local index variable */
- H5Z_EDC_t edc; /* Decide whether to enable EDC for read */
- H5Z_cb_t cb_struct;
H5P_genplist_t *dc_plist; /* Property list */
int space_ndims; /* Dataset's space rank */
hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
@@ -2488,11 +2529,10 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
FUNC_ENTER_NOAPI(H5D_istore_allocate, FAIL);
/* Check args */
- assert(f);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
+ assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
/* Get dataset's creation property list */
if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id)))
@@ -2511,23 +2551,19 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
if(H5P_get(dc_plist, H5D_CRT_FILL_TIME_NAME, &fill_time) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve fill time");
- /* Get necessary properties from dataset transfer property list */
- if (NULL == (dx_plist = H5P_object_verify(dxpl_id,H5P_DATASET_XFER)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list");
- if(H5P_get(dx_plist,H5D_XFER_EDC_NAME,&edc)<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get edc information");
- if(H5P_get(dx_plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get filter callback struct");
+ /* Fill the DXPL cache values for later use */
+ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
#ifdef H5_HAVE_PARALLEL
/* Retrieve MPI parameters */
- if(IS_H5FD_MPI(f)) {
+ if(IS_H5FD_MPI(dset->ent.file)) {
/* Get the MPI communicator */
- if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(f)))
+ if (MPI_COMM_NULL == (mpi_comm=H5F_mpi_get_comm(dset->ent.file)))
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator");
/* Get the MPI rank */
- if ((mpi_rank=H5F_mpi_get_rank(f))<0)
+ if ((mpi_rank=H5F_mpi_get_rank(dset->ent.file))<0)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank");
/* Set the MPI-capable file driver flag */
@@ -2584,7 +2620,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
size_t nbytes=(size_t)chunk_size;
/* Push the chunk through the filters */
- if (H5Z_pipeline(&pline, 0, &filter_mask, edc, cb_struct, &nbytes, &buf_size, &chunk)<0)
+ if (H5Z_pipeline(&pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &chunk)<0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed");
/* Keep the number of bytes the chunk turned in to */
@@ -2592,12 +2628,16 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
} /* end if */
} /* end if */
+ /* Set up dataset I/O info */
+ store.chunk.offset=chunk_offset;
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,&store);
+
/* Loop over all chunks */
carry=0;
while (carry==0) {
/* Check if the chunk exists yet on disk */
chunk_exists=1;
- if(H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),chunk_offset, NULL)==HADDR_UNDEF) {
+ if(H5D_istore_get_addr(&io_info,NULL)==HADDR_UNDEF) {
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
@@ -2626,7 +2666,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
udata.key.offset[u] = chunk_offset[u];
/* Allocate the chunk with all processes */
- if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
+ if (H5B_insert(dset->ent.file, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
/* Check if fill values should be written to blocks */
@@ -2637,7 +2677,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE==mpi_rank) {
- if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5AC_ind_dxpl_id, chunk)<0)
+ if (H5F_block_write(dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5AC_ind_dxpl_id, chunk)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
} /* end if */
@@ -2646,7 +2686,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, chunk)<0)
+ if (H5F_block_write(dset->ent.file, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, chunk)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -2787,10 +2827,10 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset)
+H5D_istore_prune_by_extent(H5D_io_info_t *io_info)
{
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
+ H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */
unsigned u; /*counters */
int found; /*remove this entry */
@@ -2801,8 +2841,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
FUNC_ENTER_NOAPI(H5D_istore_prune_by_extent, FAIL);
/* Check args */
- assert(f);
- assert(dxpl_cache);
+ assert(io_info);
assert(dset && H5D_CHUNKED == dset->shared->layout.type);
assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
@@ -2836,7 +2875,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
#endif
/* Preempt the entry from the cache, but do not flush it to disk */
- if(H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, FALSE) < 0)
+ if(H5D_istore_preempt(io_info, ent, FALSE) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk");
found=0;
@@ -2853,7 +2892,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
udata.mesg = &dset->shared->layout;
udata.dims = curr_dims;
- if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->shared->layout.u.chunk.addr, &udata) < 0)
+ if(H5B_iterate(dset->ent.file, io_info->dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->shared->layout.u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree");
done:
@@ -2984,9 +3023,9 @@ H5D_istore_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset)
+H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)
{
+ const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */
uint8_t *chunk = NULL; /*the file chunk */
unsigned idx_hint = 0; /*input value for H5F_istore_lock */
hssize_t chunk_offset[H5O_LAYOUT_NDIMS]; /*logical location of the chunks */
@@ -3018,14 +3057,13 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
FUNC_ENTER_NOAPI(H5D_istore_initialize_by_extent, FAIL);
/* Check args */
- assert(f);
- assert(dxpl_cache);
- assert(dset && H5D_CHUNKED == dset->shared->layout.type);
- assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
+ assert(io_info);
+ assert(io_info->dset && H5D_CHUNKED == layout->type);
+ assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ assert(H5F_addr_defined(layout->u.chunk.addr));
/* Get dataset's creation property list */
- if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id)))
+ if (NULL == (dc_plist = H5I_object(io_info->dset->shared->dcpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
/* Get necessary properties from property list */
@@ -3041,7 +3079,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
HDmemset(count, 0, sizeof(count));
/* Go get the rank & dimensions */
- if((srank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0)
+ if((srank = H5S_get_simple_extent_dims(io_info->dset->shared->space, curr_dims, NULL)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
@@ -3050,9 +3088,9 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
size[u] = curr_dims[u];
/* Round up to the next integer # of chunks, to accommodate partial chunks */
- chunks[u] = ((curr_dims[u]+dset->shared->layout.u.chunk.dim[u])-1) / dset->shared->layout.u.chunk.dim[u];
+ chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u];
} /* end for */
- size[u] = dset->shared->layout.u.chunk.dim[u];
+ size[u] = layout->u.chunk.dim[u];
/* Get the "down" sizes for each dimension */
if(H5V_array_down(rank,chunks,down_chunks)<0)
@@ -3060,7 +3098,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
/* Create a data space for a chunk & set the extent */
for(u = 0; u < rank; u++)
- chunk_dims[u] = dset->shared->layout.u.chunk.dim[u];
+ chunk_dims[u] = layout->u.chunk.dim[u];
if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace");
@@ -3069,18 +3107,22 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* loop through the chunks copying each chunk from the application to the
* chunk cache.
*/
- for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++) {
- idx_max[u] = (size[u] - 1) / dset->shared->layout.u.chunk.dim[u] + 1;
+ for(u = 0; u < layout->u.chunk.ndims; u++) {
+ idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1;
idx_cur[u] = 0;
} /* end for */
+ /* Point to local dataset storage info */
+ assert(io_info->store==NULL); /* Make certain we aren't blowing anything away */
+ io_info->store=&store;
+
/* Loop over all chunks */
carry=0;
while(carry==0) {
- for(u = 0, naccessed = 1; u < dset->shared->layout.u.chunk.ndims; u++) {
+ for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) {
/* The location and size of the chunk being accessed */
- chunk_offset[u] = idx_cur[u] * (hssize_t)(dset->shared->layout.u.chunk.dim[u]);
- sub_size[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u],
+ chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]);
+ sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u],
size[u]) - chunk_offset[u];
naccessed *= sub_size[u];
} /* end for */
@@ -3089,8 +3131,8 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* Figure out what chunks have to be initialized. These are the chunks where the dataspace
* extent boundary is within the chunk
*/
- for(u = 0, found = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
- end_chunk = chunk_offset[u] + dset->shared->layout.u.chunk.dim[u];
+ for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) {
+ end_chunk = chunk_offset[u] + layout->u.chunk.dim[u];
if(end_chunk > size[u]) {
found = 1;
break;
@@ -3100,27 +3142,26 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
if(found) {
/* Calculate the index of this chunk */
- if(H5V_chunk_index(rank,chunk_offset,dset->shared->layout.u.chunk.dim,down_chunks,&store.chunk.index)<0)
+ if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
store.chunk.offset=chunk_offset;
- if(NULL == (chunk = H5D_istore_lock(f, dxpl_cache, dxpl_id, dset,
- &store, NULL, FALSE, &idx_hint)))
+ if(NULL == (chunk = H5D_istore_lock(io_info, NULL, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk");
if(H5S_select_all(space_chunk,1) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space");
for(u = 0; u < rank; u++)
- count[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u], size[u] - chunk_offset[u]);
+ count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]);
#ifdef H5D_ISTORE_DEBUG
HDfputs("cache:initialize:offset:[", stdout);
- for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++)
+ for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
HDfputs("]", stdout);
HDfputs(":count:[", stdout);
- for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++)
+ for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]);
HDfputs("]\n", stdout);
#endif
@@ -3137,13 +3178,12 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
if(H5S_select_fill(fill.buf, (size_t)size[rank], space_chunk, chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed");
- if(H5D_istore_unlock(f, dxpl_cache, dxpl_id, dset, &store,
- TRUE, idx_hint, chunk, (size_t)naccessed) < 0)
+ if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)naccessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk");
} /*found */
/* Increment indices */
- for(i = dset->shared->layout.u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
+ for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
if(++idx_cur[i] >= idx_max[i])
idx_cur[i] = 0;
else
@@ -3225,8 +3265,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
+H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id)
{
+ H5D_io_info_t io_info; /* Temporary I/O info object */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent, *next; /*cache entry */
H5D_rdcc_ent_t *old_ent; /* Old cache entry */
@@ -3245,7 +3286,6 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
FUNC_ENTER_NOAPI(H5D_istore_update_cache, FAIL);
/* Check args */
- assert(f);
assert(dset && H5D_CHUNKED == dset->shared->layout.type);
assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
@@ -3266,6 +3306,9 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Construct dataset I/O info */
+ H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,NULL);
+
/* Recompute the index for each cached chunk that is in a dataset */
for(ent = rdcc->head; ent; ent = next) {
next=ent->next;
@@ -3289,7 +3332,7 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
next=old_ent->next;
/* Remove the old entry from the cache */
- if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, old_ent, TRUE )<0)
+ if (H5D_istore_preempt(&io_info, old_ent, TRUE )<0)
HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
} /* end if */
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index cbad339..864215f 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -44,10 +44,9 @@
static int interface_initialize_g = 0;
static herr_t
-H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
+H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
- hid_t dxpl_id, void *buf/*out*/,
- const H5D_storage_t *store,
+ void *buf/*out*/,
hbool_t do_write);
@@ -95,11 +94,9 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
+H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
- hid_t dxpl_id, void *_buf /*out*/,
- const H5D_storage_t *store,
- hbool_t do_write )
+ void *_buf /*out*/, hbool_t do_write )
{
haddr_t addr; /* Address of dataset (or selection) within file */
size_t mpi_buf_count, mpi_file_count; /* Number of "objects" to transfer */
@@ -115,14 +112,14 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
FUNC_ENTER_NOAPI_NOINIT(H5D_mpio_spaces_xfer);
/* Check args */
- assert (f);
- assert (dset);
+ assert (io_info);
+ assert (io_info->dset);
assert (file_space);
assert (mem_space);
assert (buf);
- assert (IS_H5FD_MPIO(f));
+ assert (IS_H5FD_MPIO(io_info->dset->ent.file));
/* Make certain we have the correct type of property list */
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));
/* create the MPI buffer type */
if (H5S_mpio_space_type( mem_space, elmt_size,
@@ -143,21 +140,21 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI file type");
/* Get the base address of the contiguous dataset or the chunk */
- if(dset->shared->layout.type == H5D_CONTIGUOUS)
- addr = H5D_contig_get_addr(dset) + mpi_file_offset;
+ if(io_info->dset->shared->layout.type == H5D_CONTIGUOUS)
+ addr = H5D_contig_get_addr(io_info->dset) + mpi_file_offset;
else {
haddr_t chunk_addr; /* for collective chunk IO */
- assert(dset->shared->layout.type == H5D_CHUNKED);
- chunk_addr=H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),store->chunk.offset,NULL);
- addr = H5F_BASE_ADDR(f) + chunk_addr + mpi_file_offset;
+ assert(io_info->dset->shared->layout.type == H5D_CHUNKED);
+ chunk_addr=H5D_istore_get_addr(io_info,NULL);
+ addr = H5F_BASE_ADDR(io_info->dset->ent.file) + chunk_addr + mpi_file_offset;
}
/*
* Pass buf type, file type to the file driver. Request an MPI type
* transfer (instead of an elementary byteblock transfer).
*/
- if(H5FD_mpi_setup_collective(dxpl_id, mpi_buf_type, mpi_file_type)<0)
+ if(H5FD_mpi_setup_collective(io_info->dxpl_id, mpi_buf_type, mpi_file_type)<0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties");
plist_is_setup=1;
@@ -166,17 +163,17 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
/* transfer the data */
if (do_write) {
- if (H5F_block_write(f, H5FD_MEM_DRAW, addr, mpi_buf_count, dxpl_id, buf) <0)
+ if (H5F_block_write(io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) <0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL,"MPI write failed");
} else {
- if (H5F_block_read (f, H5FD_MEM_DRAW, addr, mpi_buf_count, dxpl_id, buf) <0)
+ if (H5F_block_read (io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) <0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL,"MPI read failed");
}
done:
/* Reset the dxpl settings */
if(plist_is_setup) {
- if(H5FD_mpi_teardown_collective(dxpl_id)<0)
+ if(H5FD_mpi_teardown_collective(io_info->dxpl_id)<0)
HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "unable to reset dxpl values");
} /* end if */
@@ -215,8 +212,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5D_mpio_spaces_read(H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t UNUSED op,
size_t UNUSED nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
void *buf/*out*/)
@@ -225,8 +222,8 @@ H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t
FUNC_ENTER_NOAPI(H5D_mpio_spaces_read, FAIL);
- ret_value = H5D_mpio_spaces_xfer(f, dset, elmt_size, file_space,
- mem_space, dxpl_id, buf, store, 0/*read*/);
+ ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
+ mem_space, buf, 0/*read*/);
done:
FUNC_LEAVE_NOAPI(ret_value);
@@ -254,8 +251,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5D_mpio_spaces_write(H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t UNUSED op,
size_t UNUSED nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const void *buf)
@@ -265,8 +262,8 @@ H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t UNUSED *dxpl_cache, hid_t
FUNC_ENTER_NOAPI(H5D_mpio_spaces_write, FAIL);
/*OKAY: CAST DISCARDS CONST QUALIFIER*/
- ret_value = H5D_mpio_spaces_xfer(f, dset, elmt_size, file_space,
- mem_space, dxpl_id, (void*)buf, store, 1/*write*/);
+ ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
+ mem_space, (void*)buf, 1/*write*/);
done:
FUNC_LEAVE_NOAPI(ret_value);
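With the parameters bundled, the collective MPI-IO entry points now take the io_info struct plus a layout I/O callback that is unused on this path. A minimal sketch of a call site, assuming an io_info has already been filled in and that nelmts, elmt_size, the dataspaces, and buf are in scope (illustration only, not part of this patch):

    /* Sketch: collective read through the new entry point; the callback
     * argument is accepted for signature compatibility but unused here. */
    if(H5D_mpio_spaces_read(&io_info, io_info.dset->shared->layout.readvv,
            nelmts, elmt_size, file_space, mem_space, buf) < 0)
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "collective read failed");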
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 293d869..51ea060 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -46,16 +46,25 @@
/* Set the minimum object header size to create objects with */
#define H5D_MINHDR_SIZE 256
+/* [Simple] Macro to construct an H5D_io_info_t from its components */
+#define H5D_BUILD_IO_INFO(io_info,ds,dxpl_c,dxpl_i,str) \
+ (io_info)->dset=ds; \
+ (io_info)->dxpl_cache=dxpl_c; \
+ (io_info)->dxpl_id=dxpl_i; \
+ (io_info)->store=str
+
/****************************/
/* Package Private Typedefs */
/****************************/
/* The raw data chunk cache */
typedef struct H5D_rdcc_t {
+#ifdef H5D_ISTORE_DEBUG
unsigned ninits; /* Number of chunk creations */
unsigned nhits; /* Number of cache hits */
unsigned nmisses;/* Number of cache misses */
unsigned nflushes;/* Number of cache flushes */
+#endif /* H5D_ISTORE_DEBUG */
size_t nbytes; /* Current cached raw data in bytes */
size_t nslots; /* Number of chunk slots allocated */
struct H5D_rdcc_ent_t *head; /* Head of doubly linked list */
@@ -132,23 +141,20 @@ H5_DLL herr_t H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_
hbool_t update_time, hbool_t full_overwrite);
/* Functions that operate on contiguous storage */
-H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
-H5_DLL herr_t H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
+H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout);
+H5_DLL herr_t H5D_contig_fill(H5D_t *dset, hid_t dxpl_id);
/* Functions that operate on indexed storage */
-H5_DLL herr_t H5D_istore_init (H5F_t *f, H5D_t *dset);
-H5_DLL herr_t H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags);
-H5_DLL herr_t H5D_istore_create(H5F_t *f, hid_t dxpl_id,
- H5O_layout_t *layout/*in,out*/);
-H5_DLL herr_t H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset);
-H5_DLL herr_t H5D_istore_allocate (H5F_t *f, hid_t dxpl_id,
- const H5D_t *dset, hbool_t full_overwrite);
-H5_DLL hsize_t H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
-H5_DLL herr_t H5D_istore_prune_by_extent( H5F_t *f,
- const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset);
-H5_DLL herr_t H5D_istore_initialize_by_extent( H5F_t *f,
- const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_t *dset);
-H5_DLL herr_t H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
+H5_DLL herr_t H5D_istore_init (const H5F_t *f, H5D_t *dset);
+H5_DLL herr_t H5D_istore_flush (H5D_t *dset, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5D_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout);
+H5_DLL herr_t H5D_istore_dest (H5D_t *dset, hid_t dxpl_id);
+H5_DLL herr_t H5D_istore_allocate (H5D_t *dset, hid_t dxpl_id,
+ hbool_t full_overwrite);
+H5_DLL hsize_t H5D_istore_allocated(H5D_t *dset, hid_t dxpl_id);
+H5_DLL herr_t H5D_istore_prune_by_extent(H5D_io_info_t *io_info);
+H5_DLL herr_t H5D_istore_initialize_by_extent(H5D_io_info_t *io_info);
+H5_DLL herr_t H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id);
H5_DLL herr_t H5D_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims,
haddr_t addr);
#ifdef H5D_ISTORE_DEBUG
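For orientation, a minimal sketch (not part of the patch) of how the H5D_BUILD_IO_INFO macro is meant to be used: it simply fills the four members of the bundled parameter struct. This assumes dset, dxpl_cache and dxpl_id are in scope and that the dataset is contiguous, so the storage union is filled from its layout message (the contig fields are defined in H5Dprivate.h, next in this diff):

    H5D_io_info_t io_info;      /* Bundled I/O parameters */
    H5D_storage_t store;        /* Layout-specific storage info */

    /* Describe the contiguous storage for this dataset */
    store.contig.dset_addr = dset->shared->layout.u.contig.addr;
    store.contig.dset_size = dset->shared->layout.u.contig.size;

    /* Equivalent to assigning io_info.dset, io_info.dxpl_cache,
     * io_info.dxpl_id and io_info.store one by one */
    H5D_BUILD_IO_INFO(&io_info, dset, dxpl_cache, dxpl_id, &store);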
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index 7956c7d..5a979db 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -169,12 +169,20 @@
typedef struct H5D_t H5D_t;
/* Typedef for dataset storage information */
+typedef struct {
+ hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */
+ hssize_t *offset; /* Chunk's coordinates in elements */
+} H5D_chunk_storage_t;
+
+typedef struct {
+ haddr_t dset_addr; /* Address of dataset in file */
+ hsize_t dset_size; /* Total size of dataset in file */
+} H5D_contig_storage_t;
+
typedef union H5D_storage_t {
H5O_efl_t efl; /* External file list information for dataset */
- struct {
- hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */
- hssize_t *offset; /* Chunk's coordinates in elements */
- } chunk;
+ H5D_chunk_storage_t chunk; /* Chunk information for dataset */
+ H5D_contig_storage_t contig; /* Contiguous information for dataset */
} H5D_storage_t;
/* Typedef for cached dataset transfer property list information */
@@ -199,6 +207,14 @@ typedef struct H5D_dcpl_cache_t {
H5D_fill_time_t fill_time; /* Fill time (H5D_CRT_FILL_TIME_NAME) */
} H5D_dcpl_cache_t;
+/* Typedef for common raw data I/O operation info */
+typedef struct H5D_io_info_t {
+ H5D_t *dset; /* Pointer to dataset being operated on */
+ const H5D_dxpl_cache_t *dxpl_cache; /* Pointer to cached DXPL info */
+ hid_t dxpl_id; /* Original DXPL ID */
+ const H5D_storage_t *store; /* Dataset storage info */
+} H5D_io_info_t;
+
/* Library-private functions defined in H5D package */
H5_DLL herr_t H5D_init(void);
H5_DLL H5D_t *H5D_open(H5G_entry_t *ent, hid_t dxpl_id);
@@ -217,39 +233,25 @@ H5_DLL herr_t H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags);
H5_DLL herr_t H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache);
H5_DLL herr_t H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache);
-/* Functions that operate on byte sequences in memory and on disk */
-H5_DLL ssize_t H5D_seq_readvv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
- void *buf);
-H5_DLL ssize_t H5D_seq_writevv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
- const void *buf);
-
/* Functions that operate on contiguous storage */
H5_DLL herr_t H5D_contig_delete(H5F_t *f, hid_t dxpl_id,
const H5O_layout_t *layout);
H5_DLL haddr_t H5D_contig_get_addr(const H5D_t *dset);
-H5_DLL ssize_t H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
- haddr_t dset_addr, hsize_t dset_size,
+H5_DLL ssize_t H5D_contig_readvv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
-H5_DLL ssize_t H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
- haddr_t dset_addr, hsize_t dset_size,
+H5_DLL ssize_t H5D_contig_writevv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
/* Functions that operate on compact dataset storage */
-H5_DLL ssize_t H5D_compact_readvv(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
+H5_DLL ssize_t H5D_compact_readvv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
void *buf);
-H5_DLL ssize_t H5D_compact_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
+H5_DLL ssize_t H5D_compact_writevv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
const void *buf);
@@ -260,18 +262,15 @@ struct H5D_istore_ud1_t; /*define at H5Distore.c*/
/* Functions that operate on indexed storage */
H5_DLL herr_t H5D_istore_delete(H5F_t *f, hid_t dxpl_id,
const H5O_layout_t *layout);
-H5_DLL ssize_t H5D_istore_readvv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store,
+H5_DLL ssize_t H5D_istore_readvv(H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
-H5_DLL ssize_t H5D_istore_writevv(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, H5D_t *dset, const H5D_storage_t *store,
+H5_DLL ssize_t H5D_istore_writevv(H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
-H5_DLL haddr_t H5D_istore_get_addr(H5F_t *f, hid_t dxpl_id,
- const H5O_layout_t *layout, const hssize_t offset[],
+H5_DLL haddr_t H5D_istore_get_addr(H5D_io_info_t *io_info,
struct H5D_istore_ud1_t *_udata);
H5_DLL herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
int indent, int fwidth, int ndims);
@@ -281,15 +280,15 @@ H5_DLL herr_t H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * str
struct H5S_t;
/* MPI-IO function to read directly from app buffer to file rky980813 */
-H5_DLL herr_t H5D_mpio_spaces_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL herr_t H5D_mpio_spaces_read(H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
size_t nelmts, size_t elmt_size,
const struct H5S_t *file_space, const struct H5S_t *mem_space,
void *buf/*out*/);
/* MPI-IO function to write directly from app buffer to file rky980813 */
-H5_DLL herr_t H5D_mpio_spaces_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL herr_t H5D_mpio_spaces_write(H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
size_t nelmts, size_t elmt_size,
const struct H5S_t *file_space, const struct H5S_t *mem_space,
const void *buf);
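The practical effect of these prototype changes is that every *_readvv/*_writevv routine now shares the same leading argument. A hedged sketch of a call through the new contiguous read path, assuming the io_info and the caller's sequence length/offset arrays and counters are already set up:

    ssize_t nbytes;     /* Bytes actually transferred */

    /* Previously: H5D_contig_readvv(f, dxpl_id, dset, dset_addr, dset_size, ...) */
    nbytes = H5D_contig_readvv(&io_info,
                dset_max_nseq, &dset_curr_seq, dset_len_arr, dset_offset_arr,
                mem_max_nseq, &mem_curr_seq, mem_len_arr, mem_offset_arr,
                buf);
    if(nbytes < 0)
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "contiguous read failed");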
diff --git a/src/H5Dseq.c b/src/H5Dseq.c
deleted file mode 100644
index f413666..0000000
--- a/src/H5Dseq.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
- * Thursday, September 28, 2000
- *
- * Purpose: Provides I/O facilities for sequences of bytes stored with various
- * layout policies. These routines are similar to the H5Farray.c routines,
- * these deal in terms of byte offsets and lengths, not coordinates and
- * hyperslab sizes.
- *
- */
-
-#define H5D_PACKAGE /*suppress error about including H5Dpkg */
-
-/* Pablo information */
-/* (Put before include files to avoid problems with inline functions) */
-#define PABLO_MASK H5Dseq_mask
-
-#include "H5private.h" /* Generic Functions */
-#include "H5Dpkg.h" /* Datasets */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fprivate.h" /* Files */
-#include "H5FDprivate.h" /* File drivers */
-#include "H5Iprivate.h" /* IDs */
-#include "H5MFprivate.h" /* File space management */
-#include "H5MMprivate.h" /* Memory management */
-#include "H5Oprivate.h" /* Object headers */
-#include "H5Pprivate.h" /* Property lists */
-#include "H5Vprivate.h" /* Vector and array functions */
-
-/* Interface initialization */
-#define INTERFACE_INIT NULL
-static int interface_initialize_g = 0;
-
-
-/*-------------------------------------------------------------------------
- * Function: H5D_seq_readvv
- *
- * Purpose: Reads in a vector of byte sequences from a file dataset into a
- * buffer in in memory. The data is read from file F and the array's size
- * and storage information is in LAYOUT. External files are described
- * according to the external file list, EFL. The vector of byte sequences
- * offsets is in the DSET_OFFSET array into the dataset (offsets are in
- * terms of bytes) and the size of each sequence is in the SEQ_LEN array.
- * The total size of the file array is implied in the LAYOUT argument.
- * Bytes read into BUF are sequentially stored in the buffer, each sequence
- * from the vector stored directly after the previous. The number of
- * sequences is NSEQ.
- * Purpose: Reads a vector of byte sequences from a vector of byte
- * sequences in a file dataset into a buffer in memory. The data is
- * read from file F and the array's size and storage information is in
- * LAYOUT. External files and chunks are described according to the
- * storage information, STORE. The vector of byte sequences offsets for
- * the file is in the DSET_OFFSET_ARR array into the dataset (offsets are
- * in terms of bytes) and the size of each sequence is in the DSET_LEN_ARR
- * array. The vector of byte sequences offsets for memory is in the
- * MEM_OFFSET_ARR array into the dataset (offsets are in terms of bytes)
- * and the size of each sequence is in the MEM_LEN_ARR array. The total
- * size of the file array is implied in the LAYOUT argument. The maximum
- * number of sequences in the file dataset and the memory buffer are
- * DSET_MAX_NSEQ & MEM_MAX_NSEQ respectively. The current sequence being
- * operated on in the file dataset and the memory buffer are DSET_CURR_SEQ
- * & MEM_CURR_SEQ respectively. The current sequence being operated on
- * will be updated as a result of the operation, as will the offsets and
- * lengths of the file dataset and memory buffer sequences.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * Wednesday, May 7, 2003
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-ssize_t
-H5D_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
- void *buf/*out*/)
-{
- ssize_t ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI(H5D_seq_readvv, FAIL);
-
- /* Check args */
- assert(f);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Make certain we have the correct type of property list */
- assert(dset);
- assert(dset_curr_seq);
- assert(*dset_curr_seq<dset_max_nseq);
- assert(dset_len_arr);
- assert(dset_offset_arr);
- assert(mem_curr_seq);
- assert(*mem_curr_seq<mem_max_nseq);
- assert(mem_len_arr);
- assert(mem_offset_arr);
- assert(buf);
-
- switch (dset->shared->layout.type) {
- case H5D_CONTIGUOUS:
- /* Read directly from file if the dataset is in an external file */
- if (store && store->efl.nused>0) {
- /* Note: We can't use data sieve buffers for datasets in external files
- * because the 'addr' of all external files is set to 0 (above) and
- * all datasets in external files would alias to the same set of
- * file offsets, totally mixing up the data sieve buffer information. -QAK
- */
- if((ret_value=H5O_efl_readvv(&(store->efl),
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed");
- } else {
- /* Pass along the vector of sequences to read */
- if((ret_value=H5D_contig_readvv(f, dxpl_id, dset,
- dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
- } /* end else */
- break;
-
- case H5D_CHUNKED:
- assert(store);
- if((ret_value=H5D_istore_readvv(f, dxpl_cache, dxpl_id, dset, store,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "istore read failed");
- break;
-
- case H5D_COMPACT:
- /* Pass along the vector of sequences to read */
- if((ret_value=H5D_compact_readvv(f, dxpl_id, dset,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "compact read failed");
- break;
-
- default:
- assert("not implemented yet" && 0);
- HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout");
- } /* end switch() */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value);
-} /* H5D_seq_readvv() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5D_seq_writevv
- *
- * Purpose: Writes a vector of byte sequences from a buffer in memory into
- * a vector of byte sequences in a file dataset. The data is written to
- * file F and the array's size and storage information is in LAYOUT.
- * External files and chunks are described according to the storage
- * information, STORE. The vector of byte sequences offsets for the file
- * is in the DSET_OFFSET_ARR array into the dataset (offsets are in
- * terms of bytes) and the size of each sequence is in the DSET_LEN_ARR
- * array. The vector of byte sequences offsets for memory is in the
- * MEM_OFFSET_ARR array into the dataset (offsets are in terms of bytes)
- * and the size of each sequence is in the MEM_LEN_ARR array. The total
- * size of the file array is implied in the LAYOUT argument. The maximum
- * number of sequences in the file dataset and the memory buffer are
- * DSET_MAX_NSEQ & MEM_MAX_NSEQ respectively. The current sequence being
- * operated on in the file dataset and the memory buffer are DSET_CURR_SEQ
- * & MEM_CURR_SEQ respectively. The current sequence being operated on
- * will be updated as a result of the operation, as will the offsets and
- * lengths of the file dataset and memory buffer sequences.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * Friday, May 2, 2003
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-ssize_t
-H5D_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
- hid_t dxpl_id, struct H5D_t *dset, const H5D_storage_t *store,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
- const void *buf)
-{
- ssize_t ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI(H5D_seq_writevv, FAIL);
-
- /* Check args */
- assert(f);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Make certain we have the correct type of property list */
- assert(dset);
- assert(dset_curr_seq);
- assert(*dset_curr_seq<dset_max_nseq);
- assert(dset_len_arr);
- assert(dset_offset_arr);
- assert(mem_curr_seq);
- assert(*mem_curr_seq<mem_max_nseq);
- assert(mem_len_arr);
- assert(mem_offset_arr);
- assert(buf);
-
- switch (dset->shared->layout.type) {
- case H5D_CONTIGUOUS:
- /* Write directly to file if the dataset is in an external file */
- if (store && store->efl.nused>0) {
- /* Note: We can't use data sieve buffers for datasets in external files
- * because the 'addr' of all external files is set to 0 (above) and
- * all datasets in external files would alias to the same set of
- * file offsets, totally mixing up the data sieve buffer information. -QAK
- */
- if ((ret_value=H5O_efl_writevv(&(store->efl),
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed");
- } else {
- /* Pass along the vector of sequences to write */
- if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset,
- dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
- } /* end else */
- break;
-
- case H5D_CHUNKED:
- assert(store);
- if((ret_value=H5D_istore_writevv(f, dxpl_cache, dxpl_id, dset, store,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "istore write failed");
- break;
-
- case H5D_COMPACT:
- /* Pass along the vector of sequences to write */
- if((ret_value=H5D_compact_writevv(f, dxpl_id, dset,
- dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
- mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
- buf))<0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "compact write failed");
- break;
-
- default:
- assert("not implemented yet" && 0);
- HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout");
- } /* end switch() */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value);
-} /* H5D_seq_writevv() */
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index 6ee15d9..4afc5f3 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -75,7 +75,7 @@ static herr_t H5G_node_dest(H5F_t *f, H5G_node_t *sym);
static herr_t H5G_node_clear(H5G_node_t *sym);
/* B-tree callbacks */
-static size_t H5G_node_sizeof_rkey(H5F_t *f, const void *_udata);
+static size_t H5G_node_sizeof_rkey(const H5F_t *f, const void *_udata);
static H5RC_t *H5G_node_get_shared(H5F_t *f, const void *_udata);
static herr_t H5G_node_create(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, void *_lt_key,
void *_udata, void *_rt_key,
@@ -172,7 +172,7 @@ H5FL_BLK_DEFINE_STATIC(grp_page);
*-------------------------------------------------------------------------
*/
static size_t
-H5G_node_sizeof_rkey(H5F_t *f, const void UNUSED * udata)
+H5G_node_sizeof_rkey(const H5F_t *f, const void UNUSED * udata)
{
/* Use FUNC_ENTER_NOAPI_NOINIT_NOFUNC here to avoid performance issues */
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5G_node_sizeof_rkey);
@@ -690,7 +690,7 @@ H5G_node_create(H5F_t *f, hid_t dxpl_id, H5B_ins_t UNUSED op, void *_lt_key,
hsize_t size = 0;
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5G_node_create, FAIL);
+ FUNC_ENTER_NOAPI_NOINIT(H5G_node_create);
/*
* Check arguments.
diff --git a/src/H5Oefl.c b/src/H5Oefl.c
index 1f389ce..0dada50 100644
--- a/src/H5Oefl.c
+++ b/src/H5Oefl.c
@@ -20,15 +20,18 @@
#define H5F_PACKAGE /*suppress error about including H5Fpkg */
#define H5O_PACKAGE /*suppress error about including H5Opkg */
-#include "H5private.h"
-#include "H5Eprivate.h"
-#include "H5Fpkg.h"
-#include "H5HLprivate.h"
-#include "H5MMprivate.h"
-#include "H5Opkg.h" /* Object header functions */
-
+/* Pablo information */
+/* (Put before include files to avoid problems with inline functions) */
#define PABLO_MASK H5O_efl_mask
+#include "H5private.h" /* Generic Functions */
+#include "H5Dprivate.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* File access */
+#include "H5HLprivate.h" /* Local Heaps */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Opkg.h" /* Object headers */
+
/* PRIVATE PROTOTYPES */
static void *H5O_efl_decode(H5F_t *f, hid_t dxpl_id, const uint8_t *p, H5O_shared_t *sh);
static herr_t H5O_efl_encode(H5F_t *f, uint8_t *p, const void *_mesg);
@@ -617,11 +620,12 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5O_efl_readvv(const H5O_efl_t *efl,
+H5O_efl_readvv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *_buf)
{
+ const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */
unsigned char *buf; /* Pointer to buffer to write */
haddr_t addr; /* Actual address to read */
size_t size; /* Size of sequence in bytes */
@@ -696,11 +700,12 @@ done:
*-------------------------------------------------------------------------
*/
ssize_t
-H5O_efl_writevv(const H5O_efl_t *efl,
+H5O_efl_writevv(H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *_buf)
{
+ const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */
const unsigned char *buf; /* Pointer to buffer to write */
haddr_t addr; /* Actual address to read */
size_t size; /* Size of sequence in bytes */
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 9b05474..7b1ca12 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -143,6 +143,18 @@ typedef struct H5O_layout_compact_t {
void *buf; /* Buffer for compact dataset */
} H5O_layout_compact_t;
+/* Function pointers for I/O on particular types of dataset layouts */
+/* (Forward declare some structs/unions to avoid #include problems) */
+struct H5D_io_info_t;
+typedef ssize_t (*H5O_layout_readvv_func_t)(struct H5D_io_info_t *io_info,
+ size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
+ size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
+ void *buf);
+typedef ssize_t (*H5O_layout_writevv_func_t)(struct H5D_io_info_t *io_info,
+ size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
+ size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
+ const void *buf);
+
typedef struct H5O_layout_t {
H5D_layout_t type; /* Type of layout */
unsigned version; /* Version of message */
@@ -156,6 +168,8 @@ typedef struct H5O_layout_t {
H5O_layout_chunk_t chunk; /* Information for chunked layout */
H5O_layout_compact_t compact; /* Information for compact layout */
} u;
+ H5O_layout_readvv_func_t readvv; /* I/O routine for reading data */
+ H5O_layout_writevv_func_t writevv; /* I/O routine for writing data */
} H5O_layout_t;
/* Enable reading/writing "bogus" messages */
@@ -268,11 +282,11 @@ H5_DLL size_t H5O_layout_meta_size(H5F_t *f, const void *_mesg);
/* EFL operators */
H5_DLL hsize_t H5O_efl_total_size(H5O_efl_t *efl);
-H5_DLL ssize_t H5O_efl_readvv(const H5O_efl_t *efl,
+H5_DLL ssize_t H5O_efl_readvv(struct H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
-H5_DLL ssize_t H5O_efl_writevv(const H5O_efl_t *efl,
+H5_DLL ssize_t H5O_efl_writevv(struct H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
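These two function-pointer members are what let the upper layers dispatch raw data I/O without switching on the layout type. A minimal sketch of the intended dispatch, assuming the pointers were set up when the dataset's layout was initialized and that the vector I/O arguments are in scope (illustration only):

    ssize_t nread;
    H5O_layout_readvv_func_t readvv = io_info->dset->shared->layout.readvv;

    /* Same call shape for contiguous, chunked, compact or external storage */
    nread = (*readvv)(io_info,
                dset_max_nseq, &dset_curr_seq, dset_len_arr, dset_offset_arr,
                mem_max_nseq, &mem_curr_seq, mem_len_arr, mem_offset_arr,
                buf);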
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 65843f6..b8249e1 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -195,7 +195,6 @@ H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size,
*
*-------------------------------------------------------------------------
*/
-#ifndef AKC_OLD
static herr_t
H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
/* out: */
@@ -457,305 +456,6 @@ done:
#endif
FUNC_LEAVE_NOAPI(ret_value);
}
-#else
-/* keep this old code for now. */
-static herr_t
-H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
-{
- H5S_sel_iter_t sel_iter; /* Selection iteration info */
- hbool_t sel_iter_init=0; /* Selection iteration info has been initialized */
-
- struct dim { /* less hassle than malloc/free & ilk */
- hssize_t start;
- hsize_t strid;
- hsize_t block;
- hsize_t xtent;
- hsize_t count;
- } d[H5S_MAX_RANK];
-
- int i;
- int offset[H5S_MAX_RANK];
- int max_xtent[H5S_MAX_RANK];
- H5S_hyper_dim_t *diminfo; /* [rank] */
- int rank;
- int block_length[2];
- MPI_Datatype inner_type, outer_type, old_type[2];
- MPI_Aint extent_len, displacement[2];
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type);
-
- /* Check args */
- assert (space);
- assert(sizeof(MPI_Aint) >= sizeof(elmt_size));
- if (0==elmt_size)
- goto empty;
-
- /* Initialize selection iterator */
- if (H5S_select_iter_init(&sel_iter, space, elmt_size)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator");
- sel_iter_init=1; /* Selection iteration info has been initialized */
-
- /* Abbreviate args */
- diminfo=sel_iter.u.hyp.diminfo;
- assert (diminfo);
-
- /* make a local copy of the dimension info so we can operate with them */
-
- /* Check if this is a "flattened" regular hyperslab selection */
- if(sel_iter.u.hyp.iter_rank!=0 && sel_iter.u.hyp.iter_rank<space->extent.rank) {
- /* Flattened selection */
- rank=sel_iter.u.hyp.iter_rank;
- assert (rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
- goto empty;
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: Flattened selection\n",FUNC);
-#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+sel_iter.u.hyp.sel_off[i];
- d[i].strid = diminfo[i].stride;
- d[i].block = diminfo[i].block;
- d[i].count = diminfo[i].count;
- d[i].xtent = sel_iter.u.hyp.size[i];
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu",
- FUNC, d[i].start, d[i].strid, d[i].count, d[i].block, d[i].xtent );
- if (i==0)
- HDfprintf(stderr, " rank=%d\n", rank );
- else
- HDfprintf(stderr, "\n" );
-#endif
- if (0==d[i].block)
- goto empty;
- if (0==d[i].count)
- goto empty;
- if (0==d[i].xtent)
- goto empty;
- }
- } /* end if */
- else {
- /* Non-flattened selection */
- rank = space->extent.rank;
- assert (rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
- goto empty;
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: Non-flattened selection\n",FUNC);
-#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+space->select.offset[i];
- d[i].strid = diminfo[i].stride;
- d[i].block = diminfo[i].block;
- d[i].count = diminfo[i].count;
- d[i].xtent = space->extent.size[i];
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu",
- FUNC, d[i].start, d[i].strid, d[i].count, d[i].block, d[i].xtent );
- if (i==0)
- HDfprintf(stderr, " rank=%d\n", rank );
- else
- HDfprintf(stderr, "\n" );
-#endif
- if (0==d[i].block)
- goto empty;
- if (0==d[i].count)
- goto empty;
- if (0==d[i].xtent)
- goto empty;
- }
- } /* end else */
-
-/**********************************************************************
- Compute array "offset[rank]" which gives the offsets for a multi-
- dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
-**********************************************************************/
- offset[rank-1] = 1;
- max_xtent[rank-1] = d[rank-1].xtent;
-#ifdef H5Smpi_DEBUG
- i=rank-1;
- HDfprintf(stderr, " offset[%2d]=%d; max_xtent[%2d]=%d\n",
- i, offset[i], i, max_xtent[i]);
-#endif
- for (i=rank-2; i>=0; --i) {
- offset[i] = offset[i+1]*d[i+1].xtent;
- max_xtent[i] = max_xtent[i+1]*d[i].xtent;
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, " offset[%2d]=%d; max_xtent[%2d]=%d\n",
- i, offset[i], i, max_xtent[i]);
-#endif
- }
-
- /* Create a type covering the selected hyperslab.
- * Multidimensional dataspaces are stored in row-major order.
- * The type is built from the inside out, going from the
- * fastest-changing (i.e., inner) dimension * to the slowest (outer). */
-
-/*******************************************************
-* Construct contig type for inner contig dims:
-*******************************************************/
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: Making contig type %d MPI_BYTEs\n", FUNC,elmt_size );
- for (i=rank-1; i>=0; --i)
- HDfprintf(stderr, "d[%d].xtent=%Hu \n", i, d[i].xtent);
-#endif
- if (MPI_SUCCESS != (mpi_code= MPI_Type_contiguous( (int)elmt_size, MPI_BYTE, &inner_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
-
-/*******************************************************
-* Construct the type by walking the hyperslab dims
-* from the inside out:
-*******************************************************/
- for ( i=rank-1; i>=0; --i) {
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: Dimension i=%d \n"
- "count=%Hu block=%Hu stride=%Hu\n",
- FUNC, i, d[i].count, d[i].block, d[i].strid );
-#endif
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: i=%d Making vector-type \n", FUNC,i);
-#endif
- /****************************************
- * Build vector in current dimension:
- ****************************************/
- mpi_code =MPI_Type_vector((int)(d[i].count), /* count */
- (int)(d[i].block), /* blocklength */
- (int)(d[i].strid), /* stride */
- inner_type, /* old type */
- &outer_type ); /* new type */
-
- MPI_Type_free( &inner_type );
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code);
-
- displacement[1] = (MPI_Aint)elmt_size * max_xtent[i];
- if(MPI_SUCCESS != (mpi_code = MPI_Type_extent(outer_type, &extent_len)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code);
-
- /*************************************************
- * Restructure this datatype ("outer_type")
- * so that it still starts at 0, but its extent
- * is the full extent in this dimension.
- *************************************************/
- if ((int)extent_len < displacement[1]) {
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: i=%d Extending struct type\n"
- "***displacements: 0, %d\n", FUNC, i, displacement[1]);
-#endif
-
-#ifdef H5_HAVE_MPI2 /* have MPI-2 (this function is not included in MPICH) */
- mpi_code = MPI_Type_create_resized
- ( outer_type, /* old type */
- 0, /* blocklengths */
- displacement[1], /* displacements */
- &inner_type); /* new type */
-#else /* do not have MPI-2 */
- block_length[0] = 1;
- block_length[1] = 1;
-
- displacement[0] = 0;
-
- old_type[0] = outer_type;
- old_type[1] = MPI_UB;
- mpi_code = MPI_Type_struct ( 2, /* count */
- block_length, /* blocklengths */
- displacement, /* displacements */
- old_type, /* old types */
- &inner_type); /* new type */
-#endif
-
- MPI_Type_free (&outer_type);
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code);
- }
- else {
- inner_type = outer_type;
- }
- } /* end for */
-/***************************
-* End of loop, walking
-* thru dimensions.
-***************************/
-
-
- /* At this point inner_type is actually the outermost type, even for 0-trip loop */
-
-/***************************************************************
-* Final task: create a struct which is a "clone" of the
-* current struct, but displaced according to the d[i].start
-* values given in the hyperslab description:
-***************************************************************/
- displacement[0] = 0;
- for (i=rank-1; i>=0; i--)
- displacement[0] += d[i].start * offset[i];
-
- if (displacement[0] > 0) {
- displacement[0] *= elmt_size;
- block_length[0] = 1;
- old_type[0] = inner_type;
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "%s: Making final struct\n***count=1:\n", FUNC);
- HDfprintf(stderr, "\tblocklength[0]=%d; displacement[0]=%d\n",
- block_length[0], displacement[0]);
-#endif
-
-
- if (MPI_SUCCESS != (mpi_code= MPI_Type_struct( 1, /* count */
- block_length, /* blocklengths */
- displacement, /* displacements */
- old_type, /* old type */
- new_type )) /* new type */
- )
- HMPI_GOTO_ERROR(FAIL, "couldn't create MPI struct type", mpi_code);
-
- if (MPI_SUCCESS != (mpi_code= MPI_Type_free (&old_type[0])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
- }
- else {
- *new_type = inner_type;
- }
-
- if (MPI_SUCCESS != (mpi_code= MPI_Type_commit( new_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
- /* fill in the remaining return values */
- *count = 1; /* only have to move one of these suckers! */
- *extra_offset = 0;
- *is_derived_type = 1;
- HGOTO_DONE(SUCCEED);
-
-empty:
- /* special case: empty hyperslab */
- *new_type = MPI_BYTE;
- *count = 0;
- *extra_offset = 0;
- *is_derived_type = 0;
-
-done:
- /* Release selection iterator */
- if(sel_iter_init) {
- if (H5S_SELECT_ITER_RELEASE(&sel_iter)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
- } /* end if */
-
-#ifdef H5Smpi_DEBUG
- HDfprintf(stderr, "Leave %s, count=%ld is_derived_type=%d\n",
- FUNC, *count, *is_derived_type );
-#endif
- FUNC_LEAVE_NOAPI(ret_value);
-}
-#endif
/*-------------------------------------------------------------------------
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index efa9b30..4879bfd 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -117,16 +117,16 @@ typedef struct H5S_conv_t {
*/
/* Read from file to application w/o intermediate scratch buffer */
- herr_t (*read)(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+ herr_t (*read)(H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
void *buf/*out*/);
/* Write directly from app buffer to file */
- herr_t (*write)(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+ herr_t (*write)(H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const void *buf);
@@ -234,12 +234,12 @@ H5_DLL herr_t H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space,
H5D_operator_t op, void *operator_data);
H5_DLL herr_t H5S_select_fill(void *fill, size_t fill_size,
const H5S_t *space, void *buf);
-H5_DLL herr_t H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL herr_t H5S_select_fscat (H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts,
const void *_buf);
-H5_DLL size_t H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL size_t H5S_select_fgath (H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts,
void *buf);
H5_DLL herr_t H5S_select_mscat (const void *_tscat_buf,
@@ -248,13 +248,13 @@ H5_DLL herr_t H5S_select_mscat (const void *_tscat_buf,
H5_DLL size_t H5S_select_mgath (const void *_buf,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
const H5D_dxpl_cache_t *dxpl_cache, void *_tgath_buf/*out*/);
-H5_DLL herr_t H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL herr_t H5S_select_read(H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
void *buf/*out*/);
-H5_DLL herr_t H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5_DLL herr_t H5S_select_write(H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const void *buf/*out*/);
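The H5S_conv_t read/write members follow the same pattern, and the scatter/gather helpers below them now take the io_info plus the layout callback they should drive. A hedged sketch of the file-side scatter call on a write path, assuming io_info, the file selection and its iterator, an element count nelmts, and a conversion buffer tconv_buf are prepared by the caller (placeholder names, not from this patch):

    /* Scatter nelmts elements from the conversion buffer to the file,
     * letting the layout's writevv callback do the actual vector I/O */
    if(H5S_select_fscat(&io_info, io_info.dset->shared->layout.writevv,
            file_space, &file_iter, nelmts, tconv_buf) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "scatter failed");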
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index 1dacbf5..bddc89b 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -22,15 +22,16 @@
/* Pablo information */
/* (Put before include files to avoid problems with inline functions) */
-#define PABLO_MASK H5Sselect_mask
+#define PABLO_MASK H5S_select_mask
-#include "H5private.h" /* Generic Functions */
-#include "H5Dprivate.h" /* Datasets (for their properties) */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5FLprivate.h" /* Free Lists */
-#include "H5Iprivate.h" /* ID Functions */
-#include "H5Spkg.h" /* Dataspace functions */
-#include "H5Vprivate.h" /* Vector functions */
+#include "H5private.h" /* Generic Functions */
+#include "H5Dprivate.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Oprivate.h" /* Object headers */
+#include "H5Spkg.h" /* Dataspaces */
+#include "H5Vprivate.h" /* Vector and array functions */
/* Interface initialization */
#define INTERFACE_INIT NULL
@@ -432,7 +433,7 @@ done:
htri_t
H5S_select_valid(const H5S_t *space)
{
- htri_t ret_value; /* return value */
+ htri_t ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5S_select_valid, 0);
@@ -1341,9 +1342,6 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
htri_t ret_value=TRUE; /* return value */
FUNC_ENTER_NOAPI(H5S_select_shape_same, FAIL);
-#ifdef QAK
-HDfprintf(stderr,"%s: Entering\n",FUNC);
-#endif /* QAK */
/* Check args */
assert(space1);
@@ -1353,28 +1351,15 @@ HDfprintf(stderr,"%s: Entering\n",FUNC);
if (space1->extent.rank!=space2->extent.rank)
HGOTO_DONE(FALSE);
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 0.5\n",FUNC);
-HDfprintf(stderr,"%s: space1 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space1));
-HDfprintf(stderr,"%s: space1->select.num_elem=%Hd\n",FUNC,space1->select.num_elem);
-HDfprintf(stderr,"%s: space2 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space2));
-HDfprintf(stderr,"%s: space2->select.num_elem=%Hd\n",FUNC,space2->select.num_elem);
-#endif /* QAK */
/* Check for different number of elements selected */
if(H5S_GET_SELECT_NPOINTS(space1)!=H5S_GET_SELECT_NPOINTS(space2))
HGOTO_DONE(FALSE);
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 1.0\n",FUNC);
-#endif /* QAK */
/* Check for "easy" cases before getting into generalized block iteration code */
if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_ALL && H5S_GET_SELECT_TYPE(space2)==H5S_SEL_ALL) {
hsize_t dims1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
hsize_t dims2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 2.0\n",FUNC);
-#endif /* QAK */
if(H5S_get_simple_extent_dims(space1, dims1, NULL)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality");
if(H5S_get_simple_extent_dims(space2, dims2, NULL)<0)
@@ -1386,17 +1371,11 @@ HDfprintf(stderr,"%s: Check 2.0\n",FUNC);
HGOTO_DONE(FALSE);
} /* end if */
else if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_NONE || H5S_GET_SELECT_TYPE(space2)==H5S_SEL_NONE) {
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 3.0\n",FUNC);
-#endif /* QAK */
HGOTO_DONE(TRUE);
} /* end if */
else if((H5S_GET_SELECT_TYPE(space1)==H5S_SEL_HYPERSLABS && space1->select.sel_info.hslab->diminfo_valid)
&& (H5S_GET_SELECT_TYPE(space2)==H5S_SEL_HYPERSLABS && space2->select.sel_info.hslab->diminfo_valid)) {
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 4.0\n",FUNC);
-#endif /* QAK */
/* Check that the shapes are the same */
for (u=0; u<space1->extent.rank; u++) {
if(space1->select.sel_info.hslab->opt_diminfo[u].stride!=space2->select.sel_info.hslab->opt_diminfo[u].stride)
@@ -1417,27 +1396,6 @@ HDfprintf(stderr,"%s: Check 4.0\n",FUNC);
hssize_t off2[H5O_LAYOUT_NDIMS]; /* Offset of selection #2 blocks */
htri_t status1,status2; /* Status from next block checks */
unsigned first_block=1; /* Flag to indicate the first block */
-#ifdef QAK
-HDfprintf(stderr,"%s: Check 10.0\n",FUNC);
-HDfprintf(stderr,"%s: space1 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space1));
-if(space1->select.sel_info.hslab.span_lst) {
- HDfprintf(stderr,"%s: Dumping space1 span list\n",FUNC);
- H5S_hyper_print_spans(stderr,space1->select.sel_info.hslab.span_lst);
-} /* end if */
-else {
- HDfprintf(stderr,"%s: Dumping space1 diminfo\n",FUNC);
- H5S_hyper_print_diminfo(stderr,space1);
-} /* end else */
-HDfprintf(stderr,"%s: space2 selection type=%d\n",FUNC,(int)H5S_GET_SELECT_TYPE(space2));
-if(space2->select.sel_info.hslab.span_lst) {
- HDfprintf(stderr,"%s: Dumping space2 span list\n",FUNC);
- H5S_hyper_print_spans(stderr,space2->select.sel_info.hslab.span_lst);
-} /* end if */
-else {
- HDfprintf(stderr,"%s: Dumping space2 diminfo\n",FUNC);
- H5S_hyper_print_diminfo(stderr,space2);
-} /* end else */
-#endif /* QAK */
/* Initialize iterator for each dataspace selection
* Use '0' for element size instead of actual element size to indicate
@@ -1456,28 +1414,8 @@ else {
/* Get the current block for each selection iterator */
if(H5S_SELECT_ITER_BLOCK(&iter1,start1,end1)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
-#ifdef QAK
-{
- HDfprintf(stderr,"%s: iter1 start={",FUNC);
- for(u=0; u<space1->extent.rank; u++)
- HDfprintf(stderr,"%Hd%s",start1[u],(u<(space1->extent.rank-1) ? ", " : "}\n"));
- HDfprintf(stderr,"%s: iter1 end={",FUNC);
- for(u=0; u<space1->extent.rank; u++)
- HDfprintf(stderr,"%Hd%s",end1[u],(u<(space1->extent.rank-1) ? ", " : "}\n"));
-}
-#endif /* QAK */
if(H5S_SELECT_ITER_BLOCK(&iter2,start2,end2)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
-#ifdef QAK
-{
- HDfprintf(stderr,"%s: iter2 start={",FUNC);
- for(u=0; u<space1->extent.rank; u++)
- HDfprintf(stderr,"%Hd%s",start2[u],(u<(space1->extent.rank-1) ? ", " : "}\n"));
- HDfprintf(stderr,"%s: iter2 end={",FUNC);
- for(u=0; u<space1->extent.rank; u++)
- HDfprintf(stderr,"%Hd%s",end2[u],(u<(space1->extent.rank-1) ? ", " : "}\n"));
-}
-#endif /* QAK */
/* The first block only compares the sizes and sets the relative offsets for later blocks */
if(first_block) {
@@ -1512,9 +1450,6 @@ else {
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
if((status2=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter2))<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
-#ifdef QAK
-HDfprintf(stderr,"%s: status1=%d, status2=%d\n",FUNC,(int)status1,(int)status2);
-#endif /* QAK */
/* Did we run out of blocks at the same time? */
if(status1==FALSE && status2==FALSE)
@@ -1541,9 +1476,7 @@ done:
if (H5S_SELECT_ITER_RELEASE(&iter2)<0)
HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
} /* end if */
-#ifdef QAK
-HDfprintf(stderr,"%s: Leaving, ret_value=%d\n",FUNC,ret_value);
-#endif /* QAK */
+
FUNC_LEAVE_NOAPI(ret_value);
} /* H5S_select_shape_same() */
@@ -1667,8 +1600,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5S_select_fscat (H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
const void *_buf)
{
@@ -1688,20 +1621,19 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI(H5S_select_fscat, FAIL);
/* Check args */
- assert (f);
- assert (dset);
- assert (store);
+ assert (io_info);
+ assert (op);
assert (space);
assert (iter);
assert (nelmts>0);
assert (_buf);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));
/* Allocate the vector I/O arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
- if((len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if((len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array");
- if((off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array");
} /* end if */
else {
@@ -1712,7 +1644,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Loop until all elements are written */
while(nelmts>0) {
/* Get list of sequences for selection to write */
- if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0)
+ if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,io_info->dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed");
/* Reset the current sequence information */
@@ -1721,7 +1653,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
mem_off=0;
/* Write sequence list out */
- if (H5D_seq_writevv(f, dxpl_cache, dxpl_id, dset, store, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
+ if ((*op)(io_info, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
/* Update buffer */
@@ -1732,7 +1664,7 @@ H5S_select_fscat (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
} /* end while */
done:
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
if(len!=NULL)
H5FL_SEQ_FREE(size_t,len);
if(off!=NULL)
@@ -1766,8 +1698,8 @@ done:
*-------------------------------------------------------------------------
*/
size_t
-H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5S_select_fgath (H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
void *_buf/*out*/)
{
@@ -1787,19 +1719,19 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI(H5S_select_fgath, 0);
/* Check args */
- assert (f);
- assert (dset);
- assert (store);
+ assert (io_info);
+ assert (io_info->dset);
+ assert (io_info->store);
assert (space);
assert (iter);
assert (nelmts>0);
assert (_buf);
/* Allocate the vector I/O arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
- if((len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if((len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "can't allocate I/O length vector array");
- if((off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "can't allocate I/O offset vector array");
} /* end if */
else {
@@ -1807,10 +1739,10 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
off=_off;
} /* end else */
- /* Loop until all elements are written */
+ /* Loop until all elements are read */
while(nelmts>0) {
- /* Get list of sequences for selection to write */
- if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0)
+ /* Get list of sequences for selection to read */
+ if(H5S_SELECT_GET_SEQ_LIST(space,H5S_GET_SEQ_LIST_SORTED,iter,io_info->dxpl_cache->vec_size,nelmts,&nseq,&nelem,off,len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, 0, "sequence length generation failed");
/* Reset the current sequence information */
@@ -1819,7 +1751,7 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
mem_off=0;
/* Read sequence list in */
- if (H5D_seq_readvv(f, dxpl_cache, dxpl_id, dset, store, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
+ if ((*op)(io_info, nseq, &dset_curr_seq, len, off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, 0, "read error");
/* Update buffer */
@@ -1830,7 +1762,7 @@ H5S_select_fgath (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
} /* end while */
done:
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
if(len!=NULL)
H5FL_SEQ_FREE(size_t,len);
if(off!=NULL)
@@ -2031,8 +1963,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5S_select_read(H5D_io_info_t *io_info,
+ H5O_layout_readvv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
void *buf/*out*/)
@@ -2061,10 +1993,12 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
FUNC_ENTER_NOAPI(H5S_select_read, FAIL);
/* Check args */
- assert(f);
- assert(store);
+ assert(io_info);
+ assert(io_info->dset);
+ assert(io_info->dxpl_cache);
+ assert(io_info->store);
assert(buf);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));
/* Initialize file iterator */
if (H5S_select_iter_init(&file_iter, file_space, elmt_size)<0)
@@ -2077,14 +2011,14 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
mem_iter_init=1; /* Memory selection iteration info has been initialized */
/* Allocate the vector I/O arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
- if((mem_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if((mem_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array");
- if((mem_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((mem_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array");
- if((file_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if((file_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array");
- if((file_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((file_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array");
} /* end if */
else {
@@ -2103,7 +2037,7 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Check if more file sequences are needed */
if(curr_file_seq>=file_nseq) {
/* Get sequences for file selection */
- if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0)
+ if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,io_info->dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed");
/* Start at the beginning of the sequences again */
@@ -2113,21 +2047,15 @@ H5S_select_read(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Check if more memory sequences are needed */
if(curr_mem_seq>=mem_nseq) {
/* Get sequences for memory selection */
- if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0)
+ if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,io_info->dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed");
/* Start at the beginning of the sequences again */
curr_mem_seq=0;
} /* end if */
-#ifdef QAK
-HDfprintf(stderr,"%s: curr_file_seq=%Zu, file_nseq=%Zu\n",FUNC,curr_file_seq,file_nseq);
-HDfprintf(stderr,"%s: curr_mem_seq=%Zu, mem_nseq=%Zu\n",FUNC,curr_mem_seq,mem_nseq);
-HDfprintf(stderr,"%s: file_off[%Zu]=%Hu, file_len[%Zu]=%Zu\n",FUNC,curr_file_seq,file_off[curr_file_seq],curr_file_seq,file_len[curr_file_seq]);
-HDfprintf(stderr,"%s: mem_off[%Zu]=%Hu, mem_len[%Zu]=%Zu\n",FUNC,curr_mem_seq,mem_off[curr_mem_seq],curr_mem_seq,mem_len[curr_mem_seq]);
-#endif /* QAK */
/* Read file sequences into current memory sequence */
- if ((tmp_file_len=H5D_seq_readvv(f, dxpl_cache, dxpl_id, dset, store,
+ if ((tmp_file_len=(*op)(io_info,
file_nseq, &curr_file_seq, file_len, file_off,
mem_nseq, &curr_mem_seq, mem_len, mem_off,
buf))<0)
@@ -2152,7 +2080,7 @@ done:
} /* end if */
/* Free vector arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
if(file_len!=NULL)
H5FL_SEQ_FREE(size_t,file_len);
if(file_off!=NULL)
@@ -2181,8 +2109,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
- H5D_t *dset, const H5D_storage_t *store,
+H5S_select_write(H5D_io_info_t *io_info,
+ H5O_layout_writevv_func_t op,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
    const void *buf/*in*/)
@@ -2209,31 +2137,23 @@ H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5S_select_write, FAIL);
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Entering, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
/* Check args */
- assert(f);
- assert(store);
+ assert(io_info);
+ assert(io_info->dset);
+ assert(io_info->store);
+ assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));
assert(buf);
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
/* Allocate the vector I/O arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
- if((mem_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if((mem_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array");
- if((mem_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((mem_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array");
- if((file_len = H5FL_SEQ_MALLOC(size_t,dxpl_cache->vec_size))==NULL)
+ if((file_len = H5FL_SEQ_MALLOC(size_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O length vector array");
- if((file_off = H5FL_SEQ_MALLOC(hsize_t,dxpl_cache->vec_size))==NULL)
+ if((file_off = H5FL_SEQ_MALLOC(hsize_t,io_info->dxpl_cache->vec_size))==NULL)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate I/O offset vector array");
} /* end if */
else {
@@ -2260,66 +2180,27 @@ H5S_select_write(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Loop, until all bytes are processed */
while(nelmts>0) {
/* Check if more file sequences are needed */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Before file sequence time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
if(curr_file_seq>=file_nseq) {
/* Get sequences for file selection */
- if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0)
+ if(H5S_SELECT_GET_SEQ_LIST(file_space,H5S_GET_SEQ_LIST_SORTED,&file_iter,io_info->dxpl_cache->vec_size,nelmts,&file_nseq,&file_nelem,file_off,file_len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed");
/* Start at the beginning of the sequences again */
curr_file_seq=0;
} /* end if */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - After file sequence time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
/* Check if more memory sequences are needed */
if(curr_mem_seq>=mem_nseq) {
/* Get sequences for memory selection */
- if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0)
+ if(H5S_SELECT_GET_SEQ_LIST(mem_space,0,&mem_iter,io_info->dxpl_cache->vec_size,nelmts,&mem_nseq,&mem_nelem,mem_off,mem_len)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed");
/* Start at the beginning of the sequences again */
curr_mem_seq=0;
} /* end if */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - After memory sequence time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
-#ifdef QAK
-{
- unsigned u;
-
-HDfprintf(stderr,"%s: curr_file_seq=%Zu, file_nseq=%Zu\n",FUNC,curr_file_seq,file_nseq);
-HDfprintf(stderr,"%s: curr_mem_seq=%Zu, mem_nseq=%Zu\n",FUNC,curr_mem_seq,mem_nseq);
-for(u=curr_file_seq; u<file_nseq; u++)
- HDfprintf(stderr,"%s: file_off[%u]=%Hu, file_len[%u]=%Zu\n",FUNC,u,file_off[u],u,file_len[u]);
-for(u=curr_mem_seq; u<mem_nseq; u++)
- HDfprintf(stderr,"%s: mem_off[%u]=%Hu, mem_len[%u]=%Zu\n",FUNC,u,mem_off[u],u,mem_len[u]);
-}
-#endif /* QAK */
/* Write memory sequences into file sequences */
- if ((tmp_file_len=H5D_seq_writevv(f, dxpl_cache, dxpl_id, dset, store,
+ if ((tmp_file_len=(*op)(io_info,
file_nseq, &curr_file_seq, file_len, file_off,
mem_nseq, &curr_mem_seq, mem_len, mem_off,
buf))<0)
@@ -2344,7 +2225,7 @@ done:
} /* end if */
/* Free vector arrays */
- if(dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
+ if(io_info->dxpl_cache->vec_size!=H5D_XFER_HYPER_VECTOR_SIZE_DEF) {
if(file_len!=NULL)
H5FL_SEQ_FREE(size_t,file_len);
if(file_off!=NULL)
@@ -2354,15 +2235,7 @@ done:
if(mem_off!=NULL)
H5FL_SEQ_FREE(hsize_t,mem_off);
} /* end if */
-#ifdef QAK
-{
- int mpi_rank;
- double time;
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- time = MPI_Wtime();
- HDfprintf(stderr,"%s: rank=%d - Leaving, time=%f\n",FUNC,mpi_rank,time);
-}
-#endif /* QAK */
+
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5S_select_write() */
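For readers following the H5Sselect.c hunks above: the separate f/dxpl_cache/dxpl_id/dset/store arguments are now carried in a single H5D_io_info_t, and the selection code invokes whatever readvv/writevv routine the caller passes in rather than hard-coding H5D_seq_readvv()/H5D_seq_writevv(). Below is a minimal sketch of the reworked call site; it assumes only the io_info field names visible in the asserts above (dset, dxpl_cache, dxpl_id, store) and uses H5D_contig_readvv purely as one example of a layout-specific callback, so the real caller in H5Dio.c may differ in detail.

    /* Illustrative sketch only; not verbatim H5Dio.c code. */
    H5D_io_info_t io_info;

    io_info.dset       = dset;        /* dataset being accessed             */
    io_info.dxpl_cache = dxpl_cache;  /* cached dataset-transfer properties */
    io_info.dxpl_id    = dxpl_id;     /* dataset transfer property list ID  */
    io_info.store      = store;       /* layout-specific storage info       */

    /* The layout's readvv routine is now a parameter (of type
     * H5O_layout_readvv_func_t); H5D_contig_readvv is just one example. */
    if(H5S_select_read(&io_info, H5D_contig_readvv,
            nelmts, elmt_size, file_space, mem_space, buf)<0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error");

The same shape applies to H5S_select_write(), with a H5O_layout_writevv_func_t callback and a const source buffer.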
diff --git a/src/H5T.c b/src/H5T.c
index 1f133cf..446b2df 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -2761,8 +2761,8 @@ done:
H5T_t*
H5T_open (H5G_entry_t *ent, hid_t dxpl_id)
{
- H5T_shared_t *shared_fo;
- H5T_t *dt;
+ H5T_shared_t *shared_fo=NULL;
+ H5T_t *dt=NULL;
H5T_t *ret_value;
FUNC_ENTER_NOAPI(H5T_open, NULL);
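The H5T.c hunk above initializes shared_fo and dt to NULL before any work is done, so the error path reached through the function's done: label can test and release them without touching uninitialized pointers. A standalone sketch of that pattern (generic C, not HDF5 code; names are placeholders):

    #include <stdlib.h>

    typedef struct shared_t { int refcount; } shared_t;
    typedef struct type_t   { shared_t *shared; } type_t;

    /* NULL-initialized locals plus one cleanup label: on failure,
     * only what was actually allocated gets freed. */
    static type_t *
    open_type(int fail_early)
    {
        shared_t *shared = NULL;   /* stands in for shared_fo */
        type_t   *dt     = NULL;   /* stands in for dt        */
        type_t   *ret    = NULL;

        if((shared = calloc(1, sizeof *shared)) == NULL)
            goto done;
        if(fail_early)
            goto done;             /* error before 'dt' is allocated */
        if((dt = calloc(1, sizeof *dt)) == NULL)
            goto done;

        dt->shared = shared;       /* success: 'dt' owns 'shared' now */
        ret = dt;

    done:
        if(ret == NULL) {          /* failure: release whatever exists */
            free(dt);              /* free(NULL) is a legal no-op      */
            free(shared);
        }
        return ret;
    }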
diff --git a/src/Makefile.in b/src/Makefile.in
index 2e21b59..21ccab1 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -31,7 +31,7 @@ DISTCLEAN=libhdf5.settings
## Source and object files for the library (lexicographically)...
LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5Dcontig.c H5Dcompact.c H5Dio.c \
- H5Distore.c H5Dmpio.c H5Dseq.c H5Dtest.c H5E.c H5F.c H5FD.c \
+ H5Distore.c H5Dmpio.c H5Dtest.c H5E.c H5F.c H5FD.c \
H5FDcore.c H5FDfamily.c H5FDgass.c H5FDlog.c H5FDmpi.c H5FDmpio.c \
H5FDmpiposix.c H5FDmulti.c H5FDsec2.c H5FDsrb.c H5FDstdio.c \
H5FDstream.c H5FL.c H5FO.c H5FS.c H5G.c H5Gent.c H5Gnode.c H5Gstab.c \