-rw-r--r--   MANIFEST                  |    1
-rw-r--r--   hl/src/H5DO.c             |  122
-rw-r--r--   hl/src/H5DOprivate.h      |   37
-rw-r--r--   src/H5Dchunk.c            |   84
-rw-r--r--   src/H5Dcompact.c          |    2
-rw-r--r--   src/H5Dcontig.c           |    2
-rw-r--r--   src/H5Dint.c              |    8
-rw-r--r--   src/H5Dio.c               |   31
-rw-r--r--   src/H5Dmpio.c             |  129
-rw-r--r--   src/H5Dpkg.h              |   16
-rw-r--r--   src/H5Dprivate.h          |    3
-rw-r--r--   src/H5Pdxpl.c             |   16
-rw-r--r--   src/H5Smpio.c             |  443
-rw-r--r--   src/H5Sprivate.h          |   13
-rw-r--r--   src/H5Sselect.c           |   53
-rw-r--r--   src/H5private.h           |    4
-rw-r--r--   testpar/t_coll_chunk.c    |  405
-rw-r--r--   testpar/t_dset.c          |  424
-rw-r--r--   testpar/testphdf5.h       |   19
19 files changed, 1404 insertions, 408 deletions
diff --git a/MANIFEST b/MANIFEST
index 8649f77..d3f7270 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -2098,7 +2098,6 @@
./hl/src/Makefile.am
./hl/src/Makefile.in
./hl/src/H5DO.c
-./hl/src/H5DOprivate.h
./hl/src/H5DOpublic.h
./hl/src/H5DS.c
./hl/src/H5DSprivate.h
diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c
index 9cfd8c1..99dbd93 100644
--- a/hl/src/H5DO.c
+++ b/hl/src/H5DO.c
@@ -18,8 +18,14 @@
#include <assert.h>
#include <stdio.h>
-#include "H5DOprivate.h"
+/* High-level library internal header file */
+#include "H5HLprivate2.h"
+/* public LT prototypes */
+#include "H5DOpublic.h"
+
+
+
/*-------------------------------------------------------------------------
* Function: H5DOwrite_chunk
*
@@ -30,108 +36,64 @@
* Programmer: Raymond Lu
* 30 July 2012
*
- * Modifications:
*-------------------------------------------------------------------------
*/
herr_t
H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
size_t data_size, const void *buf)
{
- hbool_t created_dxpl = FALSE;
- herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t created_dxpl = FALSE; /* Whether we created a DXPL */
+ hbool_t do_direct_write = TRUE; /* Flag for direct writes */
+ uint32_t data_size_32; /* Chunk data size (limited to 32-bits currently) */
+ herr_t ret_value = FAIL; /* Return value */
- if(dset_id < 0) {
- ret_value = FAIL;
+ /* Check arguments */
+ if(dset_id < 0)
goto done;
- }
-
- if(!buf) {
- ret_value = FAIL;
+ if(!buf)
goto done;
- }
-
- if(!offset) {
- ret_value = FAIL;
+ if(!offset)
goto done;
- }
-
- if(!data_size) {
- ret_value = FAIL;
+ if(!data_size)
+ goto done;
+ data_size_32 = (uint32_t)data_size;
+ if(data_size != (size_t)data_size_32)
goto done;
- }
+ /* If the user passed in a default DXPL, create one to pass to H5Dwrite() */
if(H5P_DEFAULT == dxpl_id) {
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
- ret_value = FAIL;
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
goto done;
- }
-
created_dxpl = TRUE;
- }
+ } /* end if */
- if(H5DO_write_chunk(dset_id, dxpl_id, filters, offset, data_size, buf) < 0) {
- ret_value = FAIL;
+ /* Set direct write parameters */
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
goto done;
- }
-
-done:
- if(created_dxpl) {
- if(H5Pclose(dxpl_id) < 0)
- ret_value = FAIL;
- }
-
- return ret_value;
-}
-
-/*-------------------------------------------------------------------------
- * Function: H5DO_write_chunk
- *
- * Purpose: Private function for H5DOwrite_chunk
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Raymond Lu
- * 30 July 2012
- *
- * Modifications:
- *-------------------------------------------------------------------------
- */
-herr_t
-H5DO_write_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
- size_t data_size, const void *buf)
-{
- hbool_t do_direct_write = TRUE;
- herr_t ret_value = SUCCEED; /* Return value */
-
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0) {
- ret_value = FAIL;
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &filters) < 0)
goto done;
- }
-
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &filters) < 0) {
- ret_value = FAIL;
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME, &offset) < 0)
goto done;
- }
-
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME, &offset) < 0) {
- ret_value = FAIL;
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &data_size_32) < 0)
goto done;
- }
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &data_size) < 0) {
- ret_value = FAIL;
+ /* Write chunk */
+ if(H5Dwrite(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0)
goto done;
- }
- if(H5Dwrite(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0) {
- ret_value = FAIL;
- goto done;
- }
+ /* Indicate success */
+ ret_value = SUCCEED;
done:
- do_direct_write = FALSE;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
- ret_value = FAIL;
+ if(created_dxpl) {
+ if(H5Pclose(dxpl_id) < 0)
+ ret_value = FAIL;
+ } /* end if */
+ else
+ /* Reset the direct write flag on user DXPL */
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
+ ret_value = FAIL;
+
+ return(ret_value);
+} /* end H5DOwrite_chunk() */
- return ret_value;
-}
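For context, a minimal caller-side sketch of the rewritten routine (the dataset handle, chunk layout, and pre-compressed buffer are assumed to exist; the offset must lie on a chunk boundary):

    #include "hdf5.h"
    #include "hdf5_hl.h"

    /* Sketch: write one pre-filtered chunk directly, bypassing the filter pipeline */
    static herr_t
    write_one_chunk(hid_t dset_id, const void *comp_buf, size_t comp_size)
    {
        hsize_t  offset[2]   = {0, 0};  /* start of the first chunk (chunk-aligned) */
        uint32_t filter_mask = 0;       /* 0 => all dataset filters were applied to comp_buf */

        /* comp_size must fit in 32 bits; larger values now fail the argument checks above */
        return H5DOwrite_chunk(dset_id, H5P_DEFAULT, filter_mask, offset, comp_size, comp_buf);
    }

Passing H5P_DEFAULT exercises the new created_dxpl path; with a caller-supplied DXPL the direct-write flag is reset in the done: block instead.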
diff --git a/hl/src/H5DOprivate.h b/hl/src/H5DOprivate.h
deleted file mode 100644
index fcea585..0000000
--- a/hl/src/H5DOprivate.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#ifndef _H5DOprivate_H
-#define _H5DOprivate_H
-
-/* High-level library internal header file */
-#include "H5HLprivate2.h"
-
-/* public LT prototypes */
-#include "H5DOpublic.h"
-
-/*-------------------------------------------------------------------------
- * Private functions
- *-------------------------------------------------------------------------
- */
-
-H5_HLDLL herr_t H5DO_write_chunk(hid_t dset_id,
- hid_t dxpl_id,
- uint32_t filters,
- const hsize_t *offset,
- size_t data_size,
- const void *buf);
-
-#endif
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 2330daa..7b8fc3a 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -164,10 +164,18 @@ typedef struct H5D_chunk_it_ud4_t {
/* Callback info for nonexistent readvv operation */
typedef struct H5D_chunk_readvv_ud_t {
unsigned char *rbuf; /* Read buffer to initialize */
- H5D_t *dset; /* Dataset to operate on */
+ const H5D_t *dset; /* Dataset to operate on */
hid_t dxpl_id; /* DXPL for operation */
} H5D_chunk_readvv_ud_t;
+/* Callback info for file selection iteration */
+typedef struct H5D_chunk_file_iter_ud_t {
+ H5D_chunk_map_t *fm; /* File->memory chunk mapping info */
+#ifdef H5_HAVE_PARALLEL
+ const H5D_io_info_t *io_info; /* I/O info for operation */
+#endif /* H5_HAVE_PARALLEL */
+} H5D_chunk_file_iter_ud_t;
+
/********************/
/* Local Prototypes */
@@ -297,7 +305,7 @@ H5FL_BLK_DEFINE_STATIC(chunk);
*/
herr_t
H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsize_t *offset,
- size_t data_size, const void *buf)
+ uint32_t data_size, const void *buf)
{
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
H5D_chunk_ud_t udata; /* User data for querying chunk info */
@@ -329,8 +337,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
/* Find out the file address of the chunk */
- if(H5D__chunk_lookup(dset, dxpl_id, offset, chunk_idx,
- &udata) < 0)
+ if(H5D__chunk_lookup(dset, dxpl_id, offset, chunk_idx, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
udata.filter_mask = filters;
@@ -685,7 +692,7 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space,
H5D_chunk_map_t *fm)
{
- H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
const H5T_t *mem_type = type_info->mem_type; /* Local pointer to memory datatype */
H5S_t *tmp_mspace = NULL; /* Temporary memory dataspace */
hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */
@@ -698,7 +705,6 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
H5S_sel_type fsel_type; /* Selection type on disk */
char bogus; /* "bogus" buffer to pass to selection iterator */
unsigned u; /* Local index variable */
- hbool_t sel_hyper_flag;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -796,6 +802,8 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk selections for single element")
} /* end if */
else {
+ hbool_t sel_hyper_flag; /* Whether file selection is a hyperslab */
+
/* Initialize skip list for chunk selections */
if(NULL == dataset->shared->cache.chunk.sel_chunks) {
if(NULL == (dataset->shared->cache.chunk.sel_chunks = H5SL_create(H5SL_TYPE_HSIZE, NULL)))
@@ -843,12 +851,20 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
} /* end while */
} /* end if */
else {
+ H5D_chunk_file_iter_ud_t udata; /* User data for iteration */
+
/* Create temporary datatypes for selection iteration */
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL), FALSE)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype")
+ /* Initialize the user data */
+ udata.fm = fm;
+#ifdef H5_HAVE_PARALLEL
+ udata.io_info = io_info;
+#endif /* H5_HAVE_PARALLEL */
+
/* Spaces might not be the same shape, iterate over the file selection directly */
- if(H5S_select_iterate(&bogus, f_tid, file_space, H5D__chunk_file_cb, fm) < 0)
+ if(H5S_select_iterate(&bogus, f_tid, file_space, H5D__chunk_file_cb, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections")
/* Reset "last chunk" info */
@@ -1265,7 +1281,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
/* Add temporary chunk to the list of chunks */
/* Allocate the file & memory chunk information */
- if (NULL==(new_chunk_info = H5FL_MALLOC (H5D_chunk_info_t))) {
+ if (NULL==(new_chunk_info = H5FL_MALLOC(H5D_chunk_info_t))) {
(void)H5S_close(tmp_fchunk);
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info")
} /* end if */
@@ -1276,7 +1292,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
new_chunk_info->index=chunk_index;
#ifdef H5_HAVE_PARALLEL
- /* store chunk selection information */
+ /* Store chunk selection information, for multi-chunk I/O */
if(io_info->using_mpi_vfd)
fm->select_chunk[chunk_index] = new_chunk_info;
#endif /* H5_HAVE_PARALLEL */
@@ -1476,9 +1492,10 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm)
+H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_udata)
{
- H5D_chunk_map_t *fm = (H5D_chunk_map_t *)_fm; /* File<->memory chunk mapping info */
+ H5D_chunk_file_iter_ud_t *udata = (H5D_chunk_file_iter_ud_t *)_udata; /* User data for operation */
+ H5D_chunk_map_t *fm = udata->fm; /* File<->memory chunk mapping info */
H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */
hsize_t coords_in_chunk[H5O_LAYOUT_NDIMS]; /* Coordinates of element in chunk */
hsize_t chunk_index; /* Chunk index */
@@ -1496,7 +1513,7 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons
/* If the chunk index is the same as the last chunk index we used,
* get the cached info to operate on.
*/
- chunk_info=fm->last_chunk_info;
+ chunk_info = fm->last_chunk_info;
} /* end if */
else {
/* If the chunk index is not the same as the last chunk index we used,
@@ -1553,14 +1570,20 @@ H5D__chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, cons
} /* end if */
} /* end if */
+#ifdef H5_HAVE_PARALLEL
+ /* Store chunk selection information, for collective multi-chunk I/O */
+ if(udata->io_info->using_mpi_vfd)
+ fm->select_chunk[chunk_index] = chunk_info;
+#endif /* H5_HAVE_PARALLEL */
+
/* Update the "last chunk seen" information */
- fm->last_index=chunk_index;
- fm->last_chunk_info=chunk_info;
+ fm->last_index = chunk_index;
+ fm->last_chunk_info = chunk_info;
} /* end else */
- /* Get the coordinates of the element in the chunk */
+ /* Get the offset of the element within the chunk */
for(u = 0; u < fm->f_ndims; u++)
- coords_in_chunk[u] = coords[u] % fm->layout->u.chunk.dim[u];
+ coords_in_chunk[u] = coords[u] - chunk_info->coords[u];
/* Add point to file selection for chunk */
if(H5S_select_elements(chunk_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0)
@@ -1669,8 +1692,8 @@ done:
htri_t
H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_op)
{
- const H5D_t *dataset = io_info->dset;
- htri_t ret_value = FAIL;
+ const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1812,8 +1835,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
/* Get the info for the chunk in the file */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id,
- chunk_info->coords, chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
/* Check for non-existant chunk & skip it if appropriate */
@@ -1942,8 +1964,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
/* Load the chunk into cache. But if the whole chunk is written,
* simply allocate space instead of load the chunk. */
- if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords,
- chunk_info->index, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
if((cacheable = H5D__chunk_cacheable(io_info, udata.addr, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
@@ -2293,7 +2314,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__chunk_create(H5D_t *dset /*in,out*/, hid_t dxpl_id)
+H5D__chunk_create(const H5D_t *dset /*in,out*/, hid_t dxpl_id)
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2774,7 +2795,7 @@ void *
H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
hbool_t relax)
{
- H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
+ const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_alloc */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
@@ -3211,7 +3232,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
+H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
hsize_t old_dim[])
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
@@ -3410,8 +3431,7 @@ H5D__chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
&chunk_idx) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
- if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx,
- &udata) < 0)
+ if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
HDassert(!H5F_addr_defined(udata.addr));
@@ -3576,7 +3596,7 @@ static herr_t
H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
- H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
+ const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
unsigned rank = udata->common.layout->ndims - 1; /* Dataset rank */
const hsize_t *chunk_offset = io_info->store->chunk.offset; /* Chunk offset */
@@ -3598,8 +3618,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
/* Get the info for the chunk in the file */
- if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset,
- io_info->store->chunk.index, &chk_udata) < 0)
+ if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset, io_info->store->chunk.index, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
/* If this chunk does not exist in cache or on disk, no need to do anything
@@ -4008,8 +4027,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
#endif /* NDEBUG */
/* Check if the chunk exists in cache or on disk */
- if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset,
- chk_io_info.store->chunk.index, &chk_udata) < 0)
+ if(H5D__chunk_lookup(dset, dxpl_id, chunk_offset, chk_io_info.store->chunk.index, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk")
/* Evict the entry from the cache if present, but do not flush
@@ -4148,7 +4166,7 @@ herr_t
H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[])
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
- H5D_t *dset = io_info->dset; /* Local pointer to dataset info */
+ const H5D_t *dset = io_info->dset; /* Local pointer to dataset info */
H5D_chunk_it_ud2_t udata; /* User data for iteration callback */
herr_t ret_value = SUCCEED; /* Return value */
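A small worked example of the revised in-chunk offset computation in H5D__chunk_file_cb (numbers made up for illustration):

    /* Element at file coordinates {13, 7}, chunk dimensions {10, 5}:
     * the containing chunk starts at chunk_info->coords = {10, 5}, so */
    coords_in_chunk[0] = 13 - 10;   /* = 3, same result as 13 % 10 */
    coords_in_chunk[1] =  7 -  5;   /* = 2, same result as  7 %  5 */

Both forms agree because chunk start coordinates are always multiples of the chunk dimensions; the subtraction simply reuses the chunk information already cached for the element instead of re-deriving it from the layout.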
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 0b34c07..789beab 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -117,7 +117,7 @@ H5FL_BLK_EXTERN(type_conv);
*-------------------------------------------------------------------------
*/
herr_t
-H5D__compact_fill(H5D_t *dset, hid_t dxpl_id)
+H5D__compact_fill(const H5D_t *dset, hid_t dxpl_id)
{
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index bf8c001..e5012ca 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -192,7 +192,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__contig_fill(H5D_t *dset, hid_t dxpl_id)
+H5D__contig_fill(const H5D_t *dset, hid_t dxpl_id)
{
H5D_io_info_t ioinfo; /* Dataset I/O info */
H5D_storage_t store; /* Union of storage info for dataset */
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 1704c0f..1886340 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -66,7 +66,7 @@ static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space
static herr_t H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset,
hid_t dapl_id);
static herr_t H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id);
-static herr_t H5D__init_storage(H5D_t *dataset, hbool_t full_overwrite,
+static herr_t H5D__init_storage(const H5D_t *dataset, hbool_t full_overwrite,
hsize_t old_dim[], hid_t dxpl_id);
@@ -1578,7 +1578,7 @@ H5D_typeof(const H5D_t *dset)
*-------------------------------------------------------------------------
*/
herr_t
-H5D__alloc_storage(H5D_t *dset/*in,out*/, hid_t dxpl_id, H5D_time_alloc_t time_alloc,
+H5D__alloc_storage(const H5D_t *dset, hid_t dxpl_id, H5D_time_alloc_t time_alloc,
hbool_t full_overwrite, hsize_t old_dim[])
{
H5F_t *f = dset->oloc.file; /* The dataset's file pointer */
@@ -1741,7 +1741,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__init_storage(H5D_t *dset, hbool_t full_overwrite, hsize_t old_dim[],
+H5D__init_storage(const H5D_t *dset, hbool_t full_overwrite, hsize_t old_dim[],
hid_t dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -2357,7 +2357,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__mark(H5D_t *dataset, hid_t dxpl_id, unsigned flags)
+H5D__mark(const H5D_t *dataset, hid_t UNUSED dxpl_id, unsigned flags)
{
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 055cdc3..87b8a17 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -59,7 +59,11 @@ static herr_t H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_i
hid_t file_space_id, hid_t dxpl_id, const void *buf);
/* Setup/teardown routines */
-static herr_t H5D__ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache,
+static herr_t H5D__ioinfo_init(H5D_t *dset,
+#ifndef H5_HAVE_PARALLEL
+const
+#endif /* H5_HAVE_PARALLEL */
+ H5D_dxpl_cache_t *dxpl_cache,
hid_t dxpl_id, const H5D_type_info_t *type_info, H5D_storage_t *store,
H5D_io_info_t *io_info);
static herr_t H5D__typeinfo_init(const H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache,
@@ -275,14 +279,15 @@ H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
+ /* Retrieve the 'direct write' flag */
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &direct_write) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting flag for direct chunk write")
/* Direct chunk write */
if(direct_write) {
- uint32_t direct_filters = 0;
+ uint32_t direct_filters;
hsize_t *direct_offset;
- size_t direct_datasize = 0;
+ uint32_t direct_datasize;
int ndims = 0;
hsize_t dims[H5O_LAYOUT_NDIMS];
hsize_t internal_offset[H5O_LAYOUT_NDIMS];
@@ -291,12 +296,11 @@ H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
if(H5D_CHUNKED != dset->shared->layout.type)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+ /* Retrieve parameters for direct chunk write */
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &direct_filters) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting filter info for direct chunk write")
-
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME, &direct_offset) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting offset info for direct chunk write")
-
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &direct_datasize) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting data size for direct chunk write")
@@ -305,7 +309,7 @@ H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dims, NULL)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
- for(i=0; i<ndims; i++) {
+ for(i = 0; i < ndims; i++) {
/* Make sure the offset doesn't exceed the dataset's dimensions */
if(direct_offset[i] > dims[i])
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
@@ -315,7 +319,7 @@ H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary")
internal_offset[i] = direct_offset[i];
- }
+ } /* end for */
/* Terminate the offset with a zero */
internal_offset[ndims] = 0;
@@ -323,7 +327,8 @@ H5D__pre_write(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
/* write raw data */
if(H5D__chunk_direct_write(dset, dxpl_id, direct_filters, internal_offset, direct_datasize, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write chunk directly")
- } else { /* Normal write */
+ } /* end if */
+ else { /* Normal write */
const H5S_t *mem_space = NULL;
const H5S_t *file_space = NULL;
char fake_char;
@@ -471,7 +476,7 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
/* Attempt to construct projected dataspace for memory dataspace */
if(H5S_select_construct_projection(mem_space, &projected_mem_space,
- (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.dst_type_size) < 0)
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, (const void **)&adj_buf, type_info.dst_type_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
HDassert(projected_mem_space);
HDassert(adj_buf);
@@ -697,7 +702,7 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
/* Attempt to construct projected dataspace for memory dataspace */
if(H5S_select_construct_projection(mem_space, &projected_mem_space,
- (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.src_type_size) < 0)
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, (const void **)&adj_buf, type_info.src_type_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
HDassert(projected_mem_space);
HDassert(adj_buf);
@@ -824,7 +829,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
+H5D__ioinfo_init(H5D_t *dset,
+#ifndef H5_HAVE_PARALLEL
+const
+#endif /* H5_HAVE_PARALLEL */
+ H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
const H5D_type_info_t *type_info, H5D_storage_t *store, H5D_io_info_t *io_info)
{
FUNC_ENTER_STATIC_NOERR
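To make the offset validation in H5D__pre_write concrete, a hypothetical example (dimensions invented for illustration):

    /* Dataset with extent {1000, 2000} and chunk dimensions {100, 200} */
    hsize_t ok_offset[2]  = {300, 400};   /* multiple of each chunk dimension: accepted */
    hsize_t bad_offset[2] = {350, 400};   /* 350 % 100 != 0: rejected as not falling on
                                             a chunk boundary */

An offset component larger than the corresponding dataset extent is rejected by the first check in the loop, before the boundary test is reached.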
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 3b5bb30..684f510 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -183,42 +183,30 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
/* Optimized MPI types flag must be set and it must be collective IO */
/* (Don't allow parallel I/O for the MPI-posix driver, since it doesn't do real collective I/O) */
if(!(H5S_mpi_opt_types_g && io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE
- && !IS_H5FD_MPIPOSIX(io_info->dset->oloc.file))) {
+ && !IS_H5FD_MPIPOSIX(io_info->dset->oloc.file)))
local_cause |= H5D_MPIO_SET_MPIPOSIX;
- } /* end if */
/* Don't allow collective operations if datatype conversions need to happen */
- if(!type_info->is_conv_noop) {
+ if(!type_info->is_conv_noop)
local_cause |= H5D_MPIO_DATATYPE_CONVERSION;
- } /* end if */
/* Don't allow collective operations if data transform operations should occur */
- if(!type_info->is_xform_noop) {
+ if(!type_info->is_xform_noop)
local_cause |= H5D_MPIO_DATA_TRANSFORMS;
- } /* end if */
/* Check whether these are both simple or scalar dataspaces */
if(!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(mem_space))
- && (H5S_SIMPLE == H5S_GET_EXTENT_TYPE(file_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(file_space)))) {
+ && (H5S_SIMPLE == H5S_GET_EXTENT_TYPE(file_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(file_space))))
local_cause |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
- } /* end if */
-
- /* Can't currently handle point selections */
- if(H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(mem_space)
- || H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(file_space)) {
- local_cause |= H5D_MPIO_POINT_SELECTIONS;
- } /* end if */
/* Dataset storage must be contiguous or chunked */
if(!(io_info->dset->shared->layout.type == H5D_CONTIGUOUS ||
- io_info->dset->shared->layout.type == H5D_CHUNKED)) {
+ io_info->dset->shared->layout.type == H5D_CHUNKED))
local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
- } /* end if */
/* check if external-file storage is used */
- if (io_info->dset->shared->dcpl_cache.efl.nused > 0) {
+ if(io_info->dset->shared->dcpl_cache.efl.nused > 0)
local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
- }
/* The handling of memory space is different for chunking and contiguous
* storage. For contiguous storage, mem_space and file_space won't change
@@ -228,11 +216,9 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
*/
/* Don't allow collective operations if filters need to be applied */
- if(io_info->dset->shared->layout.type == H5D_CHUNKED) {
- if(io_info->dset->shared->dcpl_cache.pline.nused > 0) {
- local_cause |= H5D_MPIO_FILTERS;
- } /* end if */
- } /* end if */
+ if(io_info->dset->shared->layout.type == H5D_CHUNKED &&
+ io_info->dset->shared->dcpl_cache.pline.nused > 0)
+ local_cause |= H5D_MPIO_FILTERS;
/* Form consensus opinion among all processes about whether to perform
* collective I/O
@@ -242,7 +228,6 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
ret_value = global_cause > 0 ? FALSE : TRUE;
-
done:
/* Write the local value of no-collective-cause to the DXPL. */
if(H5P_set(dx_plist, H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME, &local_cause) < 0)
@@ -946,15 +931,58 @@ if(H5DEBUG(D))
/* Obtain MPI derived datatype from all individual chunks */
for(u = 0; u < num_chunk; u++) {
- /* Disk MPI derived datatype */
+ hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
+ out-of-order displacements to the in-order
+ displacements of the MPI datatypes of the
+ point selection of the file space */
+ hbool_t is_permuted = FALSE;
+
+ /* Obtain disk and memory MPI derived datatype */
+ /* NOTE: The permute_map array can be allocated within H5S_mpio_space_type
+ * and will be fed into the next call to H5S_mpio_space_type
+ * where it will be freed.
+ */
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace,
- type_info->src_type_size, &chunk_ftype[u], &chunk_mpi_file_counts[u], &(chunk_mft_is_derived_array[u])) < 0)
+ type_info->src_type_size,
+ &chunk_ftype[u], /* OUT: datatype created */
+ &chunk_mpi_file_counts[u], /* OUT */
+ &(chunk_mft_is_derived_array[u]), /* OUT */
+ TRUE, /* this is a file space,
+ so permute the
+ datatype if the point
+ selections are out of
+ order */
+ &permute_map,/* OUT: a map to indicate the
+ permutation of points
+ selected in case they
+ are out of order */
+ &is_permuted /* OUT */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
-
- /* Buffer MPI derived datatype */
+ /* Sanity check */
+ if(is_permuted)
+ HDassert(permute_map);
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace,
- type_info->dst_type_size, &chunk_mtype[u], &chunk_mpi_mem_counts[u], &(chunk_mbt_is_derived_array[u])) < 0)
+ type_info->dst_type_size, &chunk_mtype[u],
+ &chunk_mpi_mem_counts[u],
+ &(chunk_mbt_is_derived_array[u]),
+ FALSE, /* this is a memory
+ space, so if the file
+ space is not
+ permuted, there is no
+ need to permute the
+ datatype if the point
+ selections are out of
+ order*/
+ &permute_map, /* IN: the permutation map
+ generated by the
+ file_space selection
+ and applied to the
+ memory selection */
+ &is_permuted /* IN */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type")
+ /* Sanity check */
+ if(is_permuted)
+ HDassert(!permute_map);
/* Chunk address relative to the first chunk */
chunk_addr_info_array[u].chunk_addr -= ctg_store.contig.dset_addr;
@@ -1309,12 +1337,51 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
if((file_space != NULL) && (mem_space != NULL)) {
int mpi_file_count; /* Number of file "objects" to transfer */
+ hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
+ out-of-order displacements to the in-order
+ displacements of the MPI datatypes of the
+ point selection of the file space */
+ hbool_t is_permuted = FALSE;
/* Obtain disk and memory MPI derived datatype */
- if(H5S_mpio_space_type(file_space, type_info->src_type_size, &mpi_file_type, &mpi_file_count, &mft_is_derived) < 0)
+ /* NOTE: The permute_map array can be allocated within H5S_mpio_space_type
+ * and will be fed into the next call to H5S_mpio_space_type
+ * where it will be freed.
+ */
+ if(H5S_mpio_space_type(file_space, type_info->src_type_size,
+ &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */
+ TRUE, /* this is a file space, so
+ permute the datatype if the
+ point selection is out of
+ order */
+ &permute_map, /* OUT: a map to indicate
+ the permutation of
+ points selected in
+ case they are out of
+ order */
+ &is_permuted /* OUT */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
- if(H5S_mpio_space_type(mem_space, type_info->src_type_size, &mpi_buf_type, &mpi_buf_count, &mbt_is_derived) < 0)
+ /* Sanity check */
+ if(is_permuted)
+ HDassert(permute_map);
+ if(H5S_mpio_space_type(mem_space, type_info->src_type_size,
+ &mpi_buf_type, &mpi_buf_count, &mbt_is_derived, /* OUT: datatype created */
+ FALSE, /* this is a memory space, so if
+ the file space is not
+ permuted, there is no need to
+ permute the datatype if the
+ point selections are out of
+ order*/
+ &permute_map /* IN: the permutation map
+ generated by the
+ file_space selection
+ and applied to the
+ memory selection */,
+ &is_permuted /* IN */) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")
+ /* Sanity check */
+ if(is_permuted)
+ HDassert(!permute_map);
} /* end if */
else {
/* For non-selection, participate with a none MPI derived datatype, the count is 0. */
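The permutation plumbing above exists because applications may list selected points in any order. A sketch of the kind of collective transfer this enables (handles and buffer assumed to be set up elsewhere; coordinates deliberately non-monotonic):

    /* Select three elements of a 1-D file dataspace out of order; the file-space
     * MPI datatype is built in increasing-offset order, so H5S_mpio_space_type()
     * records a permutation map and the memory-space call replays it. */
    hsize_t coords[3] = {6, 0, 4};

    if(H5Sselect_elements(file_space_id, H5S_SELECT_SET, (size_t)3, coords) < 0)
        return -1;
    if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
        return -1;
    if(H5Dwrite(dset_id, H5T_NATIVE_INT, mem_space_id, file_space_id, dxpl_id, buf) < 0)
        return -1;

Removing the H5D_MPIO_POINT_SELECTIONS check from H5D__mpio_opt_possible is what keeps such selections on the collective path instead of forcing a fallback to independent I/O.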
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 7e6eb4b..c3cee61 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -187,7 +187,7 @@ typedef enum H5D_io_op_type_t {
} H5D_io_op_type_t;
typedef struct H5D_io_info_t {
- H5D_t *dset; /* Pointer to dataset being operated on */
+ const H5D_t *dset; /* Pointer to dataset being operated on */
#ifndef H5_HAVE_PARALLEL
const
#endif /* H5_HAVE_PARALLEL */
@@ -531,7 +531,7 @@ H5_DLL H5D_t *H5D__create_named(const H5G_loc_t *loc, const char *name,
hid_t dapl_id, hid_t dxpl_id);
H5_DLL herr_t H5D__get_space_status(H5D_t *dset, H5D_space_status_t *allocation,
hid_t dxpl_id);
-H5_DLL herr_t H5D__alloc_storage(H5D_t *dset, hid_t dxpl_id, H5D_time_alloc_t time_alloc,
+H5_DLL herr_t H5D__alloc_storage(const H5D_t *dset, hid_t dxpl_id, H5D_time_alloc_t time_alloc,
hbool_t full_overwrite, hsize_t old_dim[]);
H5_DLL herr_t H5D__get_storage_size(H5D_t *dset, hid_t dxpl_id, hsize_t *storage_size);
H5_DLL haddr_t H5D__get_offset(const H5D_t *dset);
@@ -544,7 +544,7 @@ H5_DLL herr_t H5D__check_filters(H5D_t *dataset);
H5_DLL herr_t H5D__set_extent(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id);
H5_DLL herr_t H5D__get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache);
H5_DLL herr_t H5D__flush_sieve_buf(H5D_t *dataset, hid_t dxpl_id);
-H5_DLL herr_t H5D__mark(H5D_t *dataset, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5D__mark(const H5D_t *dataset, hid_t dxpl_id, unsigned flags);
H5_DLL herr_t H5D__flush_real(H5D_t *dataset, hid_t dxpl_id);
/* Internal I/O routines */
@@ -586,7 +586,7 @@ H5_DLL herr_t H5D__layout_oh_write(H5D_t *dataset, hid_t dxpl_id, H5O_t *oh,
H5_DLL herr_t H5D__contig_alloc(H5F_t *f, hid_t dxpl_id,
H5O_storage_contig_t *storage);
H5_DLL hbool_t H5D__contig_is_space_alloc(const H5O_storage_t *storage);
-H5_DLL herr_t H5D__contig_fill(H5D_t *dset, hid_t dxpl_id);
+H5_DLL herr_t H5D__contig_fill(const H5D_t *dset, hid_t dxpl_id);
H5_DLL herr_t H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space,
H5D_chunk_map_t *fm);
@@ -603,7 +603,7 @@ H5_DLL herr_t H5D__contig_delete(H5F_t *f, hid_t dxpl_id,
/* Functions that operate on chunked dataset storage */
H5_DLL htri_t H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr,
hbool_t write_op);
-H5_DLL herr_t H5D__chunk_create(H5D_t *dset /*in,out*/, hid_t dxpl_id);
+H5_DLL herr_t H5D__chunk_create(const H5D_t *dset /*in,out*/, hid_t dxpl_id);
H5_DLL herr_t H5D__chunk_set_info(const H5D_t *dset);
H5_DLL herr_t H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
hid_t dapl_id);
@@ -616,7 +616,7 @@ H5_DLL herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info,
const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
H5_DLL herr_t H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
-H5_DLL herr_t H5D__chunk_allocate(H5D_t *dset, hid_t dxpl_id,
+H5_DLL herr_t H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id,
hbool_t full_overwrite, hsize_t old_dim[]);
H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
const hsize_t *old_dim);
@@ -634,12 +634,14 @@ H5_DLL herr_t H5D__chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream);
H5_DLL herr_t H5D__chunk_dest(H5F_t *f, hid_t dxpl_id, H5D_t *dset);
H5_DLL herr_t H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
H5O_storage_t *store);
+H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
+ hsize_t *offset, uint32_t data_size, const void *buf);
#ifdef H5D_CHUNK_DEBUG
H5_DLL herr_t H5D__chunk_stats(const H5D_t *dset, hbool_t headers);
#endif /* H5D_CHUNK_DEBUG */
/* Functions that operate on compact dataset storage */
-H5_DLL herr_t H5D__compact_fill(H5D_t *dset, hid_t dxpl_id);
+H5_DLL herr_t H5D__compact_fill(const H5D_t *dset, hid_t dxpl_id);
H5_DLL herr_t H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src,
H5F_t *f_dst, H5O_storage_compact_t *storage_dst, H5T_t *src_dtype,
H5O_copy_t *cpy_info, hid_t dxpl_id);
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index 07f512b..9b430ba 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -177,8 +177,5 @@ H5_DLL herr_t H5D_chunk_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_ad
H5_DLL herr_t H5D_btree_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
int indent, int fwidth, unsigned ndims);
-H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
- hsize_t *offset, size_t data_size, const void *buf);
-
#endif /* _H5Dprivate_H */
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index b142e22..9e25a59 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -135,7 +135,7 @@
#define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_DEF 0
#define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_SIZE sizeof(hsize_t *)
#define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF NULL
-#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_SIZE sizeof(size_t)
+#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_SIZE sizeof(uint32_t)
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF 0
/******************/
@@ -188,6 +188,11 @@ const H5P_libclass_t H5P_CLS_DXFR[1] = {{
/*****************************/
+/***************************/
+/* Local Private Variables */
+/***************************/
+
+
/*-------------------------------------------------------------------------
* Function: H5P__dxfr_reg_prop
@@ -232,7 +237,7 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass)
hbool_t direct_chunk_flag = H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_DEF; /* Default value for the flag of direct chunk write */
uint32_t direct_chunk_filters = H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_DEF; /* Default value for the filters of direct chunk write */
hsize_t *direct_chunk_offset = H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF; /* Default value for the offset of direct chunk write */
- size_t direct_chunk_datasize = H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF; /* Default value for the datasize of direct chunk write */
+ uint32_t direct_chunk_datasize = H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF; /* Default value for the datasize of direct chunk write */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -362,8 +367,6 @@ done:
} /* end H5P__dxfr_reg_prop() */
-
-
/*-------------------------------------------------------------------------
* Function: H5P_dxfr_xform_del
*
@@ -1098,8 +1101,9 @@ H5Pset_btree_ratios(hid_t plist_id, double left, double middle,
H5TRACE4("e", "iddd", plist_id, left, middle, right);
/* Check arguments */
- if(left < 0.0 || left > 1.0 || middle < 0.0 || middle > 1.0 ||
- right < 0.0 || right > 1.0)
+ if(left < (double)0.0f || left > (double)1.0f
+ || middle < (double)0.0f || middle > (double)1.0f
+ || right < (double)0.0f || right > (double)1.0f)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "split ratio must satisfy 0.0<=X<=1.0")
/* Get the plist structure */
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 77e902f..284e16f 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -43,6 +43,14 @@ static herr_t H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
static herr_t H5S_mpio_none_type(MPI_Datatype *new_type, int *count,
hbool_t *is_derived_type);
+static herr_t H5S_mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
+ MPI_Aint *disp, MPI_Datatype *new_type);
+static herr_t H5S_mpio_point_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type,
+ hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted);
+static herr_t H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size,
+ hsize_t **permute_map, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type);
static herr_t H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
static herr_t H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
@@ -131,6 +139,335 @@ H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
/*-------------------------------------------------------------------------
+ * Function: H5S_mpio_create_point_datatype
+ *
+ * Purpose: Create a derived datatype for point selections.
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Outputs: *new_type the MPI type corresponding to the selection
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points,
+ MPI_Aint *disp, MPI_Datatype *new_type)
+{
+ MPI_Datatype elmt_type; /* MPI datatype for individual element */
+ hbool_t elmt_type_created = FALSE; /* Whether the element MPI datatype was created */
+ int mpi_code; /* MPI error code */
+ int *blocks = NULL; /* Array of block sizes for MPI hindexed create call */
+ hsize_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Create an MPI datatype for an element */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ elmt_type_created = TRUE;
+
+ /* Allocate block sizes for MPI datatype call */
+ if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
+
+ /* Would be nice to have Create_Hindexed_block to avoid this array of all ones */
+ for(u = 0; u < num_points; u++)
+ blocks[u] = 1;
+
+ /* Create an MPI datatype for the whole point selection */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+
+ /* Commit MPI datatype for later use */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+done:
+ if(elmt_type_created)
+ MPI_Type_free(&elmt_type);
+ if(blocks)
+ H5MM_free(blocks);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_mpio_create_point_datatype() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_mpio_point_type
+ *
+ * Purpose: Translate an HDF5 "point" selection into an MPI type.
+ * Create a permutation array to handle out-of-order point selections.
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Outputs: *new_type the MPI type corresponding to the selection
+ * *count how many objects of the new_type in selection
+ * (useful if this is the buffer type for xfer)
+ * *is_derived_type 0 if MPI primitive type, 1 if derived
+ * *permute_map the permutation of the displacements to create
+ * the MPI_Datatype
+ * *is_permuted 1 if the displacements are permuted, 0 if not
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+ int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute,
+ hbool_t *is_permuted)
+{
+ MPI_Aint *disp = NULL; /* Datatype displacement for each point*/
+ H5S_pnt_node_t *curr = NULL; /* Current point in the selection being operated on */
+ hssize_t snum_points; /* Signed number of elements in selection */
+ hsize_t num_points; /* Number of points in the selection */
+ hsize_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check args */
+ HDassert(space);
+
+ /* Get the total number of points selected */
+ if((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+ num_points = (hsize_t)snum_points;
+
+ /* Allocate array for element displacements */
+ if(NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+
+ /* Allocate array for element permutation - returned to caller */
+ if(do_permute)
+ if(NULL == (*permute = (hsize_t *)H5MM_malloc(sizeof(hsize_t) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate permutation array")
+
+ /* Iterate through list of elements */
+ curr = space->select.sel_info.pnt_lst->head;
+ for(u = 0 ; u < num_points ; u++) {
+ /* calculate the displacement of the current point */
+ disp[u] = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt);
+ disp[u] *= elmt_size;
+
+ /* This is a File Space used to set the file view, so adjust the displacements
+ * to have them monotonically non-decreasing.
+ * Generate the permutation array by recording, for each selected point,
+ * the position its displacement is shifted to in the sorted order. Example:
+ * Suppose 4 points with the following displacements are selected:
+ * Pt 1: disp=6 ; Pt 2: disp=3 ; Pt 3: disp=0 ; Pt 4: disp=4
+ * The permute map to sort the displacements in order will be:
+ * point 1: map[0] = num_points (4), indicating that this point is not moved
+ * (1st point selected)
+ * point 2: map[1] = 0, indicating that this point is moved to the first position,
+ * since disp_pt1(6) > disp_pt2(3)
+ * point 3: map[2] = 0, move to position 0, because it has the lowest disp among
+ * the points selected so far.
+ * point 4: map[3] = 2, move to the 2nd position, since point 1 has a higher disp,
+ * but points 2 and 3 have lower displacements.
+ */
+ if(do_permute) {
+ if(u > 0 && disp[u] < disp[u - 1]) {
+ unsigned s = 0, l = u, m = u / 2;
+
+ *is_permuted = TRUE;
+ do {
+ if(disp[u] > disp[m])
+ s = m + 1;
+ else if(disp[u] < disp[m])
+ l = m;
+ else
+ break;
+ m = s + ((l - s) / 2);
+ } while(s < l);
+
+ if(m < u) {
+ MPI_Aint temp;
+
+ temp = disp[u];
+ HDmemmove(disp + m + 1, disp + m, (u - m) * sizeof(MPI_Aint));
+ disp[m] = temp;
+ } /* end if */
+ (*permute)[u] = m;
+ } /* end if */
+ else
+ (*permute)[u] = num_points;
+ } /* end if */
+ /* this is a memory space, and no permutation is necessary to create
+ the derived datatype */
+ else {
+ ;/* do nothing */
+ } /* end else */
+
+ /* get the next point */
+ curr = curr->next;
+ } /* end for */
+
+ /* Create the MPI datatype for the set of element displacements */
+ if(H5S_mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection")
+
+ /* Set values about MPI datatype created */
+ *count = 1;
+ *is_derived_type = TRUE;
+
+done:
+ if(NULL != disp)
+ H5MM_free(disp);
+
+ /* Release the permutation buffer, if it wasn't used */
+ if(!(*is_permuted) && (*permute)) {
+ H5MM_free(*permute);
+ *permute = NULL;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_mpio_point_type() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_mpio_permute_type
+ *
+ * Purpose: Translate an HDF5 "all/hyper/point" selection into an MPI type,
+ * while applying the permutation map. This function is called if
+ * the file space selection is permuted due to out-of-order point
+ * selection and so the memory datatype has to be permuted using the
+ * permutation map created by the file selection.
+ *
+ * Note: This routine is called from H5S_mpio_space_type(), which is
+ * called first for the file dataspace to create the permutation map,
+ * and then for the memory dataspace, where that map is applied here.
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Outputs: *new_type the MPI type corresponding to the selection
+ * *count how many objects of the new_type in selection
+ * (useful if this is the buffer type for xfer)
+ * *is_derived_type 0 if MPI primitive type, 1 if derived
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+{
+ MPI_Aint *disp = NULL; /* Datatype displacement for each point*/
+ H5S_sel_iter_t sel_iter; /* Selection iteration info */
+ hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
+ hssize_t snum_points; /* Signed number of elements in selection */
+ hsize_t num_points; /* Number of points in the selection */
+ size_t max_elem; /* Maximum number of elements allowed in sequences */
+ hsize_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check args */
+ HDassert(space);
+
+ /* Get the total number of points selected */
+ if((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+ num_points = (hsize_t)snum_points;
+
+ /* Allocate array to store point displacements */
+ if(NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+
+ /* Initialize selection iterator */
+ if(H5S_select_iter_init(&sel_iter, space, elmt_size) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
+ sel_iter_init = TRUE; /* Selection iteration info has been initialized */
+
+ /* Set the number of elements to iterate over */
+ H5_ASSIGN_OVERFLOW(max_elem, num_points, hsize_t, size_t);
+
+ /* Loop, while elements left in selection */
+ u = 0;
+ while(max_elem > 0) {
+ hsize_t off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
+ size_t len[H5D_IO_VECTOR_SIZE]; /* Array to store sequence lengths */
+ size_t nelem; /* Number of elements used in sequences */
+ size_t nseq; /* Number of sequences generated */
+ size_t curr_seq; /* Current sequence being worked on */
+
+ /* Get the sequences of bytes */
+ if(H5S_SELECT_GET_SEQ_LIST(space, 0, &sel_iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+
+ /* Loop, while sequences left to process */
+ for(curr_seq = 0; curr_seq < nseq; curr_seq++) {
+ hsize_t curr_off; /* Current offset within sequence */
+ size_t curr_len; /* Length of bytes left to process in sequence */
+
+ /* Get the current offset */
+ curr_off = off[curr_seq];
+
+ /* Get the number of bytes in sequence */
+ curr_len = len[curr_seq];
+
+ /* Loop, while bytes left in sequence */
+ while(curr_len > 0) {
+ /* Set the displacement of the current point */
+ disp[u] = curr_off;
+
+ /* This is a memory displacement, so for each point selected,
+ * apply the map that was generated by the file selection */
+ if((*permute)[u] != num_points) {
+ MPI_Aint temp = disp[u];
+
+ HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u],
+ (u - (*permute)[u]) * sizeof(MPI_Aint));
+ disp[(*permute)[u]] = temp;
+ } /* end if */
+
+ /* Advance to next element */
+ u++;
+
+ /* Increment offset in dataspace */
+ curr_off += elmt_size;
+
+ /* Decrement number of bytes left in sequence */
+ curr_len -= elmt_size;
+ } /* end while */
+ } /* end for */
+
+ /* Decrement number of elements left to process */
+ max_elem -= nelem;
+ } /* end while */
+
+ /* Create the MPI datatype for the set of element displacements */
+ if(H5S_mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection")
+
+ /* Set values about MPI datatype created */
+ *count = 1;
+ *is_derived_type = TRUE;
+
+done:
+ /* Release selection iterator */
+ if(sel_iter_init)
+ if(H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
+
+ /* Free memory */
+ if(disp)
+ H5MM_free(disp);
+ if(*permute) {
+ H5MM_free(*permute);
+ *permute = NULL;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_mpio_permute_type() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5S_mpio_hyper_type
*
* Purpose: Translate an HDF5 hyperslab selection into an MPI type.
@@ -478,7 +815,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5S_obtain datatype
+ * Function: H5S_obtain_datatype
*
* Purpose: Obtain an MPI derived datatype based on span-tree
* implementation
@@ -673,8 +1010,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+ int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map,
+ hbool_t *is_permuted)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -684,44 +1022,75 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
HDassert(space);
HDassert(elmt_size);
- /* Creat MPI type based on the kind of selection */
+ /* Create MPI type based on the kind of selection */
switch(H5S_GET_EXTENT_TYPE(space)) {
case H5S_NULL:
case H5S_SCALAR:
case H5S_SIMPLE:
- switch(H5S_GET_SELECT_TYPE(space)) {
- case H5S_SEL_NONE:
- if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type")
- break;
-
- case H5S_SEL_ALL:
- if(H5S_mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type")
- break;
-
- case H5S_SEL_POINTS:
- /* not yet implemented */
- ret_value = FAIL;
- break;
-
- case H5S_SEL_HYPERSLABS:
- if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
- if(H5S_mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
- } /* end if */
- else {
- if(H5S_mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type")
- } /* end else */
- break;
-
- case H5S_SEL_ERROR:
- case H5S_SEL_N:
- default:
- HDassert("unknown selection type" && 0);
- break;
- } /* end switch */
+ /* If the file space has been permuted previously due to
+ * out-of-order point selection, then permute this selection which
+ * should be a memory selection to match the file space permutation.
+ */
+ if(TRUE == *is_permuted) {
+ switch(H5S_GET_SELECT_TYPE(space)) {
+ case H5S_SEL_NONE:
+ if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'none' selection to MPI type")
+ break;
+
+ case H5S_SEL_ALL:
+ case H5S_SEL_POINTS:
+ case H5S_SEL_HYPERSLABS:
+ /* Sanity check */
+ HDassert(!do_permute);
+
+ if(H5S_mpio_permute_type(space, elmt_size, permute_map, new_type, count, is_derived_type) < 0)
+                        HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert permuted selection to MPI type")
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert("unknown selection type" && 0);
+ break;
+ } /* end switch */
+ } /* end if */
+        /* The file space is not permuted, so convert the selection normally */
+ else {
+ switch(H5S_GET_SELECT_TYPE(space)) {
+ case H5S_SEL_NONE:
+ if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type")
+ break;
+
+ case H5S_SEL_ALL:
+ if(H5S_mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type")
+ break;
+
+ case H5S_SEL_POINTS:
+ if(H5S_mpio_point_type(space, elmt_size, new_type, count, is_derived_type, do_permute, permute_map, is_permuted) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'point' selection to MPI type")
+ break;
+
+ case H5S_SEL_HYPERSLABS:
+ if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
+ if(H5S_mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
+ } /* end if */
+ else {
+ if(H5S_mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type")
+ } /* end else */
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert("unknown selection type" && 0);
+ break;
+ } /* end switch */
+ } /* end else */
break;
case H5S_NO_CLASS:
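Note: the permutation comments above imply a two-pass calling pattern for the new
do_permute/permute_map/is_permuted parameters: the file dataspace is converted first
and may record a permutation, and the memory dataspace is converted second and must
follow it. A hedged caller sketch (the variable names, and the assumption that
file_space, mem_space and elmt_size are already set up, are illustrative rather than
copied from this patch):

    MPI_Datatype file_type, buf_type;        /* MPI types for file and memory       */
    int          file_count, buf_count;      /* Element counts for each type        */
    hbool_t      file_derived, buf_derived;  /* Whether each type must be freed     */
    hsize_t     *permute_map = NULL;         /* Filled in by the file-space pass    */
    hbool_t      is_permuted = FALSE;        /* Set if the file selection reorders  */

    /* File space first: allowed to record an out-of-order point permutation */
    if(H5S_mpio_space_type(file_space, elmt_size, &file_type, &file_count,
            &file_derived, TRUE, &permute_map, &is_permuted) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")

    /* Memory space second: must follow the permutation recorded above */
    if(H5S_mpio_space_type(mem_space, elmt_size, &buf_type, &buf_count,
            &buf_derived, FALSE, &permute_map, &is_permuted) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")

If the memory-space pass takes the permutation path, it frees permute_map and resets it
to NULL in the done: block of H5S_mpio_permute_type() above, so the caller does not need
to release the map itself in that case.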
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index 0e67af1..be8a28f 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -272,12 +272,13 @@ H5_DLL herr_t H5S_select_iter_release(H5S_sel_iter_t *sel_iter);
/* (Defined in H5S.c) */
H5_DLLVAR hbool_t H5S_mpi_opt_types_g;
-H5_DLL herr_t
-H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- int *count,
- hbool_t *is_derived_type );
+H5_DLL herr_t H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
+ /* out: */ MPI_Datatype *new_type,
+ int *count,
+ hbool_t *is_derived_type,
+ hbool_t do_permute,
+ hsize_t **permute_map,
+                                  hbool_t *is_permuted);
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5Sprivate_H */
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index 44688b6..2cb4b38 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -34,8 +34,8 @@
/* Local functions */
#ifdef LATER
-static herr_t H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end);
-static htri_t H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter);
+static herr_t H5S_select_iter_block(const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end);
+static htri_t H5S_select_iter_has_next_block(const H5S_sel_iter_t *iter);
static herr_t H5S_select_iter_next_block(H5S_sel_iter_t *iter);
#endif /* LATER */
@@ -922,7 +922,7 @@ H5S_select_iter_init(H5S_sel_iter_t *sel_iter, const H5S_t *space, size_t elmt_s
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
-H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
+H5S_select_iter_coords(const H5S_sel_iter_t *sel_iter, hsize_t *coords)
{
herr_t ret_value; /* return value */
@@ -964,7 +964,7 @@ H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
+H5S_select_iter_block(const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
{
herr_t ret_value; /* return value */
@@ -1004,7 +1004,7 @@ H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
REVISION LOG
--------------------------------------------------------------------------*/
hsize_t
-H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
+H5S_select_iter_nelmts(const H5S_sel_iter_t *sel_iter)
{
hsize_t ret_value; /* return value */
@@ -1043,7 +1043,7 @@ H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
REVISION LOG
--------------------------------------------------------------------------*/
static htri_t
-H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter)
+H5S_select_iter_has_next_block(const H5S_sel_iter_t *iter)
{
herr_t ret_value; /* return value */
@@ -1217,24 +1217,13 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
H5T_t *dt; /* Datatype structure */
H5S_sel_iter_t iter; /* Selection iteration info */
hbool_t iter_init = FALSE; /* Selection iteration info has been initialized */
- uint8_t *loc; /* Current element location in buffer */
- hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of element in dataspace */
hssize_t nelmts; /* Number of elements in selection */
hsize_t space_size[H5O_LAYOUT_NDIMS]; /* Dataspace size */
- hsize_t off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
- hsize_t curr_off; /* Current offset within sequence */
- hsize_t tmp_off; /* Temporary offset within sequence */
- size_t len[H5D_IO_VECTOR_SIZE]; /* Array to store sequence lengths */
- size_t curr_len; /* Length of bytes left to process in sequence */
- size_t nseq; /* Number of sequences generated */
- size_t curr_seq; /* Current sequnce being worked on */
- size_t nelem; /* Number of elements used in sequences */
size_t max_elem; /* Maximum number of elements allowed in sequences */
size_t elmt_size; /* Datatype size */
unsigned ndims; /* Number of dimensions in dataspace */
- int i; /* Local Index variable */
- herr_t user_ret=0; /* User's return value */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t user_ret = 0; /* User's return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1274,12 +1263,21 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
/* Loop, while elements left in selection */
while(max_elem > 0 && user_ret == 0) {
+ hsize_t off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
+ size_t len[H5D_IO_VECTOR_SIZE]; /* Array to store sequence lengths */
+ size_t nelem; /* Number of elements used in sequences */
+ size_t nseq; /* Number of sequences generated */
+ size_t curr_seq; /* Current sequence being worked on */
+
/* Get the sequences of bytes */
if(H5S_SELECT_GET_SEQ_LIST(space, 0, &iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
/* Loop, while sequences left to process */
- for(curr_seq=0; curr_seq<nseq && user_ret==0; curr_seq++) {
+ for(curr_seq = 0; curr_seq < nseq && user_ret == 0; curr_seq++) {
+ hsize_t curr_off; /* Current offset within sequence */
+ size_t curr_len; /* Length of bytes left to process in sequence */
+
/* Get the current offset */
curr_off = off[curr_seq];
@@ -1288,6 +1286,11 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
/* Loop, while bytes left in sequence */
while(curr_len > 0 && user_ret == 0) {
+ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of element in dataspace */
+ hsize_t tmp_off; /* Temporary offset within sequence */
+ uint8_t *loc; /* Current element location in buffer */
+ int i; /* Local Index variable */
+
/* Compute the coordinate from the offset */
for(i = (int)ndims, tmp_off = curr_off; i >= 0; i--) {
coords[i] = tmp_off % space_size[i];
@@ -1298,22 +1301,22 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
loc = (unsigned char *)buf + curr_off;
/* Call user's callback routine */
- user_ret=(*op)(loc,type_id,ndims,coords,operator_data);
+ user_ret = (*op)(loc, type_id, ndims, coords, operator_data);
/* Increment offset in dataspace */
- curr_off+=elmt_size;
+ curr_off += elmt_size;
/* Decrement number of bytes left in sequence */
- curr_len-=elmt_size;
+ curr_len -= elmt_size;
} /* end while */
} /* end for */
/* Decrement number of elements left to process */
- max_elem-=nelem;
+ max_elem -= nelem;
} /* end while */
/* Set return value */
- ret_value=user_ret;
+ ret_value = user_ret;
done:
/* Release selection iterator */
diff --git a/src/H5private.h b/src/H5private.h
index ac4574f..7462eda 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -437,8 +437,8 @@
/*
* Maximum & minimum values for our typedefs.
*/
-#define HSIZET_MAX ((hsize_t)ULLONG_MAX)
-#define HSSIZET_MAX ((hssize_t)LLONG_MAX)
+#define HSIZET_MAX ((hsize_t)ULLONG_MAX)
+#define HSSIZET_MAX ((hssize_t)LLONG_MAX)
#define HSSIZET_MIN (~(HSSIZET_MAX))
/*
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 73e7f09..85ea931 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -16,6 +16,9 @@
#include "testphdf5.h"
#include "H5Dprivate.h"
+#define HYPER 1
+#define POINT 2
+#define ALL 3
/* some commonly used routines for collective chunk IO tests*/
@@ -23,14 +26,17 @@ static void ccslab_set(int mpi_rank,int mpi_size,hsize_t start[],hsize_t count[]
hsize_t stride[],hsize_t block[],int mode);
static void ccdataset_fill(hsize_t start[],hsize_t count[],
- hsize_t stride[],hsize_t block[],DATATYPE*dataset);
+ hsize_t stride[],hsize_t block[],DATATYPE*dataset,
+ int mem_selection);
static void ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE*dataset);
static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
- hsize_t block[], DATATYPE *dataset, DATATYPE *original);
+ hsize_t block[], DATATYPE *dataset, DATATYPE *original,
+ int mem_selection);
-static void coll_chunktest(const char* filename,int chunk_factor,int select_factor,int api_option);
+static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
+ int api_option, int file_selection, int mem_selection, int mode);
/*-------------------------------------------------------------------------
@@ -73,7 +79,15 @@ coll_chunk1(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 1, BYROW_CONT, API_NONE);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
@@ -117,7 +131,15 @@ coll_chunk2(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
}
@@ -164,7 +186,15 @@ coll_chunk3(void)
int mpi_size;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -208,7 +238,15 @@ coll_chunk4(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -252,7 +290,15 @@ coll_chunk5(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -298,7 +344,15 @@ coll_chunk6(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -342,7 +396,15 @@ coll_chunk7(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -386,7 +448,15 @@ coll_chunk8(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -430,7 +500,15 @@ coll_chunk9(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
@@ -474,7 +552,15 @@ coll_chunk10(void)
{
const char *filename = GetTestParameters();
- coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
}
@@ -505,14 +591,16 @@ coll_chunk10(void)
*-------------------------------------------------------------------------
*/
-
static void
coll_chunktest(const char* filename,
int chunk_factor,
int select_factor,
- int api_option)
+ int api_option,
+ int file_selection,
+ int mem_selection,
+ int mode)
{
- hid_t file,dataset, file_dataspace;
+ hid_t file, dataset, file_dataspace, mem_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
hsize_t dims[RANK], chunk_dims[RANK];
@@ -532,6 +620,11 @@ coll_chunktest(const char* filename,
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+ int i;
+
/* set up MPI parameters */
MPI_Comm_size(comm,&mpi_size);
MPI_Comm_rank(comm,&mpi_rank);
@@ -551,7 +644,6 @@ coll_chunktest(const char* filename,
dims[0] = SPACE_DIM1*mpi_size;
dims[1] = SPACE_DIM2;
-
/* allocate memory for data buffer */
data_array1 = (int *)malloc(dims[0] * dims[1] * sizeof(int));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -559,9 +651,25 @@ coll_chunktest(const char* filename,
/* set up dimensions of the slab this process accesses */
ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
file_dataspace = H5Screate_simple(2, dims, NULL);
VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+ if(ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
crp_plist = H5Pcreate(H5P_DATASET_CREATE);
VRFY((crp_plist >= 0),"");
@@ -569,25 +677,67 @@ coll_chunktest(const char* filename,
chunk_dims[0] = dims[0]/chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
-
(chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2);
status = H5Pset_chunk(crp_plist, 2, chunk_dims);
VRFY((status >= 0),"chunk creation property list succeeded");
dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
- file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
+ file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
VRFY((dataset >= 0),"dataset created succeeded");
status = H5Pclose(crp_plist);
VRFY((status >= 0), "");
/*put some trivial data in the data array */
- ccdataset_fill(start, stride, count,block, data_array1);
+    ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
MESG("data_array initialized");
- status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride,
- count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
/* set up the collective transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
@@ -604,33 +754,39 @@ coll_chunktest(const char* filename,
case API_LINK_HARD:
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
VRFY((status>= 0),"collective chunk optimization succeeded");
- break;
+ break;
+
case API_MULTI_HARD:
status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
VRFY((status>= 0),"collective chunk optimization succeeded ");
- break;
+ break;
+
case API_LINK_TRUE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
+ break;
+
case API_LINK_FALSE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- break;
+ break;
+
case API_MULTI_COLL:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
+ break;
+
case API_MULTI_IND:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
- break;
+ break;
+
default:
- ;
+ ;
}
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
@@ -641,44 +797,42 @@ coll_chunktest(const char* filename,
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
-
- break;
+ break;
case API_MULTI_HARD:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ break;
case API_LINK_TRUE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
-
- break;
+ break;
case API_LINK_FALSE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ break;
case API_MULTI_COLL:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ break;
case API_MULTI_IND:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
- break;
+ break;
default:
;
@@ -687,7 +841,7 @@ coll_chunktest(const char* filename,
#endif
/* write data collectively */
- status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
xfer_plist, data_array1);
VRFY((status >= 0),"dataset write succeeded");
@@ -698,32 +852,38 @@ coll_chunktest(const char* filename,
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
- break;
+ break;
+
case API_MULTI_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
- break;
+ break;
+
case API_LINK_TRUE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
- break;
+ break;
+
case API_LINK_FALSE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
- break;
+ break;
+
case API_MULTI_COLL:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
- break;
+ break;
+
case API_MULTI_IND:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
- break;
+ break;
+
default:
;
}
@@ -739,12 +899,15 @@ coll_chunktest(const char* filename,
status = H5Sclose(file_dataspace);
VRFY((status >= 0),"");
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0),"");
+
+
status = H5Fclose(file);
VRFY((status >= 0),"");
if (data_array1) HDfree(data_array1);
-
/* Use collective read to verify the correctness of collective write. */
/* allocate memory for data buffer */
@@ -771,15 +934,68 @@ coll_chunktest(const char* filename,
/* set up dimensions of the slab this process accesses */
ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
- /* obtain the file dataspace*/
+ /* obtain the file and mem dataspace*/
file_dataspace = H5Dget_space (dataset);
VRFY((file_dataspace >= 0), "");
- status=H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0), "");
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space (dataset);
+ VRFY((mem_dataspace >= 0), "");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
/* fill dataset with test data */
- ccdataset_fill(start, stride,count,block, data_origin1);
+    ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
VRFY((xfer_plist >= 0),"");
@@ -790,13 +1006,12 @@ coll_chunktest(const char* filename,
VRFY((status>= 0),"set independent IO collectively succeeded");
}
-
- status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
xfer_plist, data_array1);
VRFY((status >=0),"dataset read succeeded");
/* verify the read data with original expected data */
- status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
if (status) nerrors++;
status = H5Pclose(xfer_plist);
@@ -804,17 +1019,23 @@ coll_chunktest(const char* filename,
/* close dataset collectively */
status=H5Dclose(dataset);
- VRFY((status >= 0), "");
+ VRFY((status >= 0), "H5Dclose");
/* release all IDs created */
- H5Sclose(file_dataspace);
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0),"H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0),"H5Sclose");
/* close the file collectively */
- H5Fclose(file);
+ status = H5Fclose(file);
+ VRFY((status >= 0),"H5Fclose");
/* release data buffers */
- if (data_array1) free(data_array1);
- if (data_origin1) free(data_origin1);
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
+ if(data_origin1) HDfree(data_origin1);
}
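Note: when mem_selection is ALL, the memory dataspace created in coll_chunktest() above
is a flat, 1-D space of num_points elements rather than the 2-D file shape. That is why
ccdataset_fill() and ccdataset_vrfy() in the hunks below switch to the running index k
instead of computing a (row, column) offset whenever ALL is selected in memory: the data
buffer is filled and verified in simple linear order.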
@@ -933,12 +1154,12 @@ ccdataset_fill(hsize_t start[],
hsize_t stride[],
hsize_t count[],
hsize_t block[],
- DATATYPE * dataset)
+ DATATYPE * dataset,
+ int mem_selection)
{
DATATYPE *dataptr = dataset;
DATATYPE *tmptr;
- hsize_t i,j,k1,k2;
-
+    hsize_t i, j, k1, k2, k = 0;
/* put some trivial data in the data_array */
tmptr = dataptr;
@@ -950,10 +1171,16 @@ ccdataset_fill(hsize_t start[],
for(k2 = 0; k2 < count[1]; k2++) {
for(j = 0;j < block[1]; j++) {
- dataptr = tmptr + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
+ start[1]+k2*stride[1]+j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
- *dataptr = (DATATYPE)(k1+k2+i+j);
+ *dataptr = (DATATYPE)(k1+k2+i+j);
}
}
}
@@ -1000,9 +1227,10 @@ ccdataset_vrfy(hsize_t start[],
hsize_t stride[],
hsize_t block[],
DATATYPE *dataset,
- DATATYPE *original)
+ DATATYPE *original,
+ int mem_selection)
{
- hsize_t i, j,k1,k2;
+    hsize_t i, j, k1, k2, k = 0;
int vrfyerrs;
DATATYPE *dataptr,*oriptr;
@@ -1020,26 +1248,31 @@ ccdataset_vrfy(hsize_t start[],
vrfyerrs = 0;
- for (k1 = 0; k1 < count[0];k1++) {
- for(i = 0;i < block[0];i++) {
- for(k2 = 0; k2<count[1];k2++) {
- for(j=0;j<block[1];j++) {
-
- dataptr = dataset + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
- oriptr = original + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
- start[1]+k2*stride[1]+j);
-
- if (*dataptr != *oriptr){
- if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- *(original), *(dataset));
- }
- }
- }
- }
- }
+    for(k1 = 0; k1 < count[0]; k1++) {
+        for(i = 0; i < block[0]; i++) {
+            for(k2 = 0; k2 < count[1]; k2++) {
+                for(j = 0; j < block[1]; j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
+ start[1]+k2*stride[1]+j);
+ oriptr = original + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
+ start[1]+k2*stride[1]+j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr){
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+                            printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+                                   (unsigned long)i, (unsigned long)j,
+                                   *oriptr, *dataptr);
+ }
+ }
+ }
+ }
+ }
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
printf("[more errors ...]\n");
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index fca87f6..4870eb6 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -118,6 +118,54 @@ if(VERBOSE_MED){
}
}
+/*
+ * Setup the coordinates for point selection.
+ */
+void point_set(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ size_t num_points,
+ hsize_t coords[],
+ int order)
+{
+    hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
+ if(OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if(IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for(i = 0 ; i < count[0]; i++)
+ for(j = 0 ; j < count[1]; j++)
+ for(m = 0 ; m < block[0]; m++)
+ for(n = 0 ; n < block[1]; n++)
+ if(OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if(IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if(VERBOSE_MED) {
+ printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ k = 0;
+ for(i = 0; i < num_points ; i++) {
+ printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
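Note: a minimal usage sketch for point_set(), assuming an already-created 2-D dataspace
sid; the start/count/stride/block values are only an example and are not taken from the
tests below:

    hsize_t start[RANK]  = {0, 0};
    hsize_t count[RANK]  = {1, 1};
    hsize_t stride[RANK] = {1, 1};
    hsize_t block[RANK]  = {1, 4};          /* one row of four elements */
    size_t num_points = (size_t)(block[0] * block[1] * count[0] * count[1]);
    hsize_t *coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
    herr_t ret;

    /* Generate the coordinate list, last point first */
    point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);

    /* Select exactly those elements in the dataspace */
    ret = H5Sselect_elements(sid, H5S_SELECT_SET, num_points, coords);
    VRFY((ret >= 0), "H5Sselect_elements succeeded");

    HDfree(coords);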
/*
* Fill the dataset with trivial data for testing.
@@ -501,7 +549,8 @@ dataset_writeAll(void)
hid_t sid; /* Dataspace ID */
hid_t file_dataspace; /* File dataspace ID */
hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
hid_t datatype; /* Datatype ID */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
hsize_t dims[RANK]; /* dataset dim sizes */
@@ -512,6 +561,11 @@ dataset_writeAll(void)
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
hsize_t block[RANK]; /* for hyperslab setting */
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+ int i;
+
herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
@@ -526,6 +580,11 @@ dataset_writeAll(void)
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ /* set up the coords array selection */
+ num_points = dim1;
+ coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -573,6 +632,13 @@ dataset_writeAll(void)
dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset3 >= 0), "H5Dcreate2 succeeded");
+ dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dcreate2 succeeded");
+ dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dcreate2 succeeded");
+ dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dcreate2 succeeded");
+
/* release 2-D space ID created */
H5Sclose(sid);
@@ -827,8 +893,6 @@ dataset_writeAll(void)
VRFY((ret>= 0),"set independent IO collectively succeeded");
}
-
-
/* write data collectively */
MESG("writeAll with scalar dataspace");
ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -846,6 +910,137 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
+
+ if(data_array1) free(data_array1);
+ data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = dim1;
+ stride[0] = 1;
+ stride[1] = dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+    /* Dataset5: point selection in File - Hyperslab selection in Memory */
+ /* create a file dataspace independently */
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space (dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space (dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    /* Dataset6: point selection in File - Point selection in Memory */
+ /* create a file dataspace independently */
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space (dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space (dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    /* Dataset7: point selection in File - All selection in Memory */
+ /* create a file dataspace independently */
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space (dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
/*
* All writes completed. Close datasets collectively
*/
@@ -856,13 +1051,20 @@ dataset_writeAll(void)
ret = H5Dclose(dataset3);
VRFY((ret >= 0), "H5Dclose3 succeeded");
ret = H5Dclose(dataset4);
- VRFY((ret >= 0), "H5Dclose3 succeeded");
+ VRFY((ret >= 0), "H5Dclose4 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) free(data_array1);
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
}
/*
@@ -882,7 +1084,7 @@ dataset_readAll(void)
hid_t xfer_plist; /* Dataset transfer properties list */
hid_t file_dataspace; /* File dataspace ID */
hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
DATATYPE *data_array1 = NULL; /* data buffer */
DATATYPE *data_origin1 = NULL; /* expected data buffer */
@@ -892,6 +1094,11 @@ dataset_readAll(void)
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
hsize_t block[RANK]; /* for hyperslab setting */
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+    int i, j, k;
+
herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
@@ -906,6 +1113,11 @@ dataset_readAll(void)
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ /* set up the coords array selection */
+ num_points = dim1;
+ coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
/* allocate memory for data buffer */
data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
@@ -939,6 +1151,14 @@ dataset_readAll(void)
dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded");
+ /* open another dataset collectively */
+ dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded");
+ dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded");
+ dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded");
+
/*
* Set up dimensions of the slab this process accesses.
*/
@@ -1077,6 +1297,162 @@ dataset_readAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
+ if(data_array1) free(data_array1);
+ if(data_origin1) free(data_origin1);
+ data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ data_origin1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = dim1;
+ stride[0] = 1;
+ stride[1] = dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+    /* Dataset5: point selection in memory - Hyperslab selection in file */
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space (dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset5 succeeded");
+
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ if(data_array1) free(data_array1);
+ data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+    /* Dataset6: point selection in File - Point selection in Memory */
+ /* create a file dataspace independently */
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space (dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space (dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset6 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if(data_array1) free(data_array1);
+ data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+    /* Dataset7: point selection in memory - All selection in file */
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ num_points = dim0 * dim1;
+ k=0;
+ for (i=0 ; i<dim0; i++) {
+ for (j=0 ; j<dim1; j++) {
+ coords[k++] = i;
+ coords[k++] = j;
+ }
+ }
+ mem_dataspace = H5Dget_space (dataset7);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset7 succeeded");
+
+ start[0] = dim0/mpi_size * mpi_rank;
+ start[1] = 0;
+ ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
/*
* All reads completed. Close datasets collectively
@@ -1085,13 +1461,20 @@ dataset_readAll(void)
VRFY((ret >= 0), "H5Dclose1 succeeded");
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) free(data_array1);
- if(data_origin1) free(data_origin1);
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
+ if(data_origin1) HDfree(data_origin1);
}
@@ -3073,9 +3456,6 @@ actual_io_mode_tests(void) {
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
*
- * TEST_POINT_SELECTIONS:
- * Test for selecting elements of dataspce as the cause of breaking collective I/O.
- *
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
* Test for Compact layout as the cause of breaking collective I/O.
*
@@ -3247,12 +3627,6 @@ test_no_collective_cause_mode(int selection_mode)
no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
}
- if (selection_mode & TEST_POINT_SELECTIONS ) {
- test_name = "Broken Collective I/O - Point Selection";
- no_collective_cause_local_expected |= H5D_MPIO_POINT_SELECTIONS;
- no_collective_cause_global_expected |= H5D_MPIO_POINT_SELECTIONS;
- }
-
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT ||
selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset";
@@ -3311,17 +3685,6 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((mem_space >= 0), "mem_space created");
}
- if (selection_mode & TEST_POINT_SELECTIONS) {
- coord[0][0] = 0; coord[0][1] = 0;
- coord[1][0] = 1; coord[1][1] = 1;
- ret = H5Sselect_elements (file_space, H5S_SELECT_SET, NELM, (const hsize_t *)coord);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
- ret = H5Sselect_elements (mem_space, H5S_SELECT_SET, NELM, (const hsize_t *)coord);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
- }
-
-
/* Get the number of elements in the selection */
length = dim0 * dim1;
@@ -3437,7 +3800,7 @@ test_no_collective_cause_mode(int selection_mode)
* have the correct values.
*
* NOTE:
- * This is a temprary function.
+ * This is a temporary function.
* test_no_collective_cause_mode(TEST_FILTERS) will replace this when
 * H5Dcreate and H5Dwrite support the mpio and filter features.
*
@@ -3688,7 +4051,6 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
test_no_collective_cause_mode (TEST_SET_MPIPOSIX);
test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
- test_no_collective_cause_mode (TEST_POINT_SELECTIONS);
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef H5_HAVE_FILTER_FLETCHER32
@@ -3704,7 +4066,7 @@ no_collective_cause_tests(void)
*/
test_no_collective_cause_mode (TEST_SET_MPIPOSIX | TEST_DATATYPE_CONVERSION);
test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS | TEST_POINT_SELECTIONS);
+ test_no_collective_cause_mode (TEST_SET_MPIPOSIX | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
return;
}
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 1dd4bfa..08bbf12 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -45,6 +45,13 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define DATASETNAME4 "Data4"
#define DATASETNAME5 "Data5"
#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
+
+/* point selection order */
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
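Note: the two orders only affect how point_set() (added in t_dset.c) lays out the
coordinate pairs. For a 1 x 2 block starting at (0, 0), IN_ORDER produces
coords = {0, 0, 0, 1}, i.e. the points (0, 0) and (0, 1), while OUT_OF_ORDER fills the
array from the end and produces coords = {0, 1, 0, 0}, i.e. the same points in reverse.
The reversed order is what drives the out-of-order permutation path added to H5Smpio.c.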
/* Hyperslab layout styles */
#define BYROW 1 /* divide into slabs of rows */
@@ -181,13 +188,12 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
#define TEST_DATA_TRANSFORMS 0x008
#define TEST_SET_MPIPOSIX 0x010
#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x020
-#define TEST_POINT_SELECTIONS 0x040
-#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x080
-#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x100
-#define TEST_FILTERS 0x200
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x040
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x080
+#define TEST_FILTERS 0x100
 /* TEST_FILTERS will take the place of this after supporting mpio + filter for
* H5Dcreate and H5Dwrite */
-#define TEST_FILTERS_READ 0x400
+#define TEST_FILTERS_READ 0x200
/* Don't erase these lines, they are put here for debugging purposes */
/*
@@ -295,5 +301,6 @@ hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type, hbool_
MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
hsize_t block[], DATATYPE *dataset, DATATYPE *original);
-
+void point_set (hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ size_t num_points, hsize_t coords[], int order);
#endif /* PHDF5TEST_H */