author     Quincey Koziol <koziol@hdfgroup.org>    2010-07-19 05:05:45 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2010-07-19 05:05:45 (GMT)
commit     f82774c0d5a59c8ff48c91bd1339eb13605b2b87 (patch)
tree       68289ae6df66d56f69371c6c540de2050abaa431
parent     075f618e23fdfefb104e6df289a010a884aa5a02 (diff)
[svn-r19092] Description:
Bring "shape same" changes from LBL branch to trunk. These changes allow selections that have the same shape, but are projected into dataspaces of different rank, to be detected correctly, and they also add code to project a dataspace into a greater or lesser number of dimensions so that the I/O can proceed in a faster way. These changes also contain several bug fixes and _lots_ of code cleanups to the MPI datatype creation code. Many other miscellaneous code cleanups are included as well...

Tested on:
    FreeBSD/32 6.3 (duty) in debug mode
    FreeBSD/64 6.3 (liberty) w/C++ & FORTRAN, in debug mode
    Linux/32 2.6 (jam) w/PGI compilers, w/default API=1.8.x, w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-amd64 2.6 (amani) w/Intel compilers, w/default API=1.6.x, w/C++ & FORTRAN, in production mode
    Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN, w/szip filter, in production mode
    Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN, in production mode
    Linux/64-amd64 2.6 (abe) w/parallel, w/FORTRAN, in debug mode
    Mac OS X/32 10.6.3 (amazon) in debug mode
    Mac OS X/32 10.6.3 (amazon) w/C++ & FORTRAN, w/threadsafe, in production mode
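For a concrete sense of what this enables at the API level, here is a minimal serial sketch, assuming a hypothetical file "shape_same.h5" and dataset "data" (error checking and data initialization omitted). It reads one 10x10 plane of a rank-3 dataset directly into a rank-2 memory buffer, which is exactly the "same shape, different rank" case this change handles; the H5Dio.c changes below detect that case and project the memory dataspace to the file space's rank before the transfer.

#include "hdf5.h"

int main(void)
{
    hsize_t file_dims[3] = {4, 10, 10};   /* rank-3 dataset in the file */
    hsize_t mem_dims[2]  = {10, 10};      /* rank-2 buffer in memory */
    hsize_t start[3]     = {2, 0, 0};     /* pick one 10x10 plane */
    hsize_t count[3]     = {1, 10, 10};
    int     plane[10][10];

    /* Hypothetical file & dataset names, for illustration only */
    hid_t file   = H5Fcreate("shape_same.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t fspace = H5Screate_simple(3, file_dims, NULL);
    hid_t dset   = H5Dcreate2(file, "data", H5T_NATIVE_INT, fspace,
                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    hid_t mspace = H5Screate_simple(2, mem_dims, NULL);

    /* Select a 1 x 10 x 10 hyperslab in the rank-3 file dataspace; the
     * rank-2 memory dataspace selects all 10 x 10 elements, so the two
     * selections have the same shape but different ranks. */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

    /* With this change, the library recognizes the matching shapes and
     * projects the memory dataspace to the file space's rank internally,
     * rather than taking a slower general-purpose path. */
    H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, plane);

    H5Sclose(mspace);
    H5Sclose(fspace);
    H5Dclose(dset);
    H5Fclose(file);
    return 0;
}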
-rw-r--r--  MANIFEST                         1
-rw-r--r--  src/H5Dchunk.c                   3
-rw-r--r--  src/H5Dio.c                     98
-rw-r--r--  src/H5Dmpio.c                  124
-rw-r--r--  src/H5Sall.c                    99
-rw-r--r--  src/H5Shyper.c                 828
-rw-r--r--  src/H5Smpio.c                  627
-rw-r--r--  src/H5Snone.c                  254
-rw-r--r--  src/H5Spkg.h                     6
-rw-r--r--  src/H5Spoint.c                 181
-rw-r--r--  src/H5Sprivate.h                21
-rw-r--r--  src/H5Sselect.c                740
-rw-r--r--  test/testframe.c                 2
-rw-r--r--  test/tselect.c                5244
-rw-r--r--  testpar/Makefile.am              3
-rw-r--r--  testpar/Makefile.in              7
-rw-r--r--  testpar/t_rank_projection.c   4041
-rw-r--r--  testpar/t_span_tree.c         1927
-rw-r--r--  testpar/testphdf5.c             21
-rw-r--r--  testpar/testphdf5.h              4
20 files changed, 13125 insertions, 1106 deletions
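The new parallel test, testpar/t_rank_projection.c, exercises the same behavior under MPI, where it also passes through the reworked MPI datatype creation code in H5Dmpio.c and H5Smpio.c. Below is a minimal collective sketch in the same spirit (hypothetical file and dataset names, error checking omitted; not taken from the actual test): each process writes its own rank-2 buffer into one plane of a shared rank-3 dataset using a collective transfer. Build it with a parallel HDF5 compiler wrapper (e.g. h5pcc) and run it under mpiexec.

#include "hdf5.h"
#include <mpi.h>

#define NROWS 8
#define NCOLS 8

int main(int argc, char *argv[])
{
    int     mpi_rank, mpi_size, i, j;
    int     slab[NROWS][NCOLS];            /* rank-2 buffer per process */
    hsize_t mem_dims[2] = {NROWS, NCOLS};
    hsize_t file_dims[3], start[3], count[3];
    hid_t   fapl, file, fspace, mspace, dset, dxpl;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    for(i = 0; i < NROWS; i++)
        for(j = 0; j < NCOLS; j++)
            slab[i][j] = mpi_rank;

    /* Rank-3 file dataset: one NROWS x NCOLS plane per MPI process */
    file_dims[0] = (hsize_t)mpi_size; file_dims[1] = NROWS; file_dims[2] = NCOLS;
    start[0] = (hsize_t)mpi_rank;     start[1] = 0;         start[2] = 0;
    count[0] = 1;                     count[1] = NROWS;     count[2] = NCOLS;

    /* Create the file for MPI-IO access */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fcreate("rank_projection.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    fspace = H5Screate_simple(3, file_dims, NULL);
    dset   = H5Dcreate2(file, "slabs", H5T_NATIVE_INT, fspace,
                        H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    mspace = H5Screate_simple(2, mem_dims, NULL);

    /* Each process selects its own plane of the rank-3 file dataspace */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

    /* Collective write: rank-2 memory selection, rank-3 file selection */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, slab);

    H5Pclose(dxpl);
    H5Sclose(mspace);
    H5Sclose(fspace);
    H5Dclose(dset);
    H5Pclose(fapl);
    H5Fclose(file);
    MPI_Finalize();
    return 0;
}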
diff --git a/MANIFEST b/MANIFEST
index d597d53..f3f1753 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -960,6 +960,7 @@
./testpar/t_filter_read.c
./testpar/t_span_tree.c
./testpar/t_posix_compliant.c
+./testpar/t_rank_projection.c
./testpar/testpar.h
./testpar/testphdf5.c
./testpar/testphdf5.h
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index a28bce9..88c4ab9 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -2665,7 +2665,6 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
- hbool_t found = FALSE; /*already in cache? */
haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
size_t chunk_size; /*size of a chunk */
void *chunk = NULL; /*the file chunk */
@@ -2797,7 +2796,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
rdcc->stats.ninits++;
} /* end else */
} /* end else */
- HDassert(found || chunk_size > 0);
+ HDassert(chunk_size > 0);
if(ent) {
/*
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 285451e..b7c2ecb 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -291,6 +291,19 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_type_info_t type_info; /* Datatype info for operation */
hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */
+ H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */
+ /* projection of the supplied mem_space to a new */
+ /* data space with rank equal to that of */
+ /* file_space. */
+ /* */
+ /* This field is only used if */
+ /* H5S_select_shape_same() returns TRUE when */
+ /* comparing the mem_space and the data_space, */
+ /* and the mem_space have different rank. */
+ /* */
+ /* Note that if this variable is used, the */
+ /* projected mem space must be discarded at the */
+ /* end of the function to avoid a memory leak. */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hssize_t snelmts; /*total number of elmts (signed) */
hsize_t nelmts; /*total number of elmts */
@@ -340,6 +353,37 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
if(!(H5S_has_extent(mem_space)))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set")
+ /* H5S_select_shape_same() has been modified to accept topologically identical
+ * selections with different rank as having the same shape (if the most
+ * rapidly changing coordinates match up), but the I/O code still has
+ * difficulties with the notion.
+ *
+ * To solve this, we check to see if H5S_select_shape_same() returns true,
+ * and if the ranks of the mem and file spaces are different. If they are,
+ * construct a new mem space that is equivalent to the old mem space, and
+ * use that instead.
+ *
+ * Note that in general, this requires us to touch up the memory buffer as
+ * well.
+ */
+ if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
+ H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) {
+ void *adj_buf = NULL; /* Pointer to the location in buf corresponding */
+ /* to the beginning of the projected mem space. */
+
+ /* Attempt to construct projected dataspace for memory dataspace */
+ if(H5S_select_construct_projection(mem_space, &projected_mem_space,
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.dst_type_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
+ HDassert(projected_mem_space);
+ HDassert(adj_buf);
+
+ /* Switch to using projected memory dataspace & adjusted buffer */
+ mem_space = projected_mem_space;
+ buf = adj_buf;
+ } /* end if */
+
+
/* Retrieve dataset properties */
/* <none needed in the general case> */
@@ -417,6 +461,11 @@ done:
if(type_info_init && H5D_typeinfo_term(&type_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info")
+ /* discard projected mem space if it was created */
+ if(NULL != projected_mem_space)
+ if(H5S_close(projected_mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_read() */
@@ -442,6 +491,19 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_type_info_t type_info; /* Datatype info for operation */
hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */
+ H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */
+ /* projection of the supplied mem_space to a new */
+ /* data space with rank equal to that of */
+ /* file_space. */
+ /* */
+ /* This field is only used if */
+ /* H5S_select_shape_same() returns TRUE when */
+ /* comparing the mem_space and the data_space, */
+ /* and the mem_space have different rank. */
+ /* */
+ /* Note that if this variable is used, the */
+ /* projected mem space must be discarded at the */
+ /* end of the function to avoid a memory leak. */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
hssize_t snelmts; /*total number of elmts (signed) */
hsize_t nelmts; /*total number of elmts */
@@ -515,6 +577,37 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
file_space = dataset->shared->space;
if(!mem_space)
mem_space = file_space;
+
+ /* H5S_select_shape_same() has been modified to accept topologically
+ * identical selections with different rank as having the same shape
+ * (if the most rapidly changing coordinates match up), but the I/O
+ * code still has difficulties with the notion.
+ *
+ * To solve this, we check to see if H5S_select_shape_same() returns
+ * true, and if the ranks of the mem and file spaces are different.
+ * If they are, construct a new mem space that is equivalent to the
+ * old mem space, and use that instead.
+ *
+ * Note that in general, this requires us to touch up the memory buffer
+ * as well.
+ */
+ if(TRUE == H5S_select_shape_same(mem_space, file_space) &&
+ H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) {
+ void *adj_buf = NULL; /* Pointer to the location in buf corresponding */
+ /* to the beginning of the projected mem space. */
+
+ /* Attempt to construct projected dataspace for memory dataspace */
+ if(H5S_select_construct_projection(mem_space, &projected_mem_space,
+ (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, &adj_buf, type_info.src_type_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace")
+ HDassert(projected_mem_space);
+ HDassert(adj_buf);
+
+ /* Switch to using projected memory dataspace & adjusted buffer */
+ mem_space = projected_mem_space;
+ buf = adj_buf;
+ } /* end if */
+
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
@@ -608,6 +701,11 @@ done:
if(type_info_init && H5D_typeinfo_term(&type_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info")
+ /* discard projected mem space if it was created */
+ if(NULL != projected_mem_space)
+ if(H5S_close(projected_mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D_write() */
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index ad9b737..e646a7b 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -115,7 +115,7 @@ static herr_t H5D_inter_collective_io(H5D_io_info_t *io_info,
const H5D_type_info_t *type_info, const H5S_t *file_space,
const H5S_t *mem_space);
static herr_t H5D_final_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, size_t nelmts, MPI_Datatype *mpi_file_type,
+ const H5D_type_info_t *type_info, hsize_t nelmts, MPI_Datatype *mpi_file_type,
MPI_Datatype *mpi_buf_type);
#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
static herr_t H5D_sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
@@ -819,10 +819,10 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type
H5D_chunk_map_t *fm, int sum_chunk)
{
H5D_chunk_addr_info_t *chunk_addr_info_array = NULL;
- hbool_t mbt_is_derived = FALSE;
- hbool_t mft_is_derived = FALSE;
MPI_Datatype chunk_final_mtype; /* Final memory MPI datatype for all chunks with seletion */
+ hbool_t chunk_final_mtype_is_derived = FALSE;
MPI_Datatype chunk_final_ftype; /* Final file MPI datatype for all chunks with seletion */
+ hbool_t chunk_final_ftype_is_derived = FALSE;
H5D_storage_t ctg_store; /* Storage info for "fake" contiguous dataset */
size_t total_chunks;
haddr_t *total_chunk_addr_array = NULL;
@@ -830,7 +830,10 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type
MPI_Datatype *chunk_ftype = NULL;
MPI_Aint *chunk_disp_array = NULL;
MPI_Aint *chunk_mem_disp_array = NULL;
- int *blocklen = NULL;
+ hbool_t *chunk_mft_is_derived_array = NULL; /* Flags to indicate each chunk's MPI file datatype is derived */
+ hbool_t *chunk_mbt_is_derived_array = NULL; /* Flags to indicate each chunk's MPI memory datatype is derived */
+ int *chunk_mpi_file_counts = NULL; /* Count of MPI file datatype for each chunk */
+ int *chunk_mpi_mem_counts = NULL; /* Count of MPI memory datatype for each chunk */
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED;
@@ -897,7 +900,7 @@ if(H5DEBUG(D))
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
} /* end if */
else {
- size_t mpi_buf_count; /* Number of MPI types */
+ hsize_t mpi_buf_count; /* Number of MPI types */
size_t num_chunk; /* Number of chunks for this process */
size_t u; /* Local index variable */
@@ -912,21 +915,25 @@ if(H5DEBUG(D))
/* Set up MPI datatype for chunks selected */
if(num_chunk) {
- hsize_t mpi_mem_extra_offset; /* Extra offset for memory MPI datatype */
- hsize_t mpi_file_extra_offset; /* Extra offset for file MPI datatype */
- size_t mpi_mem_count; /* Memory MPI datatype count */
- size_t mpi_file_count; /* File MPI datatype count */
- hbool_t locl_mbt_is_derived = FALSE, /* Whether the buffer (memory) type is derived and needs to be free'd */
- local_mft_is_derived = FALSE; /* Whether the file type is derived and needs to be free'd */
- int blocklen_value; /* Placeholder for array fill */
-
/* Allocate chunking information */
- chunk_addr_info_array= H5MM_malloc(num_chunk * sizeof(H5D_chunk_addr_info_t));
- chunk_mtype = H5MM_malloc(num_chunk * sizeof(MPI_Datatype));
- chunk_ftype = H5MM_malloc(num_chunk * sizeof(MPI_Datatype));
- chunk_disp_array = H5MM_malloc(num_chunk * sizeof(MPI_Aint));
- chunk_mem_disp_array = H5MM_calloc(num_chunk * sizeof(MPI_Aint));
- blocklen = H5MM_malloc(num_chunk * sizeof(int));
+ if(NULL == (chunk_addr_info_array = (H5D_chunk_addr_info_t *)H5MM_malloc(num_chunk * sizeof(H5D_chunk_addr_info_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk array buffer")
+ if(NULL == (chunk_mtype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory datatype buffer")
+ if(NULL == (chunk_ftype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file datatype buffer")
+ if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc(num_chunk * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
+ if(NULL == (chunk_mem_disp_array = (MPI_Aint *)H5MM_calloc(num_chunk * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory displacement buffer")
+ if(NULL == (chunk_mpi_mem_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory counts buffer")
+ if(NULL == (chunk_mpi_file_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file counts buffer")
+ if(NULL == (chunk_mbt_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory is derived datatype flags buffer")
+ if(NULL == (chunk_mft_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file is derived datatype flags buffer")
#ifdef H5D_DEBUG
if(H5DEBUG(D))
@@ -945,14 +952,12 @@ if(H5DEBUG(D))
for(u = 0; u < num_chunk; u++) {
/* Disk MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace,
- type_info->src_type_size, &chunk_ftype[u], &mpi_file_count,
- &mpi_file_extra_offset, &local_mft_is_derived) < 0)
+ type_info->src_type_size, &chunk_ftype[u], &chunk_mpi_file_counts[u], &(chunk_mft_is_derived_array[u])) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
/* Buffer MPI derived datatype */
if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace,
- type_info->dst_type_size, &chunk_mtype[u], &mpi_mem_count,
- &mpi_mem_extra_offset, &locl_mbt_is_derived) < 0)
+ type_info->dst_type_size, &chunk_mtype[u], &chunk_mpi_mem_counts[u], &(chunk_mbt_is_derived_array[u])) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type")
/* Chunk address relative to the first chunk */
@@ -963,39 +968,38 @@ if(H5DEBUG(D))
chunk_disp_array[u] = (MPI_Aint)chunk_addr_info_array[u].chunk_addr;
} /* end for */
- /* Initialize the buffer with the constant value 1 */
- blocklen_value = 1;
- H5V_array_fill(blocklen, &blocklen_value, sizeof(int), num_chunk);
-
/* Create final MPI derived datatype for the file */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, blocklen, chunk_disp_array, chunk_ftype, &chunk_final_ftype)))
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_file_counts, chunk_disp_array, chunk_ftype, &chunk_final_ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ chunk_final_ftype_is_derived = TRUE;
/* Create final MPI derived datatype for memory */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct(num_chunk, blocklen, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype)))
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_mem_counts, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_mtype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ chunk_final_mtype_is_derived = TRUE;
/* Free the file & memory MPI datatypes for each chunk */
for(u = 0; u < num_chunk; u++) {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_mtype + u)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if(chunk_mbt_is_derived_array[u])
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_mtype + u)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_ftype + u)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if(chunk_mft_is_derived_array[u])
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(chunk_ftype + u)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
} /* end for */
- /* buffer, file derived datatypes should be true */
- mbt_is_derived = TRUE;
- mft_is_derived = TRUE;
- mpi_buf_count = (size_t)1;
+ /* We have a single, complicated MPI datatype for both memory & file */
+ mpi_buf_count = (hsize_t)1;
} /* end if */
else { /* no selection at all for this process */
/* Allocate chunking information */
- total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t) * total_chunks);
+ if(NULL == (total_chunk_addr_array = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * total_chunks)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate total chunk address array buffer")
/* Retrieve chunk address map */
if(H5D_chunk_addrmap(io_info, total_chunk_addr_array) < 0)
@@ -1012,8 +1016,8 @@ if(H5DEBUG(D))
chunk_final_ftype = MPI_BYTE;
chunk_final_mtype = MPI_BYTE;
- /* buffer, file derived datatypes should be true */
- mpi_buf_count = (size_t)0;
+ /* No chunks selected for this process */
+ mpi_buf_count = (hsize_t)0;
} /* end else */
#ifdef H5D_DEBUG
if(H5DEBUG(D))
@@ -1033,6 +1037,7 @@ done:
if(H5DEBUG(D))
HDfprintf(H5DEBUG(D),"before freeing memory inside H5D_link_collective_io ret_value = %d\n", ret_value);
#endif
+ /* Release resources */
if(total_chunk_addr_array)
H5MM_xfree(total_chunk_addr_array);
if(chunk_addr_info_array)
@@ -1045,13 +1050,19 @@ if(H5DEBUG(D))
H5MM_xfree(chunk_disp_array);
if(chunk_mem_disp_array)
H5MM_xfree(chunk_mem_disp_array);
- if(blocklen)
- H5MM_xfree(blocklen);
+ if(chunk_mpi_mem_counts)
+ H5MM_xfree(chunk_mpi_mem_counts);
+ if(chunk_mpi_file_counts)
+ H5MM_xfree(chunk_mpi_file_counts);
+ if(chunk_mbt_is_derived_array)
+ H5MM_xfree(chunk_mbt_is_derived_array);
+ if(chunk_mft_is_derived_array)
+ H5MM_xfree(chunk_mft_is_derived_array);
/* Free the MPI buf and file types, if they were derived */
- if(mbt_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_mtype)))
+ if(chunk_final_mtype_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_mtype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(mft_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_ftype)))
+ if(chunk_final_ftype_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_final_ftype)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
FUNC_LEAVE_NOAPI(ret_value)
@@ -1547,32 +1558,29 @@ static herr_t
H5D_inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
const H5S_t *file_space, const H5S_t *mem_space)
{
- size_t mpi_buf_count; /* # of MPI types */
+ int mpi_buf_count; /* # of MPI types */
hbool_t mbt_is_derived = FALSE;
hbool_t mft_is_derived = FALSE;
MPI_Datatype mpi_file_type, mpi_buf_type;
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED; /* return value */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_inter_collective_io)
if((file_space != NULL) && (mem_space != NULL)) {
- hsize_t mpi_buf_offset, mpi_file_offset; /* Offset within dataset where selection (ie. MPI type) begins */
- size_t mpi_file_count; /* Number of file "objects" to transfer */
+ int mpi_file_count; /* Number of file "objects" to transfer */
/* Obtain disk and memory MPI derived datatype */
- if(H5S_mpio_space_type(file_space, type_info->src_type_size,
- &mpi_file_type, &mpi_file_count, &mpi_file_offset, &mft_is_derived) < 0)
+ if(H5S_mpio_space_type(file_space, type_info->src_type_size, &mpi_file_type, &mpi_file_count, &mft_is_derived) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
- if(H5S_mpio_space_type(mem_space, type_info->src_type_size,
- &mpi_buf_type, &mpi_buf_count, &mpi_buf_offset, &mbt_is_derived) < 0)
+ if(H5S_mpio_space_type(mem_space, type_info->src_type_size, &mpi_buf_type, &mpi_buf_count, &mbt_is_derived) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")
} /* end if */
else {
/* For non-selection, participate with a none MPI derived datatype, the count is 0. */
mpi_buf_type = MPI_BYTE;
mpi_file_type = MPI_BYTE;
- mpi_buf_count = (size_t)0;
+ mpi_buf_count = 0;
mbt_is_derived = FALSE;
mft_is_derived = FALSE;
} /* end else */
@@ -1583,7 +1591,7 @@ if(H5DEBUG(D))
#endif
/* Perform final collective I/O operation */
- if(H5D_final_collective_io(io_info, type_info, mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0)
+ if(H5D_final_collective_io(io_info, type_info, (hsize_t)mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO")
done:
@@ -1616,7 +1624,7 @@ if(H5DEBUG(D))
*/
static herr_t
H5D_final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- size_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type)
+ hsize_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type)
{
hbool_t plist_is_setup = FALSE; /* Whether the dxpl has been customized */
herr_t ret_value = SUCCEED;
@@ -1629,11 +1637,11 @@ H5D_final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info
plist_is_setup = TRUE;
if(io_info->op_type == H5D_IO_OP_WRITE) {
- if((io_info->io_ops.single_write)(io_info, type_info, (hsize_t)mpi_buf_count, NULL, NULL) < 0)
+ if((io_info->io_ops.single_write)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
} /* end if */
else {
- if((io_info->io_ops.single_read)(io_info, type_info, (hsize_t)mpi_buf_count, NULL, NULL) < 0)
+ if((io_info->io_ops.single_read)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
} /* end else */
diff --git a/src/H5Sall.c b/src/H5Sall.c
index 115d5d35..c98781a 100644
--- a/src/H5Sall.c
+++ b/src/H5Sall.c
@@ -47,6 +47,8 @@ static htri_t H5S_all_is_contiguous(const H5S_t *space);
static htri_t H5S_all_is_single(const H5S_t *space);
static htri_t H5S_all_is_regular(const H5S_t *space);
static herr_t H5S_all_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_all_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_all_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_all_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -76,6 +78,8 @@ const H5S_select_class_t H5S_sel_all[1] = {{
H5S_all_is_single,
H5S_all_is_regular,
H5S_all_adjust_u,
+ H5S_all_project_scalar,
+ H5S_all_project_simple,
H5S_all_iter_init,
}};
@@ -372,18 +376,18 @@ H5S_all_iter_release (H5S_sel_iter_t UNUSED * iter)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t
-H5S_all_release (H5S_t UNUSED * space)
+static herr_t
+H5S_all_release(H5S_t *space)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_all_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_all_release)
/* Check args */
- assert (space);
+ HDassert(space);
/* Reset the number of elements in the selection */
- space->select.num_elem=0;
+ space->select.num_elem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_all_release() */
@@ -406,18 +410,18 @@ H5S_all_release (H5S_t UNUSED * space)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t
+static herr_t
H5S_all_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selection)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_all_copy);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_all_copy)
- assert(src);
- assert(dst);
+ HDassert(src);
+ HDassert(dst);
/* Set number of elements in selection */
- dst->select.num_elem=(hsize_t)H5S_GET_EXTENT_NPOINTS(dst);
+ dst->select.num_elem = (hsize_t)H5S_GET_EXTENT_NPOINTS(dst);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_all_copy() */
@@ -542,20 +546,20 @@ H5S_all_serialize (const H5S_t *space, uint8_t *buf)
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
-H5S_all_deserialize (H5S_t *space, const uint8_t UNUSED *buf)
+H5S_all_deserialize(H5S_t *space, const uint8_t UNUSED *buf)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI(H5S_all_deserialize, FAIL);
+ FUNC_ENTER_NOAPI(H5S_all_deserialize, FAIL)
- assert(space);
+ HDassert(space);
/* Change to "all" selection */
if((ret_value = H5S_select_all(space, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_all_deserialize() */
@@ -764,6 +768,69 @@ H5S_all_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
} /* H5S_all_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_all_project_scalar
+ *
+ * Purpose: Projects a single element 'all' selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_all_project_scalar(const H5S_t UNUSED *space, hsize_t *offset)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_all_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_ALL == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Set offset of selection in projected buffer */
+ *offset = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5S_all_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_all_project_simple
+ *
+ * Purpose: Projects an 'all' selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_all_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_all_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_ALL == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* Select the entire new space */
+ if(H5S_select_all(new_space, TRUE) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to set all selection")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_all_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5S_select_all
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 74402b1..df81275 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -33,12 +33,12 @@
/* Local datatypes */
/* Static function prototypes */
-static herr_t H5S_hyper_free_span_info (H5S_hyper_span_info_t *span_info);
-static herr_t H5S_hyper_free_span (H5S_hyper_span_t *span);
-static H5S_hyper_span_info_t *H5S_hyper_copy_span (H5S_hyper_span_info_t *spans);
-static herr_t H5S_hyper_span_scratch (H5S_hyper_span_info_t *spans, void *scr_value);
-static herr_t H5S_hyper_span_precompute (H5S_hyper_span_info_t *spans, size_t elmt_size);
-static herr_t H5S_generate_hyperslab (H5S_t *space, H5S_seloper_t op,
+static herr_t H5S_hyper_free_span_info(H5S_hyper_span_info_t *span_info);
+static herr_t H5S_hyper_free_span(H5S_hyper_span_t *span);
+static H5S_hyper_span_info_t *H5S_hyper_copy_span(H5S_hyper_span_info_t *spans);
+static void H5S_hyper_span_scratch(H5S_hyper_span_info_t *spans, void *scr_value);
+static herr_t H5S_hyper_span_precompute(H5S_hyper_span_info_t *spans, size_t elmt_size);
+static herr_t H5S_generate_hyperslab(H5S_t *space, H5S_seloper_t op,
const hsize_t start[], const hsize_t stride[], const hsize_t count[], const hsize_t block[]);
static herr_t H5S_hyper_generate_spans(H5S_t *space);
/* Needed for use in hyperslab code (H5Shyper.c) */
@@ -62,6 +62,8 @@ static htri_t H5S_hyper_is_contiguous(const H5S_t *space);
static htri_t H5S_hyper_is_single(const H5S_t *space);
static htri_t H5S_hyper_is_regular(const H5S_t *space);
static herr_t H5S_hyper_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_hyper_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_hyper_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -96,6 +98,8 @@ const H5S_select_class_t H5S_sel_hyper[1] = {{
H5S_hyper_is_single,
H5S_hyper_is_regular,
H5S_hyper_adjust_u,
+ H5S_hyper_project_scalar,
+ H5S_hyper_project_simple,
H5S_hyper_iter_init,
}};
@@ -292,15 +296,15 @@ H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space)
/* Check if the regular selection can be "flattened" */
if(cont_dim>0) {
- unsigned last_dim_flattened=1; /* Flag to indicate that the last dimension was flattened */
- unsigned flat_rank=rank-cont_dim; /* Number of dimensions after flattening */
+ unsigned last_dim_flattened = 1; /* Flag to indicate that the last dimension was flattened */
+ unsigned flat_rank = rank-cont_dim; /* Number of dimensions after flattening */
unsigned curr_dim; /* Current dimension */
/* Set the iterator's rank to the contiguous dimensions */
- iter->u.hyp.iter_rank=flat_rank;
+ iter->u.hyp.iter_rank = flat_rank;
/* "Flatten" dataspace extent and selection information */
- curr_dim=flat_rank-1;
+ curr_dim = flat_rank - 1;
for(i = (int)rank - 1, acc = 1; i >= 0; i--) {
if(tdiminfo[i].block == mem_size[i] && i > 0) {
/* "Flatten" this dimension */
@@ -308,24 +312,25 @@ H5S_hyper_iter_init(H5S_sel_iter_t *iter, const H5S_t *space)
acc *= mem_size[i];
/* Indicate that the dimension was flattened */
- last_dim_flattened=1;
+ last_dim_flattened = 1;
} /* end if */
else {
if(last_dim_flattened) {
/* First dimension after flattened dimensions */
- iter->u.hyp.diminfo[curr_dim].start = tdiminfo[i].start*acc;
+ iter->u.hyp.diminfo[curr_dim].start = tdiminfo[i].start * acc;
+
/* Special case for single block regular selections */
if(tdiminfo[i].count==1)
iter->u.hyp.diminfo[curr_dim].stride = 1;
else
- iter->u.hyp.diminfo[curr_dim].stride = tdiminfo[i].stride*acc;
+ iter->u.hyp.diminfo[curr_dim].stride = tdiminfo[i].stride * acc;
iter->u.hyp.diminfo[curr_dim].count = tdiminfo[i].count;
- iter->u.hyp.diminfo[curr_dim].block = tdiminfo[i].block*acc;
- iter->u.hyp.size[curr_dim] = mem_size[i]*acc;
+ iter->u.hyp.diminfo[curr_dim].block = tdiminfo[i].block * acc;
+ iter->u.hyp.size[curr_dim] = mem_size[i] * acc;
iter->u.hyp.sel_off[curr_dim] = space->select.offset[i] * acc;
/* Reset the "last dim flattened" flag to avoid flattened any further dimensions */
- last_dim_flattened=0;
+ last_dim_flattened = 0;
/* Reset the "accumulator" for possible further dimension flattening */
acc=1;
@@ -596,12 +601,12 @@ static htri_t
H5S_hyper_iter_has_next_block(const H5S_sel_iter_t *iter)
{
unsigned u; /* Local index variable */
- herr_t ret_value=FALSE; /* Return value */
+ htri_t ret_value = FALSE; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_iter_has_next_block)
/* Check args */
- assert (iter);
+ HDassert(iter);
/* Check for a single "regular" hyperslab */
if(iter->u.hyp.diminfo_valid) {
@@ -609,25 +614,25 @@ H5S_hyper_iter_has_next_block(const H5S_sel_iter_t *iter)
const hsize_t *toff; /* Temporary offset in selection */
/* Check if the offset of the iterator is at the last location in all dimensions */
- tdiminfo=iter->u.hyp.diminfo;
- toff=iter->u.hyp.off;
- for(u=0; u<iter->rank; u++) {
+ tdiminfo = iter->u.hyp.diminfo;
+ toff = iter->u.hyp.off;
+ for(u = 0; u < iter->rank; u++) {
/* If there is only one block, continue */
- if(tdiminfo[u].count==1)
+ if(tdiminfo[u].count == 1)
continue;
- if(toff[u]!=(tdiminfo[u].start+((tdiminfo[u].count-1)*tdiminfo[u].stride)))
+ if(toff[u] != (tdiminfo[u].start + ((tdiminfo[u].count - 1) * tdiminfo[u].stride)))
HGOTO_DONE(TRUE);
} /* end for */
} /* end if */
else {
/* Check for any levels of the tree with more sequences in them */
- for(u=0; u<iter->rank; u++)
- if(iter->u.hyp.span[u]->next!=NULL)
+ for(u = 0; u < iter->rank; u++)
+ if(iter->u.hyp.span[u]->next != NULL)
HGOTO_DONE(TRUE);
} /* end else */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_iter_has_next_block() */
@@ -1089,30 +1094,30 @@ H5S_hyper_iter_release (H5S_sel_iter_t *iter)
REVISION LOG
--------------------------------------------------------------------------*/
static H5S_hyper_span_t *
-H5S_hyper_new_span (hsize_t low, hsize_t high, H5S_hyper_span_info_t *down, H5S_hyper_span_t *next)
+H5S_hyper_new_span(hsize_t low, hsize_t high, H5S_hyper_span_info_t *down, H5S_hyper_span_t *next)
{
H5S_hyper_span_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_new_span);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_new_span)
/* Allocate a new span node */
- if((ret_value = H5FL_MALLOC(H5S_hyper_span_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (ret_value = H5FL_MALLOC(H5S_hyper_span_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span")
/* Copy the span's basic information */
- ret_value->low=low;
- ret_value->high=high;
- ret_value->nelem=(high-low)+1;
- ret_value->pstride=0;
- ret_value->down=down;
- ret_value->next=next;
+ ret_value->low = low;
+ ret_value->high = high;
+ ret_value->nelem = (high - low) + 1;
+ ret_value->pstride = 0;
+ ret_value->down = down;
+ ret_value->next = next;
/* Increment the reference count of the 'down span' if there is one */
- if(ret_value->down!=NULL)
+ if(ret_value->down)
ret_value->down->count++;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_new_span() */
@@ -1195,24 +1200,23 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_span_precompute (H5S_hyper_span_info_t *spans, size_t elmt_size)
+H5S_hyper_span_precompute(H5S_hyper_span_info_t *spans, size_t elmt_size)
{
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_precompute);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_precompute)
- assert(spans);
+ HDassert(spans);
/* Call the helper routine to actually do the work */
- if(H5S_hyper_span_precompute_helper(spans,elmt_size)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't precompute span info");
+ if(H5S_hyper_span_precompute_helper(spans, elmt_size) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't precompute span info")
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(spans,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(spans, NULL);
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_span_precompute() */
@@ -1222,10 +1226,10 @@ done:
PURPOSE
Set the scratch pointers on hyperslab span trees
USAGE
- herr_t H5S_hyper_span_scratch(span_info)
+ void H5S_hyper_span_scratch(span_info)
H5S_hyper_span_info_t *span_info; IN: Span tree to reset
RETURNS
- Non-negative on success, negative on failure
+ <none>
DESCRIPTION
Set the scratch pointers on a hyperslab span tree.
GLOBAL VARIABLES
@@ -1233,37 +1237,33 @@ done:
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-static herr_t
-H5S_hyper_span_scratch (H5S_hyper_span_info_t *spans, void *scr_value)
+static void
+H5S_hyper_span_scratch(H5S_hyper_span_info_t *spans, void *scr_value)
{
- H5S_hyper_span_t *span; /* Hyperslab span */
- herr_t ret_value=SUCCEED; /* Return value */
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_span_scratch)
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_scratch);
-
- assert(spans);
+ HDassert(spans);
/* Check if we've already set this down span tree */
- if(spans->scratch!=scr_value) {
+ if(spans->scratch != scr_value) {
+ H5S_hyper_span_t *span; /* Hyperslab span */
+
/* Set the tree's scratch pointer */
spans->scratch = (H5S_hyper_span_info_t *)scr_value;
/* Set the scratch pointers in all the nodes */
- span=spans->head;
- while(span!=NULL) {
+ span = spans->head;
+ while(span != NULL) {
/* If there are down spans, set their scratch value also */
- if(span->down!=NULL) {
- if(H5S_hyper_span_scratch(span->down,scr_value)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
- } /* end if */
+ if(span->down != NULL)
+ H5S_hyper_span_scratch(span->down, scr_value);
/* Advance to next span */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end if */
-done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI_VOID
} /* H5S_hyper_span_scratch() */
@@ -1293,65 +1293,65 @@ H5S_hyper_copy_span_helper (H5S_hyper_span_info_t *spans)
H5S_hyper_span_info_t *new_down; /* New down span tree */
H5S_hyper_span_info_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span_helper);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span_helper)
- assert(spans);
+ HDassert(spans);
/* Check if the span tree was already copied */
- if(spans->scratch!=NULL && spans->scratch!=(H5S_hyper_span_info_t *)~((size_t)NULL)) {
+ if(spans->scratch != NULL && spans->scratch != (H5S_hyper_span_info_t *)~((size_t)NULL)) {
/* Just return the value of the already copied span tree */
- ret_value=spans->scratch;
+ ret_value = spans->scratch;
/* Increment the reference count of the span tree */
ret_value->count++;
} /* end if */
else {
/* Allocate a new span_info node */
- if((ret_value = H5FL_MALLOC(H5S_hyper_span_info_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (ret_value = H5FL_MALLOC(H5S_hyper_span_info_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span info")
/* Copy the span_info information */
- ret_value->count=1;
- ret_value->scratch=NULL;
- ret_value->head=NULL;
+ ret_value->count = 1;
+ ret_value->scratch = NULL;
+ ret_value->head = NULL;
/* Set the scratch pointer in the node being copied to the newly allocated node */
- spans->scratch=ret_value;
+ spans->scratch = ret_value;
/* Copy over the nodes in the span list */
- span=spans->head;
- prev_span=NULL;
- while(span!=NULL) {
+ span = spans->head;
+ prev_span = NULL;
+ while(span != NULL) {
/* Allocate a new node */
- if((new_span = H5S_hyper_new_span(span->low,span->high,NULL,NULL))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
+ if(NULL == (new_span = H5S_hyper_new_span(span->low, span->high, NULL, NULL)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, NULL, "can't allocate hyperslab span")
/* Append to list of spans */
- if(prev_span==NULL)
- ret_value->head=new_span;
+ if(NULL == prev_span)
+ ret_value->head = new_span;
else
- prev_span->next=new_span;
+ prev_span->next = new_span;
/* Copy the pstride */
- new_span->pstride=span->pstride;
+ new_span->pstride = span->pstride;
/* Recurse to copy the 'down' spans, if there are any */
- if(span->down!=NULL) {
- if((new_down = H5S_hyper_copy_span_helper(span->down))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate hyperslab span");
- new_span->down=new_down;
+ if(span->down != NULL) {
+ if(NULL == (new_down = H5S_hyper_copy_span_helper(span->down)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, NULL, "can't copy hyperslab spans")
+ new_span->down = new_down;
} /* end if */
/* Update the previous (new) span */
- prev_span=new_span;
+ prev_span = new_span;
/* Advance to next span */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end else */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_copy_span_helper() */
@@ -1375,23 +1375,23 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static H5S_hyper_span_info_t *
-H5S_hyper_copy_span (H5S_hyper_span_info_t *spans)
+H5S_hyper_copy_span(H5S_hyper_span_info_t *spans)
{
H5S_hyper_span_info_t *ret_value;
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_copy_span)
- assert(spans);
+ HDassert(spans);
/* Copy the hyperslab span tree */
- ret_value=H5S_hyper_copy_span_helper(spans);
+ if(NULL == (ret_value = H5S_hyper_copy_span_helper(spans)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, NULL, "can't copy hyperslab span tree")
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(spans,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, NULL, "can't reset span tree scratch pointers");
+ H5S_hyper_span_scratch(spans, NULL);
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_copy_span() */
@@ -1630,7 +1630,7 @@ H5S_hyper_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
assert(dst);
/* Allocate space for the hyperslab selection information */
- if((dst->select.sel_info.hslab=H5FL_MALLOC(H5S_hyper_sel_t))==NULL)
+ if(NULL == (dst->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info");
/* Set temporary pointers */
@@ -1658,7 +1658,7 @@ H5S_hyper_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
} /* end if */
else
/* Copy the hyperslab span information */
- dst->select.sel_info.hslab->span_lst=H5S_hyper_copy_span(src->select.sel_info.hslab->span_lst);
+ dst->select.sel_info.hslab->span_lst = H5S_hyper_copy_span(src->select.sel_info.hslab->span_lst);
} /* end if */
done:
@@ -2331,44 +2331,44 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
{
H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
hsize_t u; /* Index variable */
- herr_t ret_value=SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_blocklist);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_span_blocklist)
/* Sanity checks */
- assert(spans);
- assert(rank<H5O_LAYOUT_NDIMS);
- assert(start);
- assert(end);
- assert(startblock);
- assert(numblocks && *numblocks>0);
- assert(buf && *buf);
+ HDassert(spans);
+ HDassert(rank < H5O_LAYOUT_NDIMS);
+ HDassert(start);
+ HDassert(end);
+ HDassert(startblock);
+ HDassert(numblocks && *numblocks > 0);
+ HDassert(buf && *buf);
/* Walk through the list of spans, recursing or outputing them */
- curr=spans->head;
- while(curr!=NULL && *numblocks>0) {
+ curr = spans->head;
+ while(curr != NULL && *numblocks > 0) {
/* Recurse if this node has down spans */
- if(curr->down!=NULL) {
+ if(curr->down != NULL) {
/* Add the starting and ending points for this span to the list */
- start[rank]=curr->low;
- end[rank]=curr->high;
+ start[rank] = curr->low;
+ end[rank] = curr->high;
/* Recurse down to the next dimension */
- if(H5S_hyper_span_blocklist(curr->down,start,end,rank+1,startblock,numblocks,buf)<0)
+ if(H5S_hyper_span_blocklist(curr->down, start, end, (rank + 1), startblock, numblocks, buf) < 0)
HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "failed to release hyperslab spans");
} /* end if */
else {
/* Skip this block if we haven't skipped all the startblocks yet */
- if(*startblock>0) {
+ if(*startblock > 0) {
/* Decrement the starting block */
(*startblock)--;
- }
+ } /* end if */
/* Process this block */
else {
/* Encode all the previous dimensions starting & ending points */
/* Copy previous starting points */
- for(u=0; u<rank; u++, (*buf)++)
+ for(u = 0; u < rank; u++, (*buf)++)
HDmemcpy(*buf, &start[u], sizeof(hsize_t));
/* Copy starting point for this span */
@@ -2376,7 +2376,7 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
(*buf)++;
/* Copy previous ending points */
- for(u=0; u<rank; u++, (*buf)++)
+ for(u = 0; u < rank; u++, (*buf)++)
HDmemcpy(*buf, &end[u], sizeof(hsize_t));
/* Copy starting point for this span */
@@ -2389,11 +2389,11 @@ H5S_hyper_span_blocklist(H5S_hyper_span_info_t *spans, hsize_t start[], hsize_t
} /* end else */
/* Advance to next node */
- curr=curr->next;
+ curr = curr->next;
} /* end while */
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_hyper_span_blocklist() */
@@ -2430,29 +2430,26 @@ done:
static herr_t
H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startblock, hsize_t numblocks, hsize_t *buf)
{
- H5S_hyper_dim_t *diminfo; /* Alias for dataspace's diminfo information */
- hsize_t tmp_count[H5O_LAYOUT_NDIMS]; /* Temporary hyperslab counts */
- hsize_t offset[H5O_LAYOUT_NDIMS]; /* Offset of element in dataspace */
- hsize_t start[H5O_LAYOUT_NDIMS]; /* Location of start of hyperslab */
- hsize_t end[H5O_LAYOUT_NDIMS]; /* Location of end of hyperslab */
- hsize_t temp_off; /* Offset in a given dimension */
- int i; /* Counter */
- int fast_dim; /* Rank of the fastest changing dimension for the dataspace */
- int temp_dim; /* Temporary rank holder */
- int ndims; /* Rank of the dataspace */
- int done; /* Whether we are done with the iteration */
- herr_t ret_value=SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_get_select_hyper_blocklist);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_get_select_hyper_blocklist)
- assert(space);
- assert(buf);
+ HDassert(space);
+ HDassert(buf);
/* Check for a "regular" hyperslab selection */
if(space->select.sel_info.hslab->diminfo_valid) {
+ const H5S_hyper_dim_t *diminfo; /* Alias for dataspace's diminfo information */
+ hsize_t tmp_count[H5O_LAYOUT_NDIMS]; /* Temporary hyperslab counts */
+ hsize_t offset[H5O_LAYOUT_NDIMS]; /* Offset of element in dataspace */
+ unsigned fast_dim; /* Rank of the fastest changing dimension for the dataspace */
+ unsigned ndims; /* Rank of the dataspace */
+ hbool_t done; /* Whether we are done with the iteration */
+ unsigned u; /* Counter */
+
/* Set some convienence values */
- ndims=space->extent.rank;
- fast_dim=ndims-1;
+ ndims = space->extent.rank;
+ fast_dim = ndims - 1;
/* Check which set of dimension information to use */
if(internal)
@@ -2460,39 +2457,41 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
* Use the "optimized dimension information" to pass back information
* on the blocks set, not the "application information".
*/
- diminfo=space->select.sel_info.hslab->opt_diminfo;
+ diminfo = space->select.sel_info.hslab->opt_diminfo;
else
/*
* Use the "application dimension information" to pass back to the user
* the blocks they set, not the optimized, internal information.
*/
- diminfo=space->select.sel_info.hslab->app_diminfo;
+ diminfo = space->select.sel_info.hslab->app_diminfo;
/* Build the tables of count sizes as well as the initial offset */
- for(i=0; i<ndims; i++) {
- tmp_count[i]=diminfo[i].count;
- offset[i]=diminfo[i].start;
+ for(u = 0; u < ndims; u++) {
+ tmp_count[u] = diminfo[u].count;
+ offset[u] = diminfo[u].start;
} /* end for */
/* We're not done with the iteration */
- done=0;
+ done = FALSE;
/* Go iterate over the hyperslabs */
- while(done==0 && numblocks>0) {
+ while(!done && numblocks > 0) {
+ hsize_t temp_off; /* Offset in a given dimension */
+
/* Iterate over the blocks in the fastest dimension */
- while(tmp_count[fast_dim]>0 && numblocks>0) {
+ while(tmp_count[fast_dim] > 0 && numblocks > 0) {
/* Check if we should copy this block information */
- if(startblock==0) {
+ if(startblock == 0) {
/* Copy the starting location */
- HDmemcpy(buf,offset,sizeof(hsize_t)*ndims);
- buf+=ndims;
+ HDmemcpy(buf, offset, sizeof(hsize_t) * ndims);
+ buf += ndims;
/* Compute the ending location */
- HDmemcpy(buf,offset,sizeof(hsize_t)*ndims);
- for(i=0; i<ndims; i++)
- buf[i]+=(diminfo[i].block-1);
- buf+=ndims;
+ HDmemcpy(buf, offset, sizeof(hsize_t) * ndims);
+ for(u = 0; u < ndims; u++)
+ buf[u] += (diminfo[u].block - 1);
+ buf += ndims;
/* Decrement the number of blocks to retrieve */
numblocks--;
@@ -2501,33 +2500,35 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
startblock--;
/* Move the offset to the next sequence to start */
- offset[fast_dim]+=diminfo[fast_dim].stride;
+ offset[fast_dim] += diminfo[fast_dim].stride;
/* Decrement the block count */
tmp_count[fast_dim]--;
} /* end while */
/* Work on other dimensions if necessary */
- if(fast_dim>0 && numblocks>0) {
+ if(fast_dim > 0 && numblocks > 0) {
+ int temp_dim; /* Temporary rank holder */
+
/* Reset the block counts */
- tmp_count[fast_dim]=diminfo[fast_dim].count;
+ tmp_count[fast_dim] = diminfo[fast_dim].count;
/* Bubble up the decrement to the slower changing dimensions */
- temp_dim=fast_dim-1;
- while(temp_dim>=0 && done==0) {
+ temp_dim = (int)(fast_dim - 1);
+ while(temp_dim >= 0 && !done) {
/* Decrement the block count */
tmp_count[temp_dim]--;
/* Check if we have more blocks left */
- if(tmp_count[temp_dim]>0)
+ if(tmp_count[temp_dim] > 0)
break;
/* Check for getting out of iterator */
- if(temp_dim==0)
- done=1;
+ if(temp_dim == 0)
+ done = TRUE;
/* Reset the block count in this dimension */
- tmp_count[temp_dim]=diminfo[temp_dim].count;
+ tmp_count[temp_dim] = diminfo[temp_dim].count;
/* Wrapped a dimension, go up to next dimension */
temp_dim--;
@@ -2535,16 +2536,20 @@ H5S_get_select_hyper_blocklist(H5S_t *space, hbool_t internal, hsize_t startbloc
} /* end if */
/* Re-compute offset array */
- for(i=0; i<ndims; i++) {
- temp_off=diminfo[i].start+diminfo[i].stride*(diminfo[i].count-tmp_count[i]);
- offset[i]=temp_off;
+ for(u = 0; u < ndims; u++) {
+ temp_off = diminfo[u].start + diminfo[u].stride * (diminfo[u].count - tmp_count[u]);
+ offset[u] = temp_off;
} /* end for */
} /* end while */
} /* end if */
- else
- ret_value=H5S_hyper_span_blocklist(space->select.sel_info.hslab->span_lst,start,end,(hsize_t)0,&startblock,&numblocks,&buf);
+ else {
+ hsize_t start[H5O_LAYOUT_NDIMS]; /* Location of start of hyperslab */
+ hsize_t end[H5O_LAYOUT_NDIMS]; /* Location of end of hyperslab */
- FUNC_LEAVE_NOAPI(ret_value);
+ ret_value = H5S_hyper_span_blocklist(space->select.sel_info.hslab->span_lst, start, end, (hsize_t)0, &startblock, &numblocks, &buf);
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_get_select_hyper_blocklist() */
@@ -2636,40 +2641,40 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_bounds_helper (const H5S_hyper_span_info_t *spans, const hssize_t *offset, hsize_t rank, hsize_t *start, hsize_t *end)
+H5S_hyper_bounds_helper(const H5S_hyper_span_info_t *spans, const hssize_t *offset, hsize_t rank, hsize_t *start, hsize_t *end)
{
- H5S_hyper_span_t *curr; /* Hyperslab information nodes */
- herr_t ret_value=SUCCEED; /* Return value */
+ H5S_hyper_span_t *curr; /* Hyperslab information nodes */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_bounds_helper)
- assert(spans);
- assert(offset);
- assert(rank<H5O_LAYOUT_NDIMS);
- assert(start);
- assert(end);
+ HDassert(spans);
+ HDassert(offset);
+ HDassert(rank < H5O_LAYOUT_NDIMS);
+ HDassert(start);
+ HDassert(end);
/* Check each point to determine whether selection+offset is within extent */
curr=spans->head;
while(curr!=NULL) {
/* Check for offset moving selection negative */
- if(((hssize_t)curr->low+offset[rank])<0)
+ if(((hssize_t)curr->low + offset[rank]) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "offset moves selection out of bounds")
/* Check if the current span extends the bounding box */
- if((curr->low+offset[rank])<start[rank])
- start[rank]=curr->low+offset[rank];
- if((curr->high+offset[rank])>end[rank])
- end[rank]=curr->high+offset[rank];
+ if((curr->low + offset[rank]) < start[rank])
+ start[rank] = curr->low + offset[rank];
+ if((curr->high + offset[rank]) > end[rank])
+ end[rank] = curr->high + offset[rank];
/* Recurse if this node has down spans */
- if(curr->down!=NULL) {
- if(H5S_hyper_bounds_helper(curr->down,offset,rank+1,start,end)<0)
+ if(curr->down != NULL) {
+ if(H5S_hyper_bounds_helper(curr->down, offset, (rank + 1), start, end) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "failure in lower dimension")
} /* end if */
/* Advance to next node */
- curr=curr->next;
+ curr = curr->next;
} /* end while */
done:
@@ -3609,20 +3614,16 @@ done:
herr_t
H5S_hyper_reset_scratch(H5S_t *space)
{
- herr_t ret_value=SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_reset_scratch);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_reset_scratch)
- assert(space);
+ HDassert(space);
/* Check if there are spans in the span tree */
- if(space->select.sel_info.hslab->span_lst!=NULL)
+ if(space->select.sel_info.hslab->span_lst != NULL)
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset span tree scratch pointers");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
-done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_hyper_reset_scratch() */
@@ -3685,6 +3686,8 @@ H5S_hyper_convert(H5S_t *space)
case H5S_SEL_NONE: /* No elements selected in dataspace */
case H5S_SEL_POINTS: /* Point selection */
+ case H5S_SEL_ERROR: /* Selection error */
+ case H5S_SEL_N: /* Selection count */
default:
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "can't convert to span tree selection");
} /* end switch */
@@ -4033,8 +4036,7 @@ H5S_hyper_adjust_u(H5S_t *space, const hsize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset adjustment");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -4042,6 +4044,357 @@ done:
} /* H5S_hyper_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_scalar
+ *
+ * Purpose: Projects a single element hyperslab selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Check for a "regular" hyperslab selection */
+ if(space->select.sel_info.hslab->diminfo_valid) {
+ const H5S_hyper_dim_t *diminfo = space->select.sel_info.hslab->opt_diminfo; /* Alias for dataspace's diminfo information */
+ unsigned u; /* Counter */
+
+ /* Build the table of the initial offset */
+ for(u = 0; u < space->extent.rank; u++) {
+ block[u] = diminfo[u].start;
+
+ /* Check for more than one hyperslab */
+ if(diminfo[u].count > 1 || diminfo[u].block > 1)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "hyperslab selection of one element has more than one node!")
+ } /* end for */
+ } /* end if */
+ else {
+ const H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ /* Advance down selected spans */
+ curr = space->select.sel_info.hslab->span_lst->head;
+ curr_dim = 0;
+ while(curr) {
+ /* Check for more than one span */
+ if(curr->next || curr->low != curr->high)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "hyperslab selection of one element has more than one node!")
+
+ /* Save the location of the selection in current dimension */
+ block[curr_dim] = curr->low;
+
+            /* Advance down to next dimension, if any (the lowest dimension's
+             * spans have no 'down' list) */
+            curr = curr->down ? curr->down->head : NULL;
+            curr_dim++;
+ } /* end while */
+ } /* end else */
+
+ /* Calculate offset of selection in projected buffer */
+ *offset = H5V_array_offset(space->extent.rank, space->extent.size, block);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_scalar() */
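
For reference, the scalar projection above reduces to a row-major linearization of the single selected element's coordinates. A minimal standalone sketch of that arithmetic (plain C, not HDF5 code; H5V_array_offset is assumed to behave this way):

#include <stdio.h>

/* Row-major linear offset of a coordinate within an extent */
static unsigned long long
row_major_offset(unsigned rank, const unsigned long long *dims,
    const unsigned long long *coord)
{
    unsigned long long off = 0;
    unsigned u;

    for(u = 0; u < rank; u++)
        off = off * dims[u] + coord[u];

    return off;
}

int main(void)
{
    unsigned long long dims[3]  = {4, 5, 6};    /* hypothetical 3-D extent */
    unsigned long long coord[3] = {1, 2, 3};    /* the single selected element */

    /* (1 * 5 + 2) * 6 + 3 = 45 */
    printf("offset = %llu\n", row_major_offset(3, dims, coord));
    return 0;
}
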
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple_lower
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a lower rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple_lower(const H5S_t *base_space, H5S_t *new_space)
+{
+ H5S_hyper_span_info_t *down; /* Pointer to list of spans */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_project_simple_lower)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(new_space->extent.rank < base_space->extent.rank);
+
+ /* Walk down the span tree until we reach the selection to project */
+ down = base_space->select.sel_info.hslab->span_lst;
+ curr_dim = 0;
+ while(down && curr_dim < (base_space->extent.rank - new_space->extent.rank)) {
+ /* Sanity check */
+ HDassert(NULL == down->head->next);
+
+ /* Advance down to next dimension */
+ down = down->head->down;
+ curr_dim++;
+ } /* end while */
+ HDassert(down);
+
+ /* Share the underlying hyperslab span information */
+ new_space->select.sel_info.hslab->span_lst = down;
+ new_space->select.sel_info.hslab->span_lst->count++;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5S_hyper_project_simple_lower() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple_higher
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a higher rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple_higher(const H5S_t *base_space, H5S_t *new_space)
+{
+ H5S_hyper_span_t *prev_span = NULL; /* Pointer to previous list of spans */
+ unsigned curr_dim; /* Current dimension being operated on */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_simple_higher)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* Create nodes until reaching the correct # of dimensions */
+ new_space->select.sel_info.hslab->span_lst = NULL;
+ curr_dim = 0;
+ while(curr_dim < (new_space->extent.rank - base_space->extent.rank)) {
+ H5S_hyper_span_info_t *new_span_info; /* Pointer to list of spans */
+ H5S_hyper_span_t *new_span; /* Temporary hyperslab span */
+
+ /* Allocate a new span_info node */
+ if(NULL == (new_span_info = H5FL_MALLOC(H5S_hyper_span_info_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate hyperslab span info")
+
+ /* Check for linking into higher span */
+ if(prev_span)
+ prev_span->down = new_span_info;
+
+ /* Allocate a new node */
+ if(NULL == (new_span = H5S_hyper_new_span(0, 0, NULL, NULL)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate hyperslab span")
+
+ /* Set the span_info information */
+ new_span_info->count = 1;
+ new_span_info->scratch = NULL;
+ new_span_info->head = new_span;
+
+ /* Attach to new space, if top span info */
+ if(NULL == new_space->select.sel_info.hslab->span_lst)
+ new_space->select.sel_info.hslab->span_lst = new_span_info;
+
+ /* Remember previous span info */
+ prev_span = new_span;
+
+ /* Advance to next dimension */
+ curr_dim++;
+ } /* end while */
+ HDassert(new_space->select.sel_info.hslab->span_lst);
+ HDassert(prev_span);
+
+ /* Share the underlying hyperslab span information */
+ prev_span->down = base_space->select.sel_info.hslab->span_lst;
+ prev_span->down->count++;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_simple_higher() */
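
Why the up-projection can leave the selection offset at zero: prepending dimensions of size one does not change the row-major offset of any element, so the new singleton span nodes simply sit above the shared span tree. A small check of that claim (plain C, illustrative only):

#include <assert.h>

static unsigned long long
linear_offset(unsigned rank, const unsigned long long *dims,
    const unsigned long long *coord)
{
    unsigned long long off = 0;
    unsigned u;

    for(u = 0; u < rank; u++)
        off = off * dims[u] + coord[u];

    return off;
}

int main(void)
{
    unsigned long long dims2[2] = {3, 4},       coord2[2] = {2, 1};
    unsigned long long dims4[4] = {1, 1, 3, 4}, coord4[4] = {0, 0, 2, 1};

    /* 2 * 4 + 1 == 9 in both the 2-D view and the padded 4-D view */
    assert(linear_offset(2, dims2, coord2) == linear_offset(4, dims4, coord4));
    return 0;
}
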
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_hyper_project_simple
+ *
+ * Purpose: Projects a hyperslab selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_hyper_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_hyper_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_HYPERSLABS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* We are setting a new selection, remove any current selection in new dataspace */
+ if(H5S_SELECT_RELEASE(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
+
+ /* Allocate space for the hyperslab selection information */
+ if(NULL == (new_space->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info")
+
+ /* Check for a "regular" hyperslab selection */
+ if(base_space->select.sel_info.hslab->diminfo_valid) {
+ unsigned base_space_dim; /* Current dimension in the base dataspace */
+ unsigned new_space_dim; /* Current dimension in the new dataspace */
+
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ const H5S_hyper_dim_t *opt_diminfo = base_space->select.sel_info.hslab->opt_diminfo; /* Alias for dataspace's diminfo information */
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ unsigned u; /* Local index variable */
+
+ /* Compute the offset for the down-projection */
+ HDmemset(block, 0, sizeof(block));
+ for(u = 0; u < (base_space->extent.rank - new_space->extent.rank); u++)
+ block[u] = opt_diminfo[u].start;
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+ /* Set the correct dimensions for the base & new spaces */
+ base_space_dim = base_space->extent.rank - new_space->extent.rank;
+ new_space_dim = 0;
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+ /* Set the diminfo information for the higher dimensions */
+ for(new_space_dim = 0; new_space_dim < (new_space->extent.rank - base_space->extent.rank); new_space_dim++) {
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start = 0;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride = 1;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count = 1;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block = 1;
+
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start = 0;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].stride = 1;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count = 1;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block = 1;
+ } /* end for */
+
+ /* Start at beginning of base space's dimension info */
+ base_space_dim = 0;
+ } /* end else */
+
+ /* Copy the diminfo */
+ while(base_space_dim < base_space->extent.rank) {
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].start =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].start;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].stride =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].stride;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].count =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].count;
+ new_space->select.sel_info.hslab->app_diminfo[new_space_dim].block =
+ base_space->select.sel_info.hslab->app_diminfo[base_space_dim].block;
+
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].start =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].start;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].stride =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].stride;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].count =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].count;
+ new_space->select.sel_info.hslab->opt_diminfo[new_space_dim].block =
+ base_space->select.sel_info.hslab->opt_diminfo[base_space_dim].block;
+
+ /* Advance to next dimensions */
+ base_space_dim++;
+ new_space_dim++;
+        } /* end while */
+
+ /* Indicate that the dimension information is valid */
+ new_space->select.sel_info.hslab->diminfo_valid = TRUE;
+
+ /* Indicate that there's no slab information */
+ new_space->select.sel_info.hslab->span_lst = NULL;
+ } /* end if */
+ else {
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ const H5S_hyper_span_t *curr; /* Pointer to current hyperslab span */
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+ unsigned curr_dim; /* Current dimension being operated on */
+
+ /* Clear the block buffer */
+ HDmemset(block, 0, sizeof(block));
+
+ /* Advance down selected spans */
+ curr = base_space->select.sel_info.hslab->span_lst->head;
+ curr_dim = 0;
+ while(curr && curr_dim < (base_space->extent.rank - new_space->extent.rank)) {
+ /* Save the location of the selection in current dimension */
+ block[curr_dim] = curr->low;
+
+ /* Advance down to next dimension */
+ curr = curr->down->head;
+ curr_dim++;
+ } /* end while */
+
+ /* Compute the offset for the down-projection */
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+            /* Project the base space's selection down into fewer dimensions */
+            if(H5S_hyper_project_simple_lower(base_space, new_space) < 0)
+                HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't project hyperslab selection into fewer dimensions")
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+            /* Project the base space's selection up into more dimensions */
+            if(H5S_hyper_project_simple_higher(base_space, new_space) < 0)
+                HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't project hyperslab selection into more dimensions")
+ } /* end else */
+
+ /* Indicate that the dimension information is not valid */
+ new_space->select.sel_info.hslab->diminfo_valid = FALSE;
+ } /* end else */
+
+ /* Number of elements selected will be the same */
+ new_space->select.num_elem = base_space->select.num_elem;
+
+ /* Set selection type */
+ new_space->select.type = H5S_sel_hyper;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_hyper_project_simple() */
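
For the down-projection case, the offset returned above is the linearization of the block's leading coordinates against the base extent, with the surviving dimensions taken as zero. A standalone illustration (plain C; assumes H5V_array_offset linearizes in row-major order):

#include <stdio.h>

int main(void)
{
    unsigned long long base_size[3] = {4, 5, 6};    /* hypothetical 3-D base extent */
    unsigned long long block[3]     = {1, 0, 0};    /* leading coordinate of the block,
                                                     * zeros for the surviving 2-D part */
    unsigned long long offset = 0;
    unsigned u;

    for(u = 0; u < 3; u++)
        offset = offset * base_size[u] + block[u];

    /* offset == 30: the projected 2-D selection starts one full 5 x 6 slice
     * (30 elements) into the base dataspace's buffer */
    printf("offset = %llu\n", offset);
    return 0;
}
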
+
+
/*--------------------------------------------------------------------------
NAME
H5S_hyper_adjust_helper_s
@@ -4061,41 +4414,41 @@ done:
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_hyper_adjust_helper_s (H5S_hyper_span_info_t *spans, const hssize_t *offset)
+H5S_hyper_adjust_helper_s(H5S_hyper_span_info_t *spans, const hssize_t *offset)
{
H5S_hyper_span_t *span; /* Pointer to current span in span tree */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_adjust_helper_s);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_hyper_adjust_helper_s)
/* Sanity check */
- assert(spans);
- assert(offset);
+ HDassert(spans);
+ HDassert(offset);
/* Check if we've already set this down span tree */
- if(spans->scratch!=(H5S_hyper_span_info_t *)~((size_t)NULL)) {
+ if(spans->scratch != (H5S_hyper_span_info_t *)~((size_t)NULL)) {
/* Set the tree's scratch pointer */
- spans->scratch=(H5S_hyper_span_info_t *)~((size_t)NULL);
+ spans->scratch = (H5S_hyper_span_info_t *)~((size_t)NULL);
/* Get the span lists for each span in this tree */
- span=spans->head;
+ span = spans->head;
/* Iterate over the spans in tree */
- while(span!=NULL) {
+ while(span != NULL) {
/* Adjust span offset */
- assert((hssize_t)span->low>=*offset);
- span->low-=*offset;
- span->high-=*offset;
+ HDassert((hssize_t)span->low >= *offset);
+ span->low -= *offset;
+ span->high -= *offset;
/* Recursively adjust spans in next dimension down */
- if(span->down!=NULL)
- H5S_hyper_adjust_helper_s(span->down,offset+1);
+ if(span->down != NULL)
+ H5S_hyper_adjust_helper_s(span->down, offset + 1);
/* Advance to next span in this dimension */
- span=span->next;
+ span = span->next;
} /* end while */
} /* end if */
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_hyper_adjust_helper_s() */
@@ -4142,8 +4495,7 @@ H5S_hyper_adjust_s(H5S_t *space, const hssize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset adjustment");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -4252,8 +4604,7 @@ H5S_hyper_move(H5S_t *space, const hssize_t *offset)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADSELECT, FAIL, "can't perform hyperslab offset movement");
/* Reset the scratch pointers for the next routine which needs them */
- if(H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst,NULL)==FAIL)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't reset hyperslab scratch pointer");
+ H5S_hyper_span_scratch(space->select.sel_info.hslab->span_lst, NULL);
} /* end if */
done:
@@ -6035,8 +6386,6 @@ done:
* Programmer: Quincey Koziol
* Wednesday, January 10, 2001
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -6220,18 +6569,18 @@ H5S_select_hyperslab (H5S_t *space, H5S_seloper_t op,
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation");
} /* end switch */
- if(op==H5S_SELECT_SET) {
+ if(op == H5S_SELECT_SET) {
/* If we are setting a new selection, remove current selection first */
- if(H5S_SELECT_RELEASE(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab");
+ if(H5S_SELECT_RELEASE(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
/* Allocate space for the hyperslab selection information */
- if((space->select.sel_info.hslab=H5FL_MALLOC(H5S_hyper_sel_t))==NULL)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info");
+ if(NULL == (space->select.sel_info.hslab = H5FL_MALLOC(H5S_hyper_sel_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate hyperslab info")
/* Save the diminfo */
- space->select.num_elem=1;
- for(u=0; u<space->extent.rank; u++) {
+ space->select.num_elem = 1;
+ for(u = 0; u < space->extent.rank; u++) {
space->select.sel_info.hslab->app_diminfo[u].start = start[u];
space->select.sel_info.hslab->app_diminfo[u].stride = stride[u];
space->select.sel_info.hslab->app_diminfo[u].count = count[u];
@@ -6241,39 +6590,40 @@ H5S_select_hyperslab (H5S_t *space, H5S_seloper_t op,
space->select.sel_info.hslab->opt_diminfo[u].stride = opt_stride[u];
space->select.sel_info.hslab->opt_diminfo[u].count = opt_count[u];
space->select.sel_info.hslab->opt_diminfo[u].block = opt_block[u];
- space->select.num_elem*=(opt_count[u]*opt_block[u]);
+
+ space->select.num_elem *= (opt_count[u] * opt_block[u]);
} /* end for */
/* Indicate that the dimension information is valid */
- space->select.sel_info.hslab->diminfo_valid=TRUE;
+ space->select.sel_info.hslab->diminfo_valid = TRUE;
/* Indicate that there's no slab information */
- space->select.sel_info.hslab->span_lst=NULL;
+ space->select.sel_info.hslab->span_lst = NULL;
} /* end if */
- else if(op>=H5S_SELECT_OR && op<=H5S_SELECT_NOTA) {
+ else if(op >= H5S_SELECT_OR && op <= H5S_SELECT_NOTA) {
/* Sanity check */
- assert(H5S_GET_SELECT_TYPE(space)==H5S_SEL_HYPERSLABS);
+ HDassert(H5S_GET_SELECT_TYPE(space) == H5S_SEL_HYPERSLABS);
/* Check if there's no hyperslab span information currently */
- if(space->select.sel_info.hslab->span_lst==NULL)
- if(H5S_hyper_generate_spans(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_UNINITIALIZED, FAIL, "dataspace does not have span tree");
+ if(NULL == space->select.sel_info.hslab->span_lst)
+ if(H5S_hyper_generate_spans(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_UNINITIALIZED, FAIL, "dataspace does not have span tree")
/* Indicate that the regular dimensions are no longer valid */
- space->select.sel_info.hslab->diminfo_valid=FALSE;
+ space->select.sel_info.hslab->diminfo_valid = FALSE;
/* Add in the new hyperslab information */
- if(H5S_generate_hyperslab (space, op, start, opt_stride, opt_count, opt_block)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't generate hyperslabs");
+ if(H5S_generate_hyperslab(space, op, start, opt_stride, opt_count, opt_block) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't generate hyperslabs")
} /* end if */
else
- HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation");
+ HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation")
/* Set selection type */
- space->select.type=H5S_sel_hyper;
+ space->select.type = H5S_sel_hyper;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_hyperslab() */
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index f535122..e9d0541 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -31,45 +31,25 @@
#include "H5Fprivate.h" /* File access */
#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Oprivate.h" /* Object headers */
#include "H5Pprivate.h" /* Property lists */
#include "H5Spkg.h" /* Dataspaces */
#ifdef H5_HAVE_PARALLEL
-static herr_t
-H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-static herr_t
-H5S_mpio_none_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-static herr_t
-H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-
-static herr_t
-H5S_mpio_span_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
+static herr_t H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_mpio_none_type(MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type);
+static herr_t H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S_obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size);
-static herr_t H5S_obtain_datatype(const hsize_t size[],
- H5S_hyper_span_t* span,MPI_Datatype *span_type,
- size_t elmt_size,int dimindex);
+#define H5S_MPIO_INITIAL_ALLOC_COUNT 256
/*-------------------------------------------------------------------------
@@ -82,30 +62,20 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications:
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
hsize_t total_bytes;
- hssize_t snelmts; /*total number of elmts (signed) */
- hsize_t nelmts; /*total number of elmts */
- herr_t ret_value = SUCCEED;
+ hssize_t snelmts; /* Total number of elmts (signed) */
+ hsize_t nelmts; /* Total number of elmts */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_all_type)
@@ -121,8 +91,7 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
/* fill in the return values */
*new_type = MPI_BYTE;
- H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, size_t);
- *extra_offset = 0;
+ H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, int);
*is_derived_type = FALSE;
done:
@@ -140,32 +109,23 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: Quincey Koziol, October 29, 2002
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_mpio_none_type);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_mpio_none_type)
/* fill in the return values */
*new_type = MPI_BYTE;
*count = 0;
- *extra_offset = 0;
*is_derived_type = FALSE;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_mpio_none_type() */
@@ -179,35 +139,15 @@ H5S_mpio_none_type( const H5S_t UNUSED *space, size_t UNUSED elmt_size,
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications: ppw 990401
- * rky, ppw 2000-09-26 Freed old type after creating struct type.
- * rky 2000-10-05 Changed displacements to be MPI_Aint.
- * rky 2000-10-06 Added code for cases of empty hyperslab.
- * akc, rky 2000-11-16 Replaced hard coded dimension size with
- * H5S_MAX_RANK.
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter. Also accomodate selection
- * offset in MPI type built.
- *
- * Albert Cheng, August 4, 2004
- * Reimplemented the algorithm of forming the outer_type by
- * defining it as (start, vector, extent) in one call.
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
H5S_sel_iter_t sel_iter; /* Selection iteration info */
hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
@@ -231,18 +171,16 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_hyper_type)
/* Check args */
HDassert(space);
HDassert(sizeof(MPI_Aint) >= sizeof(elmt_size));
- if(0 == elmt_size)
- goto empty;
/* Initialize selection iterator */
if(H5S_select_iter_init(&sel_iter, space, elmt_size) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
- sel_iter_init = 1; /* Selection iteration info has been initialized */
+ sel_iter_init = TRUE; /* Selection iteration info has been initialized */
/* Abbreviate args */
diminfo = sel_iter.u.hyp.diminfo;
@@ -251,18 +189,16 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
/* make a local copy of the dimension info so we can operate with them */
/* Check if this is a "flattened" regular hyperslab selection */
- if(sel_iter.u.hyp.iter_rank!=0 && sel_iter.u.hyp.iter_rank<space->extent.rank) {
+ if(sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) {
/* Flattened selection */
rank = sel_iter.u.hyp.iter_rank;
HDassert(rank >= 0 && rank <= H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
- goto empty;
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC);
#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+sel_iter.u.hyp.sel_off[i];
+ for(i = 0; i < rank; ++i) {
+ d[i].start = diminfo[i].start + sel_iter.u.hyp.sel_off[i];
d[i].strid = diminfo[i].stride;
d[i].block = diminfo[i].block;
d[i].count = diminfo[i].count;
@@ -277,26 +213,26 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
HDfprintf(H5DEBUG(S), "\n" );
}
#endif
- if (0==d[i].block)
+ if(0 == d[i].block)
goto empty;
- if (0==d[i].count)
+ if(0 == d[i].count)
goto empty;
- if (0==d[i].xtent)
+ if(0 == d[i].xtent)
goto empty;
- }
+ } /* end for */
} /* end if */
else {
/* Non-flattened selection */
rank = space->extent.rank;
- HDassert(rank >= 0 && rank<=H5S_MAX_RANK); /* within array bounds */
- if (0==rank)
+ HDassert(rank >= 0 && rank <= H5S_MAX_RANK); /* within array bounds */
+ if(0 == rank)
goto empty;
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC);
#endif
- for ( i=0; i<rank; ++i) {
- d[i].start = diminfo[i].start+space->select.offset[i];
+ for(i = 0; i < rank; ++i) {
+ d[i].start = diminfo[i].start + space->select.offset[i];
d[i].strid = diminfo[i].stride;
d[i].block = diminfo[i].block;
d[i].count = diminfo[i].count;
@@ -311,40 +247,37 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
HDfprintf(H5DEBUG(S), "\n" );
}
#endif
- if (0==d[i].block)
+ if(0 == d[i].block)
goto empty;
- if (0==d[i].count)
+ if(0 == d[i].count)
goto empty;
- if (0==d[i].xtent)
+ if(0 == d[i].xtent)
goto empty;
- }
+ } /* end for */
} /* end else */
/**********************************************************************
Compute array "offset[rank]" which gives the offsets for a multi-
dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
**********************************************************************/
- offset[rank-1] = 1;
- max_xtent[rank-1] = d[rank-1].xtent;
-/*#ifdef H5Smpi_DEBUG */ /* leave the old way */
+ offset[rank - 1] = 1;
+ max_xtent[rank - 1] = d[rank - 1].xtent;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S)) {
i=rank-1;
- HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
+ HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
i, offset[i], i, max_xtent[i]);
}
#endif
- for (i=rank-2; i>=0; --i) {
- offset[i] = offset[i+1]*d[i+1].xtent;
- max_xtent[i] = max_xtent[i+1]*d[i].xtent;
+ for(i = rank - 2; i >= 0; --i) {
+ offset[i] = offset[i + 1] * d[i + 1].xtent;
+ max_xtent[i] = max_xtent[i + 1] * d[i].xtent;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), " offset[%2d]=%d; max_xtent[%2d]=%d\n",
i, offset[i], i, max_xtent[i]);
- }
#endif
-
- }
+ } /* end for */
/* Create a type covering the selected hyperslab.
* Multidimensional dataspaces are stored in row-major order.
@@ -356,59 +289,58 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
*******************************************************/
#ifdef H5S_DEBUG
if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: Making contig type %d MPI_BYTEs\n", FUNC,elmt_size );
+ HDfprintf(H5DEBUG(S), "%s: Making contig type %Zu MPI_BYTEs\n", FUNC, elmt_size);
for (i=rank-1; i>=0; --i)
HDfprintf(H5DEBUG(S), "d[%d].xtent=%Hu \n", i, d[i].xtent);
}
#endif
- if (MPI_SUCCESS != (mpi_code= MPI_Type_contiguous( (int)elmt_size, MPI_BYTE, &inner_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
/*******************************************************
* Construct the type by walking the hyperslab dims
* from the inside out:
*******************************************************/
- for ( i=rank-1; i>=0; --i) {
+ for(i = rank - 1; i >= 0; --i) {
#ifdef H5S_DEBUG
- if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
"start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n",
FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
- }
#endif
#ifdef H5S_DEBUG
if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
+ HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
#endif
/****************************************
* Build vector type of the selection.
****************************************/
- mpi_code =MPI_Type_vector((int)(d[i].count), /* count */
- (int)(d[i].block), /* blocklength */
- (int)(d[i].strid), /* stride */
- inner_type, /* old type */
- &outer_type); /* new type */
-
- MPI_Type_free( &inner_type );
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code);
-
- /****************************************
- * Then build the dimension type as (start, vector type, xtent).
- ****************************************/
- /* calculate start and extent values of this dimension */
+ mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
+ (int)(d[i].block), /* blocklength */
+ (int)(d[i].strid), /* stride */
+ inner_type, /* old type */
+ &outer_type); /* new type */
+
+ MPI_Type_free(&inner_type);
+ if(mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
+
+ /****************************************
+ * Then build the dimension type as (start, vector type, xtent).
+ ****************************************/
+ /* calculate start and extent values of this dimension */
displacement[1] = d[i].start * offset[i] * elmt_size;
displacement[2] = (MPI_Aint)elmt_size * max_xtent[i];
if(MPI_SUCCESS != (mpi_code = MPI_Type_extent(outer_type, &extent_len)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code);
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code)
- /*************************************************
- * Restructure this datatype ("outer_type")
- * so that it still starts at 0, but its extent
- * is the full extent in this dimension.
- *************************************************/
- if (displacement[1] > 0 || (int)extent_len < displacement[2]) {
+ /*************************************************
+ * Restructure this datatype ("outer_type")
+ * so that it still starts at 0, but its extent
+ * is the full extent in this dimension.
+ *************************************************/
+ if(displacement[1] > 0 || (int)extent_len < displacement[2]) {
block_length[0] = 1;
block_length[1] = 1;
@@ -420,42 +352,37 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
old_types[1] = outer_type;
old_types[2] = MPI_UB;
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
- HDfprintf(H5DEBUG(S), "%s: i=%d Extending struct type\n"
- "***displacements: %d, %d, %d\n",
- FUNC, i, displacement[0], displacement[1], displacement[2]);
- }
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: i=%d Extending struct type\n"
+ "***displacements: %ld, %ld, %ld\n",
+ FUNC, i, (long)displacement[0], (long)displacement[1], (long)displacement[2]);
#endif
- mpi_code = MPI_Type_struct ( 3, /* count */
- block_length, /* blocklengths */
- displacement, /* displacements */
- old_types, /* old types */
- &inner_type); /* new type */
-
- MPI_Type_free (&outer_type);
- if (mpi_code!=MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code);
- }
- else {
+ mpi_code = MPI_Type_struct(3, /* count */
+ block_length, /* blocklengths */
+ displacement, /* displacements */
+ old_types, /* old types */
+ &inner_type); /* new type */
+
+ MPI_Type_free(&outer_type);
+ if(mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code)
+ } /* end if */
+ else
inner_type = outer_type;
- }
} /* end for */
/***************************
* End of loop, walking
* thru dimensions.
***************************/
-
/* At this point inner_type is actually the outermost type, even for 0-trip loop */
-
*new_type = inner_type;
- if (MPI_SUCCESS != (mpi_code= MPI_Type_commit( new_type )))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* fill in the remaining return values */
*count = 1; /* only have to move one of these suckers! */
- *extra_offset = 0;
*is_derived_type = TRUE;
HGOTO_DONE(SUCCEED);
@@ -463,24 +390,21 @@ empty:
/* special case: empty hyperslab */
*new_type = MPI_BYTE;
*count = 0;
- *extra_offset = 0;
*is_derived_type = FALSE;
done:
/* Release selection iterator */
- if(sel_iter_init) {
- if (H5S_SELECT_ITER_RELEASE(&sel_iter)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
- } /* end if */
+ if(sel_iter_init)
+ if(H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
#ifdef H5S_DEBUG
- if(H5DEBUG(S)){
+ if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "Leave %s, count=%ld is_derived_type=%t\n",
FUNC, *count, *is_derived_type );
- }
#endif
- FUNC_LEAVE_NOAPI(ret_value);
-}
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5S_mpio_hyper_type() */
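
A minimal sketch of the inside-out vector construction used above, assuming a 2-D row-major dataset and the MPI-2 API (this is not the HDF5 implementation): build a byte-contiguous element type, wrap it in an MPI_Type_vector for the fastest-varying dimension, resize that type to one full row so the outer vector strides by whole rows, then wrap again for the slower dimension. The start displacement and upper-bound marker handled above with MPI_Type_struct/MPI_UB are reduced here to a single MPI_Type_create_resized call, and the start offset is left to the caller.

#include <mpi.h>

static int
make_2d_hyperslab_type(int elmt_size,           /* bytes per element */
                       const int dims[2],       /* dataset extent (rows, cols) */
                       const int count[2],      /* # of blocks per dimension */
                       const int block[2],      /* elements per block */
                       const int stride[2],     /* stride between block starts */
                       MPI_Datatype *new_type)
{
    MPI_Datatype elmt, row_vec, row_resized;

    /* One element = elmt_size bytes */
    MPI_Type_contiguous(elmt_size, MPI_BYTE, &elmt);

    /* Fastest-varying dimension: stride counted in elements */
    MPI_Type_vector(count[1], block[1], stride[1], elmt, &row_vec);
    MPI_Type_free(&elmt);

    /* Make its extent one full row so the outer vector steps row by row */
    MPI_Type_create_resized(row_vec, 0, (MPI_Aint)dims[1] * elmt_size, &row_resized);
    MPI_Type_free(&row_vec);

    /* Slowest-varying dimension: stride counted in rows */
    MPI_Type_vector(count[0], block[0], stride[0], row_resized, new_type);
    MPI_Type_free(&row_resized);

    return MPI_Type_commit(new_type);
}
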
/*-------------------------------------------------------------------------
@@ -494,68 +418,57 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: kyang
*
+ *-------------------------------------------------------------------------
*/
static herr_t
-H5S_mpio_span_hyper_type( const H5S_t *space,
- size_t elmt_size,
- MPI_Datatype *new_type,/* out: */
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- MPI_Datatype span_type;
- H5S_hyper_span_t *ospan;
- H5S_hyper_span_info_t *odown;
- hsize_t *size;
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ MPI_Datatype elmt_type; /* MPI datatype for an element */
+ hbool_t elmt_type_is_derived = FALSE; /* Whether the element type has been created */
+ MPI_Datatype span_type; /* MPI datatype for overall span tree */
+ hsize_t down[H5S_MAX_RANK]; /* 'down' sizes for each dimension */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_span_hyper_type)
/* Check args */
HDassert(space);
-
- if(0 == elmt_size)
- goto empty;
- size = space->extent.size;
- if(0 == size)
- goto empty;
-
- odown = space->select.sel_info.hslab->span_lst;
- if(NULL == odown)
- goto empty;
- ospan = odown->head;
- if(NULL == ospan)
- goto empty;
-
- /* obtain derived data type */
- if(FAIL == H5S_obtain_datatype(space->extent.size, ospan, &span_type, elmt_size, space->extent.rank))
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type")
-
+ HDassert(space->extent.size);
+ HDassert(space->select.sel_info.hslab->span_lst);
+ HDassert(space->select.sel_info.hslab->span_lst->head);
+
+ /* Create the base type for an element */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ elmt_type_is_derived = TRUE;
+
+ /* Compute 'down' sizes for each dimension */
+ if(H5V_array_down(space->extent.rank, space->extent.size, down) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGETSIZE, FAIL, "couldn't compute 'down' dimension sizes")
+
+ /* Obtain derived data type */
+ if(H5S_obtain_datatype(down, space->select.sel_info.hslab->span_lst->head, &elmt_type, &span_type, elmt_size) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
*new_type = span_type;
+
/* fill in the remaining return values */
*count = 1;
- *extra_offset = 0;
*is_derived_type = TRUE;
- HGOTO_DONE(SUCCEED)
-
-empty:
- /* special case: empty hyperslab */
- *new_type = MPI_BYTE;
- *count = 0;
- *extra_offset = 0;
- *is_derived_type = FALSE;
-
done:
+ /* Release resources */
+ if(elmt_type_is_derived)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&elmt_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_mpio_span_hyper_type() */
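
The 'down' sizes passed to H5S_obtain_datatype are the number of elements covered by a unit step in each dimension, i.e. the product of all faster-varying extents. A small sketch of that computation (plain C; H5V_array_down is assumed to produce these values):

#include <stdio.h>

int main(void)
{
    unsigned long long size[3] = {4, 5, 6};     /* hypothetical 3-D extent */
    unsigned long long down[3];
    int i;

    down[2] = 1;
    for(i = 1; i >= 0; i--)
        down[i] = down[i + 1] * size[i + 1];

    /* down = {30, 6, 1}: one index step in dim 0 skips 30 elements, in dim 1 skips 6 */
    for(i = 0; i < 3; i++)
        printf("down[%d] = %llu\n", i, down[i]);
    return 0;
}
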
@@ -564,7 +477,7 @@ done:
 * Function:	H5S_obtain_datatype
*
* Purpose: Obtain an MPI derived datatype based on span-tree
- implementation
+ * implementation
*
* Return: non-negative on success, negative on failure.
*
@@ -572,165 +485,169 @@ done:
*
* Programmer: kyang
*
+ *-------------------------------------------------------------------------
*/
static herr_t
-H5S_obtain_datatype(const hsize_t size[],
- H5S_hyper_span_t* span,
- MPI_Datatype *span_type,
- size_t elmt_size,
- int dimindex)
+H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size)
{
- int innercount, outercount;
- MPI_Datatype bas_type;
- MPI_Datatype temp_type;
- MPI_Datatype tempinner_type;
+ size_t alloc_count; /* Number of span tree nodes allocated at this level */
+ size_t outercount; /* Number of span tree nodes at this level */
MPI_Datatype *inner_type = NULL;
+ hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
+ hbool_t span_type_valid = FALSE; /* Whether the span_type MPI datatypes is valid */
int *blocklen = NULL;
MPI_Aint *disp = NULL;
- MPI_Aint stride;
- H5S_hyper_span_info_t *down;
- H5S_hyper_span_t *tspan;
-#ifdef H5_HAVE_MPI2
- MPI_Aint sizeaint, sizedtype;
-#endif /* H5_HAVE_MPI2 */
- hsize_t total_lowd, total_lowd1;
- int i;
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ H5S_hyper_span_t *tspan; /* Temporary pointer to span tree node */
+ int mpi_code; /* MPI return status code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5S_obtain_datatype)
+ /* Sanity check */
HDassert(span);
- inner_type = NULL;
- down = NULL;
- tspan = NULL;
- down = span->down;
- tspan = span;
-
- /* Obtain the number of span tree nodes for this dimension */
- outercount = 0;
- while(tspan) {
- tspan = tspan->next;
- outercount++;
- } /* end while */
- if(outercount == 0)
- HGOTO_DONE(SUCCEED)
-
-/* MPI2 hasn't been widely acccepted, adding H5_HAVE_MPI2 for the future use */
-#ifdef H5_HAVE_MPI2
- MPI_Type_extent(MPI_Aint, &sizeaint);
- MPI_Type_extent(MPI_Datatype, &sizedtype);
-
- blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
- disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeaint);
- inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizedtype);
-#else
- blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
- disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeof(MPI_Aint));
- inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizeof(MPI_Datatype));
-#endif
-
- tspan = span;
- outercount = 0;
+ /* Allocate the initial displacement & block length buffers */
+ alloc_count = H5S_MPIO_INITIAL_ALLOC_COUNT;
+ if(NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if(NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
/* if this is the fastest changing dimension, it is the base case for derived datatype. */
- if(down == NULL) {
-
- HDassert(dimindex <= 1);
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &bas_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&bas_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
+ if(NULL == span->down) {
+ tspan = span;
+ outercount = 0;
while(tspan) {
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ } /* end if */
+
+ /* Store displacement & block length */
disp[outercount] = (MPI_Aint)elmt_size * tspan->low;
blocklen[outercount] = tspan->nelem;
+
tspan = tspan->next;
outercount++;
} /* end while */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed(outercount, blocklen, disp, bas_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code);
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code)
+ span_type_valid = TRUE;
} /* end if */
- else { /* dimindex is the rank of the dimension */
-
- HDassert(dimindex > 1);
-
- /* Calculate the total bytes of the lower dimensions */
- total_lowd = 1; /* one dimension down */
- total_lowd1 = 1; /* two dimensions down */
-
- for(i = dimindex - 1; i > 0; i--)
- total_lowd = total_lowd * size[i];
+ else {
+ size_t u; /* Local index variable */
- for(i = dimindex - 1; i > 1; i--)
- total_lowd1 = total_lowd1 * size[i];
+ if(NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ tspan = span;
+ outercount = 0;
while(tspan) {
+ MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
+ MPI_Aint stride; /* Distance between inner MPI datatypes */
+
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+ MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ if(NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(inner_type, alloc_count * sizeof(MPI_Datatype))))
+                    HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+                inner_type = tmp_inner_type;
+            } /* end if */
/* Displacement should be in byte and should have dimension information */
/* First using MPI Type vector to build derived data type for this span only */
/* Need to calculate the disp in byte for this dimension. */
/* Calculate the total bytes of the lower dimension */
-
- disp[outercount] = tspan->low * total_lowd * elmt_size;
+ disp[outercount] = tspan->low * (*down) * elmt_size;
blocklen[outercount] = 1;
- /* generating inner derived datatype by using MPI_Type_hvector */
- if(FAIL == H5S_obtain_datatype(size, tspan->down->head, &temp_type, elmt_size, dimindex - 1))
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type")
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
-
- /* building the inner vector datatype */
- stride = total_lowd * elmt_size;
- innercount = tspan->nelem;
+ /* Generate MPI datatype for next dimension down */
+ if(H5S_obtain_datatype(down + 1, tspan->down->head, elmt_type, &down_type, elmt_size) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
- if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector(innercount, 1, stride, temp_type, &tempinner_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code);
+ /* Build the MPI datatype for this node */
+ stride = (*down) * elmt_size;
+ H5_CHECK_OVERFLOW(tspan->nelem, hsize_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector((int)tspan->nelem, 1, stride, down_type, &inner_type[outercount]))) {
+ MPI_Type_free(&down_type);
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code)
+ } /* end if */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&tempinner_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
+ /* Release MPI datatype for next dimension down */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&down_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
-
- inner_type[outercount] = tempinner_type;
- outercount ++;
tspan = tspan->next;
+ outercount++;
} /* end while */
/* building the whole vector datatype */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_struct(outercount, blocklen, disp, inner_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code);
+ H5_CHECK_OVERFLOW(outercount, size_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)outercount, blocklen, disp, inner_type, span_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code)
+ span_type_valid = TRUE;
+
+ /* Release inner node types */
+ for(u = 0; u < outercount; u++)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ inner_types_freed = TRUE;
} /* end else */
- if(inner_type != NULL && down != NULL) {
- } /* end if */
-
done:
+ /* General cleanup */
if(inner_type != NULL) {
- if(down != NULL) {
- for(i = 0; i < outercount; i++)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[i])))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
+ if(!inner_types_freed) {
+ size_t u; /* Local index variable */
+
+ for(u = 0; u < outercount; u++)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
} /* end if */
- HDfree(inner_type);
+ H5MM_free(inner_type);
} /* end if */
if(blocklen != NULL)
- HDfree(blocklen);
+ H5MM_free(blocklen);
if(disp != NULL)
- HDfree(disp);
+ H5MM_free(disp);
+
+ /* Error cleanup */
+ if(ret_value < 0) {
+ if(span_type_valid)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(span_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ } /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_obtain_datatype() */
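
At the bottom of the recursion the span list of the fastest-varying dimension maps directly onto an h-indexed MPI type: one byte displacement and one block length per (low, nelem) run over the element type. A sketch of that base case using plain MPI (MPI_Type_create_hindexed, the MPI-2 replacement for the MPI_Type_hindexed call used above; the HDF5 span-tree structures are not used here):

#include <mpi.h>

static int
spans_to_hindexed(int nspans, const long low[], const int nelem[],
    int elmt_size, MPI_Datatype elmt_type, MPI_Datatype *span_type)
{
    MPI_Aint disp[16];
    int blocklen[16];
    int i;

    if(nspans > 16)
        return MPI_ERR_ARG;         /* keep the sketch fixed-size */

    for(i = 0; i < nspans; i++) {
        disp[i] = (MPI_Aint)low[i] * elmt_size;     /* byte offset of span start */
        blocklen[i] = nelem[i];                     /* elements in the span */
    }

    return MPI_Type_create_hindexed(nspans, blocklen, disp, elmt_type, span_type);
}
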
-
/*-------------------------------------------------------------------------
* Function: H5S_mpio_space_type
@@ -743,49 +660,38 @@ done:
* Outputs: *new_type the MPI type corresponding to the selection
* *count how many objects of the new_type in selection
* (useful if this is the buffer type for xfer)
- * *extra_offset Number of bytes of offset within dataset
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
*
- * Modifications:
- *
- * Quincey Koziol, June 18, 2002
- * Added 'extra_offset' parameter
- *
*-------------------------------------------------------------------------
*/
herr_t
-H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type )
+H5S_mpio_space_type(const H5S_t *space, size_t elmt_size,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_space_type);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_space_type)
/* Check args */
HDassert(space);
+ HDassert(elmt_size);
    /* Create MPI type based on the kind of selection */
- switch (H5S_GET_EXTENT_TYPE(space)) {
+ switch(H5S_GET_EXTENT_TYPE(space)) {
case H5S_NULL:
case H5S_SCALAR:
case H5S_SIMPLE:
switch(H5S_GET_SELECT_TYPE(space)) {
case H5S_SEL_NONE:
- if ( H5S_mpio_none_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type ) <0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
+ if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type")
break;
case H5S_SEL_ALL:
- if ( H5S_mpio_all_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type ) <0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
+ if(H5S_mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type")
break;
case H5S_SEL_POINTS:
@@ -794,16 +700,14 @@ H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
break;
case H5S_SEL_HYPERSLABS:
- if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
- if(H5S_mpio_hyper_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type )<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
- }
- else {
- if(H5S_mpio_span_hyper_type( space, elmt_size,
- /* out: */ new_type, count, extra_offset, is_derived_type )<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert \"all\" selection to MPI type");
- }
+            if(H5S_SELECT_IS_REGULAR(space) == TRUE) {
+ if(H5S_mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
+ } /* end if */
+ else {
+ if(H5S_mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type")
+ } /* end else */
break;
default:
@@ -815,11 +719,10 @@ H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
default:
HDassert("unknown data space type" && 0);
break;
- }
+ } /* end switch */
done:
FUNC_LEAVE_NOAPI(ret_value);
-}
-
+} /* end H5S_mpio_space_type() */
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Snone.c b/src/H5Snone.c
index c6e8a6a..1948f13 100644
--- a/src/H5Snone.c
+++ b/src/H5Snone.c
@@ -48,6 +48,8 @@ static htri_t H5S_none_is_contiguous(const H5S_t *space);
static htri_t H5S_none_is_single(const H5S_t *space);
static htri_t H5S_none_is_regular(const H5S_t *space);
static herr_t H5S_none_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_none_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_none_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_none_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -77,6 +79,8 @@ const H5S_select_class_t H5S_sel_none[1] = {{
H5S_none_is_single,
H5S_none_is_regular,
H5S_none_adjust_u,
+ H5S_none_project_scalar,
+ H5S_none_project_simple,
H5S_none_iter_init,
}};
@@ -110,18 +114,18 @@ static const H5S_sel_iter_class_t H5S_sel_iter_none[1] = {{
*-------------------------------------------------------------------------
*/
herr_t
-H5S_none_iter_init (H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
+H5S_none_iter_init(H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOFUNC(H5S_none_iter_init);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_none_iter_init)
/* Check args */
- assert (space && H5S_SEL_NONE==H5S_GET_SELECT_TYPE(space));
- assert (iter);
+ HDassert(space && H5S_SEL_NONE==H5S_GET_SELECT_TYPE(space));
+ HDassert(iter);
/* Initialize type of selection iterator */
- iter->type=H5S_sel_iter_none;
+ iter->type = H5S_sel_iter_none;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_init() */
@@ -141,15 +145,15 @@ H5S_none_iter_init (H5S_sel_iter_t *iter, const H5S_t UNUSED *space)
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_none_iter_coords (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
+H5S_none_iter_coords(const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_coords);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_coords)
/* Check args */
- assert (iter);
- assert (coords);
+ HDassert(iter);
+ HDassert(coords);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_coords() */
@@ -169,16 +173,16 @@ H5S_none_iter_coords (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *coords)
*-------------------------------------------------------------------------
*/
static herr_t
-H5S_none_iter_block (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, hsize_t UNUSED *end)
+H5S_none_iter_block(const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, hsize_t UNUSED *end)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_block)
/* Check args */
- assert (iter);
- assert (start);
- assert (end);
+ HDassert(iter);
+ HDassert(start);
+ HDassert(end);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_block() */
@@ -197,14 +201,14 @@ H5S_none_iter_block (const H5S_sel_iter_t UNUSED *iter, hsize_t UNUSED *start, h
*-------------------------------------------------------------------------
*/
static hsize_t
-H5S_none_iter_nelmts (const H5S_sel_iter_t UNUSED *iter)
+H5S_none_iter_nelmts(const H5S_sel_iter_t UNUSED *iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_nelmts);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_nelmts)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(0);
+ FUNC_LEAVE_NOAPI(0)
} /* H5S_none_iter_nelmts() */
@@ -228,12 +232,12 @@ H5S_none_iter_nelmts (const H5S_sel_iter_t UNUSED *iter)
static htri_t
H5S_none_iter_has_next_block(const H5S_sel_iter_t UNUSED *iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_has_next_block)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_iter_has_next_block() */
@@ -258,13 +262,13 @@ H5S_none_iter_has_next_block(const H5S_sel_iter_t UNUSED *iter)
static herr_t
H5S_none_iter_next(H5S_sel_iter_t UNUSED *iter, size_t UNUSED nelem)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_next);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_next)
/* Check args */
- assert (iter);
- assert (nelem>0);
+ HDassert(iter);
+ HDassert(nelem>0);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_next() */
@@ -315,14 +319,14 @@ H5S_none_iter_next_block(H5S_sel_iter_t UNUSED *iter)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_iter_release (H5S_sel_iter_t UNUSED * iter)
+H5S_none_iter_release(H5S_sel_iter_t UNUSED * iter)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_iter_release)
/* Check args */
- assert (iter);
+ HDassert(iter);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_iter_release() */
@@ -344,14 +348,14 @@ H5S_none_iter_release (H5S_sel_iter_t UNUSED * iter)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_release (H5S_t UNUSED * space)
+H5S_none_release(H5S_t UNUSED * space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_release);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_release)
/* Check args */
- assert (space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_release() */
@@ -377,15 +381,15 @@ H5S_none_release (H5S_t UNUSED * space)
static herr_t
H5S_none_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selection)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_copy);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_copy)
- assert(src);
- assert(dst);
+ HDassert(src);
+ HDassert(dst);
/* Set number of elements in selection */
- dst->select.num_elem=0;
+ dst->select.num_elem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_none_copy() */
@@ -410,13 +414,13 @@ H5S_none_copy(H5S_t *dst, const H5S_t UNUSED *src, hbool_t UNUSED share_selectio
REVISION LOG
--------------------------------------------------------------------------*/
static htri_t
-H5S_none_is_valid (const H5S_t UNUSED *space)
+H5S_none_is_valid(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_valid);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_valid)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(TRUE);
+ FUNC_LEAVE_NOAPI(TRUE)
} /* end H5S_none_is_valid() */
@@ -440,17 +444,17 @@ H5S_none_is_valid (const H5S_t UNUSED *space)
REVISION LOG
--------------------------------------------------------------------------*/
static hssize_t
-H5S_none_serial_size (const H5S_t UNUSED *space)
+H5S_none_serial_size(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serial_size);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serial_size)
- assert(space);
+ HDassert(space);
/* Basic number of bytes required to serialize "none" selection:
* <type (4 bytes)> + <version (4 bytes)> + <padding (4 bytes)> +
* <length (4 bytes)> = 16 bytes
*/
- FUNC_LEAVE_NOAPI(16);
+ FUNC_LEAVE_NOAPI(16)
} /* end H5S_none_serial_size() */
@@ -474,11 +478,11 @@ H5S_none_serial_size (const H5S_t UNUSED *space)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_serialize (const H5S_t *space, uint8_t *buf)
+H5S_none_serialize(const H5S_t *space, uint8_t *buf)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serialize);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_serialize)
- assert(space);
+ HDassert(space);
/* Store the preamble information */
UINT32ENCODE(buf, (uint32_t)H5S_GET_SELECT_TYPE(space)); /* Store the type of selection */
@@ -486,7 +490,7 @@ H5S_none_serialize (const H5S_t *space, uint8_t *buf)
UINT32ENCODE(buf, (uint32_t)0); /* Store the un-used padding */
UINT32ENCODE(buf, (uint32_t)0); /* Store the additional information length */
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S_none_serialize() */
@@ -510,20 +514,20 @@ H5S_none_serialize (const H5S_t *space, uint8_t *buf)
REVISION LOG
--------------------------------------------------------------------------*/
static herr_t
-H5S_none_deserialize (H5S_t *space, const uint8_t UNUSED *buf)
+H5S_none_deserialize(H5S_t *space, const uint8_t UNUSED *buf)
{
- herr_t ret_value; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI_NOINIT(H5S_none_deserialize);
+ FUNC_ENTER_NOAPI_NOINIT(H5S_none_deserialize)
- assert(space);
+ HDassert(space);
/* Change to "none" selection */
- if((ret_value=H5S_select_none(space))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection");
+ if(H5S_select_none(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't change selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_none_deserialize() */
@@ -555,13 +559,13 @@ done:
static herr_t
H5S_none_bounds(const H5S_t UNUSED *space, hsize_t UNUSED *start, hsize_t UNUSED *end)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_bounds);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_bounds)
- assert(space);
- assert(start);
- assert(end);
+ HDassert(space);
+ HDassert(start);
+ HDassert(end);
- FUNC_LEAVE_NOAPI(FAIL);
+ FUNC_LEAVE_NOAPI(FAIL)
} /* H5S_none_bounds() */
@@ -618,11 +622,11 @@ H5S_none_offset(const H5S_t UNUSED *space, hsize_t UNUSED *offset)
static htri_t
H5S_none_is_contiguous(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_contiguous);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_contiguous)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(FALSE);
+ FUNC_LEAVE_NOAPI(FALSE)
} /* H5S_none_is_contiguous() */
@@ -647,11 +651,11 @@ H5S_none_is_contiguous(const H5S_t UNUSED *space)
static htri_t
H5S_none_is_single(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_single);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_single)
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(FALSE);
+ FUNC_LEAVE_NOAPI(FALSE)
} /* H5S_none_is_single() */
@@ -677,12 +681,12 @@ H5S_none_is_single(const H5S_t UNUSED *space)
static htri_t
H5S_none_is_regular(const H5S_t UNUSED *space)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_regular);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_is_regular)
/* Check args */
- assert(space);
+ HDassert(space);
- FUNC_LEAVE_NOAPI(TRUE);
+ FUNC_LEAVE_NOAPI(TRUE)
} /* H5S_none_is_regular() */
@@ -717,6 +721,65 @@ H5S_none_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
} /* H5S_none_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_none_project_scalar
+ *
+ * Purpose: Projects a 'none' selection into a scalar dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_none_project_scalar(const H5S_t UNUSED *space, hsize_t UNUSED *offset)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_NONE == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* H5S_none_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_none_project_simple
+ *
+ * Purpose: Projects a 'none' selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_none_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_none_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_NONE == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* Set the "none" selection on the new dataspace */
+ if(H5S_select_none(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to set none selection")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_none_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5S_select_none
@@ -734,27 +797,28 @@ H5S_none_adjust_u(H5S_t UNUSED *space, const hsize_t UNUSED *offset)
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
-herr_t H5S_select_none (H5S_t *space)
+herr_t
+H5S_select_none(H5S_t *space)
{
- herr_t ret_value=SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_none, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_none, FAIL)
/* Check args */
- assert(space);
+ HDassert(space);
/* Remove current selection first */
- if(H5S_SELECT_RELEASE(space)<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab");
+ if(H5S_SELECT_RELEASE(space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release hyperslab")
/* Set number of elements in selection */
- space->select.num_elem=0;
+ space->select.num_elem = 0;
/* Set selection type */
- space->select.type=H5S_sel_none;
+ space->select.type = H5S_sel_none;
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_none() */
@@ -833,24 +897,24 @@ H5S_none_get_seq_list(const H5S_t UNUSED *space, unsigned UNUSED flags, H5S_sel_
size_t UNUSED maxseq, size_t UNUSED maxelem, size_t *nseq, size_t *nelem,
hsize_t UNUSED *off, size_t UNUSED *len)
{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_get_seq_list);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_none_get_seq_list)
/* Check args */
- assert(space);
- assert(iter);
- assert(maxseq>0);
- assert(maxelem>0);
- assert(nseq);
- assert(nelem);
- assert(off);
- assert(len);
+ HDassert(space);
+ HDassert(iter);
+ HDassert(maxseq > 0);
+ HDassert(maxelem > 0);
+ HDassert(nseq);
+ HDassert(nelem);
+ HDassert(off);
+ HDassert(len);
/* "none" selections don't generate sequences of bytes */
- *nseq=0;
+ *nseq = 0;
/* They don't use any elements, either */
- *nelem=0;
+ *nelem = 0;
- FUNC_LEAVE_NOAPI(SUCCEED);
+ FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5S_none_get_seq_list() */
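
The two new 'none' callbacks above are nearly trivial: an empty selection projects to an empty selection in any rank, and projecting it into a scalar dataspace can never produce an element offset. A minimal, standalone sketch of that user-visible behaviour, using only the public dataspace API (this example is illustrative and not part of the change itself):

    #include "hdf5.h"
    #include <assert.h>

    int
    main(void)
    {
        hsize_t dims2[2] = {10, 10};        /* rank-2 extent */
        hsize_t dims3[3] = {1, 10, 10};     /* rank-3 extent, size 1 in the slowest dimension */
        hid_t   sid2 = H5Screate_simple(2, dims2, NULL);
        hid_t   sid3 = H5Screate_simple(3, dims3, NULL);

        /* An empty selection stays empty no matter which rank it is viewed in,
         * which is why H5S_none_project_simple() simply calls H5S_select_none()
         * on the destination dataspace. */
        H5Sselect_none(sid2);
        H5Sselect_none(sid3);
        assert(0 == H5Sget_select_npoints(sid2));
        assert(0 == H5Sget_select_npoints(sid3));

        H5Sclose(sid3);
        H5Sclose(sid2);
        return 0;
    }
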
diff --git a/src/H5Spkg.h b/src/H5Spkg.h
index b7818a2..0a9df69 100644
--- a/src/H5Spkg.h
+++ b/src/H5Spkg.h
@@ -145,6 +145,10 @@ typedef htri_t (*H5S_sel_is_single_func_t)(const H5S_t *space);
typedef htri_t (*H5S_sel_is_regular_func_t)(const H5S_t *space);
/* Method to adjust a selection by an offset */
typedef herr_t (*H5S_sel_adjust_u_func_t)(H5S_t *space, const hsize_t *offset);
+/* Method to construct single element projection onto scalar dataspace */
+typedef herr_t (*H5S_sel_project_scalar)(const H5S_t *space, hsize_t *offset);
+/* Method to construct selection projection onto/into simple dataspace */
+typedef herr_t (*H5S_sel_project_simple)(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
/* Method to initialize iterator for current selection */
typedef herr_t (*H5S_sel_iter_init_func_t)(H5S_sel_iter_t *sel_iter, const H5S_t *space);
@@ -166,6 +170,8 @@ typedef struct {
H5S_sel_is_single_func_t is_single; /* Method to determine if current selection is a single block */
H5S_sel_is_regular_func_t is_regular; /* Method to determine if current selection is "regular" */
H5S_sel_adjust_u_func_t adjust_u; /* Method to adjust a selection by an offset */
+ H5S_sel_project_scalar project_scalar; /* Method to construct scalar dataspace projection */
+ H5S_sel_project_simple project_simple; /* Method to construct simple dataspace projection */
H5S_sel_iter_init_func_t iter_init; /* Method to initialize iterator for current selection */
} H5S_select_class_t;
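
The structure above is the per-selection-type callback table: H5S_sel_all, H5S_sel_none, H5S_sel_point and H5S_sel_hyper each fill in these members, and the generic wrappers in H5Sselect.c (or the inlined H5S_SELECT_*() macros) simply forward through the function pointers. The following toy program sketches that dispatch pattern; every name in it (toy_space_t, toy_sel_class_t, ...) is invented for illustration and does not come from the HDF5 sources:

    #include <stdio.h>

    struct toy_space;

    /* Per-selection-type callback table, analogous to H5S_select_class_t */
    typedef struct {
        const char *name;
        int       (*project_scalar)(const struct toy_space *space, unsigned long long *offset);
    } toy_sel_class_t;

    typedef struct toy_space {
        const toy_sel_class_t *sel_type;    /* selection type this space currently has */
    } toy_space_t;

    /* 'none'-style callback: an empty selection cannot be projected to a scalar */
    static int
    toy_none_project_scalar(const toy_space_t *space, unsigned long long *offset)
    {
        (void)space;
        (void)offset;
        return -1;
    }

    static const toy_sel_class_t toy_sel_none = { "none", toy_none_project_scalar };

    /* Generic wrapper, analogous to H5S_select_project_scalar() /
     * the H5S_SELECT_PROJECT_SCALAR() macro */
    static int
    toy_select_project_scalar(const toy_space_t *space, unsigned long long *offset)
    {
        return (*space->sel_type->project_scalar)(space, offset);
    }

    int
    main(void)
    {
        toy_space_t space = { &toy_sel_none };
        unsigned long long offset = 0;

        printf("%s: project_scalar -> %d\n", space.sel_type->name,
               toy_select_project_scalar(&space, &offset));
        return 0;
    }
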
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 24dfe2a..cb7e98f 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -49,6 +49,8 @@ static htri_t H5S_point_is_contiguous(const H5S_t *space);
static htri_t H5S_point_is_single(const H5S_t *space);
static htri_t H5S_point_is_regular(const H5S_t *space);
static herr_t H5S_point_adjust_u(H5S_t *space, const hsize_t *offset);
+static herr_t H5S_point_project_scalar(const H5S_t *space, hsize_t *offset);
+static herr_t H5S_point_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
static herr_t H5S_point_iter_init(H5S_sel_iter_t *iter, const H5S_t *space);
/* Selection iteration callbacks */
@@ -78,6 +80,8 @@ const H5S_select_class_t H5S_sel_point[1] = {{
H5S_point_is_single,
H5S_point_is_regular,
H5S_point_adjust_u,
+ H5S_point_project_scalar,
+ H5S_point_project_simple,
H5S_point_iter_init,
}};
@@ -610,18 +614,18 @@ H5S_point_copy(H5S_t *dst, const H5S_t *src, hbool_t UNUSED share_selection)
/* Allocate room for the head of the point list */
if(NULL == (dst->select.sel_info.pnt_lst = H5FL_MALLOC(H5S_pnt_list_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate point node")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point list node")
curr = src->select.sel_info.pnt_lst->head;
new_tail = NULL;
while(curr) {
/* Create new point */
if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate point node")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
new_node->next = NULL;
- if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(src->extent.rank*sizeof(hsize_t)))) {
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(src->extent.rank * sizeof(hsize_t)))) {
new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate coordinate information")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
} /* end if */
/* Copy over the point's coordinates */
@@ -976,7 +980,7 @@ H5S_get_select_elem_pointlist(H5S_t *space, hsize_t startpoint, hsize_t numpoint
node = node->next;
} /* end while */
- /* Iterate through the node, copying each hyperslab's information */
+ /* Iterate through the node, copying each point's information */
while(node != NULL && numpoints > 0) {
HDmemcpy(buf, node->pnt, sizeof(hsize_t) * rank);
buf += rank;
@@ -1346,6 +1350,173 @@ H5S_point_adjust_u(H5S_t *space, const hsize_t *offset)
} /* H5S_point_adjust_u() */
+/*-------------------------------------------------------------------------
+ * Function: H5S_point_project_scalar
+ *
+ * Purpose: Projects a single element point selection into a scalar
+ * dataspace
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_point_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ const H5S_pnt_node_t *node; /* Point node */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_point_project_scalar)
+
+ /* Check args */
+ HDassert(space && H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(space));
+ HDassert(offset);
+
+ /* Get the head of the point list */
+ node = space->select.sel_info.pnt_lst->head;
+
+ /* Check for more than one point selected */
+ if(node->next)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "point selection of one element has more than one node!")
+
+ /* Calculate offset of selection in projected buffer */
+ *offset = H5V_array_offset(space->extent.rank, space->extent.size, node->pnt);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_point_project_scalar() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_point_project_simple
+ *
+ * Purpose: Projects a point selection onto/into a simple dataspace
+ * of a different rank
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Sunday, July 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S_point_project_simple(const H5S_t *base_space, H5S_t *new_space, hsize_t *offset)
+{
+ const H5S_pnt_node_t *base_node; /* Point node in base space */
+ H5S_pnt_node_t *new_node; /* Point node in new space */
+ H5S_pnt_node_t *prev_node; /* Previous point node in new space */
+ unsigned rank_diff; /* Difference in ranks between spaces */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5S_point_project_simple)
+
+ /* Check args */
+ HDassert(base_space && H5S_SEL_POINTS == H5S_GET_SELECT_TYPE(base_space));
+ HDassert(new_space);
+ HDassert(offset);
+
+ /* We are setting a new selection, remove any current selection in new dataspace */
+ if(H5S_SELECT_RELEASE(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection")
+
+ /* Allocate room for the head of the point list */
+ if(NULL == (new_space->select.sel_info.pnt_lst = H5FL_MALLOC(H5S_pnt_list_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point list node")
+
+ /* Check if the new space's rank is < or > base space's rank */
+ if(new_space->extent.rank < base_space->extent.rank) {
+ hsize_t block[H5S_MAX_RANK]; /* Block selected in base dataspace */
+
+ /* Compute the difference in ranks */
+ rank_diff = base_space->extent.rank - new_space->extent.rank;
+
+ /* Calculate offset of selection in projected buffer */
+ HDmemset(block, 0, sizeof(block));
+ HDmemcpy(block, base_space->select.sel_info.pnt_lst->head->pnt, sizeof(hsize_t) * rank_diff);
+ *offset = H5V_array_offset(base_space->extent.rank, base_space->extent.size, block);
+
+ /* Iterate through base space's point nodes, copying the point information */
+ base_node = base_space->select.sel_info.pnt_lst->head;
+ prev_node = NULL;
+ while(base_node) {
+ /* Create new point */
+ if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
+ new_node->next = NULL;
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(new_space->extent.rank * sizeof(hsize_t)))) {
+ new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
+ } /* end if */
+
+ /* Copy over the point's coordinates */
+ HDmemcpy(new_node->pnt, &base_node->pnt[rank_diff], (new_space->extent.rank * sizeof(hsize_t)));
+
+ /* Keep the order the same when copying */
+ if(NULL == prev_node)
+ prev_node = new_space->select.sel_info.pnt_lst->head = new_node;
+ else {
+ prev_node->next = new_node;
+ prev_node = new_node;
+ } /* end else */
+
+ /* Advance to next node */
+ base_node = base_node->next;
+ } /* end while */
+ } /* end if */
+ else {
+ HDassert(new_space->extent.rank > base_space->extent.rank);
+
+ /* Compute the difference in ranks */
+ rank_diff = new_space->extent.rank - base_space->extent.rank;
+
+ /* The offset is zero when projected into higher dimensions */
+ *offset = 0;
+
+ /* Iterate through base space's point nodes, copying the point information */
+ base_node = base_space->select.sel_info.pnt_lst->head;
+ prev_node = NULL;
+ while(base_node) {
+ /* Create new point */
+ if(NULL == (new_node = H5FL_MALLOC(H5S_pnt_node_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate point node")
+ new_node->next = NULL;
+ if(NULL == (new_node->pnt = (hsize_t *)H5MM_malloc(new_space->extent.rank * sizeof(hsize_t)))) {
+ new_node = H5FL_FREE(H5S_pnt_node_t, new_node);
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate coordinate information")
+ } /* end if */
+
+ /* Copy over the point's coordinates */
+ HDmemset(new_node->pnt, 0, sizeof(hsize_t) * rank_diff);
+ HDmemcpy(&new_node->pnt[rank_diff], base_node->pnt, (base_space->extent.rank * sizeof(hsize_t)));
+
+ /* Keep the order the same when copying */
+ if(NULL == prev_node)
+ prev_node = new_space->select.sel_info.pnt_lst->head = new_node;
+ else {
+ prev_node->next = new_node;
+ prev_node = new_node;
+ } /* end else */
+
+ /* Advance to next node */
+ base_node = base_node->next;
+ } /* end while */
+ } /* end else */
+
+ /* Number of elements selected will be the same */
+ new_space->select.num_elem = base_space->select.num_elem;
+
+ /* Set selection type */
+ new_space->select.type = H5S_sel_point;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_point_project_simple() */
+
+
/*--------------------------------------------------------------------------
NAME
H5Sselect_elements
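
Projecting a point selection reduces to a per-point coordinate copy: when the new rank is smaller, the rank_diff slowest changing coordinates are dropped (the shape-same precondition means they are constant across the selection); when the new rank is larger, rank_diff zero coordinates are prepended. The standalone sketch below restates that copy outside the library; project_point() is a hypothetical helper, not an HDF5 routine:

    #include <assert.h>
    #include <string.h>

    typedef unsigned long long coord_t;     /* stand-in for hsize_t */

    /* Copy one point's coordinates between spaces of different rank, mirroring
     * the two HDmemcpy()/HDmemset() branches in H5S_point_project_simple() */
    static void
    project_point(const coord_t *base_pnt, unsigned base_rank,
                  coord_t *new_pnt, unsigned new_rank)
    {
        if(new_rank < base_rank) {
            /* Projecting down: keep only the new_rank fastest changing coordinates */
            unsigned rank_diff = base_rank - new_rank;

            memcpy(new_pnt, &base_pnt[rank_diff], new_rank * sizeof(coord_t));
        } /* end if */
        else {
            /* Projecting up: pad the slowest changing coordinates with zeros */
            unsigned rank_diff = new_rank - base_rank;

            memset(new_pnt, 0, rank_diff * sizeof(coord_t));
            memcpy(&new_pnt[rank_diff], base_pnt, base_rank * sizeof(coord_t));
        } /* end else */
    }

    int
    main(void)
    {
        coord_t base_pnt[3] = {2, 5, 7};    /* point in a rank-3 space */
        coord_t new_pnt[2];

        project_point(base_pnt, 3, new_pnt, 2);
        assert(new_pnt[0] == 5 && new_pnt[1] == 7);
        return 0;
    }
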
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index 2858ddb..2faf977 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -133,6 +133,8 @@ typedef struct H5S_sel_iter_t {
#define H5S_SELECT_IS_SINGLE(S) ((*(S)->select.type->is_single)(S))
#define H5S_SELECT_IS_REGULAR(S) ((*(S)->select.type->is_regular)(S))
#define H5S_SELECT_ADJUST_U(S,O) ((*(S)->select.type->adjust_u)(S, O))
+#define H5S_SELECT_PROJECT_SCALAR(S,O) ((*(S)->select.type->project_scalar)(S, O))
+#define H5S_SELECT_PROJECT_SIMPLE(S,NS,O) ((*(S)->select.type->project_simple)(S, NS, O))
#define H5S_SELECT_ITER_COORDS(ITER,COORDS) ((*(ITER)->type->iter_coords)(ITER,COORDS))
#define H5S_SELECT_ITER_BLOCK(ITER,START,END) ((*(ITER)->type->iter_block)(ITER,START,END))
#define H5S_SELECT_ITER_NELMTS(ITER) ((*(ITER)->type->iter_nelmts)(ITER))
@@ -157,6 +159,8 @@ typedef struct H5S_sel_iter_t {
#define H5S_SELECT_IS_SINGLE(S) (H5S_select_is_single(S))
#define H5S_SELECT_IS_REGULAR(S) (H5S_select_is_regular(S))
#define H5S_SELECT_ADJUST_U(S,O) (H5S_select_adjust_u(S, O))
+#define H5S_SELECT_PROJECT_SCALAR(S,O) (H5S_select_project_scalar(S, O))
+#define H5S_SELECT_PROJECT_SIMPLE(S,NS,O) (H5S_select_project_simple(S, NS, O))
#define H5S_SELECT_ITER_COORDS(ITER,COORDS) (H5S_select_iter_coords(ITER,COORDS))
#define H5S_SELECT_ITER_BLOCK(ITER,START,END) (H5S_select_iter_block(ITER,START,END))
#define H5S_SELECT_ITER_NELMTS(ITER) (H5S_select_iter_nelmts(ITER))
@@ -215,6 +219,9 @@ H5_DLL herr_t H5S_get_select_offset(const H5S_t *space, hsize_t *offset);
H5_DLL herr_t H5S_select_offset(H5S_t *space, const hssize_t *offset);
H5_DLL herr_t H5S_select_copy(H5S_t *dst, const H5S_t *src, hbool_t share_selection);
H5_DLL htri_t H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2);
+H5_DLL herr_t H5S_select_construct_projection(const H5S_t *base_space,
+ H5S_t **new_space_ptr, unsigned new_space_rank, const void *buf,
+ void **adj_buf_ptr, hsize_t element_size);
H5_DLL herr_t H5S_select_release(H5S_t *ds);
H5_DLL herr_t H5S_select_get_seq_list(const H5S_t *space, unsigned flags,
H5S_sel_iter_t *iter, size_t maxseq, size_t maxbytes,
@@ -225,6 +232,8 @@ H5_DLL htri_t H5S_select_is_contiguous(const H5S_t *space);
H5_DLL htri_t H5S_select_is_single(const H5S_t *space);
H5_DLL htri_t H5S_select_is_regular(const H5S_t *space);
H5_DLL herr_t H5S_select_adjust_u(H5S_t *space, const hsize_t *offset);
+H5_DLL herr_t H5S_select_project_scalar(const H5S_t *space, hsize_t *offset);
+H5_DLL herr_t H5S_select_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset);
/* Operations on all selections */
H5_DLL herr_t H5S_select_all(H5S_t *space, hbool_t rel_prev);
@@ -268,18 +277,8 @@ H5_DLL herr_t
H5S_mpio_space_type( const H5S_t *space, size_t elmt_size,
/* out: */
MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
+ int *count,
hbool_t *is_derived_type );
-
-H5_DLL herr_t
-H5S_mpio_space_span_type( const H5S_t *space, size_t elmt_size,
- /* out: */
- MPI_Datatype *new_type,
- size_t *count,
- hsize_t *extra_offset,
- hbool_t *is_derived_type );
-
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5Sprivate_H */
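
The new internal routine declared above also accepts the caller's buffer so it can hand back an adjusted base address: when the memory dataspace is projected into a smaller rank, the first selected element generally no longer sits at the start of the buffer, so the base address must be advanced by the element offset of that first element. The self-contained sketch below illustrates the arithmetic with a row-major offset computation analogous to H5V_array_offset(); all names in it are local to the example:

    #include <assert.h>
    #include <stddef.h>

    typedef unsigned long long uhsize_t;    /* stand-in for hsize_t */

    /* Row-major (C order) offset of 'coords' within an extent of 'dims' */
    static uhsize_t
    array_offset(unsigned rank, const uhsize_t *dims, const uhsize_t *coords)
    {
        uhsize_t offset = 0;
        unsigned u;

        for(u = 0; u < rank; u++)
            offset = (offset * dims[u]) + coords[u];

        return offset;
    }

    /* Base address of the projected selection within the caller's buffer:
     * advance 'buf' by the element offset of the first selected element */
    static void *
    adjust_buffer(void *buf, size_t element_size, unsigned base_rank,
                  const uhsize_t *base_dims, const uhsize_t *first_coord)
    {
        return (unsigned char *)buf +
               (size_t)(array_offset(base_rank, base_dims, first_coord) * element_size);
    }

    int
    main(void)
    {
        int      data[4][10][10];           /* buffer for a rank-3 memory dataspace */
        uhsize_t dims[3]  = {4, 10, 10};
        uhsize_t first[3] = {2, 0, 0};      /* selection lives entirely in "plane" 2 */

        /* Projecting the memory space down to rank 2 means I/O should start here */
        assert(adjust_buffer(data, sizeof(int), 3, dims, first) == (void *)&data[2][0][0]);
        return 0;
    }
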
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index a419131..af3c9f9 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -27,6 +27,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Spkg.h" /* Dataspaces */
#include "H5Vprivate.h" /* Vector and array functions */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -108,7 +109,7 @@ H5S_select_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_copy, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_copy, FAIL)
/* Check args */
assert(dst);
@@ -119,10 +120,10 @@ H5S_select_copy (H5S_t *dst, const H5S_t *src, hbool_t share_selection)
/* Perform correct type of copy based on the type of selection */
if((ret_value=(*src->select.type->copy)(dst,src,share_selection))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "can't copy selection specific information");
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "can't copy selection specific information")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_copy() */
@@ -149,14 +150,14 @@ H5S_select_release(H5S_t *ds)
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_release);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_release)
assert(ds);
/* Call the selection type's release function */
ret_value=(*ds->select.type->release)(ds);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_release() */
@@ -186,14 +187,14 @@ H5S_select_get_seq_list(const H5S_t *space, unsigned flags,
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_get_seq_list);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_get_seq_list)
assert(space);
/* Call the selection type's get_seq_list function */
ret_value=(*space->select.type->get_seq_list)(space,flags,iter,maxseq,maxbytes,nseq,nbytes,off,len);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_get_seq_list() */
@@ -221,14 +222,14 @@ H5S_select_serial_size(const H5S_t *space)
{
hssize_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serial_size);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serial_size)
assert(space);
/* Call the selection type's serial_size function */
ret_value=(*space->select.type->serial_size)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_serial_size() */
@@ -259,7 +260,7 @@ H5S_select_serialize(const H5S_t *space, uint8_t *buf)
{
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serialize);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_serialize)
assert(space);
assert(buf);
@@ -267,7 +268,7 @@ H5S_select_serialize(const H5S_t *space, uint8_t *buf)
/* Call the selection type's serialize function */
ret_value=(*space->select.type->serialize)(space,buf);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_select_serialize() */
@@ -410,13 +411,13 @@ H5S_select_valid(const H5S_t *space)
{
htri_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_valid);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_valid)
assert(space);
ret_value = (*space->select.type->is_valid)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_valid() */
@@ -449,7 +450,7 @@ H5S_select_deserialize (H5S_t *space, const uint8_t *buf)
uint32_t sel_type; /* Pointer to the selection type */
herr_t ret_value=FAIL; /* return value */
- FUNC_ENTER_NOAPI(H5S_select_deserialize, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_deserialize, FAIL)
assert(space);
@@ -476,10 +477,10 @@ H5S_select_deserialize (H5S_t *space, const uint8_t *buf)
break;
}
if(ret_value<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "can't deserialize selection");
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTLOAD, FAIL, "can't deserialize selection")
done:
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_deserialize() */
@@ -567,7 +568,7 @@ H5S_get_select_bounds(const H5S_t *space, hsize_t *start, hsize_t *end)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_bounds);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_bounds)
/* Check args */
assert(space);
@@ -576,7 +577,7 @@ H5S_get_select_bounds(const H5S_t *space, hsize_t *start, hsize_t *end)
ret_value = (*space->select.type->bounds)(space,start,end);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_get_select_bounds() */
@@ -646,14 +647,14 @@ H5S_select_is_contiguous(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_contiguous);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_contiguous)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_contiguous)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_contiguous() */
@@ -683,14 +684,14 @@ H5S_select_is_single(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_single);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_single)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_single)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_single() */
@@ -720,14 +721,14 @@ H5S_select_is_regular(const H5S_t *space)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_regular);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_is_regular)
/* Check args */
assert(space);
ret_value = (*space->select.type->is_regular)(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_is_regular() */
@@ -770,6 +771,86 @@ H5S_select_adjust_u(H5S_t *space, const hsize_t *offset)
/*--------------------------------------------------------------------------
NAME
+ H5S_select_project_scalar
+ PURPOSE
+ Project a single element selection for a scalar dataspace
+ USAGE
+ herr_t H5S_select_project_scalar(space, offset)
+ const H5S_t *space; IN: Pointer to dataspace to project
+ hsize_t *offset; IN/OUT: Offset of projected point
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Projects a selection of a single element into a scalar dataspace, computing
+ the offset of the element in the original selection.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ This routine participates in the "Inlining C function pointers"
+ pattern, don't call it directly, use the appropriate macro
+ defined in H5Sprivate.h.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_project_scalar(const H5S_t *space, hsize_t *offset)
+{
+ herr_t ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_project_scalar)
+
+ /* Check args */
+ HDassert(space);
+ HDassert(offset);
+
+ ret_value = (*space->select.type->project_scalar)(space, offset);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_project_scalar() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
+ H5S_select_project_simple
+ PURPOSE
+ Project a selection onto/into a dataspace of different rank
+ USAGE
+ herr_t H5S_select_project_simple(space, new_space, offset)
+ const H5S_t *space; IN: Pointer to dataspace to project
+ H5S_t *new_space; IN/OUT: Pointer to dataspace projected onto
+ hsize_t *offset; IN/OUT: Offset of projected point
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Projects a selection onto/into a simple dataspace, computing
+ the offset of the first element in the original selection.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ This routine participates in the "Inlining C function pointers"
+ pattern, don't call it directly, use the appropriate macro
+ defined in H5Sprivate.h.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_project_simple(const H5S_t *space, H5S_t *new_space, hsize_t *offset)
+{
+ herr_t ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_project_simple)
+
+ /* Check args */
+ HDassert(space);
+ HDassert(new_space);
+ HDassert(offset);
+
+ ret_value = (*space->select.type->project_simple)(space, new_space, offset);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_project_simple() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
H5S_select_iter_init
PURPOSE
Initializes iteration information for a selection.
@@ -790,7 +871,7 @@ H5S_select_iter_init(H5S_sel_iter_t *sel_iter, const H5S_t *space, size_t elmt_s
{
herr_t ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_init);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_init)
/* Check args */
assert(sel_iter);
@@ -813,7 +894,7 @@ H5S_select_iter_init(H5S_sel_iter_t *sel_iter, const H5S_t *space, size_t elmt_s
/* Call initialization routine for selection type */
ret_value= (*space->select.type->iter_init)(sel_iter, space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_init() */
@@ -844,7 +925,7 @@ H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_coords);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_coords)
/* Check args */
assert(sel_iter);
@@ -853,7 +934,7 @@ H5S_select_iter_coords (const H5S_sel_iter_t *sel_iter, hsize_t *coords)
/* Call iter_coords routine for selection type */
ret_value = (*sel_iter->type->iter_coords)(sel_iter,coords);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_coords() */
#ifdef LATER
@@ -886,7 +967,7 @@ H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_block)
/* Check args */
assert(iter);
@@ -896,7 +977,7 @@ H5S_select_iter_block (const H5S_sel_iter_t *iter, hsize_t *start, hsize_t *end)
/* Call iter_block routine for selection type */
ret_value = (*iter->type->iter_block)(iter,start,end);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_block() */
#endif /* LATER */
@@ -926,7 +1007,7 @@ H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
{
hsize_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_nelmts);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_nelmts)
/* Check args */
assert(sel_iter);
@@ -934,7 +1015,7 @@ H5S_select_iter_nelmts (const H5S_sel_iter_t *sel_iter)
/* Call iter_nelmts routine for selection type */
ret_value = (*sel_iter->type->iter_nelmts)(sel_iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_nelmts() */
#ifdef LATER
@@ -965,7 +1046,7 @@ H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_has_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_has_next_block)
/* Check args */
assert(iter);
@@ -973,7 +1054,7 @@ H5S_select_iter_has_next_block (const H5S_sel_iter_t *iter)
/* Call iter_has_next_block routine for selection type */
ret_value = (*iter->type->iter_has_next_block)(iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_has_next_block() */
#endif /* LATER */
@@ -1005,7 +1086,7 @@ H5S_select_iter_next(H5S_sel_iter_t *iter, size_t nelem)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_next);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_next)
/* Check args */
assert(iter);
@@ -1017,7 +1098,7 @@ H5S_select_iter_next(H5S_sel_iter_t *iter, size_t nelem)
/* Decrement the number of elements left in selection */
iter->elmt_left-=nelem;
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_next() */
#ifdef LATER
@@ -1050,7 +1131,7 @@ H5S_select_iter_next_block(H5S_sel_iter_t *iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_next_block);
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_select_iter_next_block)
/* Check args */
assert(iter);
@@ -1058,7 +1139,7 @@ H5S_select_iter_next_block(H5S_sel_iter_t *iter)
/* Call iter_next_block routine for selection type */
ret_value = (*iter->type->iter_next_block)(iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_next_block() */
#endif /* LATER */
@@ -1088,7 +1169,7 @@ H5S_select_iter_release(H5S_sel_iter_t *sel_iter)
{
herr_t ret_value; /* return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_release);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_select_iter_release)
/* Check args */
assert(sel_iter);
@@ -1096,7 +1177,7 @@ H5S_select_iter_release(H5S_sel_iter_t *sel_iter)
/* Call selection type-specific release routine */
ret_value = (*sel_iter->type->iter_release)(sel_iter);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* H5S_select_iter_release() */
@@ -1154,7 +1235,7 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
herr_t user_ret=0; /* User's return value */
herr_t ret_value=SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5S_select_iterate, FAIL);
+ FUNC_ENTER_NOAPI(H5S_select_iterate, FAIL)
/* Check args */
HDassert(buf);
@@ -1302,7 +1383,7 @@ H5S_get_select_type(const H5S_t *space)
{
H5S_sel_type ret_value; /* Return value */
- FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_type);
+ FUNC_ENTER_NOAPI_NOFUNC(H5S_get_select_type)
/* Check args */
assert(space);
@@ -1310,7 +1391,7 @@ H5S_get_select_type(const H5S_t *space)
/* Set return value */
ret_value=H5S_GET_SELECT_TYPE(space);
- FUNC_LEAVE_NOAPI(ret_value);
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_get_select_type() */
@@ -1334,16 +1415,17 @@ H5S_get_select_type(const H5S_t *space)
Assumes that there is only a single "block" for hyperslab selections.
EXAMPLES
REVISION LOG
+ Modified function to view identical shapes in dataspaces of different
+ rank as being the same, provided the selection has thickness 1 in the
+ extra dimensions of the higher rank dataspace.
--------------------------------------------------------------------------*/
htri_t
H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
{
- H5S_sel_iter_t iter1; /* Selection #1 iteration info */
- H5S_sel_iter_t iter2; /* Selection #2 iteration info */
- hbool_t iter1_init = 0; /* Selection #1 iteration info has been initialized */
- hbool_t iter2_init = 0; /* Selection #2 iteration info has been initialized */
- unsigned u; /* Index variable */
- htri_t ret_value = TRUE; /* Return value */
+ H5S_sel_iter_t iter_a; /* Selection a iteration info */
+ H5S_sel_iter_t iter_b; /* Selection b iteration info */
+ hbool_t iter_a_init = 0; /* Selection a iteration info has been initialized */
+ hbool_t iter_b_init = 0; /* Selection b iteration info has been initialized */
+ htri_t ret_value = TRUE; /* Return value */
FUNC_ENTER_NOAPI(H5S_select_shape_same, FAIL)
@@ -1358,139 +1440,527 @@ H5S_select_shape_same(const H5S_t *space1, const H5S_t *space2)
HGOTO_DONE(FALSE)
} /* end if */
else {
- /* Check for different dimensionality */
- if(space1->extent.rank != space2->extent.rank)
- HGOTO_DONE(FALSE)
+ const H5S_t *space_a; /* Dataspace with larger rank */
+ const H5S_t *space_b; /* Dataspace with smaller rank */
+ unsigned space_a_rank; /* Number of dimensions of dataspace A */
+ unsigned space_b_rank; /* Number of dimensions of dataspace B */
+
+ /* need to be able to handle spaces of different rank:
+ *
+ * To simplify logic, let space_a point to the element of the set
+ * {space1, space2} with the largest rank or space1 if the ranks
+ * are identical.
+ *
+ * Similarly, let space_b point to the element of {space1, space2}
+ * with the smallest rank, or space2 if they are identical.
+ *
+ * Let: space_a_rank be the rank of space_a,
+ * space_b_rank be the rank of space_b,
+ * delta_rank = space_a_rank - space_b_rank.
+ *
+ * Set all this up below.
+ */
+ if(space1->extent.rank >= space2->extent.rank) {
+ space_a = space1;
+ space_a_rank = space_a->extent.rank;
+
+ space_b = space2;
+ space_b_rank = space_b->extent.rank;
+ } /* end if */
+ else {
+ space_a = space2;
+ space_a_rank = space_a->extent.rank;
+
+ space_b = space1;
+ space_b_rank = space_b->extent.rank;
+ } /* end else */
+ HDassert(space_a_rank >= space_b_rank);
+ HDassert(space_b_rank > 0);
/* Check for different number of elements selected */
- if(H5S_GET_SELECT_NPOINTS(space1) != H5S_GET_SELECT_NPOINTS(space2))
+ if(H5S_GET_SELECT_NPOINTS(space_a) != H5S_GET_SELECT_NPOINTS(space_b))
HGOTO_DONE(FALSE)
/* Check for "easy" cases before getting into generalized block iteration code */
- if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_ALL && H5S_GET_SELECT_TYPE(space2)==H5S_SEL_ALL) {
- hsize_t dims1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
- hsize_t dims2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
-
- if(H5S_get_simple_extent_dims(space1, dims1, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality");
- if(H5S_get_simple_extent_dims(space2, dims2, NULL)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality");
-
- /* Check that the sizes are the same */
- for (u=0; u<space1->extent.rank; u++)
- if(dims1[u]!=dims2[u])
- HGOTO_DONE(FALSE);
+ if((H5S_GET_SELECT_TYPE(space_a) == H5S_SEL_ALL) && (H5S_GET_SELECT_TYPE(space_b) == H5S_SEL_ALL)) {
+ hsize_t dims1[H5O_LAYOUT_NDIMS]; /* Dimensions of dataspace a */
+ hsize_t dims2[H5O_LAYOUT_NDIMS]; /* Dimensions of dataspace b */
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+
+ if(H5S_get_simple_extent_dims(space_a, dims1, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
+ if(H5S_get_simple_extent_dims(space_b, dims2, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
+
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* recall that space_a_rank >= space_b_rank.
+ *
+ * In the following while loop, we test to see if space_a and space_b
+ * have identical size in all dimensions they have in common.
+ */
+ while(space_b_dim >= 0) {
+ if(dims1[space_a_dim] != dims2[space_b_dim])
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* Since we are selecting the entire spaces, we must also verify that space_a
+ * has size 1 in all dimensions that it does not share with space_b.
+ */
+ while(space_a_dim >= 0) {
+ if(dims1[space_a_dim] != 1)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end if */
- else if(H5S_GET_SELECT_TYPE(space1)==H5S_SEL_NONE || H5S_GET_SELECT_TYPE(space2)==H5S_SEL_NONE) {
- HGOTO_DONE(TRUE);
+ else if((H5S_GET_SELECT_TYPE(space1) == H5S_SEL_NONE) || (H5S_GET_SELECT_TYPE(space2) == H5S_SEL_NONE)) {
+ HGOTO_DONE(TRUE)
} /* end if */
- else if((H5S_GET_SELECT_TYPE(space1)==H5S_SEL_HYPERSLABS && space1->select.sel_info.hslab->diminfo_valid)
- && (H5S_GET_SELECT_TYPE(space2)==H5S_SEL_HYPERSLABS && space2->select.sel_info.hslab->diminfo_valid)) {
-
- /* Check that the shapes are the same */
- for (u=0; u<space1->extent.rank; u++) {
- if(space1->select.sel_info.hslab->opt_diminfo[u].stride!=space2->select.sel_info.hslab->opt_diminfo[u].stride)
- HGOTO_DONE(FALSE);
- if(space1->select.sel_info.hslab->opt_diminfo[u].count!=space2->select.sel_info.hslab->opt_diminfo[u].count)
- HGOTO_DONE(FALSE);
- if(space1->select.sel_info.hslab->opt_diminfo[u].block!=space2->select.sel_info.hslab->opt_diminfo[u].block)
- HGOTO_DONE(FALSE);
- } /* end for */
+ else if((H5S_GET_SELECT_TYPE(space_a) == H5S_SEL_HYPERSLABS && space_a->select.sel_info.hslab->diminfo_valid)
+ && (H5S_GET_SELECT_TYPE(space_b) == H5S_SEL_HYPERSLABS && space_b->select.sel_info.hslab->diminfo_valid)) {
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* check that the shapes are the same in the common dimensions, and that
+ * block == 1 in all dimensions that appear only in space_a.
+ */
+ while(space_b_dim >= 0) {
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].stride !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].stride)
+ HGOTO_DONE(FALSE)
+
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].count !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].count)
+ HGOTO_DONE(FALSE)
+
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block !=
+ space_b->select.sel_info.hslab->opt_diminfo[space_b_dim].block)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ while(space_a_dim >= 0) {
+ if(space_a->select.sel_info.hslab->opt_diminfo[space_a_dim].block != 1)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end if */
/* Iterate through all the blocks in the selection */
else {
- hsize_t start1[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace #1 */
- hsize_t start2[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace #2 */
- hsize_t end1[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #1 */
- hsize_t end2[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace #2 */
- hsize_t off1[H5O_LAYOUT_NDIMS]; /* Offset of selection #1 blocks */
- hsize_t off2[H5O_LAYOUT_NDIMS]; /* Offset of selection #2 blocks */
- htri_t status1,status2; /* Status from next block checks */
- unsigned first_block=1; /* Flag to indicate the first block */
+ hsize_t start_a[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace a */
+ hsize_t start_b[H5O_LAYOUT_NDIMS]; /* Start point of selection block in dataspace b */
+ hsize_t end_a[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace a */
+ hsize_t end_b[H5O_LAYOUT_NDIMS]; /* End point of selection block in dataspace b */
+ hsize_t off_a[H5O_LAYOUT_NDIMS]; /* Offset of selection a blocks */
+ hsize_t off_b[H5O_LAYOUT_NDIMS]; /* Offset of selection b blocks */
+ hbool_t first_block = TRUE; /* Flag to indicate the first block */
/* Initialize iterator for each dataspace selection
* Use '0' for element size instead of actual element size to indicate
* that the selection iterator shouldn't be "flattened", since we
* aren't actually going to be doing I/O with the iterators.
*/
- if(H5S_select_iter_init(&iter1, space1, (size_t)0) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator");
- iter1_init = 1;
- if(H5S_select_iter_init(&iter2, space2, (size_t)0) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator");
- iter2_init = 1;
+ if(H5S_select_iter_init(&iter_a, space_a, (size_t)0) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator a")
+ iter_a_init = 1;
+ if(H5S_select_iter_init(&iter_b, space_b, (size_t)0) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator b")
+ iter_b_init = 1;
/* Iterate over all the blocks in each selection */
while(1) {
+ int space_a_dim; /* Current dimension in dataspace A */
+ int space_b_dim; /* Current dimension in dataspace B */
+ htri_t status_a, status_b; /* Status from next block checks */
+
/* Get the current block for each selection iterator */
- if(H5S_SELECT_ITER_BLOCK(&iter1,start1,end1)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
- if(H5S_SELECT_ITER_BLOCK(&iter2,start2,end2)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block");
+ if(H5S_SELECT_ITER_BLOCK(&iter_a, start_a, end_a) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block a")
+ if(H5S_SELECT_ITER_BLOCK(&iter_b, start_b, end_b) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator block b")
- /* The first block only compares the sizes and sets the relative offsets for later blocks */
+ space_a_dim = (int)space_a_rank - 1;
+ space_b_dim = (int)space_b_rank - 1;
+
+ /* The first block only compares the sizes and sets the
+ * relative offsets for later blocks
+ */
if(first_block) {
- /* If the block sizes from each selection doesn't match, get out */
- for (u=0; u<space1->extent.rank; u++) {
- if((end1[u]-start1[u])!=(end2[u]-start2[u]))
- HGOTO_DONE(FALSE);
+ /* If the block sizes in the common dimensions from
+ * each selection don't match, get out
+ */
+ while(space_b_dim >= 0) {
+ if((end_a[space_a_dim] - start_a[space_a_dim]) !=
+ (end_b[space_b_dim] - start_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
+
+ /* Set the relative locations of the selections */
+ off_a[space_a_dim] = start_a[space_a_dim];
+ off_b[space_b_dim] = start_b[space_b_dim];
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* similarly, if the block size in any dimension that appears only
+ * in space_a is not equal to 1, get out.
+ */
+ while(space_a_dim >= 0) {
+ if((end_a[space_a_dim] - start_a[space_a_dim]) != 0)
+ HGOTO_DONE(FALSE)
/* Set the relative locations of the selections */
- off1[u]=start1[u];
- off2[u]=start2[u];
- } /* end for */
+ off_a[space_a_dim] = start_a[space_a_dim];
+
+ space_a_dim--;
+ } /* end while */
/* Reset "first block" flag */
- first_block=0;
+ first_block = FALSE;
} /* end if */
+ /* Check over the blocks for each selection */
else {
- /* Check over the blocks for each selection */
- for (u=0; u<space1->extent.rank; u++) {
+ /* for dimensions that space_a and space_b have in common: */
+ while(space_b_dim >= 0) {
/* Check if the blocks are in the same relative location */
- if((start1[u]-off1[u])!=(start2[u]-off2[u]))
- HGOTO_DONE(FALSE);
+ if((start_a[space_a_dim] - off_a[space_a_dim]) !=
+ (start_b[space_b_dim] - off_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
/* If the block sizes from each selection doesn't match, get out */
- if((end1[u]-start1[u])!=(end2[u]-start2[u]))
- HGOTO_DONE(FALSE);
- } /* end for */
+ if((end_a[space_a_dim] - start_a[space_a_dim]) !=
+ (end_b[space_b_dim] - start_b[space_b_dim]))
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ space_b_dim--;
+ } /* end while */
+
+ /* For dimensions that appear only in space_a: */
+ while(space_a_dim >= 0) {
+ /* If the block size isn't 1, get out */
+ if((end_a[space_a_dim] - start_a[space_a_dim]) != 0)
+ HGOTO_DONE(FALSE)
+
+ space_a_dim--;
+ } /* end while */
} /* end else */
/* Check if we are able to advance to the next selection block */
- if((status1=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter1))<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
- if((status2=H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter2))<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block");
+ if((status_a = H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter_a)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block a")
+
+ if((status_b = H5S_SELECT_ITER_HAS_NEXT_BLOCK(&iter_b)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to check iterator block b")
/* Did we run out of blocks at the same time? */
- if(status1==FALSE && status2==FALSE)
+ if((status_a == FALSE) && (status_b == FALSE))
break;
- else if(status1!=status2) {
- HGOTO_DONE(FALSE);
- } /* end if */
+ else if(status_a != status_b)
+ HGOTO_DONE(FALSE)
else {
/* Advance to next block in selection iterators */
- if(H5S_SELECT_ITER_NEXT_BLOCK(&iter1)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block");
- if(H5S_SELECT_ITER_NEXT_BLOCK(&iter2)<0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block");
+ if(H5S_SELECT_ITER_NEXT_BLOCK(&iter_a) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block a")
+
+ if(H5S_SELECT_ITER_NEXT_BLOCK(&iter_b) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to advance to next iterator block b")
} /* end else */
} /* end while */
} /* end else */
} /* end else */
done:
- if(iter1_init) {
- if (H5S_SELECT_ITER_RELEASE(&iter1)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
+ if(iter_a_init)
+ if(H5S_SELECT_ITER_RELEASE(&iter_a) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator a")
+ if(iter_b_init)
+ if(H5S_SELECT_ITER_RELEASE(&iter_b) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator b")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5S_select_shape_same() */
+
+
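
For the H5S_SEL_ALL fast path in the function above, the relaxed comparison is purely a comparison of extents: the dimensions the two dataspaces have in common (counted from the fastest changing dimension) must match, and every remaining, slower changing dimension of the higher rank dataspace must have size 1. The standalone check below restates that rule; dims_shape_same() is illustrative only and assumes rank_a >= rank_b, as the function above arranges:

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long long uhsize_t;    /* stand-in for hsize_t */

    /* Extents describe the same shape iff the common (fastest changing)
     * dimensions match and every extra, slower changing dimension of the
     * higher rank extent has size 1.  Assumes rank_a >= rank_b >= 1. */
    static bool
    dims_shape_same(const uhsize_t *dims_a, int rank_a,
                    const uhsize_t *dims_b, int rank_b)
    {
        int dim_a = rank_a - 1;
        int dim_b = rank_b - 1;

        while(dim_b >= 0) {
            if(dims_a[dim_a] != dims_b[dim_b])
                return false;
            dim_a--;
            dim_b--;
        } /* end while */

        while(dim_a >= 0) {
            if(dims_a[dim_a] != 1)
                return false;
            dim_a--;
        } /* end while */

        return true;
    }

    int
    main(void)
    {
        uhsize_t dims_3d[3]  = {1, 10, 10};
        uhsize_t other_3d[3] = {2, 10, 10};
        uhsize_t dims_2d[2]  = {10, 10};

        assert(dims_shape_same(dims_3d, 3, dims_2d, 2) == true);    /* extra dim has size 1 */
        assert(dims_shape_same(other_3d, 3, dims_2d, 2) == false);  /* extra dim has size 2 */
        return 0;
    }
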
+/*--------------------------------------------------------------------------
+ NAME
+ H5S_select_construct_projection
+
+ PURPOSE
+ Given a dataspace a of rank n with some selection, construct a new
+ dataspace b of rank m (m != n), with the selection in a being
+ topologically identical to that in b (as verified by
+ H5S_select_shape_same()).
+
+ This function exists, as some I/O code chokes on topologically
+ identical selections with different ranks. At least to begin
+ with, we will deal with the issue by constructing projections
+ of the memory dataspace with ranks equaling those of the file
+ dataspace.
+
+ Note that if m < n, it is possible that the starting point in the
+ buffer associated with the memory dataspace will have to be
+ adjusted to match the projected dataspace. If the buf parameter
+ is not NULL, the function must return an adjusted buffer base
+ address in *adj_buf_ptr.
+
+ USAGE
+ herr_t H5S_select_construct_projection(base_space,
+ new_space_ptr,
+ new_space_rank,
+ buf,
+ adj_buf_ptr,
+ element_size)
+ const H5S_t *base_space; IN: Ptr to Dataspace to project
+ H5S_t ** new_space_ptr; OUT: Ptr to location in which to return
+ the address of the projected space
+ unsigned new_space_rank; IN: Rank of the projected space.
+ const void * buf; IN: Base address of the buffer
+ associated with the base space.
+ May be NULL.
+ void ** adj_buf_ptr; OUT: If buf != NULL, store the base
+ address of the section of buf
+ that is described by *new_space_ptr
+ in *adj_buf_ptr.
+ hsize_t element_size; IN: Size in bytes of an element in the
+ buffer associated with the base space.
+
+ RETURNS
+ Non-negative on success/Negative on failure.
+
+ DESCRIPTION
+ Construct a new dataspace and associated selection which is a
+ projection of the supplied dataspace and associated selection into
+ the specified rank. Return it in *new_space_ptr.
+
+ If buf is supplied, computes the base address of the projected
+ selection in buf, and stores the base address in *adj_buf_ptr.
+
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ The selection in the supplied base_space has thickness 1 in all
+ dimensions greater than new_space_rank. Note that here we count
+ dimensions from the fastest changing coordinate to the slowest
+ changing coordinate.
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5S_select_construct_projection(const H5S_t *base_space, H5S_t **new_space_ptr,
+ unsigned new_space_rank, const void *buf, void **adj_buf_ptr, hsize_t element_size)
+{
+ H5S_t * new_space = NULL; /* New dataspace constructed */
+ hsize_t base_space_dims[H5S_MAX_RANK]; /* Current dimensions of base dataspace */
+ hsize_t base_space_maxdims[H5S_MAX_RANK]; /* Maximum dimensions of base dataspace */
+ int sbase_space_rank; /* Signed # of dimensions of base dataspace */
+ unsigned base_space_rank; /* # of dimensions of base dataspace */
+ hsize_t projected_space_element_offset = 0; /* Offset of selected element in projected buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5S_select_construct_projection, FAIL)
+
+ /* Sanity checks */
+ HDassert(base_space != NULL);
+ HDassert((H5S_GET_EXTENT_TYPE(base_space) == H5S_SCALAR) || (H5S_GET_EXTENT_TYPE(base_space) == H5S_SIMPLE));
+ HDassert(new_space_ptr != NULL);
+ HDassert((new_space_rank != 0) || (H5S_GET_SELECT_NPOINTS(base_space) <= 1));
+ HDassert(new_space_rank <= H5S_MAX_RANK);
+ HDassert((buf == NULL) || (adj_buf_ptr != NULL));
+ HDassert(element_size > 0);
+
+ /* Get the extent info for the base dataspace */
+ if((sbase_space_rank = H5S_get_simple_extent_dims(base_space, base_space_dims, base_space_maxdims)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality of base space")
+ base_space_rank = (unsigned)sbase_space_rank;
+ HDassert(base_space_rank != new_space_rank);
+
+ /* Check if projected space is scalar */
+ if(new_space_rank == 0) {
+ hssize_t npoints; /* Number of points selected */
+
+ /* Retrieve the number of elements selected */
+ if((npoints = (hssize_t)H5S_GET_SELECT_NPOINTS(base_space)) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get number of points selected")
+ HDassert(npoints <= 1);
+
+ /* Create new scalar dataspace */
+ if(NULL == (new_space = H5S_create(H5S_SCALAR)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create scalar dataspace")
+
+ /* No need to register the dataspace (i.e. get an ID) as
+ * we will just be discarding it shortly.
+ */
+
+ /* Selection for the new space will be either all or
+ * none, depending on whether the base space has 0 or
+ * 1 elements selected.
+ *
+ * Observe that the base space can't have more than
+ * one selected element, since its selection has the
+ * same shape as the file dataspace, and that data
+ * space is scalar.
+ */
+ if(1 == npoints) {
+ /* Assuming that the selection in the base dataspace is not
+ * empty, we must compute the offset of the selected item in
+ * the buffer associated with the base dataspace.
+ *
+             * Since the new space rank is zero, we know that
+             * the base space must have rank at least 1 -- and
+             * hence it is a simple dataspace.  However, the
+             * selection may be either point, hyperslab, or all.
+ *
+ */
+ if(H5S_SELECT_PROJECT_SCALAR(base_space, &projected_space_element_offset) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to project scalar selection")
+ } /* end if */
+ else {
+ HDassert(0 == npoints);
+
+ if(H5S_select_none(new_space) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't delete default selection")
+ } /* end else */
+ } /* end if */
+ else { /* projected space must be simple */
+ hsize_t new_space_dims[H5S_MAX_RANK]; /* Current dimensions for new dataspace */
+ hsize_t new_space_maxdims[H5S_MAX_RANK];/* Maximum dimensions for new dataspace */
+ unsigned rank_diff; /* Difference in ranks */
+
+ /* Set up the dimensions of the new, projected dataspace.
+ *
+ * How we do this depends on whether we are projecting up into
+ * increased dimensions, or down into a reduced number of
+ * dimensions.
+ *
+ * If we are projecting up (the first half of the following
+ * if statement), we copy the dimensions of the base data
+ * space into the fastest changing dimensions of the new
+ * projected dataspace, and set the remaining dimensions to
+ * one.
+ *
+         * If we are projecting down (the second half of the following
+         * if statement), we just copy the most quickly changing
+         * dimensions of the base dataspace into the dims for the
+         * projected dataset.
+ *
+ * This works, because H5S_select_shape_same() will return
+ * true on selections of different rank iff:
+ *
+ * 1) the selection in the lower rank dataspace matches that
+         *    in the dimensions with the fastest changing indices in
+ * the larger rank dataspace, and
+ *
+ * 2) the selection has thickness 1 in all ranks that appear
+ * only in the higher rank dataspace (i.e. those with
+         *    more slowly changing indices).
+ */
+ if(new_space_rank > base_space_rank) {
+ hsize_t tmp_dim_size = 1; /* Temporary dimension value, for filling arrays */
+
+ /* we must copy the dimensions of the base space into
+ * the fastest changing dimensions of the new space,
+ * and set the remaining dimensions to 1
+ */
+ rank_diff = new_space_rank - base_space_rank;
+ H5V_array_fill(new_space_dims, &tmp_dim_size, sizeof(tmp_dim_size), rank_diff);
+ H5V_array_fill(new_space_maxdims, &tmp_dim_size, sizeof(tmp_dim_size), rank_diff);
+ HDmemcpy(&new_space_dims[rank_diff], base_space_dims, sizeof(new_space_dims[0]) * base_space_rank);
+ HDmemcpy(&new_space_maxdims[rank_diff], base_space_maxdims, sizeof(new_space_maxdims[0]) * base_space_rank);
+ } /* end if */
+ else { /* new_space_rank < base_space_rank */
+ /* we must copy the fastest changing dimension of the
+ * base space into the dimensions of the new space.
+ */
+ rank_diff = base_space_rank - new_space_rank;
+ HDmemcpy(new_space_dims, &base_space_dims[rank_diff], sizeof(new_space_dims[0]) * new_space_rank);
+ HDmemcpy(new_space_maxdims, &base_space_maxdims[rank_diff], sizeof(new_space_maxdims[0]) * new_space_rank);
+ } /* end else */
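+        /* Illustrative example: projecting a rank 2 base space with dims
+         * {10, 20} up to new_space_rank == 4 yields new_space_dims ==
+         * {1, 1, 10, 20}, while projecting a rank 4 space with dims
+         * {1, 1, 10, 20} down to rank 2 keeps only the two fastest
+         * changing dimensions, {10, 20}.
+         */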
+
+ /* now have the new space rank and dimensions set up --
+ * so we can create the new simple dataspace.
+ */
+ if(NULL == (new_space = H5S_create_simple(new_space_rank, new_space_dims, new_space_maxdims)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
+
+        /* No need to register the dataspace (i.e. get an ID) as
+ * we will just be discarding it shortly.
+ */
+
+ /* If we get this far, we have successfully created the projected
+ * dataspace. We must now project the selection in the base
+ * dataspace into the projected dataspace.
+ */
+ if(H5S_SELECT_PROJECT_SIMPLE(base_space, new_space, &projected_space_element_offset) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "unable to project simple selection")
+
+ /* If we get this far, we have created the new dataspace, and projected
+ * the selection in the base dataspace into the new dataspace.
+ *
+ * If the base dataspace is simple, check to see if the
+ * offset_changed flag on the base selection has been set -- if so,
+ * project the offset into the new dataspace and set the
+ * offset_changed flag.
+ */
+ if(H5S_GET_EXTENT_TYPE(base_space) == H5S_SIMPLE && base_space->select.offset_changed) {
+ if(new_space_rank > base_space_rank) {
+ HDmemset(new_space->select.offset, 0, sizeof(new_space->select.offset[0]) * rank_diff);
+ HDmemcpy(&new_space->select.offset[rank_diff], base_space->select.offset, sizeof(new_space->select.offset[0]) * base_space_rank);
+ } /* end if */
+ else
+ HDmemcpy(new_space->select.offset, &base_space->select.offset[rank_diff], sizeof(new_space->select.offset[0]) * new_space_rank);
+
+ /* Propagate the offset changed flag into the new dataspace. */
+ new_space->select.offset_changed = TRUE;
+ } /* end if */
+ } /* end else */
+
+ /* If we have done the projection correctly, the following assertion
+ * should hold.
+ */
+ HDassert(TRUE == H5S_select_shape_same(base_space, new_space));
+
+ /* load the address of the new space into *new_space_ptr */
+ *new_space_ptr = new_space;
+
+ /* now adjust the buffer if required */
+ if(buf != NULL) {
+ if(new_space_rank < base_space_rank) {
+ /* a bit of pointer magic here:
+ *
+ * Since we can't do pointer arithmetic on void pointers, we first
+ * cast buf to a pointer to byte -- i.e. uint8_t.
+ *
+ * We then multiply the projected space element offset we
+ * calculated earlier by the supplied element size, add this
+ * value to the type cast buf pointer, cast the result back
+ * to a pointer to void, and assign the result to *adj_buf_ptr.
+ */
+ *adj_buf_ptr = (void *)(((const uint8_t *)buf) +
+ ((size_t)(projected_space_element_offset * element_size)));
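+            /* Illustrative example: if projected_space_element_offset == 5
+             * and element_size == 8, *adj_buf_ptr ends up pointing 40 bytes
+             * past buf.
+             */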
+ } /* end if */
+ else
+ /* No adjustment necessary */
+ *adj_buf_ptr = buf;
} /* end if */
- if(iter2_init) {
- if (H5S_SELECT_ITER_RELEASE(&iter2)<0)
- HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator");
+
+done:
+ /* Cleanup on error */
+ if(ret_value < 0) {
+ if(new_space && H5S_close(new_space) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5S_select_shape_same() */
+} /* H5S_select_construct_projection() */
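To make the calling convention concrete, here is a minimal caller-side sketch (not part of this patch) of how internal code might use the routine above. It assumes the caller already holds an internal H5S_t pointer named mem_space whose selection is one element thick in all but its two fastest changing dimensions, plus a contiguous buffer buf of 4-byte elements; mem_space and buf are illustrative names, not identifiers from this change.

    H5S_t *proj_space = NULL;   /* projected (rank 2) dataspace */
    void  *adj_buf    = NULL;   /* base address of buf adjusted for the projection */

    /* Project the selection in mem_space into a rank 2 dataspace and
     * compute the matching base address within buf.
     */
    if(H5S_select_construct_projection(mem_space, &proj_space, 2, buf, &adj_buf, (hsize_t)4) < 0)
        return FAIL;

    /* ... perform I/O with proj_space and adj_buf in place of mem_space and buf ... */

    /* Discard the projected dataspace when finished */
    if(H5S_close(proj_space) < 0)
        return FAIL;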
/*--------------------------------------------------------------------------
@@ -1536,7 +2006,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
/* Initialize iterator */
if(H5S_select_iter_init(&iter, space, fill_size) < 0)
- HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
iter_init = 1; /* Selection iteration info has been initialized */
/* Get the number of elements in selection */
@@ -1556,7 +2026,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
/* Get the sequences of bytes */
if(H5S_SELECT_GET_SEQ_LIST(space, 0, &iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
- HGOTO_ERROR (H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+ HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
/* Loop over sequences */
for(curr_seq = 0; curr_seq < nseq; curr_seq++) {
diff --git a/test/testframe.c b/test/testframe.c
index f0d94d8..201f569 100644
--- a/test/testframe.c
+++ b/test/testframe.c
@@ -26,7 +26,7 @@
/*
* Definitions for the testing structure.
*/
-#define MAXNUMOFTESTS 45
+#define MAXNUMOFTESTS 50
#define MAXTESTNAME 16
#define MAXTESTDESC 64
diff --git a/test/tselect.c b/test/tselect.c
index b8c59d8..89cd9e5 100644
--- a/test/tselect.c
+++ b/test/tselect.c
@@ -159,6 +159,9 @@
#define SPACERE5_DIM3 12
#define SPACERE5_DIM4 8
+/* #defines for shape same / different rank tests */
+#define SS_DR_MAX_RANK 5
+
/* Location comparison function */
@@ -1585,6 +1588,2031 @@ test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist)
HDfree(rbuf);
} /* test_select_hyper_contig3() */
+
+/****************************************************************
+**
+** verify_select_hyper_contig_dr__run_test(): Verify data from
+** test_select_hyper_contig_dr__run_test()
+**
+****************************************************************/
+static void
+verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf,
+ size_t cube_size, unsigned edge_size, unsigned cube_rank)
+{
+ const uint16_t *cube_ptr; /* Pointer into the cube buffer */
+ uint16_t expected_value; /* Expected value in dataset */
+ unsigned i, j, k, l, m; /* Local index variables */
+ size_t s; /* Local index variable */
+ hbool_t mis_match; /* Flag to indicate mis-match in expected value */
+
+ HDassert(cube_buf);
+ HDassert(cube_size > 0);
+
+ expected_value = 0;
+ mis_match = FALSE;
+ cube_ptr = cube_buf;
+ s = 0;
+ i = 0;
+ do {
+ j = 0;
+ do {
+ k = 0;
+ do {
+ l = 0;
+ do {
+ m = 0;
+ do {
+ /* Sanity check */
+ HDassert(s < cube_size);
+
+ /* Check for correct value */
+ if(*cube_ptr != expected_value)
+ mis_match = TRUE;
+
+ /* Advance to next element */
+ cube_ptr++;
+ expected_value++;
+ s++;
+ m++;
+ } while((cube_rank > 0) && (m < edge_size));
+ l++;
+ } while((cube_rank > 1) && (l < edge_size));
+ k++;
+ } while((cube_rank > 2) && (k < edge_size));
+ j++;
+ } while((cube_rank > 3) && (j < edge_size));
+ i++;
+ } while((cube_rank > 4) && (i < edge_size));
+ if(mis_match)
+ TestErrPrintf("Initial cube data don't match! Line = %d\n", __LINE__);
+} /* verify_select_hyper_contig_dr__run_test() */
+
+
+/****************************************************************
+**
+** test_select_hyper_contig_dr__run_test(): Test H5S (dataspace)
+** selection code with contiguous source and target having
+** different ranks but the same shape. We have already
+** tested H5S_shape_same in isolation, so now we try to do
+** I/O.
+**
+****************************************************************/
+static void
+test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf,
+ const uint16_t *zero_buf, unsigned edge_size, unsigned chunk_edge_size,
+ unsigned small_rank, unsigned large_rank, hid_t dset_type, hid_t xfer_plist)
+{
+ hbool_t mis_match; /* Flag indicating a value read in wasn't what was expected */
+ hid_t fapl; /* File access property list */
+ hid_t fid1; /* File ID */
+ hid_t small_cube_sid; /* Dataspace ID for small cube in memory & file */
+ hid_t mem_large_cube_sid; /* Dataspace ID for large cube in memory */
+ hid_t file_large_cube_sid; /* Dataspace ID for large cube in file */
+ hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */
+ hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */
+ hid_t small_cube_dataset; /* Dataset ID */
+ hid_t large_cube_dataset; /* Dataset ID */
+ size_t start_index; /* Offset within buffer to begin inspecting */
+ size_t stop_index; /* Offset within buffer to end inspecting */
+ uint16_t expected_value; /* Expected value in dataset */
+ uint16_t * small_cube_buf_1; /* Buffer for small cube data */
+ uint16_t * large_cube_buf_1; /* Buffer for large cube data */
+ uint16_t * ptr_1; /* Temporary pointer into cube data */
+ hsize_t dims[SS_DR_MAX_RANK]; /* Dataspace dimensions */
+ hsize_t start[SS_DR_MAX_RANK]; /* Shared hyperslab start offset */
+ hsize_t stride[SS_DR_MAX_RANK]; /* Shared hyperslab stride */
+ hsize_t count[SS_DR_MAX_RANK]; /* Shared hyperslab count */
+ hsize_t block[SS_DR_MAX_RANK]; /* Shared hyperslab block size */
+ hsize_t * start_ptr; /* Actual hyperslab start offset */
+ hsize_t * stride_ptr; /* Actual hyperslab stride */
+ hsize_t * count_ptr; /* Actual hyperslab count */
+ hsize_t * block_ptr; /* Actual hyperslab block size */
+ size_t small_cube_size; /* Number of elements in small cube */
+ size_t large_cube_size; /* Number of elements in large cube */
+ unsigned u, v, w, x; /* Local index variables */
+ size_t s; /* Local index variable */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num));
+ MESSAGE(7, ("\tranks = %u/%u, edge_size = %u, chunk_edge_size = %u.\n", small_rank, large_rank, edge_size, chunk_edge_size));
+
+ HDassert(edge_size >= 6);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(small_rank > 0);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= SS_DR_MAX_RANK);
+
+ /* Compute cube sizes */
+ small_cube_size = large_cube_size = (size_t)1;
+ for(u = 0; u < large_rank; u++) {
+ if(u < small_rank)
+ small_cube_size *= (size_t)edge_size;
+
+ large_cube_size *= (size_t)edge_size;
+ } /* end for */
+
+ HDassert(large_cube_size < (size_t)UINT_MAX);
+
+ /* set up the start, stride, count, and block pointers */
+ start_ptr = &(start[SS_DR_MAX_RANK - large_rank]);
+ stride_ptr = &(stride[SS_DR_MAX_RANK - large_rank]);
+ count_ptr = &(count[SS_DR_MAX_RANK - large_rank]);
+ block_ptr = &(block[SS_DR_MAX_RANK - large_rank]);
+
+ /* Allocate buffers */
+ small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size);
+ CHECK(small_cube_buf_1, NULL, "HDcalloc");
+ large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size);
+ CHECK(large_cube_buf_1, NULL, "HDcalloc");
+
+    /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Use the 'core' VFD for this test */
+ ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* setup dims: */
+ dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = (hsize_t)edge_size;
+
+ /* Create small cube dataspaces */
+ small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(small_cube_sid, FAIL, "H5Screate_simple");
+
+ /* Create large cube dataspace */
+ mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple");
+ file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(file_large_cube_sid, FAIL, "H5Screate_simple");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if(chunk_edge_size > 0) {
+ hsize_t chunk_dims[SS_DR_MAX_RANK]; /* Chunk dimensions */
+
+ chunk_dims[0] = chunk_dims[1] =
+ chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = (hsize_t)chunk_edge_size;
+
+ small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+
+ large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ } /* end if */
+
+ /* create the small cube dataset */
+ small_cube_dataset = H5Dcreate2(fid1, "small_cube_dataset", dset_type,
+ small_cube_sid, H5P_DEFAULT, small_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(small_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default small dataset DCPL */
+ if(small_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(small_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* create the large cube dataset */
+ large_cube_dataset = H5Dcreate2(fid1, "large_cube_dataset", dset_type,
+ file_large_cube_sid, H5P_DEFAULT, large_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(large_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default large dataset DCPL */
+ if(large_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(large_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+
+ /* write initial data to the on disk datasets */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid,
+ small_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid,
+ file_large_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* read initial data from disk and verify that it is as expected. */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid,
+ small_cube_sid, xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size,
+ edge_size, small_rank);
+
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid,
+ file_large_cube_sid, xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size,
+ edge_size, large_rank);
+
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5S_select_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading small_rank-D slice from the on disk large cube, and
+ * verifying that the data read is correct. Verify that H5S_select_shape_same()
+ * returns true on the memory and file selections.
+ */
+
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for(u = 0; u < SS_DR_MAX_RANK; u++) {
+ start[u] = 0;
+ stride[u] = 1;
+ count[u] = 1;
+ if((SS_DR_MAX_RANK - u) > small_rank)
+ block[u] = 1;
+ else
+ block[u] = (hsize_t)edge_size;
+ } /* end for */
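+    /* Illustrative example: with small_rank == 2, large_rank == 3 and
+     * edge_size == 6, the loop above produces block[] = {1, 1, 1, 6, 6},
+     * and start_ptr/stride_ptr/count_ptr/block_ptr all point at index
+     * SS_DR_MAX_RANK - large_rank == 2, so each H5Sselect_hyperslab()
+     * call below selects a 1 x 6 x 6 block -- a 2-D slice through the
+     * 3-D cube.
+     */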
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(file_large_cube_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_cube_sid,
+ file_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ /* Read selection from disk */
+ ret = H5Dread(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ small_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that expected data is retrieved */
+ mis_match = FALSE;
+ ptr_1 = small_cube_buf_1;
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size));
+ for(s = 0; s < small_cube_size; s++ ) {
+ if(*ptr_1 != expected_value )
+ mis_match = TRUE;
+ ptr_1++;
+ expected_value++;
+ } /* end for */
+ if(mis_match)
+ TestErrPrintf("small cube read from largecube has bad data! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+
+ /* similarly, read the on disk small cube into slices through the in memory
+ * large cube, and verify that the correct data (and only the correct data)
+ * is read.
+ */
+
+ /* zero out the in-memory large cube */
+ HDmemset(large_cube_buf_1, 0, large_cube_size * sizeof(uint16_t));
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(mem_large_cube_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_cube_sid,
+ mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* Read selection from disk */
+ ret = H5Dread(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ small_cube_sid,
+ xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ mis_match = FALSE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ for(s = 0; s < start_index; s++) {
+ if(*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ for(; s <= stop_index; s++) {
+ if(*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ for(; s < large_cube_size; s++) {
+ if(*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ if(mis_match)
+ TestErrPrintf("large cube read from small cube has bad data! Line=%u\n", __LINE__);
+
+ /* Zero out the buffer for the next pass */
+ HDmemset(large_cube_buf_1 + start_index, 0, small_cube_size * sizeof(uint16_t));
+
+ x++;
+ } while((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5S_select_shape_same() views as being of the same shape.
+ *
+     * Start by writing small_rank-D slices from the in memory large cube, to
+     * the on disk small cube dataset.  After each write, read the small
+ * cube dataset back from disk, and verify that it contains the expected
+ * data. Verify that H5S_select_shape_same() returns true on the
+ * memory and file selections.
+ */
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ small_cube_sid,
+ small_cube_sid,
+ xfer_plist,
+ zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(mem_large_cube_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory slice through the cube selection and the
+ * on disk full small cube selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_cube_sid,
+ mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* write the slice from the in memory large cube to the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ small_cube_sid,
+ xfer_plist,
+ cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* read the on disk small cube into memory */
+ ret = H5Dread(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ small_cube_sid,
+ small_cube_sid,
+ xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+
+ /* verify that expected data is retrieved */
+ mis_match = FALSE;
+ ptr_1 = small_cube_buf_1;
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size));
+ for(s = 0; s < small_cube_size; s++) {
+ if(*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ if(mis_match )
+ TestErrPrintf("small cube data don't match! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+
+ /* Now write the contents of the in memory small cube to slices of
+ * the on disk cube. After each write, read the on disk cube
+     * into memory, and verify that it contains the expected
+ * data. Verify that H5S_select_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ /* select the entire memory and file cube dataspaces */
+ ret = H5Sselect_all(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Sselect_all(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out the on disk cube */
+ ret = H5Dwrite(large_cube_dataset,
+                                   H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(file_large_cube_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory full selection of the small cube and the
+ * on disk slice through the large cube selection
+ * as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_cube_sid,
+ file_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* write the cube from memory to the target slice of the disk cube */
+ ret = H5Dwrite(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ small_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* read the on disk cube into memory */
+ ret = H5Sselect_all(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ mis_match = FALSE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ for(s = 0; s < start_index; s++) {
+ if(*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ for(; s <= stop_index; s++) {
+ if(*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ for(; s < large_cube_size; s++) {
+ if(*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ if(mis_match)
+ TestErrPrintf("large cube written from small cube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+ /* Close memory dataspaces */
+ ret = H5Sclose(small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ /* Close disk dataspace */
+ ret = H5Sclose(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ /* Close Datasets */
+ ret = H5Dclose(small_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Dclose(large_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(small_cube_buf_1);
+ HDfree(large_cube_buf_1);
+
+} /* test_select_hyper_contig_dr__run_test() */
+
+
+/****************************************************************
+**
+** test_select_hyper_contig_dr(): Test H5S (dataspace)
+** selection code with contiguous source and target having
+** different ranks but the same shape. We have already
+** tested H5S_shape_same in isolation, so now we try to do
+** I/O.
+**
+****************************************************************/
+static void
+test_select_hyper_contig_dr(hid_t dset_type, hid_t xfer_plist)
+{
+ int test_num = 0;
+ unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */
+ unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */
+ unsigned small_rank; /* Current rank of small dataset */
+ unsigned large_rank; /* Current rank of large dataset */
+ uint16_t *cube_buf; /* Buffer for writing cube data */
+ uint16_t *zero_buf; /* Buffer for writing zeroed cube data */
+ uint16_t *cube_ptr; /* Temporary pointer into cube data */
+ unsigned max_rank = 5; /* Max. rank to use */
+ size_t max_cube_size; /* Max. number of elements in largest cube */
+ size_t s; /* Local index variable */
+ unsigned u; /* Local index variable */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Contiguous Hyperslabs With Different Rank I/O Functionality\n"));
+
+ /* Compute max. cube size */
+ max_cube_size = (size_t)1;
+ for(u = 0; u < max_rank; u++)
+ max_cube_size *= (size_t)edge_size;
+
+ /* Allocate cube buffer for writing values */
+ cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size);
+ CHECK(cube_buf, NULL, "HDmalloc");
+
+ /* Initialize the cube buffer */
+ cube_ptr = cube_buf;
+ for(s = 0; s < max_cube_size; s++)
+ *cube_ptr++ = (uint16_t)s;
+
+ /* Allocate cube buffer for zeroing values on disk */
+ zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size);
+ CHECK(zero_buf, NULL, "HDcalloc");
+
+ for(large_rank = 1; large_rank <= max_rank; large_rank++) {
+ for(small_rank = 1; small_rank < large_rank; small_rank++) {
+ chunk_edge_size = 0;
+ test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf,
+ edge_size, chunk_edge_size, small_rank, large_rank,
+ dset_type, xfer_plist);
+ test_num++;
+
+ chunk_edge_size = 3;
+ test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf,
+ edge_size, chunk_edge_size, small_rank, large_rank,
+ dset_type, xfer_plist);
+ test_num++;
+ } /* for loop on small rank */
+ } /* for loop on large rank */
+
+ HDfree(cube_buf);
+ HDfree(zero_buf);
+
+} /* test_select_hyper_contig_dr() */
+
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__select_checker_board():
+**  Given an n-cube data space with each edge of length
+**  edge_size, and a checker_edge_size, either select a checker
+**  board selection of the entire cube (if sel_rank == n),
+**  or select a checker board selection of a
+**  sel_rank dimensional slice through the n-cube parallel to the
+**  sel_rank fastest changing indices, with origin (in the
+**  higher indices) as indicated by the sel_start array.
+**
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum n-cube rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 9/9/09
+**
+****************************************************************/
+static void
+test_select_hyper_checker_board_dr__select_checker_board(hid_t tgt_n_cube_sid,
+ unsigned tgt_n_cube_rank, unsigned edge_size, unsigned checker_edge_size,
+ unsigned sel_rank, hsize_t sel_start[])
+{
+ hbool_t first_selection = TRUE;
+ unsigned n_cube_offset;
+ unsigned sel_offset;
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[SS_DR_MAX_RANK]; /* Offset of hyperslab selection */
+ hsize_t stride[SS_DR_MAX_RANK]; /* Stride of hyperslab selection */
+ hsize_t count[SS_DR_MAX_RANK]; /* Count of hyperslab selection */
+ hsize_t block[SS_DR_MAX_RANK]; /* Block size of hyperslab selection */
+ unsigned i, j, k, l, m; /* Local index variable */
+ unsigned u; /* Local index variables */
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_n_cube_rank);
+ HDassert(tgt_n_cube_rank <= SS_DR_MAX_RANK);
+
+ sel_offset = SS_DR_MAX_RANK - sel_rank;
+ n_cube_offset = SS_DR_MAX_RANK - tgt_n_cube_rank;
+ HDassert(n_cube_offset <= sel_offset);
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ */
+ base_count = edge_size / (checker_edge_size * 2);
+ if((edge_size % (checker_edge_size * 2)) > 0)
+ base_count++;
+
+ offset_count = (edge_size - checker_edge_size) / (checker_edge_size * 2);
+ if(((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0)
+ offset_count++;
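+    /* Illustrative example: for edge_size == 10 and checker_edge_size == 3,
+     * base_count == 2 (checkers starting at offsets 0 and 6) and
+     * offset_count == 2 (checkers starting at offsets 3 and 9); for
+     * edge_size == 6 and checker_edge_size == 3, both counts are 1.
+     */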
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ u = 0;
+ while(u < n_cube_offset) {
+ /* these values should never be used */
+ start[u] = 0;
+ stride[u] = 0;
+ count[u] = 0;
+ block[u] = 0;
+
+ u++;
+ } /* end while */
+
+ while(u < sel_offset) {
+ start[u] = sel_start[u];
+ stride[u] = 2 * edge_size;
+ count[u] = 1;
+ block[u] = 1;
+
+ u++;
+ } /* end while */
+
+ while(u < SS_DR_MAX_RANK) {
+ stride[u] = 2 * checker_edge_size;
+ block[u] = checker_edge_size;
+
+ u++;
+ } /* end while */
+
+ i = 0;
+ do {
+ if(0 >= sel_offset) {
+ if(i == 0) {
+ start[0] = 0;
+ count[0] = base_count;
+ } /* end if */
+ else {
+ start[0] = checker_edge_size;
+ count[0] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ j = 0;
+ do {
+ if(1 >= sel_offset) {
+ if(j == 0 ) {
+ start[1] = 0;
+ count[1] = base_count;
+ } /* end if */
+ else {
+ start[1] = checker_edge_size;
+ count[1] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ k = 0;
+ do {
+ if(2 >= sel_offset) {
+ if(k == 0) {
+ start[2] = 0;
+ count[2] = base_count;
+ } /* end if */
+ else {
+ start[2] = checker_edge_size;
+ count[2] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ l = 0;
+ do {
+ if(3 >= sel_offset) {
+ if(l == 0) {
+ start[3] = 0;
+ count[3] = base_count;
+ } /* end if */
+ else {
+ start[3] = checker_edge_size;
+ count[3] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ m = 0;
+ do {
+ if(4 >= sel_offset) {
+ if(m == 0) {
+ start[4] = 0;
+ count[4] = base_count;
+ } /* end if */
+ else {
+ start[4] = checker_edge_size;
+ count[4] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ if(((i + j + k + l + m) % 2) == 0) {
+ if(first_selection) {
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid,
+ H5S_SELECT_SET,
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid,
+ H5S_SELECT_OR,
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end else */
+ } /* end if */
+
+ m++;
+ } while((m <= 1) && (4 >= sel_offset));
+ l++;
+ } while((l <= 1) && (3 >= sel_offset));
+ k++;
+ } while((k <= 1) && (2 >= sel_offset));
+ j++;
+ } while((j <= 1) && (1 >= sel_offset));
+ i++;
+ } while((i <= 1) && (0 >= sel_offset));
+
+    /* Weirdness alert:
+     *
+     * Somehow, it seems that selections can extend beyond the
+ * boundaries of the target data space -- hence the following
+ * code to manually clip the selection back to the data space
+ * proper.
+ */
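+    /* For example (illustrative): with edge_size == 10 and checker_edge_size == 3,
+     * the offset checkers start at indices 3 and 9, and the one starting at 9
+     * would span indices 9 .. 11, extending beyond the 0 .. 9 extent, until the
+     * H5S_SELECT_AND operation below trims it back to the dataspace.
+     */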
+ for(u = 0; u < SS_DR_MAX_RANK; u++) {
+ start[u] = 0;
+ stride[u] = edge_size;
+ count[u] = 1;
+ block[u] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+} /* test_select_hyper_checker_board_dr__select_checker_board() */
+
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to contain the results
+** of read or writing a checkerboard selection of an
+** n-cube, or a checkerboard selection of an m (1 <= m < n)
+** dimensional slice through an n-cube parallel to the
+** fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+**  the read, and that the n-cube was initialized with the
+** natural numbers listed in order from the origin along
+** the fastest changing axis.
+**
+** Thus for a 10x10x10 3-cube, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Thus, if the buffer contains the result of reading a
+** checker board selection of a 10x10x10 3-cube, location
+** (x, y, z) will contain zero if it is not in a checker,
+** and 100x + 10y + z if (x, y, z) is in a checker.
+**
+** If the buffer contains the result of reading a 3
+** dimensional slice (parallel to the three fastest changing
+** indices) through an n cube (n > 3), then the expected
+** values in the buffer will be the same, save that we will
+** add a constant determined by the origin of the 3-cube
+** in the n-cube.
+**
+** Finally, the function presumes that the first element
+**  of the buffer resides at the origin of either
+** a selected or an unselected checker.
+**
+****************************************************************/
+static hbool_t
+test_select_hyper_checker_board_dr__verify_data(uint16_t * buf_ptr,
+ unsigned rank, unsigned edge_size, unsigned checker_edge_size,
+ uint16_t first_expected_val, hbool_t buf_starts_in_checker)
+{
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint16_t expected_value;
+ uint16_t * val_ptr;
+ unsigned i, j, k, l, m; /* to track position in n-cube */
+ unsigned v, w, x, y, z; /* to track position in checker */
+ const unsigned test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= SS_DR_MAX_RANK);
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do {
+ if(v >= checker_edge_size) {
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ } /* end if */
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if(w >= checker_edge_size) {
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
+ } /* end if */
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if(x >= checker_edge_size) {
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
+ } /* end if */
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if(y >= checker_edge_size) {
+ start_in_checker[3] = ! start_in_checker[3];
+ y = 0;
+ } /* end if */
+
+ m = 0;
+ z = 0;
+ in_checker = start_in_checker[3];
+ do {
+ if(z >= checker_edge_size) {
+ in_checker = ! in_checker;
+ z = 0;
+ } /* end if */
+
+ if(in_checker) {
+ if(*val_ptr != expected_value)
+ good_data = FALSE;
+ } /* end if */
+ else {
+ if(*val_ptr != 0)
+ good_data = FALSE;
+ } /* end else */
+
+ val_ptr++;
+ expected_value++;
+
+ m++;
+ z++;
+ } while((rank >= (test_max_rank - 4)) && (m < edge_size));
+ l++;
+ y++;
+ } while((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+ } while((rank >= test_max_rank) && (i < edge_size));
+
+ return(good_data);
+} /* test_select_hyper_checker_board_dr__verify_data() */
+
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__run_test(): Test H5S
+** (dataspace) selection code with checker board source and
+** target selections having different ranks but the same
+** shape. We have already tested H5S_shape_same in
+** isolation, so now we try to do I/O.
+**
+****************************************************************/
+static void
+test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_buf,
+ const uint16_t *zero_buf, unsigned edge_size, unsigned checker_edge_size,
+ unsigned chunk_edge_size, unsigned small_rank, unsigned large_rank,
+ hid_t dset_type, hid_t xfer_plist)
+{
+ hbool_t data_ok;
+ hbool_t start_in_checker[5];
+ hid_t fapl; /* File access property list */
+ hid_t fid; /* HDF5 File IDs */
+ hid_t full_small_cube_sid; /* Dataspace for small cube w/all selection */
+ hid_t mem_small_cube_sid;
+ hid_t file_small_cube_sid;
+ hid_t full_large_cube_sid; /* Dataspace for large cube w/all selection */
+ hid_t mem_large_cube_sid;
+ hid_t file_large_cube_sid;
+ hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */
+ hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */
+ hid_t small_cube_dataset; /* Dataset ID */
+ hid_t large_cube_dataset; /* Dataset ID */
+ unsigned small_rank_offset; /* Rank offset of slice */
+ const unsigned test_max_rank = 5; /* must update code if this changes */
+ size_t start_index; /* Offset within buffer to begin inspecting */
+ size_t stop_index; /* Offset within buffer to end inspecting */
+ uint16_t expected_value;
+ uint16_t * small_cube_buf_1;
+ uint16_t * large_cube_buf_1;
+ uint16_t * ptr_1;
+ size_t small_cube_size; /* Number of elements in small cube */
+ size_t large_cube_size; /* Number of elements in large cube */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t chunk_dims[SS_DR_MAX_RANK];
+ hsize_t sel_start[SS_DR_MAX_RANK];
+ unsigned u, v, w, x; /* Local index variables */
+ size_t s; /* Local index variable */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num));
+ MESSAGE(7, ("\tranks = %d/%d, edge_size = %d, checker_edge_size = %d, chunk_edge_size = %d.\n", small_rank, large_rank, edge_size, checker_edge_size, chunk_edge_size));
+
+ HDassert(edge_size >= 6);
+ HDassert(checker_edge_size > 0);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(small_rank > 0);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= test_max_rank);
+ HDassert(test_max_rank <= SS_DR_MAX_RANK);
+
+ /* Compute cube sizes */
+ small_cube_size = large_cube_size = (size_t)1;
+ for(u = 0; u < large_rank; u++) {
+ if(u < small_rank)
+ small_cube_size *= (size_t)edge_size;
+
+ large_cube_size *= (size_t)edge_size;
+ } /* end for */
+ HDassert(large_cube_size < (size_t)(UINT_MAX));
+
+ small_rank_offset = test_max_rank - small_rank;
+ HDassert(small_rank_offset >= 1);
+
+ /* also, at present, we use 16 bit values in this test --
+ * hence the following assertion. Delete it if we convert
+ * to 32 bit values.
+ */
+ HDassert(large_cube_size < (size_t)(UINT16_MAX));
+
+
+ /* Allocate & initialize buffers */
+ small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size);
+ CHECK(small_cube_buf_1, NULL, "HDcalloc");
+ large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size);
+ CHECK(large_cube_buf_1, NULL, "HDcalloc");
+
+
+    /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Use the 'core' VFD for this test */
+ ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+
+ /* setup dims: */
+ dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = edge_size;
+
+
+ /* Create small cube dataspaces */
+ full_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(full_small_cube_sid, FAIL, "H5Screate_simple");
+
+ mem_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(mem_small_cube_sid, FAIL, "H5Screate_simple");
+
+ file_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(file_small_cube_sid, FAIL, "H5Screate_simple");
+
+
+ /* Create large cube dataspace */
+ full_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(full_large_cube_sid, FAIL, "H5Screate_simple");
+
+ mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple");
+
+ file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(file_large_cube_sid, FAIL, "H5Screate_simple");
+
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if(chunk_edge_size > 0) {
+ chunk_dims[0] = chunk_dims[1] =
+ chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = chunk_edge_size;
+
+ small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+
+ large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ } /* end if */
+
+
+ /* create the small cube dataset */
+ small_cube_dataset = H5Dcreate2(fid, "small_cube_dataset", dset_type,
+ file_small_cube_sid, H5P_DEFAULT, small_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(small_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default small dataset DCPL */
+ if(small_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(small_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* create the large cube dataset */
+ large_cube_dataset = H5Dcreate2(fid, "large_cube_dataset", dset_type,
+ file_large_cube_sid, H5P_DEFAULT, large_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(large_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default large dataset DCPL */
+ if(large_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(large_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+
+ /* write initial data to the on disk datasets */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid,
+ full_small_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid,
+ full_large_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* read initial small cube data from disk and verify that it is as expected. */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid,
+ full_small_cube_sid, xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size,
+ edge_size, small_rank);
+
+ /* read initial large cube data from disk and verify that it is as expected. */
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid,
+ full_large_cube_sid, xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size,
+ edge_size, large_rank);
+
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5S_select_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading small_rank-D slice from the on disk large cube, and
+ * verifying that the data read is correct. Verify that H5S_select_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+     * in memory small cube.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+
+ test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start);
+
+ /* now read slices from the large, on-disk cube into the small cube.
+ * Note how we adjust sel_start only in the dimensions peculiar to the
+ * large cube.
+ */
+
+ start_in_checker[0] = TRUE;
+ u = 0;
+ do {
+ if(small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if(small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if(small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if(small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board
+ (
+ file_large_cube_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start
+ );
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(mem_small_cube_sid,
+ file_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ /* zero the buffer that we will be using for reading */
+ HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size);
+
+ /* Read selection from disk */
+ ret = H5Dread(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_small_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ expected_value = (uint16_t)
+ ((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size));
+
+ data_ok = test_select_hyper_checker_board_dr__verify_data
+ (
+ small_cube_buf_1,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+ if(!data_ok)
+ TestErrPrintf("small cube read from largecube has bad data! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= (test_max_rank - 3)) &&
+ (small_rank <= (test_max_rank - 4)) && (x < edge_size));
+ w++;
+ } while((large_rank >= (test_max_rank - 2)) &&
+ (small_rank <= (test_max_rank - 3)) && (w < edge_size));
+ v++;
+ } while((large_rank >= (test_max_rank - 1)) &&
+ (small_rank <= (test_max_rank - 2)) && (v < edge_size));
+ u++;
+ } while((large_rank >= test_max_rank) &&
+ (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+
+ /* similarly, read the on disk small cube into slices through the in memory
+ * large cube, and verify that the correct data (and only the correct data)
+ * is read.
+ */
+
+ /* select a checker board in the file small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start);
+
+
+ start_in_checker[0] = TRUE;
+ u = 0;
+ do {
+ if(0 < small_rank_offset)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if(1 < small_rank_offset)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if(2 < small_rank_offset)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if(3 < small_rank_offset)
+ sel_start[3] = x;
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board
+ (
+ mem_large_cube_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start
+ );
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_cube_sid,
+ mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* zero out the in memory large cube */
+ HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size);
+
+ /* Read selection from disk */
+ ret = H5Dread(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ file_small_cube_sid,
+ xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ data_ok = TRUE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= large_cube_size );
+
+ /* verify that the large cube contains only zeros before the slice */
+ for(s = 0; s < start_index; s++) {
+ if(*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ HDassert(s == start_index);
+
+ data_ok &= test_select_hyper_checker_board_dr__verify_data
+ (
+ ptr_1,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ (uint16_t)0,
+ (hbool_t)TRUE
+ );
+
+ ptr_1 += small_cube_size;
+ s += small_cube_size;
+
+ HDassert(s == stop_index + 1);
+
+ /* verify that the large cube contains only zeros after the slice */
+ for(s = stop_index + 1; s < large_cube_size; s++) {
+ if(*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ if(!data_ok)
+ TestErrPrintf("large cube read from small cube has bad data! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= (test_max_rank - 3)) &&
+ (small_rank <= (test_max_rank - 4)) && (x < edge_size));
+ w++;
+ } while((large_rank >= (test_max_rank - 2)) &&
+ (small_rank <= (test_max_rank - 3)) && (w < edge_size));
+ v++;
+ } while((large_rank >= (test_max_rank - 1)) &&
+ (small_rank <= (test_max_rank - 2)) && (v < edge_size));
+ u++;
+ } while((large_rank >= test_max_rank) &&
+ (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5S_select_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank D slices from the in memory large cube, to
+     * the on disk small cube dataset.  After each write, read the small
+ * cube dataset back from disk, and verify that it contains the expected
+ * data. Verify that H5S_select_shape_same() returns true on the
+ * memory and file selections.
+ */
+
+ /* select a checker board in the file small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start);
+
+ start_in_checker[0] = TRUE;
+ u = 0;
+ do {
+ if(small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if(small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if(small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if(small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* zero out the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ full_small_cube_sid,
+ full_small_cube_sid,
+ xfer_plist,
+ zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board
+ (
+ mem_large_cube_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start
+ );
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_cube_sid,
+ mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* write the slice from the in memory large cube to the
+ * on disk small cube
+ */
+ ret = H5Dwrite(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_large_cube_sid,
+ file_small_cube_sid,
+ xfer_plist,
+ cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* zero the buffer that we will be using for reading */
+ HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size);
+
+ /* read the on disk small cube into memory */
+ ret = H5Dread(small_cube_dataset,
+ H5T_NATIVE_UINT16,
+ full_small_cube_sid,
+ full_small_cube_sid,
+ xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
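+                    /* The in memory large cube was initialized with
+                     * consecutive values, so the expected value at the
+                     * start of the slice is its linear offset within
+                     * the cube.
+                     */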
+ expected_value = (uint16_t)
+ ((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size));
+
+ data_ok = test_select_hyper_checker_board_dr__verify_data
+ (
+ small_cube_buf_1,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+ if(!data_ok)
+                        TestErrPrintf("small cube read from large cube has bad data! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= (test_max_rank - 3)) &&
+ (small_rank <= (test_max_rank - 4)) && (x < edge_size));
+ w++;
+ } while((large_rank >= (test_max_rank - 2)) &&
+ (small_rank <= (test_max_rank - 3)) && (w < edge_size));
+ v++;
+ } while((large_rank >= (test_max_rank - 1)) &&
+ (small_rank <= (test_max_rank - 2)) && (v < edge_size));
+ u++;
+ } while((large_rank >= test_max_rank) &&
+ (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+
+    /* Now write checker board selections of the entries in the in
+     * memory small cube to slices of the on disk large cube.  After
+     * each write, read the on disk large cube into memory, and verify
+     * that it contains the expected data.  Verify that
+     * H5S_select_shape_same() returns true on the memory and file
+     * selections.
+     */
+
+ /* select a checker board in the in memory small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start);
+
+ start_in_checker[0] = TRUE;
+ u = 0;
+ do {
+ if(small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if(small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if(small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if(small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* zero out the on disk cube */
+ ret = H5Dwrite(large_cube_dataset,
+                                   H5T_NATIVE_UINT16,
+ full_large_cube_sid,
+ full_large_cube_sid,
+ xfer_plist,
+ zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+
+ test_select_hyper_checker_board_dr__select_checker_board
+ (
+ file_large_cube_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank,
+ sel_start
+ );
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_large_cube_sid,
+ mem_small_cube_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* write the checker board selection of the in memory
+ * small cube to a slice through the on disk large
+ * cube.
+ */
+ ret = H5Dwrite(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ mem_small_cube_sid,
+ file_large_cube_sid,
+ xfer_plist,
+ cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+
+ /* zero out the in memory large cube */
+ HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size);
+
+ /* read the on disk large cube into memory */
+ ret = H5Dread(large_cube_dataset,
+ H5T_NATIVE_UINT16,
+ full_large_cube_sid,
+ full_large_cube_sid,
+ xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+
+ /* verify that the expected data and only the
+ * expected data was written to the on disk large
+ * cube.
+ */
+ data_ok = TRUE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= large_cube_size );
+
+ /* verify that the large cube contains only zeros before the slice */
+ for(s = 0; s < start_index; s++) {
+ if(*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ HDassert(s == start_index);
+
+ /* verify that the slice contains the expected data */
+ data_ok &= test_select_hyper_checker_board_dr__verify_data
+ (
+ ptr_1,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ (uint16_t)0,
+ (hbool_t)TRUE
+ );
+
+ ptr_1 += small_cube_size;
+ s += small_cube_size;
+
+ HDassert(s == stop_index + 1);
+
+ /* verify that the large cube contains only zeros after the slice */
+ for(s = stop_index + 1; s < large_cube_size; s++) {
+ if(*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ if(!data_ok)
+ TestErrPrintf("large cube written from small cube has bad data! Line=%d\n",__LINE__);
+
+ x++;
+ } while((large_rank >= (test_max_rank - 3)) &&
+ (small_rank <= (test_max_rank - 4)) && (x < edge_size));
+ w++;
+ } while((large_rank >= (test_max_rank - 2)) &&
+ (small_rank <= (test_max_rank - 3)) && (w < edge_size));
+ v++;
+ } while((large_rank >= (test_max_rank - 1)) &&
+ (small_rank <= (test_max_rank - 2)) && (v < edge_size));
+ u++;
+ } while((large_rank >= test_max_rank) &&
+ (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+
+ /* Close memory dataspaces */
+ ret = H5Sclose(full_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(full_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ /* Close disk dataspace */
+ ret = H5Sclose(file_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(small_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Dclose(large_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(small_cube_buf_1);
+ HDfree(large_cube_buf_1);
+
+} /* test_select_hyper_checker_board_dr__run_test() */
+
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr(): Test H5S (dataspace)
+** selection code with checkerboard source and target having
+** different ranks but the same shape. We have already
+** tested H5S_shape_same in isolation, so now we try to do
+** I/O.
+**
+** This is just an initial smoke check, so we will work
+** with a slice through a cube only.
+**
+****************************************************************/
+static void
+test_select_hyper_checker_board_dr(hid_t dset_type, hid_t xfer_plist)
+{
+ uint16_t *cube_buf; /* Buffer for writing cube data */
+ uint16_t *cube_ptr; /* Temporary pointer into cube data */
+ uint16_t *zero_buf; /* Buffer for writing zeroed cube data */
+ int test_num = 0;
+ unsigned checker_edge_size = 2; /* Size of checkerboard dimension */
+ unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */
+ unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */
+ unsigned small_rank; /* Current rank of small dataset */
+ unsigned large_rank; /* Current rank of large dataset */
+ unsigned max_rank = 5; /* Max. rank to use */
+ size_t max_cube_size; /* Max. number of elements in largest cube */
+ size_t s; /* Local index variable */
+ unsigned u; /* Local index variable */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Checker Board Hyperslabs With Different Rank I/O Functionality\n"));
+
+ /* Compute max. cube size */
+ max_cube_size = (size_t)1;
+ for(u = 0; u < max_rank; u++)
+ max_cube_size *= (size_t)(edge_size + 1);
+
+ /* Allocate cube buffer for writing values */
+ cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size);
+ CHECK(cube_buf, NULL, "HDmalloc");
+
+ /* Initialize the cube buffer */
+ cube_ptr = cube_buf;
+ for(s = 0; s < max_cube_size; s++)
+ *cube_ptr++ = (uint16_t)s;
+
+ /* Allocate cube buffer for zeroing values on disk */
+ zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size);
+ CHECK(zero_buf, NULL, "HDcalloc");
+
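+    /* Exercise every combination of 1 <= small_rank < large_rank <= max_rank,
+     * with both contiguous (chunk_edge_size == 0) and chunked
+     * (chunk_edge_size == 3) datasets, and with cube edge sizes of
+     * edge_size and edge_size + 1.
+     */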
+ for(large_rank = 1; large_rank <= max_rank; large_rank++) {
+ for(small_rank = 1; small_rank < large_rank; small_rank++) {
+ chunk_edge_size = 0;
+ test_select_hyper_checker_board_dr__run_test(test_num, cube_buf,
+ zero_buf, edge_size, checker_edge_size, chunk_edge_size, small_rank,
+ large_rank, dset_type, xfer_plist);
+ test_num++;
+
+            test_select_hyper_checker_board_dr__run_test(test_num, cube_buf,
+                zero_buf, edge_size + 1, checker_edge_size, chunk_edge_size, small_rank,
+                large_rank, dset_type, xfer_plist);
+            test_num++;
+
+            chunk_edge_size = 3;
+            test_select_hyper_checker_board_dr__run_test(test_num, cube_buf,
+                zero_buf, edge_size, checker_edge_size, chunk_edge_size, small_rank,
+                large_rank, dset_type, xfer_plist);
+            test_num++;
+
+            test_select_hyper_checker_board_dr__run_test(test_num, cube_buf,
+                zero_buf, edge_size + 1, checker_edge_size, chunk_edge_size, small_rank,
+                large_rank, dset_type, xfer_plist);
+            test_num++;
+ } /* for loop on small rank */
+ } /* for loop on large rank */
+
+ HDfree(cube_buf);
+ HDfree(zero_buf);
+
+} /* test_select_hyper_checker_board_dr() */
+
+
/****************************************************************
**
** test_select_hyper_copy(): Test H5S (dataspace) selection code.
@@ -2345,15 +4373,15 @@ test_select_point_offset(void)
CHECK(ret, FAIL, "H5Sselect_elements");
/* Read selection from disk */
- ret=H5Dread(dataset,H5T_NATIVE_UCHAR,sid2,sid1,H5P_DEFAULT,rbuf);
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read with data written out */
- for(i=0; i<POINT1_NPOINTS; i++) {
- tbuf=wbuf+((coord2[i][0]+offset[0])*SPACE2_DIM2)+coord2[i][1]+offset[1];
- tbuf2=rbuf+(coord3[i][0]*SPACE3_DIM2)+coord3[i][1];
- if(*tbuf!=*tbuf2)
- TestErrPrintf("element values don't match!, i=%d\n",i);
+ for(i = 0; i < POINT1_NPOINTS; i++) {
+ tbuf = wbuf + ((coord2[i][0] + (hsize_t)offset[0]) * SPACE2_DIM2) + coord2[i][1] + (hsize_t)offset[1];
+ tbuf2 = rbuf + (coord3[i][0] * SPACE3_DIM2) + coord3[i][1];
+ if(*tbuf != *tbuf2)
+ TestErrPrintf("element values don't match!, i=%d\n", i);
} /* end for */
/* Close memory dataspace */
@@ -2410,7 +4438,7 @@ test_select_hyper_union(void)
*tbuf2; /* temporary buffer pointer */
int i,j; /* Counters */
herr_t ret; /* Generic return value */
- hsize_t npoints; /* Number of elements in selection */
+ hssize_t npoints; /* Number of elements in selection */
/* Output message about test being performed */
MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of hyperslabs\n"));
@@ -2448,7 +4476,7 @@ test_select_hyper_union(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid1);
- VERIFY(npoints, 2*15*13, "H5Sget_select_npoints");
+ VERIFY(npoints, 2 * 15 * 13, "H5Sget_select_npoints");
/* Select 8x26 hyperslab for memory dataset */
start[0]=15; start[1]=0;
@@ -2561,7 +4589,7 @@ test_select_hyper_union(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid2);
- VERIFY(npoints, 15*26, "H5Sget_select_npoints");
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
/* Create a dataset */
dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -2647,7 +4675,7 @@ test_select_hyper_union(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid2);
- VERIFY(npoints, 15*26, "H5Sget_select_npoints");
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
/* Create a dataset */
dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -2737,7 +4765,7 @@ test_select_hyper_union(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid2);
- VERIFY(npoints, 15*26, "H5Sget_select_npoints");
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
/* Create a dataset */
dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -2834,7 +4862,7 @@ test_select_hyper_union(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid2);
- VERIFY(npoints, 15*26, "H5Sget_select_npoints");
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
/* Create a dataset */
dataset = H5Dcreate2(fid1,SPACE5_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -3277,7 +5305,7 @@ test_select_hyper_and_2d(void)
*tbuf2; /* temporary buffer pointer */
int i,j; /* Counters */
herr_t ret; /* Generic return value */
- hsize_t npoints; /* Number of elements in selection */
+ hssize_t npoints; /* Number of elements in selection */
/* Output message about test being performed */
MESSAGE(5, ("Testing Hyperslab Selection Functions with intersection of 2-D hyperslabs\n"));
@@ -3322,7 +5350,7 @@ test_select_hyper_and_2d(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid1);
- VERIFY(npoints, 5*5, "H5Sget_select_npoints");
+ VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
/* Select 25 hyperslab for memory dataset */
start[0]=0;
@@ -3333,7 +5361,7 @@ test_select_hyper_and_2d(void)
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints = H5Sget_select_npoints(sid2);
- VERIFY(npoints, 5*5, "H5Sget_select_npoints");
+ VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
/* Create a dataset */
dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -3406,7 +5434,7 @@ test_select_hyper_xor_2d(void)
*tbuf2; /* temporary buffer pointer */
int i,j; /* Counters */
herr_t ret; /* Generic return value */
- hsize_t npoints; /* Number of elements in selection */
+ hssize_t npoints; /* Number of elements in selection */
/* Output message about test being performed */
MESSAGE(5, ("Testing Hyperslab Selection Functions with XOR of 2-D hyperslabs\n"));
@@ -3537,7 +5565,7 @@ test_select_hyper_notb_2d(void)
*tbuf2; /* temporary buffer pointer */
int i,j; /* Counters */
herr_t ret; /* Generic return value */
- hsize_t npoints; /* Number of elements in selection */
+ hssize_t npoints; /* Number of elements in selection */
/* Output message about test being performed */
MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTB of 2-D hyperslabs\n"));
@@ -3667,7 +5695,7 @@ test_select_hyper_nota_2d(void)
*tbuf2; /* temporary buffer pointer */
int i,j; /* Counters */
herr_t ret; /* Generic return value */
- hsize_t npoints; /* Number of elements in selection */
+ hssize_t npoints; /* Number of elements in selection */
/* Output message about test being performed */
MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTA of 2-D hyperslabs\n"));
@@ -3878,7 +5906,7 @@ test_select_hyper_union_random_5d(hid_t read_plist)
#else /* QAK */
seed=987909620;
#endif /* QAK */
- HDsrand(seed);
+ HDsrandom(seed);
#ifdef QAK
printf("test_num=%d, seed=%u\n",test_num,seed);
@@ -3893,26 +5921,26 @@ printf("hyperslab=%d\n",i);
#endif /* QAK */
/* Select random hyperslab location & size for selection */
for(j=0; j<SPACE5_RANK; j++) {
- start[j]=rand()%dims1[j];
- count[j]=(rand()%(dims1[j]-start[j]))+1;
+ start[j] = ((hsize_t)HDrandom() % dims1[j]);
+ count[j] = (((hsize_t)HDrandom() % (dims1[j] - start[j])) + 1);
#ifdef QAK
printf("start[%d]=%d, count[%d]=%d (end[%d]=%d)\n",j,(int)start[j],j,(int)count[j],j,(int)(start[j]+count[j]-1));
#endif /* QAK */
} /* end for */
/* Select hyperslab */
- ret = H5Sselect_hyperslab(sid1,(i==0 ? H5S_SELECT_SET : H5S_SELECT_OR),start,NULL,count,NULL);
+ ret = H5Sselect_hyperslab(sid1, (i == 0 ? H5S_SELECT_SET : H5S_SELECT_OR), start, NULL, count, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
} /* end for */
/* Get the number of elements selected */
- npoints=H5Sget_select_npoints(sid1);
+ npoints = H5Sget_select_npoints(sid1);
CHECK(npoints, 0, "H5Sget_select_npoints");
/* Select linear 1-D hyperslab for memory dataset */
- start[0]=0;
- count[0]=npoints;
- ret = H5Sselect_hyperslab(sid2,H5S_SELECT_SET,start,NULL,count,NULL);
+ start[0] = 0;
+ count[0] = (hsize_t)npoints;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
npoints2 = H5Sget_select_npoints(sid2);
@@ -4973,24 +7001,24 @@ typedef struct {
static herr_t
test_select_hyper_iter3(void *_elem, hid_t UNUSED type_id, unsigned ndim, const hsize_t *point, void *_operator_data)
{
- unsigned short *tbuf=(unsigned short *)_elem; /* temporary buffer pointer */
- fill_iter_info *iter_info=(fill_iter_info *)_operator_data; /* Get the pointer to the iterator information */
+ unsigned *tbuf = (unsigned *)_elem; /* temporary buffer pointer */
+ fill_iter_info *iter_info = (fill_iter_info *)_operator_data; /* Get the pointer to the iterator information */
hsize_t *coord_ptr; /* Pointer to the coordinate information for a point*/
/* Check value in current buffer location */
- if(*tbuf!=iter_info->fill_value)
+ if(*tbuf != iter_info->fill_value)
return(-1);
else {
/* Check number of dimensions */
- if(ndim!=SPACE7_RANK)
+ if(ndim != SPACE7_RANK)
return(-1);
else {
/* Check Coordinates */
- coord_ptr=iter_info->coords+(2*iter_info->curr_coord);
+ coord_ptr = iter_info->coords + (2 * iter_info->curr_coord);
iter_info->curr_coord++;
- if(coord_ptr[0]!=point[0])
+ if(coord_ptr[0] != point[0])
return(-1);
- else if(coord_ptr[1]!=point[1])
+ else if(coord_ptr[1] != point[1])
return(-1);
else
return(0);
@@ -5009,25 +7037,25 @@ test_select_fill_all(void)
{
hid_t sid1; /* Dataspace ID */
hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
- int fill_value; /* Fill value */
+ unsigned fill_value; /* Fill value */
fill_iter_info iter_info; /* Iterator information structure */
hsize_t points[SPACE7_DIM1*SPACE7_DIM2][SPACE7_RANK]; /* Coordinates of selection */
- unsigned short *wbuf, /* buffer to write to disk */
+ unsigned *wbuf, /* buffer to write to disk */
*tbuf; /* temporary buffer pointer */
- int i,j; /* Counters */
+ unsigned u, v; /* Counters */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Filling 'all' Selections\n"));
/* Allocate memory buffer */
- wbuf = (unsigned short *)HDmalloc(sizeof(unsigned short)*SPACE7_DIM1*SPACE7_DIM2);
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
CHECK(wbuf, NULL, "HDmalloc");
/* Initialize memory buffer */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- *tbuf++=(unsigned short)(i*SPACE7_DIM2)+j;
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (u * SPACE7_DIM2) + v;
/* Create dataspace for dataset on disk */
sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
@@ -5036,32 +7064,32 @@ test_select_fill_all(void)
/* Space defaults to "all" selection */
/* Set fill value */
- fill_value=SPACE7_FILL;
+ fill_value = SPACE7_FILL;
/* Fill selection in memory */
- ret=H5Dfill(&fill_value,H5T_NATIVE_INT,wbuf,H5T_NATIVE_USHORT,sid1);
+ ret = H5Dfill(&fill_value, H5T_NATIVE_UINT, wbuf, H5T_NATIVE_UINT, sid1);
CHECK(ret, FAIL, "H5Dfill");
/* Verify memory buffer the hard way... */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- if(*tbuf!=(unsigned short)fill_value)
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%x, fill_value=%x\n",j,i,(unsigned)*tbuf,(unsigned)fill_value);
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+ if(*tbuf != fill_value)
+                TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, fill_value);
/* Set the coordinates of the selection */
- for(i=0; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++) {
- points[(i*SPACE7_DIM2)+j][0]=i;
- points[(i*SPACE7_DIM2)+j][1]=j;
+ for(u = 0; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++) {
+ points[(u * SPACE7_DIM2) + v][0] = u;
+ points[(u * SPACE7_DIM2) + v][1] = v;
} /* end for */
/* Initialize the iterator structure */
- iter_info.fill_value=SPACE7_FILL;
- iter_info.curr_coord=0;
- iter_info.coords=(hsize_t *)points;
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
/* Iterate through selection, verifying correct data */
- ret = H5Diterate(wbuf,H5T_NATIVE_USHORT,sid1,test_select_hyper_iter3,&iter_info);
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
CHECK(ret, FAIL, "H5Diterate");
/* Close dataspace */
@@ -5085,78 +7113,78 @@ test_select_fill_point(hssize_t *offset)
hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */
hsize_t points[5][SPACE7_RANK] = {{2,4}, {3,8}, {8,4}, {7,5}, {7,7}};
- size_t num_points=5; /* Number of points selected */
+ size_t num_points = 5; /* Number of points selected */
int fill_value; /* Fill value */
fill_iter_info iter_info; /* Iterator information structure */
- unsigned short *wbuf, /* buffer to write to disk */
+ unsigned *wbuf, /* buffer to write to disk */
*tbuf; /* temporary buffer pointer */
- int i,j,k; /* Counters */
+ unsigned u, v, w; /* Counters */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Filling 'point' Selections\n"));
/* Allocate memory buffer */
- wbuf = (unsigned short *)HDmalloc(sizeof(unsigned short)*SPACE7_DIM1*SPACE7_DIM2);
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
CHECK(wbuf, NULL, "HDmalloc");
/* Initialize memory buffer */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- *tbuf++=(unsigned short)(i*SPACE7_DIM2)+j;
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+            *tbuf++ = (u * SPACE7_DIM2) + v;
/* Create dataspace for dataset on disk */
sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
CHECK(sid1, FAIL, "H5Screate_simple");
/* Select "point" selection */
- ret = H5Sselect_elements(sid1, H5S_SELECT_SET,num_points,(const hsize_t *)points);
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, num_points, (const hsize_t *)points);
CHECK(ret, FAIL, "H5Sselect_elements");
- if(offset!=NULL) {
- HDmemcpy(real_offset,offset,SPACE7_RANK*sizeof(hssize_t));
+ if(offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
/* Set offset, if provided */
- ret = H5Soffset_simple(sid1,real_offset);
+ ret = H5Soffset_simple(sid1, real_offset);
CHECK(ret, FAIL, "H5Soffset_simple");
} /* end if */
else
- HDmemset(real_offset,0,SPACE7_RANK*sizeof(hssize_t));
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
/* Set fill value */
- fill_value=SPACE7_FILL;
+ fill_value = SPACE7_FILL;
/* Fill selection in memory */
- ret=H5Dfill(&fill_value,H5T_NATIVE_INT,wbuf,H5T_NATIVE_USHORT,sid1);
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
CHECK(ret, FAIL, "H5Dfill");
/* Verify memory buffer the hard way... */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++, tbuf++) {
- for(k=0; k<(int)num_points; k++) {
- if(i==(int)(points[k][0]+real_offset[0]) && j==(int)(points[k][1]+real_offset[1])) {
- if(*tbuf!=(unsigned short)fill_value)
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, fill_value=%u\n",j,i,(unsigned)*tbuf,(unsigned)fill_value);
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for(w = 0; w < (unsigned)num_points; w++) {
+ if(u == (unsigned)(points[w][0] + (hsize_t)real_offset[0]) && v == (unsigned)(points[w][1] + (hsize_t)real_offset[1])) {
+ if(*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, (unsigned)fill_value);
break;
} /* end if */
} /* end for */
- if(k==(int)num_points && *tbuf!=((unsigned short)(i*SPACE7_DIM2)+j))
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, should be: %u\n",j,i,(unsigned)*tbuf,(unsigned)((i*SPACE7_DIM2)+j));
+ if(w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+                TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, ((u * SPACE7_DIM2) + v));
} /* end for */
/* Initialize the iterator structure */
- iter_info.fill_value=SPACE7_FILL;
- iter_info.curr_coord=0;
- iter_info.coords=(hsize_t *)points;
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
/* Add in the offset */
- for(i=0; i<(int)num_points; i++) {
- points[i][0]+=real_offset[0];
- points[i][1]+=real_offset[1];
+ for(u = 0; u < (unsigned)num_points; u++) {
+ points[u][0] = (hsize_t)(points[u][0] + real_offset[0]);
+ points[u][1] = (hsize_t)(points[u][1] + real_offset[1]);
} /* end for */
/* Iterate through selection, verifying correct data */
- ret = H5Diterate(wbuf,H5T_NATIVE_USHORT,sid1,test_select_hyper_iter3,&iter_info);
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
CHECK(ret, FAIL, "H5Diterate");
/* Close dataspace */
@@ -5185,78 +7213,78 @@ test_select_fill_hyper_simple(hssize_t *offset)
hsize_t points[16][SPACE7_RANK]; /* Coordinates selected */
int fill_value; /* Fill value */
fill_iter_info iter_info; /* Iterator information structure */
- unsigned short *wbuf, /* buffer to write to disk */
+ unsigned *wbuf, /* buffer to write to disk */
*tbuf; /* temporary buffer pointer */
- int i,j; /* Counters */
+ unsigned u, v; /* Counters */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Filling Simple 'hyperslab' Selections\n"));
/* Allocate memory buffer */
- wbuf = (unsigned short *)HDmalloc(sizeof(unsigned short)*SPACE7_DIM1*SPACE7_DIM2);
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
CHECK(wbuf, NULL, "HDmalloc");
/* Initialize memory buffer */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- *tbuf++=(unsigned short)(i*SPACE7_DIM2)+j;
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+            *tbuf++ = (u * SPACE7_DIM2) + v;
/* Create dataspace for dataset on disk */
sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
CHECK(sid1, FAIL, "H5Screate_simple");
/* Select "hyperslab" selection */
- start[0]=3; start[1]=3;
- count[0]=4; count[1]=4;
- ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET,start,NULL,count,NULL);
+ start[0] = 3; start[1] = 3;
+ count[0] = 4; count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
- if(offset!=NULL) {
- HDmemcpy(real_offset,offset,SPACE7_RANK*sizeof(hssize_t));
+ if(offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
/* Set offset, if provided */
- ret = H5Soffset_simple(sid1,real_offset);
+ ret = H5Soffset_simple(sid1, real_offset);
CHECK(ret, FAIL, "H5Soffset_simple");
} /* end if */
else
- HDmemset(real_offset,0,SPACE7_RANK*sizeof(hssize_t));
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
/* Set fill value */
- fill_value=SPACE7_FILL;
+ fill_value = SPACE7_FILL;
/* Fill selection in memory */
- ret=H5Dfill(&fill_value,H5T_NATIVE_INT,wbuf,H5T_NATIVE_USHORT,sid1);
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
CHECK(ret, FAIL, "H5Dfill");
/* Verify memory buffer the hard way... */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++, tbuf++) {
- if((i>=(int)(start[0]+real_offset[0]) && i<(int)(start[0]+count[0]+real_offset[0]))
- && (j>=(int)(start[1]+real_offset[1]) && j<(int)(start[1]+count[1]+real_offset[1]))) {
- if(*tbuf!=(unsigned short)fill_value)
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, fill_value=%u\n",j,i,(unsigned)*tbuf,(unsigned)fill_value);
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ if((u >= (unsigned)(start[0] + real_offset[0]) && u < (unsigned)(start[0] + count[0] + real_offset[0]))
+ && (v >= (unsigned)(start[1] + real_offset[1]) && v < (unsigned)(start[1] + count[1] + real_offset[1]))) {
+ if(*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, (unsigned)fill_value);
} /* end if */
else {
- if(*tbuf!=((unsigned short)(i*SPACE7_DIM2)+j))
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, should be: %u\n",j,i,(unsigned)*tbuf,(unsigned)((i*SPACE7_DIM2)+j));
+ if(*tbuf != ((unsigned)(u * SPACE7_DIM2) + v))
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, ((u * SPACE7_DIM2) + v));
} /* end else */
} /* end for */
/* Initialize the iterator structure */
- iter_info.fill_value=SPACE7_FILL;
- iter_info.curr_coord=0;
- iter_info.coords=(hsize_t *)points;
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
/* Set the coordinates of the selection (with the offset) */
- for(i=0, num_points=0; i<(int)count[0]; i++)
- for(j=0; j<(int)count[1]; j++, num_points++) {
- points[num_points][0]=i+start[0]+real_offset[0];
- points[num_points][1]=j+start[1]+real_offset[1];
+ for(u = 0, num_points = 0; u < (unsigned)count[0]; u++)
+ for(v = 0; v < (unsigned)count[1]; v++, num_points++) {
+ points[num_points][0] = (hsize_t)(u + start[0] + real_offset[0]);
+ points[num_points][1] = (hsize_t)(v + start[1] + real_offset[1]);
} /* end for */
/* Iterate through selection, verifying correct data */
- ret = H5Diterate(wbuf,H5T_NATIVE_USHORT,sid1,test_select_hyper_iter3,&iter_info);
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
CHECK(ret, FAIL, "H5Diterate");
/* Close dataspace */
@@ -5292,79 +7320,79 @@ test_select_fill_hyper_regular(hssize_t *offset)
size_t num_points=16; /* Number of points selected */
int fill_value; /* Fill value */
fill_iter_info iter_info; /* Iterator information structure */
- unsigned short *wbuf, /* buffer to write to disk */
+ unsigned *wbuf, /* buffer to write to disk */
*tbuf; /* temporary buffer pointer */
- int i,j,k; /* Counters */
+ unsigned u, v, w; /* Counters */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Filling Regular 'hyperslab' Selections\n"));
/* Allocate memory buffer */
- wbuf = (unsigned short *)HDmalloc(sizeof(unsigned short)*SPACE7_DIM1*SPACE7_DIM2);
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
CHECK(wbuf, NULL, "HDmalloc");
/* Initialize memory buffer */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- *tbuf++=(unsigned short)(i*SPACE7_DIM2)+j;
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+            *tbuf++ = (u * SPACE7_DIM2) + v;
/* Create dataspace for dataset on disk */
sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
CHECK(sid1, FAIL, "H5Screate_simple");
/* Select "hyperslab" selection */
- start[0]=2; start[1]=2;
- stride[0]=4; stride[1]=4;
- count[0]=2; count[1]=2;
- block[0]=2; block[1]=2;
- ret = H5Sselect_hyperslab(sid1,H5S_SELECT_SET,start,stride,count,block);
+ start[0] = 2; start[1] = 2;
+ stride[0] = 4; stride[1] = 4;
+ count[0] = 2; count[1] = 2;
+ block[0] = 2; block[1] = 2;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
- if(offset!=NULL) {
- HDmemcpy(real_offset,offset,SPACE7_RANK*sizeof(hssize_t));
+ if(offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
/* Set offset, if provided */
- ret = H5Soffset_simple(sid1,real_offset);
+ ret = H5Soffset_simple(sid1, real_offset);
CHECK(ret, FAIL, "H5Soffset_simple");
} /* end if */
else
- HDmemset(real_offset,0,SPACE7_RANK*sizeof(hssize_t));
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
/* Set fill value */
- fill_value=SPACE7_FILL;
+ fill_value = SPACE7_FILL;
/* Fill selection in memory */
- ret=H5Dfill(&fill_value,H5T_NATIVE_INT,wbuf,H5T_NATIVE_USHORT,sid1);
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
CHECK(ret, FAIL, "H5Dfill");
/* Verify memory buffer the hard way... */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++, tbuf++) {
- for(k=0; k<(int)num_points; k++) {
- if(i==(int)(points[k][0]+real_offset[0]) && j==(int)(points[k][1]+real_offset[1])) {
- if(*tbuf!=(unsigned short)fill_value)
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, fill_value=%u\n",j,i,(unsigned)*tbuf,(unsigned)fill_value);
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for(w = 0; w < (unsigned)num_points; w++) {
+ if(u == (unsigned)(points[w][0] + real_offset[0]) && v == (unsigned)(points[w][1] + real_offset[1])) {
+ if(*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, (unsigned)fill_value);
break;
} /* end if */
} /* end for */
- if(k==(int)num_points && *tbuf!=((unsigned short)(i*SPACE7_DIM2)+j))
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, should be: %u\n",j,i,(unsigned)*tbuf,(unsigned)((i*SPACE7_DIM2)+j));
+ if(w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+                TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, ((u * SPACE7_DIM2) + v));
} /* end for */
/* Initialize the iterator structure */
- iter_info.fill_value=SPACE7_FILL;
- iter_info.curr_coord=0;
- iter_info.coords=(hsize_t *)points;
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
/* Add in the offset */
- for(i=0; i<(int)num_points; i++) {
- points[i][0] += real_offset[0];
- points[i][1] += real_offset[1];
+ for(u = 0; u < (unsigned)num_points; u++) {
+ points[u][0] = (hsize_t)(points[u][0] + real_offset[0]);
+ points[u][1] = (hsize_t)(points[u][1] + real_offset[1]);
} /* end for */
/* Iterate through selection, verifying correct data */
- ret = H5Diterate(wbuf,H5T_NATIVE_USHORT,sid1,test_select_hyper_iter3,&iter_info);
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
CHECK(ret, FAIL, "H5Diterate");
/* Close dataspace */
@@ -5407,72 +7435,72 @@ test_select_fill_hyper_irregular(hssize_t *offset)
{6,4}, {6,5}, {6,6}, {6,7},
{7,4}, {7,5}, {7,6}, {7,7},
};
- size_t num_points=32; /* Number of points selected */
- size_t num_iter_points=28; /* Number of resulting points */
+ size_t num_points = 32; /* Number of points selected */
+ size_t num_iter_points = 28; /* Number of resulting points */
int fill_value; /* Fill value */
fill_iter_info iter_info; /* Iterator information structure */
- unsigned short *wbuf, /* buffer to write to disk */
+ unsigned *wbuf, /* buffer to write to disk */
*tbuf; /* temporary buffer pointer */
- int i,j,k; /* Counters */
+ unsigned u, v, w; /* Counters */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Filling Irregular 'hyperslab' Selections\n"));
/* Allocate memory buffer */
- wbuf = (unsigned short *)HDmalloc(sizeof(unsigned short)*SPACE7_DIM1*SPACE7_DIM2);
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
CHECK(wbuf, NULL, "HDmalloc");
/* Initialize memory buffer */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++)
- *tbuf++=(unsigned short)(i*SPACE7_DIM2)+j;
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (u * SPACE7_DIM2) + v;
/* Create dataspace for dataset on disk */
sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
CHECK(sid1, FAIL, "H5Screate_simple");
/* Select first "hyperslab" selection */
- start[0]=2; start[1]=2;
- count[0]=4; count[1]=4;
- ret = H5Sselect_hyperslab(sid1,H5S_SELECT_SET,start,NULL,count,NULL);
+ start[0] = 2; start[1] = 2;
+ count[0] = 4; count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
/* Combine with second "hyperslab" selection */
- start[0]=4; start[1]=4;
- count[0]=4; count[1]=4;
- ret = H5Sselect_hyperslab(sid1,H5S_SELECT_OR,start,NULL,count,NULL);
+ start[0] = 4; start[1] = 4;
+ count[0] = 4; count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_OR, start, NULL, count, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
- if(offset!=NULL) {
- HDmemcpy(real_offset,offset,SPACE7_RANK*sizeof(hssize_t));
+ if(offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
/* Set offset, if provided */
- ret = H5Soffset_simple(sid1,real_offset);
+ ret = H5Soffset_simple(sid1, real_offset);
CHECK(ret, FAIL, "H5Soffset_simple");
} /* end if */
else
- HDmemset(real_offset,0,SPACE7_RANK*sizeof(hssize_t));
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
/* Set fill value */
- fill_value=SPACE7_FILL;
+ fill_value = SPACE7_FILL;
/* Fill selection in memory */
- ret=H5Dfill(&fill_value,H5T_NATIVE_INT,wbuf,H5T_NATIVE_USHORT,sid1);
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
CHECK(ret, FAIL, "H5Dfill");
/* Verify memory buffer the hard way... */
- for(i=0, tbuf=wbuf; i<SPACE7_DIM1; i++)
- for(j=0; j<SPACE7_DIM2; j++, tbuf++) {
- for(k=0; k<(int)num_points; k++) {
- if(i==(int)(points[k][0]+real_offset[0]) && j==(int)(points[k][1]+real_offset[1])) {
- if(*tbuf!=(unsigned short)fill_value)
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, fill_value=%u\n",j,i,(unsigned)*tbuf,(unsigned)fill_value);
+ for(u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for(v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for(w = 0; w < (unsigned)num_points; w++) {
+ if(u == (unsigned)(points[w][0] + real_offset[0]) && v == (unsigned)(points[w][1] + real_offset[1])) {
+ if(*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, (unsigned)fill_value);
break;
} /* end if */
} /* end for */
- if(k==(int)num_points && *tbuf!=((unsigned short)(i*SPACE7_DIM2)+j))
- TestErrPrintf("Error! j=%d, i=%d, *tbuf=%u, should be: %u\n",j,i,(unsigned)*tbuf,(unsigned)((i*SPACE7_DIM2)+j));
+ if(w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf, ((u * SPACE7_DIM2) + v));
} /* end for */
/* Initialize the iterator structure */
@@ -5481,13 +7509,13 @@ test_select_fill_hyper_irregular(hssize_t *offset)
iter_info.coords = (hsize_t *)iter_points;
/* Add in the offset */
- for(i=0; i<(int)num_iter_points; i++) {
- iter_points[i][0] += real_offset[0];
- iter_points[i][1] += real_offset[1];
+ for(u = 0; u < (unsigned)num_iter_points; u++) {
+ iter_points[u][0] = (hsize_t)(iter_points[u][0] + real_offset[0]);
+ iter_points[u][1] = (hsize_t)(iter_points[u][1] + real_offset[1]);
} /* end for */
/* Iterate through selection, verifying correct data */
- ret = H5Diterate(wbuf, H5T_NATIVE_USHORT, sid1, test_select_hyper_iter3, &iter_info);
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
CHECK(ret, FAIL, "H5Diterate");
/* Close dataspace */
@@ -7005,6 +9033,2855 @@ test_shape_same(void)
CHECK(ret, FAIL, "H5Sclose");
} /* test_shape_same() */
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_1():
+**
+** Create a square, 2 D data space (10 X 10), and select
+** all of it.
+**
+** Similarly, create nine 3 D data spaces (10 X 10 X 10),
+** and select (10 X 10 X 1) hyper slabs in each, three with
+** the slab parallel to the xy plane, three parallel to the
+** xz plane, and three parallel to the yz plane.
+**
+** Assuming that z is the fastest changing dimension,
+** H5S_select_shape_same() should return TRUE when comparing
+** the full 2 D space against any hyperslab parallel to the
+** yz plane in the 3 D space, and FALSE when comparing it
+** against the hyper slabs parallel to the xy or xz planes.
+**
+** Also create two additional 3 D data spaces (10 X 10 X 10),
+** and select a (10 X 10 X 2) hyper slab parallel to the yz
+** plane in one of them, and two (10 X 10 X 1) hyper slabs
+** parallel to the yz plane in the other.
+** H5S_select_shape_same() should return FALSE when comparing
+** each to the 2 D selection.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_1(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hid_t small_cube_yz_slice_3_sid;
+ hid_t small_cube_yz_slice_4_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 1: Slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
+    /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xy plane */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous plane, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single block selected. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 10; /* x */
+ block[1] = 10; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+    /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xz plane */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous chunk, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single chunk. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 10; /* x */
+ block[1] = 1; /* y */
+ block[2] = 10; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 4;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+    /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz plane */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_4_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_4_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous chunk, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single chunk. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 10; /* y */
+ block[2] = 10; /* z */
+
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ block[0] = 2;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 6;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* setup is done -- run the tests: */
+
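+    /* Since z varies fastest, each single plane yz slice has the same
+     * shape as the full 10 x 10 square; the xy and xz slices, the two
+     * plane thick yz slice, and the split yz selection do not.
+     */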
+ /* Compare against "xy" selection */
+ check = H5S_select_shape_same_test(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "xz" selection */
+ check = H5S_select_shape_same_test(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "yz" selection */
+ check = H5S_select_shape_same_test(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_3_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_4_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_4_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_1() */
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_2():
+**
+** Create a square, 2 D data space (10 X 10), and select
+** a "checker board" hyper slab as follows:
+**
+** * * - - * * - - * *
+** * * - - * * - - * *
+** - - * * - - * * - -
+** - - * * - - * * - -
+** * * - - * * - - * *
+** * * - - * * - - * *
+** - - * * - - * * - -
+** - - * * - - * * - -
+** * * - - * * - - * *
+** * * - - * * - - * *
+**
+** where asterisks indicate selected elements, and dashes
+** indicate unselected elements.
+**
+** Similarly, create nine 3 D data spaces (10 X 10 X 10),
+** and select similar (10 X 10 X 1) checker board hyper
+** slabs in each, three with the slab parallel to the xy
+** plane, three parallel to the xz plane, and three parallel
+** to the yz plane.
+**
+** Assuming that z is the fastest changing dimension,
+** H5S_select_shape_same() should return TRUE when comparing
+** the 2 D space checker board selection against a checker
+** board hyperslab parallel to the yz plane in the 3 D
+** space, and FALSE when comparing the 2 D checker board
+** selection against the checker board selections parallel
+** to the xy or xz planes.
+**
+** Also create an additional 3 D data space (10 X 10 X 10),
+** and select a checker board parallel to the yz plane,
+** save that some of its squares lie on different planes.
+** H5S_select_shape_same() should return FALSE when
+** comparing this selection to the 2 D selection.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_2(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hid_t small_cube_yz_slice_3_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 2: Checker board slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
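+    /* Build the checker board selection in two passes: the first
+     * hyperslab call selects the 2 x 2 blocks whose upper left corners
+     * lie at multiples of 4 in each dimension, and the second ORs in
+     * the 2 x 2 blocks offset by (2, 2), giving the pattern shown above.
+     */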
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+
+ count[0] = 3; /* x */
+ count[1] = 3; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 2; /* x */
+ start[1] = 2; /* y */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+
+ count[0] = 2; /* x */
+ count[1] = 2; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+    /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xy plane */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+ stride[2] = 20; /* z -- large enough that there will only be one slice */
+
+ count[0] = 3; /* x */
+ count[1] = 3; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 3;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ start[0] = 2; /* x */
+ start[1] = 2; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+ stride[2] = 20; /* z -- large enough that there will only be one slice */
+
+ count[0] = 2; /* x */
+ count[1] = 2; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 3;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 20; /* y -- large enough that there will only be one slice */
+ stride[2] = 4; /* z */
+
+ count[0] = 3; /* x */
+ count[1] = 1; /* y */
+ count[2] = 3; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 2; /* x */
+ start[1] = 0; /* y */
+ start[2] = 2; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 20; /* y -- large enough that there will only be one slice */
+ stride[2] = 4; /* z */
+
+ count[0] = 2; /* x */
+ count[1] = 1; /* y */
+ count[2] = 2; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 4; /* y */
+ stride[2] = 4; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 3; /* y */
+ count[2] = 3; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ start[0] = 0; /* x */
+ start[1] = 2; /* y */
+ start[2] = 2; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 4; /* y */
+ stride[2] = 4; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 2; /* y */
+ count[2] = 2; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+    /* This test gets the right answer, but the shape same test fails
+     * at an unexpected point.  Bring this up with Quincey, as
+     * the oddness does not appear to be related to my code.
+     *                                            -- JRM
+     */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* setup is done -- run the tests: */
+
+ /* Compare against "xy" selection */
+ check = H5S_select_shape_same_test(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "xz" selection */
+ check = H5S_select_shape_same_test(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "yz" selection */
+ check = H5S_select_shape_same_test(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_3_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_2() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_3():
+**
+** Create a square, 2 D data space (10 X 10), and select an
+** irregular hyper slab as follows:
+**
+** y
+** 9 - - - - - - - - - -
+** 8 - - - - - - - - - -
+** 7 - - - * * * * - - -
+** 6 - - * * * * * - - -
+** 5 - - * * - - - - - -
+** 4 - - * * - * * - - -
+** 3 - - * * - * * - - -
+** 2 - - - - - - - - - -
+** 1 - - - - - - - - - -
+** 0 - - - - - - - - - -
+** 0 1 2 3 4 5 6 7 8 9 x
+**
+** where asterisks indicate selected elements, and dashes
+** indicate unselected elements.
+**
+** Similarly, create nine, 3 D data spaces (10 X 10 X 10),
+** and select similar irregular hyper slabs in each, three
+** with the slab parallel to the xy plane, three parallel
+** to the xz plane, and three parallel to the yz plane.
+** Further, translate the irregular slab in 2/3rds of the
+** cases.
+**
+** Assuming that z is the fastest changing dimension,
+** H5S_select_shape_same() should return TRUE when
+** comparing the 2 D irregular hyperslab selection
+** against the irregular hyperslab selections parallel
+** to the yz plane in the 3 D space, and FALSE when
+** comparing it against the irregular hyper slabs
+** selections parallel to the xy or xz planes.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_3(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 3: Offset subsets of slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2; /* x */
+ start[1] = 3; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 4; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 6; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 4; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 3; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xy axis */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+
+ start[0] = 2; /* x */
+ start[1] = 3; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 4; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 6; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 4; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 3; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslab parallel to the xz axis */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 4; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 5; /* y */
+ start[2] = 6; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 4; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz axis */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 8; /* x */
+ start[1] = 2; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 4; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8; /* x */
+ start[1] = 3; /* y */
+ start[2] = 6; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 4; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+    start[1] -= 1; /* y */
+    start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+    start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* setup is done -- run the tests: */
+
+ /* Compare against "xy" selection */
+ check = H5S_select_shape_same_test(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "xz" selection */
+ check = H5S_select_shape_same_test(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Compare against "yz" selection */
+ check = H5S_select_shape_same_test(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_3() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_4():
+**
+** Create a square, 2 D data space (10 X 10), and select
+** the entire space.
+**
+** Similarly, create 3 D and 4 D data spaces:
+**
+** (1 X 10 X 10)
+** (10 X 1 X 10)
+** (10 X 10 X 1)
+** (10 X 10 X 10)
+**
+** (1 X 1 X 10 X 10)
+** (1 X 10 X 1 X 10)
+** (1 X 10 X 10 X 1)
+** (10 X 1 X 1 X 10)
+** (10 X 1 X 10 X 1)
+** (10 X 10 X 1 X 1)
+** (10 X 1 X 10 X 10)
+**
+** And select these entire spaces as well.
+**
+** Compare the 2 D space against all the other spaces
+** with H5S_select_shape_same(). The (1 X 10 X 10) &
+** (1 X 1 X 10 X 10) should return TRUE. All others
+** should return FALSE.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_4(void)
+{
+ hid_t square_sid;
+ hid_t three_d_space_0_sid;
+ hid_t three_d_space_1_sid;
+ hid_t three_d_space_2_sid;
+ hid_t three_d_space_3_sid;
+ hid_t four_d_space_0_sid;
+ hid_t four_d_space_1_sid;
+ hid_t four_d_space_2_sid;
+ hid_t four_d_space_3_sid;
+ hid_t four_d_space_4_sid;
+ hid_t four_d_space_5_sid;
+ hid_t four_d_space_6_sid;
+ hsize_t dims[] = {10, 10, 10, 10};
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 4: Spaces of different dimension but same size.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ square_sid = H5Screate_simple(2, dims, NULL);
+ CHECK(square_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 10) data space */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 10;
+ three_d_space_0_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_0_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10) data space */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ three_d_space_1_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_1_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 1) data space */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 1;
+ three_d_space_2_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_2_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 10) data space */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 10;
+ three_d_space_3_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_3_sid, FAIL, "H5Screate_simple");
+
+
+ /* create (1 X 1 X 10 X 10) data space */
+ dims[0] = 1;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 10;
+ four_d_space_0_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_0_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 1 X 10) data space */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 1;
+ dims[3] = 10;
+ four_d_space_1_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_1_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 10 X 1) data space */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 10;
+ dims[3] = 1;
+ four_d_space_2_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_2_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 1 X 10) data space */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 1;
+ dims[3] = 10;
+ four_d_space_3_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_3_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10 X 1) data space */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 1;
+ four_d_space_4_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_4_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 1 X 1) data space */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 1;
+ dims[3] = 1;
+ four_d_space_5_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_5_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10 X 10) data space */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 10;
+ four_d_space_6_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_6_sid, FAIL, "H5Screate_simple");
+
+
+ /* setup is done -- run the tests: */
+
+ check = H5S_select_shape_same_test(three_d_space_0_sid, square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(three_d_space_1_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(three_d_space_2_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(three_d_space_3_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ check = H5S_select_shape_same_test(four_d_space_0_sid, square_sid);
+ VERIFY(check, TRUE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_1_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_2_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_3_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_4_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_5_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+ check = H5S_select_shape_same_test(four_d_space_6_sid, square_sid);
+ VERIFY(check, FALSE, "H5S_select_shape_same_test");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(three_d_space_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+
+ ret = H5Sclose(four_d_space_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_4_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_5_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_6_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_4() */
+
+/****************************************************************
+**
+** test_shape_same_dr__full_space_vs_slice(): Tests selection
+**	of a full n-cube data space vs an n-dimensional slice
+** of an m-cube (m > n) in a call to H5S_select_shape_same().
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5S_select_shape_same() should always return FALSE if
+** they don't).
+**
+** Per Quincey's suggestion, only test up to 5 dimensional
+** spaces.
+**
+****************************************************************/
+static void
+test_shape_same_dr__full_space_vs_slice(int test_num,
+ int small_rank,
+ int large_rank,
+ int offset,
+ hsize_t edge_size,
+ hbool_t dim_selected[],
+ hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[128];
+ int i;
+ hid_t n_cube_0_sid; /* the fully selected hyper cube */
+ hid_t n_cube_1_sid; /* the hyper cube in which a slice is selected */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t * start_ptr;
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t * stride_ptr;
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t * count_ptr;
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t * block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert( 0 < small_rank );
+ HDassert( small_rank <= large_rank );
+ HDassert( large_rank <= SS_DR_MAX_RANK );
+ HDassert( 0 <= offset );
+ HDassert( offset < large_rank );
+ HDassert( edge_size > 0 );
+ HDassert( edge_size <= 1000 );
+
+ sprintf(test_desc_0,
+ "\tn-cube slice through m-cube (n <= m) test %d.\n",
+ test_num);
+ MESSAGE(7, (test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ sprintf(test_desc_1,
+ "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n",
+ small_rank, large_rank, offset,
+ (int)dim_selected[0],
+ (int)dim_selected[1],
+ (int)dim_selected[2],
+ (int)dim_selected[3],
+ (int)dim_selected[4]);
+ MESSAGE(7, (test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for(i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+ /* set up start, stride, count, and block for the hyperslab selection */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ stride[i] = 2 * edge_size; /* a bit silly in this case */
+ count[i] = 1;
+ if(dim_selected[i]) {
+ start[i] = 0;
+ block[i] = edge_size;
+ } /* end if */
+ else {
+ start[i] = (hsize_t)offset;
+ block[i] = 1;
+ } /* end else */
+ } /* end for */
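+
+    /* For example, with edge_size == 10, small_rank == 2, large_rank == 3,
+     * offset == 1, and only the two fastest changing dimensions selected,
+     * the loop above sets up a hyperslab covering the full 10 x 10 plane
+     * at index 1 in the slowest changing dimension of the large cube --
+     * which should compare as having the same shape as the fully selected
+     * 10 x 10 square.
+     */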
+
+ /* since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
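+
+    /* For example, with SS_DR_MAX_RANK == 5 and large_rank == 3, i is set
+     * to 2 below, so start_ptr points at start[2] and H5Sselect_hyperslab()
+     * sees 3-element arrays covering exactly the 3 dimensions of the large
+     * data space.
+     */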
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert(i >= 0);
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+
+ /* select the hyper slab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET,
+ start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* setup is done -- run the test: */
+ check = H5S_select_shape_same_test(n_cube_0_sid, n_cube_1_sid);
+ VERIFY(check, expected_result, "test_shape_same_dr__full_space_vs_slice");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__full_space_vs_slice() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__run_full_space_vs_slice_tests():
+**
+**	Run the test_shape_same_dr__full_space_vs_slice() test
+** over a variety of ranks and offsets.
+**
+** At present, we test H5S_select_shape_same() with
+** fully selected 1, 2, 3, and 4 cubes as one parameter, and
+**	1, 2, 3, and 4 dimensional slices through an n-cube of rank
+** no more than 5 (and at least the rank of the slice).
+** We stop at rank 5, as Quincey suggested that it would be
+** sufficient.
+**
+**	All the n-cubes will have edges of the same length, so
+** H5S_select_shape_same() should return true iff:
+**
+**	1) the rank of the fully selected n cube equals the
+** number of dimensions selected in the slice through the
+** m-cube (m >= n).
+**
+** 2) The dimensions selected in the slice through the m-cube
+**         are the dimensions with the most quickly changing
+** indices.
+**
+****************************************************************/
+static void
+test_shape_same_dr__run_full_space_vs_slice_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+ hsize_t edge_size = 10;
+
+ for(large_rank = 1; large_rank <= 5; large_rank++) {
+ for(small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if(v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if(w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if(x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if(y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if(z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while((i < small_rank) && expected_result) {
+ if(!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ while((i < large_rank) && expected_result) {
+ if(dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
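+
+                            /* For example, with small_rank == 2 and
+                             * large_rank == 4, expected_result is TRUE
+                             * only if dim_selected[3] and dim_selected[4]
+                             * (the two fastest changing dimensions) are
+                             * TRUE while dim_selected[1] and
+                             * dim_selected[2] are FALSE; dim_selected[0]
+                             * is not examined.
+                             */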
+
+
+ /* everything is set up -- run the tests */
+
+ test_shape_same_dr__full_space_vs_slice
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ 0,
+ edge_size,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__full_space_vs_slice
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ large_rank / 2,
+ edge_size,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__full_space_vs_slice
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ large_rank - 1,
+ edge_size,
+ dim_selected,
+ expected_result
+ );
+
+ z++;
+ } while((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while((v < 2) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+
+} /* test_shape_same_dr__run_full_space_vs_slice_tests() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__checkerboard(): Tests selection of a
+** "checker board" subset of a full n-cube data space vs
+**	a "checker board" n-dimensional slice of an m-cube (m > n)
+** in a call to H5S_select_shape_same().
+**
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5S_select_shape_same() should always return FALSE if
+** they don't).
+**
+** Per Quincey's suggestion, only test up to 5 dimensional
+** spaces.
+**
+****************************************************************/
+static void
+test_shape_same_dr__checkerboard(int test_num,
+ int small_rank,
+ int large_rank,
+ int offset,
+ hsize_t edge_size,
+ hsize_t checker_size,
+ hbool_t dim_selected[],
+ hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[128];
+ int i;
+ int dims_selected = 0;
+ hid_t n_cube_0_sid; /* the checker board selected
+ * hyper cube
+ */
+ hid_t n_cube_1_sid; /* the hyper cube in which a
+ * checkerboard slice is selected
+ */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t base_start[2];
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t * start_ptr;
+ hsize_t base_stride[2];
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t * stride_ptr;
+ hsize_t base_count[2];
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t * count_ptr;
+ hsize_t base_block[2];
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t * block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert( 0 < small_rank );
+ HDassert( small_rank <= large_rank );
+ HDassert( large_rank <= SS_DR_MAX_RANK );
+ HDassert( 0 < checker_size );
+ HDassert( checker_size <= edge_size );
+ HDassert( edge_size <= 1000 );
+ HDassert( 0 <= offset );
+ HDassert( offset < (int)edge_size );
+
+ for(i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++)
+ if(dim_selected[i] == TRUE)
+ dims_selected++;
+
+ HDassert( dims_selected >= 0 );
+ HDassert( dims_selected <= large_rank );
+
+ sprintf(test_desc_0,
+ "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n",
+ test_num);
+ MESSAGE(7, (test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ sprintf(test_desc_1,
+ "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n",
+ small_rank, large_rank,
+ (int)edge_size, (int)checker_size,
+ offset,
+ (int)dim_selected[0],
+ (int)dim_selected[1],
+ (int)dim_selected[2],
+ (int)dim_selected[3],
+ (int)dim_selected[4],
+ dims_selected);
+ MESSAGE(7, (test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for(i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+ /* Select a "checkerboard" pattern in the small n-cube.
+ *
+ * In the 1-D case, the "checkerboard" would look like this:
+ *
+ * * * - - * * - - * *
+ *
+ * and in the 2-D case, it would look like this:
+ *
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ * - - * * - - * * - -
+ * - - * * - - * * - -
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ * - - * * - - * * - -
+ * - - * * - - * * - -
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ *
+ * In both cases, asterisks indicate selected elements,
+ * and dashes indicate unselected elements.
+ *
+ * 3-D and 4-D ascii art is somewhat painful, so I'll
+ * leave those selections to your imagination. :-)
+ *
+ * Note, that since the edge_size and checker_size are
+ * parameters that are passed in, the selection need
+ * not look exactly like the selection shown above.
+ * At present, the function allows checker sizes that
+ * are not even divisors of the edge size -- thus
+ * something like the following is also possible:
+ *
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * - - - * * * - - - *
+ * - - - * * * - - - *
+ * - - - * * * - - - *
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * - - - * * * - - - *
+ *
+ * As the above pattern can't be selected in one
+ * call to H5Sselect_hyperslab(), and since the
+ * values in the start, stride, count, and block
+ * arrays will be repeated over all entries in
+ * the selected space case, and over all selected
+ * dimensions in the selected hyperslab case, we
+ * compute these values first and store them in
+ * in the base_start, base_stride, base_count,
+ * and base_block arrays.
+ */
+
+ base_start[0] = 0;
+ base_start[1] = checker_size;
+
+ base_stride[0] = 2 * checker_size;
+ base_stride[1] = 2 * checker_size;
+
+ /* Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count[0] = edge_size / (checker_size * 2);
+ if((edge_size % (checker_size * 2)) > 0)
+ base_count[0]++;
+
+ base_count[1] = (edge_size - checker_size) / (checker_size * 2);
+ if(((edge_size - checker_size) % (checker_size * 2)) > 0)
+ base_count[1]++;
+
+ base_block[0] = checker_size;
+ base_block[1] = checker_size;
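+
+    /* For example, with edge_size == 10 and checker_size == 3 (the uneven
+     * pattern pictured above), base_count[0] = 10 / 6 = 1 and is bumped to
+     * 2 since 10 % 6 == 4, while base_count[1] = 7 / 6 = 1 and is bumped
+     * to 2 since 7 % 6 == 1.
+     */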
+
+ /* now setup start, stride, count, and block arrays for
+ * the first call to H5Sselect_hyperslab().
+ */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = base_start[0];
+ stride[i] = base_stride[0];
+ count[i] = base_count[0];
+ block[i] = base_block[0];
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_SET,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* If small_rank == 1, or if edge_size == checker_size, we
+     * are done, as either there is no added dimension in which
+     * to place offset selected "checkers", or the hyperslab is
+     * completely occupied by one "checker".
+     *
+     * Otherwise, set up start, stride, count and block, and
+     * make the additional selection.
+     */
+
+ if((small_rank > 1) && (checker_size < edge_size)) {
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = base_start[1];
+ stride[i] = base_stride[1];
+ count[i] = base_count[1];
+ block[i] = base_block[1];
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+
+    /* Weirdness alert:
+     *
+     * Somehow, it seems that selections can extend beyond the
+ * boundaries of the target data space -- hence the following
+ * code to manually clip the selection back to the data space
+ * proper.
+ */
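+
+    /* The H5S_SELECT_AND selection below intersects the checkerboard with
+     * a single block covering indices 0 through edge_size - 1 in every
+     * dimension, discarding any part of the selection that lies outside
+     * the data space.
+     */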
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = 0;
+ stride[i] = edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+
+ /* Now select the checkerboard selection in the (possibly larger) n-cube.
+ *
+ * Since we have already calculated the base start, stride, count,
+ * and block, re-use the values in setting up start, stride, count,
+ * and block.
+ */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ if(dim_selected[i]) {
+ start[i] = base_start[0];
+ stride[i] = base_stride[0];
+ count[i] = base_count[0];
+ block[i] = base_block[0];
+ } /* end if */
+ else {
+ start[i] = (hsize_t)offset;
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+ } /* end else */
+ } /* end for */
+
+ /* Since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert( i >= 0 );
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+ /* select the hyper slab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET,
+ start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* As before, if the number of dimensions selected is less than or
+ * equal to 1, or if edge_size == checker_size, we are done, as
+ * either there is no added dimension in which to place offset selected
+ * "checkers", or the hyperslab is completely occupied by one
+ * "checker".
+ *
+ * Otherwise, set up start, stride, count and block, and
+ * make the additional selection.
+ */
+ if((dims_selected > 1) && (checker_size < edge_size)) {
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ if(dim_selected[i]) {
+ start[i] = base_start[1];
+ stride[i] = base_stride[1];
+ count[i] = base_count[1];
+ block[i] = base_block[1];
+ } /* end if */
+ else {
+ start[i] = (hsize_t)offset;
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+ } /* end else */
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR,
+ start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+
+
+    /* Weirdness alert:
+ *
+ * Again, it seems that selections can extend beyond the
+ * boundaries of the target data space -- hence the following
+ * code to manually clip the selection back to the data space
+ * proper.
+ */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = 0;
+ stride[i] = edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the test: */
+ check = H5S_select_shape_same_test(n_cube_0_sid, n_cube_1_sid);
+ VERIFY(check, expected_result, "test_shape_same_dr__checkerboard");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__checkerboard() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__run_checkerboard_tests():
+**
+** In this set of tests, we test H5S_select_shape_same()
+** with a "checkerboard" selection of 1, 2, 3, and 4 cubes as
+** one parameter, and 1, 2, 3, and 4 dimensional checkerboard
+**	slices through an n-cube of rank no more than 5 (and at
+** least the rank of the slice).
+**
+**	All the n-cubes will have edges of the same length, so
+** H5S_select_shape_same() should return true iff:
+**
+** 1) the rank of the n cube equals the number of dimensions
+** selected in the checker board slice through the m-cube
+** (m >= n).
+**
+** 2) The dimensions selected in the checkerboard slice
+** through the m-cube are the dimensions with the most
+** quickly changing indices.
+**
+****************************************************************/
+static void
+test_shape_same_dr__run_checkerboard_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+
+ for(large_rank = 1; large_rank <= 5; large_rank++) {
+ for(small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if(v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if(w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if(x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if(y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if(z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while((i < small_rank) && expected_result) {
+ if(!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ while((i < large_rank) && expected_result) {
+ if(dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+
+ /* everything is set up -- run the tests */
+
+ /* run test with edge size 16, checker
+ * size 1, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 0,
+ /* edge_size */ 16,
+ /* checker_size */ 1,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 5,
+ /* edge_size */ 16,
+ /* checker_size */ 1,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 15,
+ /* edge_size */ 16,
+ /* checker_size */ 1,
+ dim_selected,
+ expected_result
+ );
+
+
+ /* run test with edge size 10, checker
+ * size 2, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 0,
+ /* edge_size */ 10,
+ /* checker_size */ 2,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 5,
+ /* edge_size */ 10,
+ /* checker_size */ 2,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 9,
+ /* edge_size */ 10,
+ /* checker_size */ 2,
+ dim_selected,
+ expected_result
+ );
+
+
+ /* run test with edge size 10, checker
+ * size 3, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 0,
+ /* edge_size */ 10,
+ /* checker_size */ 3,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 5,
+ /* edge_size */ 10,
+ /* checker_size */ 3,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 9,
+ /* edge_size */ 10,
+ /* checker_size */ 3,
+ dim_selected,
+ expected_result
+ );
+
+
+ /* run test with edge size 8, checker
+ * size 8, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 0,
+ /* edge_size */ 8,
+ /* checker_size */ 8,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 4,
+ /* edge_size */ 8,
+ /* checker_size */ 8,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__checkerboard
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* offset */ 7,
+ /* edge_size */ 8,
+ /* checker_size */ 8,
+ dim_selected,
+ expected_result
+ );
+
+ z++;
+ } while((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while((v < 2) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+
+} /* test_shape_same_dr__run_checkerboard_tests() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__irregular():
+**
+** Tests selection of an "irregular" subset of a full
+** n-cube data space vs an identical "irregular" subset
+**	of an n-dimensional slice of an m-cube (m > n)
+** in a call to H5S_select_shape_same().
+**
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5S_select_shape_same() should always return FALSE if
+** they don't).
+**
+****************************************************************/
+static void
+test_shape_same_dr__irregular(int test_num,
+ int small_rank,
+ int large_rank,
+ int pattern_offset,
+ int slice_offset,
+ hbool_t dim_selected[],
+ hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[128];
+ int edge_size = 10;
+ int i;
+ int j;
+ int k;
+ int dims_selected = 0;
+ hid_t n_cube_0_sid; /* the hyper cube containing
+ * an irregular selection
+ */
+ hid_t n_cube_1_sid; /* the hyper cube in which a
+ * slice contains an irregular
+ * selection.
+ */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t start_0[SS_DR_MAX_RANK] = { 2, 2, 2, 2, 5};
+ hsize_t stride_0[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_0[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t block_0[SS_DR_MAX_RANK] = { 2, 2, 2, 2, 3};
+
+ hsize_t start_1[SS_DR_MAX_RANK] = { 2, 2, 2, 5, 2};
+ hsize_t stride_1[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_1[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t block_1[SS_DR_MAX_RANK] = { 2, 2, 2, 3, 2};
+
+ hsize_t start_2[SS_DR_MAX_RANK] = { 2, 2, 5, 2, 2};
+ hsize_t stride_2[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_2[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t block_2[SS_DR_MAX_RANK] = { 2, 2, 3, 2, 2};
+
+ hsize_t start_3[SS_DR_MAX_RANK] = { 2, 5, 2, 2, 2};
+ hsize_t stride_3[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_3[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t block_3[SS_DR_MAX_RANK] = { 2, 3, 2, 2, 2};
+
+ hsize_t start_4[SS_DR_MAX_RANK] = { 5, 2, 2, 2, 2};
+ hsize_t stride_4[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_4[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t block_4[SS_DR_MAX_RANK] = { 3, 2, 2, 2, 2};
+
+ hsize_t clip_start[SS_DR_MAX_RANK] = { 0, 0, 0, 0, 0};
+ hsize_t clip_stride[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t clip_count[SS_DR_MAX_RANK] = { 1, 1, 1, 1, 1};
+ hsize_t clip_block[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+
+
+ hsize_t *(starts[SS_DR_MAX_RANK]) =
+ {start_0, start_1, start_2, start_3, start_4};
+ hsize_t *(strides[SS_DR_MAX_RANK]) =
+ {stride_0, stride_1, stride_2, stride_3, stride_4};
+ hsize_t *(counts[SS_DR_MAX_RANK]) =
+ {count_0, count_1, count_2, count_3, count_4};
+ hsize_t *(blocks[SS_DR_MAX_RANK]) =
+ {block_0, block_1, block_2, block_3, block_4};
+
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t * start_ptr;
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t * stride_ptr;
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t * count_ptr;
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t * block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert( 0 < small_rank );
+ HDassert( small_rank <= large_rank );
+ HDassert( large_rank <= SS_DR_MAX_RANK );
+ HDassert( 9 <= edge_size );
+ HDassert( edge_size <= 1000 );
+ HDassert( 0 <= slice_offset );
+ HDassert( slice_offset < edge_size );
+ HDassert( -2 <= pattern_offset );
+ HDassert( pattern_offset <= 2 );
+
+ for(i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++)
+ if(dim_selected[i] == TRUE)
+ dims_selected++;
+
+ HDassert( dims_selected >= 0 );
+ HDassert( dims_selected <= large_rank );
+
+ sprintf(test_desc_0,
+ "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n",
+ test_num);
+ MESSAGE(7, (test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ sprintf(test_desc_1,
+ "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n",
+ small_rank, large_rank,
+ edge_size,
+ slice_offset, pattern_offset,
+ (int)dim_selected[0],
+ (int)dim_selected[1],
+ (int)dim_selected[2],
+ (int)dim_selected[3],
+ (int)dim_selected[4],
+ dims_selected);
+ MESSAGE(7, (test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for(i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = (hsize_t)edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+ /* Select an "irregular" pattern in the small n-cube. This
+     * pattern can be thought of as a set of four 3 x 2 x 2 x 2
+     * four dimensional prisms, each parallel to one of the
+     * axes and none of them intersecting any of the others.
+ *
+ * In the lesser dimensional cases, this 4D pattern is
+ * projected onto the lower dimensional space.
+ *
+ * In the 1-D case, the projection of the pattern looks
+ * like this:
+ *
+ * - - * * - * * * - -
+ * 0 1 2 3 4 5 6 7 8 9 x
+ *
+ * and in the 2-D case, it would look like this:
+ *
+ *
+ * y
+ * 9 - - - - - - - - - -
+ * 8 - - - - - - - - - -
+ * 7 - - * * - - - - - -
+ * 6 - - * * - - - - - -
+ * 5 - - * * - - - - - -
+ * 4 - - - - - - - - - -
+ * 3 - - * * - * * * - -
+ * 2 - - * * - * * * - -
+ * 1 - - - - - - - - - -
+ * 0 - - - - - - - - - -
+ * 0 1 2 3 4 5 6 7 8 9 x
+ *
+ * In both cases, asterisks indicate selected elements,
+ * and dashes indicate unselected elements.
+ *
+     * Note that in this case, since the edge size is fixed,
+     * the pattern does not change.  However, we do use the
+     * displacement parameter to allow it to be moved around
+     * within the n-cube or hyperslab.
+ */
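+
+    /* For example, when the small n-cube is 1 dimensional,
+     * H5Sselect_hyperslab() uses only the first element of each
+     * start/stride/count/block array: start_1[0] == 2 with
+     * block_1[0] == 2 selects elements 2 - 3, and start_4[0] == 5 with
+     * block_4[0] == 3 selects elements 5 - 7, which together account
+     * for the asterisks at indices 2, 3, 5, 6, and 7 in the 1-D
+     * diagram above.
+     */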
+
+ /* first, ensure that the small n-cube has no selection */
+ ret = H5Sselect_none(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* now, select the irregular pattern */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR,
+ starts[i], strides[i], counts[i], blocks[i]);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* finally, clip the selection to ensure that it lies fully
+ * within the n-cube.
+ */
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND,
+ clip_start, clip_stride, clip_count, clip_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+ /* Ensure that the large n-cube has no selection */
+    ret = H5Sselect_none(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+
+ /* Since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert( i >= 0 );
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+
+ /* Now select the irregular selection in the (possibly larger) n-cube.
+ *
+ * Basic idea is to project the pattern used in the smaller n-cube
+ * onto the dimensions selected in the larger n-cube, with the displacement
+ * specified.
+ */
+ for(i = 0; i < SS_DR_MAX_RANK; i++) {
+ j = 0;
+ for(k = 0; k < SS_DR_MAX_RANK; k++) {
+ if(dim_selected[k]) {
+ start[k] = (starts[i])[j] + (hsize_t)pattern_offset;
+ stride[k] = (strides[i])[j];
+ count[k] = (counts[i])[j];
+ block[k] = (blocks[i])[j];
+ j++;
+ } /* end if */
+ else {
+ start[k] = (hsize_t)slice_offset;
+ stride[k] = (hsize_t)(2 * edge_size);
+ count[k] = 1;
+ block[k] = 1;
+ } /* end else */
+ } /* end for */
+
+        /* select the hyperslab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR,
+ start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* it is possible that the selection extends beyond the data space.
+ * clip the selection to ensure that it doesn't.
+ */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND,
+ clip_start, clip_stride, clip_count, clip_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+
+ /* setup is done -- run the test: */
+ check = H5S_select_shape_same_test(n_cube_0_sid, n_cube_1_sid);
+    VERIFY(check, expected_result, "test_shape_same_dr__irregular");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__irregular() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr__run_irregular_tests():
+**
+** In this set of tests, we test H5S_select_shape_same()
+**      with an "irregular" subselection of 1, 2, 3, and 4 dimensional
+**      cubes as one parameter, and irregular subselections of 1, 2, 3,
+**      and 4 dimensional slices through an n-cube of rank no more
+**      than 5 (and at least the rank of the slice) as the other.
+** Note that the "irregular" selection may be offset between
+** the n-cube and the slice.
+**
+** All the irregular selections will be identical (modulo rank)
+** so H5S_select_shape_same() should return true iff:
+**
+**      1) the rank of the n-cube equals the number of dimensions
+** selected in the irregular slice through the m-cube
+** (m >= n).
+**
+** 2) The dimensions selected in the irregular slice
+** through the m-cube are the dimensions with the most
+** quickly changing indices.
+**
+****************************************************************/
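+/* For example, with small_rank == 2 and large_rank == 4, the expected
+ * result computed below is TRUE only when dim_selected[3] and
+ * dim_selected[4] are TRUE and dim_selected[1] and dim_selected[2] are
+ * FALSE; dim_selected[0] is ignored, as it lies outside the rank of the
+ * larger n-cube.  Every other combination must yield FALSE.
+ */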
+static void
+test_shape_same_dr__run_irregular_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+
+ for(large_rank = 1; large_rank <= 5; large_rank++) {
+ for(small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if(v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if(w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if(x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if(y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if(z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while((i < small_rank) && expected_result) {
+ if(!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ while((i < large_rank) && expected_result) {
+ if(dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+
+ /* everything is set up -- run the tests */
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 0,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 4,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 9,
+ dim_selected,
+ expected_result
+ );
+
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 0,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 6,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 9,
+ dim_selected,
+ expected_result
+ );
+
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 0,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 5,
+ dim_selected,
+ expected_result
+ );
+
+ test_shape_same_dr__irregular
+ (
+ test_num++,
+ small_rank,
+ large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 9,
+ dim_selected,
+ expected_result
+ );
+
+ z++;
+ } while((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while((v < 2 ) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+
+} /* test_shape_same_dr__run_irregular_tests() */
+
+
+/****************************************************************
+**
+** test_shape_same_dr(): Tests selections on dataspace with
+** different ranks, to verify that "shape same" routine
+** is now handling this case correctly.
+**
+****************************************************************/
+static void
+test_shape_same_dr(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Same Shape/Different Rank Comparisons\n"));
+
+
+ /* first run some smoke checks */
+ test_shape_same_dr__smoke_check_1();
+ test_shape_same_dr__smoke_check_2();
+ test_shape_same_dr__smoke_check_3();
+ test_shape_same_dr__smoke_check_4();
+
+
+ /* now run more intensive tests. */
+ test_shape_same_dr__run_full_space_vs_slice_tests();
+ test_shape_same_dr__run_checkerboard_tests();
+ test_shape_same_dr__run_irregular_tests();
+
+} /* test_shape_same_dr() */
+
/****************************************************************
**
@@ -8241,6 +13118,14 @@ test_select(void)
test_select_hyper_contig3(H5T_STD_U16LE,plist_id); /* Test yet more contiguous hyperslab selection cases */
test_select_hyper_contig3(H5T_STD_U16BE,H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */
test_select_hyper_contig3(H5T_STD_U16BE,plist_id); /* Test yet more contiguous hyperslab selection cases */
+ test_select_hyper_contig_dr(H5T_STD_U16LE, H5P_DEFAULT);
+ test_select_hyper_contig_dr(H5T_STD_U16LE, plist_id);
+ test_select_hyper_contig_dr(H5T_STD_U16BE, H5P_DEFAULT);
+ test_select_hyper_contig_dr(H5T_STD_U16BE, plist_id);
+ test_select_hyper_checker_board_dr(H5T_STD_U16LE, H5P_DEFAULT);
+ test_select_hyper_checker_board_dr(H5T_STD_U16LE, plist_id);
+ test_select_hyper_checker_board_dr(H5T_STD_U16BE, H5P_DEFAULT);
+ test_select_hyper_checker_board_dr(H5T_STD_U16BE, plist_id);
test_select_hyper_copy(); /* Test hyperslab selection copying code */
test_select_point_copy(); /* Test point selection copying code */
test_select_hyper_offset(); /* Test selection offset code with hyperslabs */
@@ -8320,6 +13205,9 @@ test_select(void)
/* Test "same shape" routine */
test_shape_same();
+ /* Test "same shape" routine for selections of different rank */
+ test_shape_same_dr();
+
/* Test "re-build" routine */
test_space_rebuild();
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 3c39989..6e76e88 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -32,7 +32,8 @@ check_PROGRAMS = $(TEST_PROG_PARA)
check_SCRIPTS= $(TEST_SCRIPT)
testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
- t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+ t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
+ t_rank_projection.c
# The tests all depend on the hdf5 library and the test library
LDADD = $(LIBH5TEST) $(LIBHDF5)
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index 02fea3d..4f92776 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -90,7 +90,8 @@ t_posix_compliant_DEPENDENCIES = $(LIBH5TEST) $(LIBHDF5)
am_testphdf5_OBJECTS = testphdf5.$(OBJEXT) t_dset.$(OBJEXT) \
t_file.$(OBJEXT) t_mdset.$(OBJEXT) t_ph5basic.$(OBJEXT) \
t_coll_chunk.$(OBJEXT) t_span_tree.$(OBJEXT) \
- t_chunk_alloc.$(OBJEXT) t_filter_read.$(OBJEXT)
+ t_chunk_alloc.$(OBJEXT) t_filter_read.$(OBJEXT) \
+ t_rank_projection.$(OBJEXT)
testphdf5_OBJECTS = $(am_testphdf5_OBJECTS)
testphdf5_LDADD = $(LDADD)
testphdf5_DEPENDENCIES = $(LIBH5TEST) $(LIBHDF5)
@@ -390,7 +391,8 @@ TEST_PROG_PARA = t_mpi t_posix_compliant testphdf5 t_cache t_pflush1 t_pflush2
TEST_SCRIPT_PARA = testph5.sh
check_SCRIPTS = $(TEST_SCRIPT)
testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
- t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c
+ t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
+ t_rank_projection.c
# The tests all depend on the hdf5 library and the test library
@@ -494,6 +496,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_pflush2.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_ph5basic.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_posix_compliant.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_rank_projection.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_span_tree.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/testphdf5.Po@am__quote@
diff --git a/testpar/t_rank_projection.c b/testpar/t_rank_projection.c
new file mode 100644
index 0000000..bbc0a1f
--- /dev/null
+++ b/testpar/t_rank_projection.c
@@ -0,0 +1,4041 @@
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+	This program will test independent and collective reads and writes between
+	selections of different rank that nonetheless are deemed to have the
+	same shape by H5Sselect_shape_same().
+ */
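+
+/* For example, a 1 x 10 x 10 hyperslab selection in a rank 3 dataspace
+ * and a 10 x 10 selection in a rank 2 dataspace are regarded as having
+ * the same shape, so data can be moved directly between them -- this is
+ * the case these tests exercise.
+ */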
+
+#define H5S_PACKAGE /*suppress error about including H5Spkg */
+
+
+#include "hdf5.h"
+#include "H5private.h"
+#include "testphdf5.h"
+#include "H5Spkg.h" /* Dataspaces */
+
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hyperslab_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ *		the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define PAR_SS_DR_MAX_RANK 5
+#define CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+void
+contig_hyperslab_dr_pio_test__run_test(const int test_num,
+ const int edge_size,
+ const int chunk_edge_size,
+ const int small_rank,
+ const int large_rank,
+ const hbool_t use_collective_io,
+ const hid_t dset_type)
+{
+ const char *fcnName = "contig_hyperslab_dr_pio_test()";
+ const char *filename;
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l, m, n;
+ int mrc;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int start_index;
+ int stop_index;
+ const int test_max_rank = 5; /* must update code if this changes */
+ uint32_t expected_value;
+ uint32_t * small_ds_buf_0 = NULL;
+ uint32_t * small_ds_buf_1 = NULL;
+ uint32_t * small_ds_buf_2 = NULL;
+ uint32_t * small_ds_slice_buf = NULL;
+ uint32_t * large_ds_buf_0 = NULL;
+ uint32_t * large_ds_buf_1 = NULL;
+ uint32_t * large_ds_buf_2 = NULL;
+ uint32_t * large_ds_slice_buf = NULL;
+ uint32_t * ptr_0;
+ uint32_t * ptr_1;
+ uint32_t * ptr_2;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid;
+ hid_t small_ds_slice_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid;
+ hid_t file_large_ds_process_slice_sid;
+ hid_t mem_large_ds_process_slice_sid;
+ hid_t large_ds_slice_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ size_t small_ds_size = 1;
+ size_t small_ds_slice_size = 1;
+ size_t large_ds_size = 1;
+ size_t large_ds_slice_size = 1;
+ hsize_t dims[PAR_SS_DR_MAX_RANK];
+ hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ hsize_t * start_ptr = NULL;
+ hsize_t * stride_ptr = NULL;
+ hsize_t * count_ptr = NULL;
+ hsize_t * block_ptr = NULL;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert( edge_size >= 6 );
+ HDassert( edge_size >= chunk_edge_size );
+ HDassert( ( chunk_edge_size == 0 ) || ( chunk_edge_size >= 3 ) );
+ HDassert( 1 < small_rank );
+ HDassert( small_rank < large_rank );
+ HDassert( large_rank <= test_max_rank );
+ HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert( mpi_size >= 1 );
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ for ( i = 0; i < small_rank - 1; i++ )
+ {
+ small_ds_size *= (size_t)edge_size;
+ small_ds_slice_size *= (size_t)edge_size;
+ }
+ small_ds_size *= (size_t)(mpi_size + 1);
+
+
+ for ( i = 0; i < large_rank - 1; i++ ) {
+
+ large_ds_size *= (size_t)edge_size;
+ large_ds_slice_size *= (size_t)edge_size;
+ }
+ large_ds_size *= (size_t)(mpi_size + 1);
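+
+    /* For example, with edge_size == 10, small_rank == 3, large_rank == 5,
+     * and mpi_size == 4, the loops above give small_ds_slice_size == 100,
+     * small_ds_size == 500, large_ds_slice_size == 10000, and
+     * large_ds_size == 50000 -- each data set holds one slice per process
+     * plus one extra slice.
+     */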
+
+
+ /* set up the start, stride, count, and block pointers */
+ start_ptr = &(start[PAR_SS_DR_MAX_RANK - large_rank]);
+ stride_ptr = &(stride[PAR_SS_DR_MAX_RANK - large_rank]);
+ count_ptr = &(count[PAR_SS_DR_MAX_RANK - large_rank]);
+ block_ptr = &(block[PAR_SS_DR_MAX_RANK - large_rank]);
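+
+    /* For example, if large_rank == 3, the pointers above reference
+     * start[2], stride[2], count[2], and block[2], so the calls to
+     * H5Sselect_hyperslab() made through them see only the last three
+     * entries of each five element scratch array -- the entries that
+     * correspond to the dimensions of the rank 3 dataspaces.
+     */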
+
+
+ /* Allocate buffers */
+ small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
+
+ small_ds_slice_buf =
+ (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_slice_size);
+ VRFY((small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
+
+ large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
+
+ large_ds_slice_buf =
+ (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_slice_size);
+ VRFY((large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = small_ds_buf_0;
+ ptr_1 = small_ds_buf_1;
+ ptr_2 = small_ds_buf_2;
+
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+ *ptr_2 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ ptr_2++;
+ }
+
+ ptr_0 = small_ds_slice_buf;
+
+ for ( i = 0; i < (int)small_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+ ptr_0 = large_ds_buf_0;
+ ptr_1 = large_ds_buf_1;
+ ptr_2 = large_ds_buf_2;
+
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+ *ptr_2 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ ptr_2++;
+ }
+
+ ptr_0 = large_ds_slice_buf;
+
+ for ( i = 0; i < (int)large_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+ filename = (const char *)GetTestParameters();
+ HDassert( filename != NULL );
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ if ( MAINPROCESS ) {
+
+ HDfprintf(stdout, "%d: test num = %d.\n", mpi_rank, test_num);
+ HDfprintf(stdout, "%d: mpi_size = %d.\n", mpi_rank, mpi_size);
+ HDfprintf(stdout,
+ "%d: small/large rank = %d/%d, use_collective_io = %d.\n",
+ mpi_rank, small_rank, large_rank, (int)use_collective_io);
+ HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n",
+ mpi_rank, edge_size, chunk_edge_size);
+ HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n",
+ mpi_rank, (int)small_ds_size, (int)large_ds_size);
+ HDfprintf(stdout, "%d: filename = %s.\n", mpi_rank, filename);
+ }
+#endif
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+
+ /* setup dims: */
+ dims[0] = (int)(mpi_size + 1);
+ dims[1] = dims[2] = dims[3] = dims[4] = edge_size;
+
+
+ /* Create small ds dataspaces */
+ full_mem_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+    VRFY((full_mem_small_ds_sid >= 0),
+ "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ full_file_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+    VRFY((full_file_small_ds_sid >= 0),
+ "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ mem_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+    VRFY((mem_small_ds_sid >= 0),
+ "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ file_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+    VRFY((file_small_ds_sid >= 0),
+ "H5Screate_simple() file_small_ds_sid succeeded");
+
+ small_ds_slice_sid = H5Screate_simple(small_rank - 1, &(dims[1]), NULL);
+    VRFY((small_ds_slice_sid >= 0),
+ "H5Screate_simple() small_ds_slice_sid succeeded");
+
+
+ /* Create large ds dataspaces */
+ full_mem_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+    VRFY((full_mem_large_ds_sid >= 0),
+ "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ full_file_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((full_file_large_ds_sid != FAIL),
+ "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ mem_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((mem_large_ds_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ file_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((file_large_ds_sid != FAIL),
+ "H5Screate_simple() file_large_ds_sid succeeded");
+
+ mem_large_ds_process_slice_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((mem_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
+
+ file_large_ds_process_slice_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((file_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() file_large_ds_process_slice_sid succeeded");
+
+
+ large_ds_slice_sid = H5Screate_simple(large_rank - 1, &(dims[1]), NULL);
+    VRFY((large_ds_slice_sid >= 0),
+ "H5Screate_simple() large_ds_slice_sid succeeded");
+
+
+ /* Select the entire extent of the full small ds, and ds slice dataspaces */
+ ret = H5Sselect_all(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(small_ds_slice_sid) succeeded");
+
+
+ /* Select the entire extent of the full large ds, and ds slice dataspaces */
+ ret = H5Sselect_all(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(large_ds_slice_sid) succeeded");
+
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if ( chunk_edge_size > 0 ) {
+
+ chunk_dims[0] = mpi_size + 1;
+ chunk_dims[1] = chunk_dims[2] =
+ chunk_dims[3] = chunk_dims[4] = chunk_edge_size;
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((small_ds_dcpl_id >= 0), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, small_rank, chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((large_ds_dcpl_id >= 0), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, large_rank, chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type,
+ file_small_ds_sid, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+    VRFY((small_dataset >= 0), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type,
+ file_large_ds_sid, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+    VRFY((large_dataset >= 0), "H5Dcreate2() large_dataset succeeded");
+
+
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ if ( ! use_collective_io ) {
+
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,
+ H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
+ }
+
+ /* setup selection to write initial data to the small and large data sets */
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
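+
+    /* For example, with edge_size == 10 and small_rank == 3, the settings
+     * above give each process the single 10 x 10 slice of the small data
+     * set at index mpi_rank in dimension 0 (with count == 1, the stride
+     * values have no effect), and the MAINPROCESS ORs in one additional
+     * slice at index mpi_size below.
+     */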
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = mpi_size;
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
+ }
+
+
+ /* write the initial value of the small data set to file */
+ ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid,
+ xfer_plist, small_ds_buf_0);
+
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
+
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_small_ds_sid,
+ full_file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+
+ /* verify that the correct data was written to the small data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "small ds init data good.");
+
+
+
+ /* setup selections for writing initial data to the large data set */
+
+ start[0] = mpi_rank;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
+
+ /* In passing, setup the process slice data spaces as well */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_process_slice_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0),
+         "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_process_slice_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0),
+         "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded");
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = mpi_size;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
+ }
+
+
+ /* write the initial value of the large data set to file */
+ ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid,
+ xfer_plist, large_ds_buf_0);
+ if ( ret < 0 ) H5Eprint(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
+
+
+    /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_large_ds_sid,
+ full_file_large_ds_sid,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+
+    /* verify that the correct data was written to the large data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "large ds init data good.");
+
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5S_select_shape_same() views as being of the
+ * same shape.
+ *
+     * Start by reading a (small_rank - 1)-D slice from the on disk large cube,
+ * and verifying that the data read is correct. Verify that
+ * H5S_select_shape_same() returns true on the memory and file selections.
+ */
+
+ /* We have already done a H5Sselect_all() on the data space
+ * small_ds_slice_sid, so no need to call H5Sselect_all() again.
+ */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
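+
+    /* For example, with PAR_SS_DR_MAX_RANK == 5, small_rank == 3, and
+     * edge_size == 10, the loop above yields block[] == {1, 1, 1, 10, 10}:
+     * a single 10 x 10 square in the two fastest changing dimensions,
+     * positioned by the start[] values assigned in the loops below.
+     */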
+
+ /* zero out the buffer we will be reading into */
+ ptr_0 = small_ds_slice_buf;
+
+ for ( i = 0; i < (int)small_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s reading slices from big cube on disk into small cube slice.\n",
+ fcnName);
+#endif
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+     * mpi_rank, and don't iterate over it.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ VRFY((ret != FAIL),
+                         "H5Sselect_hyperslab(file_large_ds_sid) succeeded");
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_ds_slice_sid,
+ file_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* Read selection from disk */
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2],
+ (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
+ fcnName,
+ H5Sget_simple_extent_ndims(small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(file_large_ds_sid));
+#endif
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ small_ds_slice_sid,
+ file_large_ds_sid,
+ xfer_plist,
+ small_ds_slice_buf);
+                    VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = small_ds_slice_buf;
+ expected_value =
+ (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+
+ for ( n = 0; n < (int)small_ds_slice_size; n++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = 0; /* zero data for next use */
+
+ ptr_1++;
+ expected_value++;
+ }
+
+ VRFY((mis_match == FALSE),
+ "small slice read from large ds data good.");
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s reading slices of on disk small data set into slices of big data set.\n",
+ fcnName);
+#endif
+
+ /* zero out the in memory large ds */
+ ptr_1 = large_ds_buf_1;
+ for ( n = 0; n < (int)large_ds_size; n++ ) {
+
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't itterate
+     * we set the most slowly changing index to mpi_rank, and don't iterate
+ */
+
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ VRFY((ret != FAIL),
+ "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_ds_sid,
+ mem_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* Read selection from disk */
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2],
+ (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(file_small_ds_sid));
+#endif
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ large_ds_buf_1);
+                    VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = large_ds_buf_1;
+ expected_value = mpi_rank * small_ds_slice_size;
+ start_index =
+ (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+ stop_index = start_index + (int)small_ds_slice_size - 1;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)large_ds_size );
+
+ for ( n = 0; n < (int)large_ds_size; n++ ) {
+
+ if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+
+ } else {
+
+ if ( *ptr_1 != 0 ) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE),
+                         "small ds slice read into large ds buffer data good.");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5S_select_shape_same() views as being of the same shape.
+ *
+     * Start by writing (small_rank - 1)-D slices from the in memory large data
+ * set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5S_select_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+ /* zero out the in memory small ds */
+ ptr_1 = small_ds_buf_1;
+ for ( n = 0; n < (int)small_ds_size; n++ ) {
+
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s writing slices from big ds to slices of small ds on disk.\n",
+ fcnName);
+#endif
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+     * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ VRFY((ret >= 0),
+ "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory slice through the cube selection and the
+ * on disk full square selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_ds_sid,
+ mem_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
+
+
+ /* write the slice from the in memory large data set to the
+ * slice of the on disk small dataset. */
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2],
+ (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(file_small_ds_sid));
+#endif
+ ret = H5Dwrite(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ large_ds_buf_0);
+                    VRFY((ret >= 0), "H5Dwrite() slice to small ds succeeded.");
+
+
+ /* read the on disk square into memory */
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ expected_value =
+ (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+
+ start_index = mpi_rank * small_ds_slice_size;
+ stop_index = start_index + small_ds_slice_size - 1;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)small_ds_size );
+
+ for ( n = 0; n < (int)small_ds_size; n++ ) {
+
+ if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+
+ } else {
+
+ if ( *ptr_1 != 0 ) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE),
+ "small slice write from large ds data good.");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ /* select the slice of the in memory small data set associated with
+ * the process's mpi rank.
+ */
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write slices of the small data set to
+ * slices of the large data set.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+ /* zero out the in memory large ds */
+ ptr_1 = large_ds_buf_1;
+ for ( n = 0; n < (int)large_ds_size; n++ ) {
+
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s writing process slices of small ds to slices of large ds on disk.\n",
+ fcnName);
+#endif
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+             * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+                /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(large_dataset,
+ H5T_NATIVE_UINT32,
+ large_ds_slice_sid,
+ file_large_ds_process_slice_sid,
+ xfer_plist,
+ large_ds_buf_2);
+                VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
+
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_SET,
+ start_ptr,
+ stride_ptr,
+ count_ptr,
+ block_ptr);
+ VRFY((ret != FAIL),
+ "H5Sselect_hyperslab() target large ds slice succeeded");
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5S_select_shape_same_test(mem_small_ds_sid,
+ file_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CONTIG_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2],
+ (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(file_large_ds_sid));
+#endif
+ ret = H5Dwrite(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_large_ds_sid,
+ xfer_plist,
+ small_ds_buf_0);
+ VRFY((ret != FAIL),
+ "H5Dwrite of small ds slice to large ds succeeded");
+
+
+                /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_process_slice_sid,
+ file_large_ds_process_slice_sid,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret != FAIL),
+ "H5Dread() of process slice of large ds succeeded");
+
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = large_ds_buf_1;
+ expected_value = (uint32_t)(mpi_rank) * small_ds_slice_size;
+
+
+ start_index = (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+ stop_index = start_index + (int)small_ds_slice_size - 1;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index < (int)large_ds_size );
+
+ for ( n = 0; n < (int)large_ds_size; n++ ) {
+
+ if ( ( n >= start_index ) && ( n <= stop_index ) ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+
+ expected_value++;
+
+ } else {
+
+ if ( *ptr_1 != 0 ) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out buffer for next test */
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE),
+ "small ds slice write to large ds slice data good.");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded");
+
+ ret = H5Sclose(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid);
+    VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded");
+
+
+ /* Close Datasets */
+ ret = H5Dclose(small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+
+ if ( small_ds_buf_0 != NULL ) HDfree(small_ds_buf_0);
+ if ( small_ds_buf_1 != NULL ) HDfree(small_ds_buf_1);
+ if ( small_ds_buf_2 != NULL ) HDfree(small_ds_buf_2);
+ if ( small_ds_slice_buf != NULL ) HDfree(small_ds_slice_buf);
+
+ if ( large_ds_buf_0 != NULL ) HDfree(large_ds_buf_0);
+ if ( large_ds_buf_1 != NULL ) HDfree(large_ds_buf_1);
+ if ( large_ds_buf_2 != NULL ) HDfree(large_ds_buf_2);
+ if ( large_ds_slice_buf != NULL ) HDfree(large_ds_slice_buf);
+
+ return;
+
+} /* contig_hyperslab_dr_pio_test__run_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hyperslab_dr_pio_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+contig_hyperslab_dr_pio_test(void)
+{
+ const char *fcnName = "contig_hyperslab_dr_pio_test()";
+ int test_num = 0;
+ int edge_size = 10;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int use_collective_io;
+ hid_t dset_type = H5T_STD_U32LE;
+
+ for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
+
+ for ( small_rank = 2; small_rank < large_rank; small_rank++ ) {
+
+ for ( use_collective_io = 0;
+ use_collective_io <= 1;
+ use_collective_io++ ) {
+
+ chunk_edge_size = 0;
+ contig_hyperslab_dr_pio_test__run_test(test_num,
+ edge_size,
+ chunk_edge_size,
+ small_rank,
+ large_rank,
+ (hbool_t)use_collective_io,
+ dset_type);
+ test_num++;
+#if 1
+ chunk_edge_size = 5;
+ contig_hyperslab_dr_pio_test__run_test(test_num,
+ edge_size,
+ chunk_edge_size,
+ small_rank,
+ large_rank,
+ (hbool_t)use_collective_io,
+ dset_type);
+ test_num++;
+#endif
+ }
+ }
+ }
+
+ return;
+
+} /* contig_hyperslab_dr_pio_test() */
+
+
+/****************************************************************
+**
+** checker_board_hyperslab_dr_pio_test__select_checker_board():
+** Given a data space of tgt_rank, and dimensions:
+**
+** (mpi_size + 1), edge_size, ... , edge_size
+**
+** edge_size, and a checker_edge_size, select a checker
+**	board selection of a sel_rank (sel_rank <= tgt_rank)
+**	dimensional slice through the data space parallel to the
+**      sel_rank fastest changing indices, with origin (in the
+**	higher indices) as indicated by the start array.
+**
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum data space rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 10/8/09
+**
+****************************************************************/
+
+#define CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+checker_board_hyperslab_dr_pio_test__select_checker_board(
+ const int mpi_rank,
+ const hid_t tgt_sid,
+ const int tgt_rank,
+ const int edge_size,
+ const int checker_edge_size,
+ const int sel_rank,
+ hsize_t sel_start[])
+{
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName =
+ "checker_board_hyperslab_dr_pio_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int n_cube_offset;
+ int sel_offset;
+ const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
+
+ HDassert( edge_size >= 6 );
+ HDassert( 0 < checker_edge_size );
+ HDassert( checker_edge_size <= edge_size );
+ HDassert( 0 < sel_rank );
+ HDassert( sel_rank <= tgt_rank );
+ HDassert( tgt_rank <= test_max_rank );
+ HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert( sel_offset >= 0 );
+
+ n_cube_offset = test_max_rank - tgt_rank;
+ HDassert( n_cube_offset >= 0 );
+ HDassert( n_cube_offset <= sel_offset );
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n",
+ fcnName, mpi_rank, edge_size, checker_edge_size);
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
+ fcnName, mpi_rank, tgt_rank, n_cube_offset);
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count = edge_size / (checker_edge_size * 2);
+
+ if ( (edge_size % (checker_edge_size * 2)) > 0 ) {
+
+ base_count++;
+ }
+
+ offset_count = (edge_size - checker_edge_size) / (checker_edge_size * 2);
+
+ if ( ((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0 ) {
+
+ offset_count++;
+ }
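+
+ /* For example, with edge_size = 10 and checker_edge_size = 3,
+ * base_count = ceil(10 / 6) = 2 and offset_count = ceil((10 - 3) / 6) = 2
+ * -- i.e. the number of (possibly clipped) checkers along each axis for
+ * selections starting at offset 0 and at offset checker_edge_size
+ * respectively.
+ */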
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while ( i < n_cube_offset ) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while ( i < sel_offset ) {
+
+ start[i] = sel_start[i];
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while ( i < test_max_rank ) {
+
+ stride[i] = 2 * checker_edge_size;
+ block[i] = checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if ( 0 >= sel_offset ) {
+
+ if ( i == 0 ) {
+
+ start[0] = 0;
+ count[0] = base_count;
+
+ } else {
+
+ start[0] = checker_edge_size;
+ count[0] = offset_count;
+
+ }
+ }
+
+ j = 0;
+ do {
+ if ( 1 >= sel_offset ) {
+
+ if ( j == 0 ) {
+
+ start[1] = 0;
+ count[1] = base_count;
+
+ } else {
+
+ start[1] = checker_edge_size;
+ count[1] = offset_count;
+
+ }
+ }
+
+ k = 0;
+ do {
+ if ( 2 >= sel_offset ) {
+
+ if ( k == 0 ) {
+
+ start[2] = 0;
+ count[2] = base_count;
+
+ } else {
+
+ start[2] = checker_edge_size;
+ count[2] = offset_count;
+
+ }
+ }
+
+ l = 0;
+ do {
+ if ( 3 >= sel_offset ) {
+
+ if ( l == 0 ) {
+
+ start[3] = 0;
+ count[3] = base_count;
+
+ } else {
+
+ start[3] = checker_edge_size;
+ count[3] = offset_count;
+
+ }
+ }
+
+ m = 0;
+ do {
+ if ( 4 >= sel_offset ) {
+
+ if ( m == 0 ) {
+
+ start[4] = 0;
+ count[4] = base_count;
+
+ } else {
+
+ start[4] = checker_edge_size;
+ count[4] = offset_count;
+
+ }
+ }
+
+ if ( ((i + j + k + l + m) % 2) == 0 ) {
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
+ fcnName, mpi_rank, (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
+ fcnName, mpi_rank, i, j, k, l, m);
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3], (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
+ fcnName, mpi_rank, sel_rank);
+#endif
+
+ if ( first_selection ) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab
+ (
+ tgt_sid,
+ H5S_SELECT_SET,
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
+ &(block[n_cube_offset])
+ );
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+
+ } else {
+
+ ret = H5Sselect_hyperslab
+ (
+ tgt_sid,
+ H5S_SELECT_OR,
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
+ &(block[n_cube_offset])
+ );
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+
+ }
+ }
+
+ m++;
+
+ } while ( ( m <= 1 ) &&
+ ( 4 >= sel_offset ) );
+
+ l++;
+
+ } while ( ( l <= 1 ) &&
+ ( 3 >= sel_offset ) );
+
+ k++;
+
+ } while ( ( k <= 1 ) &&
+ ( 2 >= sel_offset ) );
+
+ j++;
+
+ } while ( ( j <= 1 ) &&
+ ( 1 >= sel_offset ) );
+
+
+ i++;
+
+ } while ( ( i <= 1 ) &&
+ ( 0 >= sel_offset ) );
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
+ fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the data space proper. */
+
+ for ( i = 0; i < test_max_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
+ fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* checker_board_hyperslab_dr_pio_test__select_checker_board() */
+
+
+/****************************************************************
+**
+** checker_board_hyperslab_dr_pio_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank - 1) and the supplied edge_size, with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
+
+#define CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+checker_board_hyperslab_dr_pio_test__verify_data(uint32_t * buf_ptr,
+ const int mpi_rank,
+ const int rank,
+ const int edge_size,
+ const int checker_edge_size,
+ uint32_t first_expected_val,
+ hbool_t buf_starts_in_checker)
+{
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ const char * fcnName =
+ "checker_board_hyperslab_dr_pio_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t * val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert( buf_ptr != NULL );
+ HDassert( 0 < rank );
+ HDassert( rank <= test_max_rank );
+ HDassert( edge_size >= 6 );
+ HDassert( 0 < checker_edge_size );
+ HDassert( checker_edge_size <= edge_size );
+ HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+ HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
+#endif
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do
+ {
+ if ( v >= checker_edge_size ) {
+
+ start_in_checker[0] = ! start_in_checker[0];
+ v = 0;
+ }
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do
+ {
+ if ( w >= checker_edge_size ) {
+
+ start_in_checker[1] = ! start_in_checker[1];
+ w = 0;
+ }
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do
+ {
+ if ( x >= checker_edge_size ) {
+
+ start_in_checker[2] = ! start_in_checker[2];
+ x = 0;
+ }
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do
+ {
+ if ( y >= checker_edge_size ) {
+
+ start_in_checker[3] = ! start_in_checker[3];
+ y = 0;
+ }
+
+ m = 0;
+ z = 0;
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+#endif
+ in_checker = start_in_checker[3];
+ do
+ {
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
+#endif
+ if ( z >= checker_edge_size ) {
+
+ in_checker = ! in_checker;
+ z = 0;
+ }
+
+ if ( in_checker ) {
+
+ if ( *val_ptr != expected_value ) {
+
+ good_data = FALSE;
+ }
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+
+ } else if ( *val_ptr != 0 ) {
+
+ good_data = FALSE;
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+
+ }
+
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ( ( rank >= (test_max_rank - 4) ) &&
+ ( m < edge_size ) );
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, "\n");
+#endif
+ l++;
+ y++;
+ } while ( ( rank >= (test_max_rank - 3) ) &&
+ ( l < edge_size ) );
+ k++;
+ x++;
+ } while ( ( rank >= (test_max_rank - 2) ) &&
+ ( k < edge_size ) );
+ j++;
+ w++;
+ } while ( ( rank >= (test_max_rank - 1) ) &&
+ ( j < edge_size ) );
+ i++;
+ v++;
+ } while ( ( rank >= test_max_rank ) &&
+ ( i < edge_size ) );
+
+ return(good_data);
+
+} /* checker_board_hyperslab_dr_pio_test__verify_data() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: checker_board_hyperslab_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
+ * different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 10/10/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define PAR_SS_DR_MAX_RANK 5
+#define CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+void
+checker_board_hyperslab_dr_pio_test__run_test(const int test_num,
+ const int edge_size,
+ const int checker_edge_size,
+ const int chunk_edge_size,
+ const int small_rank,
+ const int large_rank,
+ const hbool_t use_collective_io,
+ const hid_t dset_type)
+{
+ const char *fcnName = "checker_board_hyperslab_dr_pio_test__run_test()";
+ const char *filename;
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l, m, n;
+ int mrc;
+ int start_index;
+ int stop_index;
+ int small_ds_offset;
+ int large_ds_offset;
+ const int test_max_rank = 5; /* must update code if this changes */
+ uint32_t expected_value;
+ uint32_t * small_ds_buf_0 = NULL;
+ uint32_t * small_ds_buf_1 = NULL;
+ uint32_t * small_ds_buf_2 = NULL;
+ uint32_t * small_ds_slice_buf = NULL;
+ uint32_t * large_ds_buf_0 = NULL;
+ uint32_t * large_ds_buf_1 = NULL;
+ uint32_t * large_ds_buf_2 = NULL;
+ uint32_t * large_ds_slice_buf = NULL;
+ uint32_t * ptr_0;
+ uint32_t * ptr_1;
+ uint32_t * ptr_2;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid_0;
+ hid_t file_small_ds_sid_1;
+ hid_t small_ds_slice_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid_0;
+ hid_t file_large_ds_sid_1;
+ hid_t file_large_ds_process_slice_sid;
+ hid_t mem_large_ds_process_slice_sid;
+ hid_t large_ds_slice_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ size_t small_ds_size = 1;
+ size_t small_ds_slice_size = 1;
+ size_t large_ds_size = 1;
+ size_t large_ds_slice_size = 1;
+ hsize_t dims[PAR_SS_DR_MAX_RANK];
+ hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ hsize_t * start_ptr = NULL;
+ hsize_t * stride_ptr = NULL;
+ hsize_t * count_ptr = NULL;
+ hsize_t * block_ptr = NULL;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert( edge_size >= 6 );
+ HDassert( edge_size >= chunk_edge_size );
+ HDassert( ( chunk_edge_size == 0 ) || ( chunk_edge_size >= 3 ) );
+ HDassert( 1 < small_rank );
+ HDassert( small_rank < large_rank );
+ HDassert( large_rank <= test_max_rank );
+ HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert( mpi_size >= 1 );
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ for ( i = 0; i < small_rank - 1; i++ )
+ {
+ small_ds_size *= (size_t)edge_size;
+ small_ds_slice_size *= (size_t)edge_size;
+ }
+ small_ds_size *= (size_t)(mpi_size + 1);
+
+ small_ds_offset = PAR_SS_DR_MAX_RANK - small_rank;
+
+ HDassert( 0 < small_ds_offset );
+ HDassert( small_ds_offset < PAR_SS_DR_MAX_RANK );
+
+
+ for ( i = 0; i < large_rank - 1; i++ ) {
+
+ large_ds_size *= (size_t)edge_size;
+ large_ds_slice_size *= (size_t)edge_size;
+ }
+ large_ds_size *= (size_t)(mpi_size + 1);
+
+ large_ds_offset = PAR_SS_DR_MAX_RANK - large_rank;
+
+ HDassert( 0 <= large_ds_offset );
+ HDassert( large_ds_offset < PAR_SS_DR_MAX_RANK );
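+
+ /* small_ds_offset and large_ds_offset are the indices, within the
+ * PAR_SS_DR_MAX_RANK element start / stride / count / block arrays,
+ * of the slowest changing dimension of the small and large data sets
+ * respectively.
+ */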
+
+
+ /* set up the start, stride, count, and block pointers */
+ start_ptr = &(start[PAR_SS_DR_MAX_RANK - large_rank]);
+ stride_ptr = &(stride[PAR_SS_DR_MAX_RANK - large_rank]);
+ count_ptr = &(count[PAR_SS_DR_MAX_RANK - large_rank]);
+ block_ptr = &(block[PAR_SS_DR_MAX_RANK - large_rank]);
+
+
+ /* Allocate buffers */
+ small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
+
+ small_ds_slice_buf =
+ (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_slice_size);
+ VRFY((small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
+
+ large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
+
+ large_ds_slice_buf =
+ (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_slice_size);
+ VRFY((large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = small_ds_buf_0;
+ ptr_1 = small_ds_buf_1;
+ ptr_2 = small_ds_buf_2;
+
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+ *ptr_2 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ ptr_2++;
+ }
+
+ ptr_0 = small_ds_slice_buf;
+
+ for ( i = 0; i < (int)small_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ ptr_0++;
+ }
+
+ ptr_0 = large_ds_buf_0;
+ ptr_1 = large_ds_buf_1;
+ ptr_2 = large_ds_buf_2;
+
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+ *ptr_2 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ ptr_2++;
+ }
+
+ ptr_0 = large_ds_slice_buf;
+
+ for ( i = 0; i < (int)large_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+ filename = (const char *)GetTestParameters();
+ HDassert( filename != NULL );
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ if ( MAINPROCESS ) {
+
+ HDfprintf(stdout, "%s:%d: test num = %d.\n", fcnName, mpi_rank, test_num);
+ HDfprintf(stdout, "%s:%d: mpi_size = %d.\n", fcnName, mpi_rank, mpi_size);
+ HDfprintf(stdout,
+ "%s:%d: small/large rank = %d/%d, use_collective_io = %d.\n",
+ fcnName, mpi_rank, small_rank, large_rank, (int)use_collective_io);
+ HDfprintf(stdout, "%s:%d: edge_size = %d, chunk_edge_size = %d.\n",
+ fcnName, mpi_rank, edge_size, chunk_edge_size);
+ HDfprintf(stdout, "%s:%d: checker_edge_size = %d.\n",
+ fcnName, mpi_rank, checker_edge_size);
+ HDfprintf(stdout, "%s:%d: small_ds_size = %d, large_ds_size = %d.\n",
+ fcnName, mpi_rank, (int)small_ds_size, (int)large_ds_size);
+ HDfprintf(stdout, "%s:%d: filename = %s.\n", fcnName, mpi_rank, filename);
+ }
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+
+ /* setup dims: */
+ dims[0] = (hsize_t)(mpi_size + 1);
+ dims[1] = dims[2] = dims[3] = dims[4] = edge_size;
+
+
+ /* Create small ds dataspaces */
+ full_mem_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+ VRFY((full_mem_small_ds_sid >= 0),
+ "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ full_file_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+ VRFY((full_file_small_ds_sid >= 0),
+ "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ mem_small_ds_sid = H5Screate_simple(small_rank, dims, NULL);
+ VRFY((mem_small_ds_sid >= 0),
+ "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ file_small_ds_sid_0 = H5Screate_simple(small_rank, dims, NULL);
+ VRFY((file_small_ds_sid_0 >= 0),
+ "H5Screate_simple() file_small_ds_sid_0 succeeded");
+
+ file_small_ds_sid_1 = H5Screate_simple(small_rank, dims, NULL);
+ VRFY((file_small_ds_sid_1 >= 0),
+ "H5Screate_simple() file_small_ds_sid_1 succeeded");
+
+ small_ds_slice_sid = H5Screate_simple(small_rank - 1, &(dims[1]), NULL);
+ VRFY((small_ds_slice_sid >= 0),
+ "H5Screate_simple() small_ds_slice_sid succeeded");
+
+
+ /* Create large ds dataspaces */
+ full_mem_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((full_mem_large_ds_sid >= 0),
+ "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ full_file_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((full_file_large_ds_sid != FAIL),
+ "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ mem_large_ds_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((mem_large_ds_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ file_large_ds_sid_0 = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((file_large_ds_sid_0 != FAIL),
+ "H5Screate_simple() file_large_ds_sid_0 succeeded");
+
+ file_large_ds_sid_1 = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((file_large_ds_sid_1 != FAIL),
+ "H5Screate_simple() file_large_ds_sid_1 succeeded");
+
+ mem_large_ds_process_slice_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((mem_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
+
+ file_large_ds_process_slice_sid = H5Screate_simple(large_rank, dims, NULL);
+ VRFY((file_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() file_large_ds_process_slice_sid succeeded");
+
+
+ large_ds_slice_sid = H5Screate_simple(large_rank - 1, &(dims[1]), NULL);
+ VRFY((large_ds_slice_sid >= 0),
+ "H5Screate_simple() large_ds_slice_sid succeeded");
+
+
+ /* Select the entire extent of the full small ds, and ds slice dataspaces */
+ ret = H5Sselect_all(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(small_ds_slice_sid) succeeded");
+
+
+ /* Select the entire extent of the full large ds, and ds slice dataspaces */
+ ret = H5Sselect_all(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(large_ds_slice_sid) succeeded");
+
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if ( chunk_edge_size > 0 ) {
+
+ chunk_dims[0] = mpi_size + 1;
+ chunk_dims[1] = chunk_dims[2] =
+ chunk_dims[3] = chunk_dims[4] = chunk_edge_size;
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, small_rank, chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, large_rank, chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type,
+ file_small_ds_sid_0, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type,
+ file_large_ds_sid_0, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded");
+
+
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ if ( ! use_collective_io ) {
+
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,
+ H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0), "H5Pset_dxpl_mpio_collective_opt() suceeded");
+ }
+
+ /* setup selection to write initial data to the small and large data sets */
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
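+
+ /* With this setup (block[0] == 1 in a dimension of extent mpi_size + 1),
+ * the hyperslab selections below pick out, for each process, the single
+ * slice of each data set whose index in dimension 0 equals the process's
+ * MPI rank; the extra slice is ORed into process 0's selection.
+ */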
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid_0,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = mpi_size;
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret>= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid_0,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret>= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) suceeded");
+ }
+
+
+ /* write the initial value of the small data set to file */
+ ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid_0,
+ xfer_plist, small_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
+
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_small_ds_sid,
+ full_file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+
+ /* verify that the correct data was written to the small data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "small ds init data good.");
+
+
+
+ /* setup selections for writing initial data to the large data set */
+
+ start[0] = mpi_rank;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid_0,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
+
+ /* In passing, setup the process slice data spaces as well */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_process_slice_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0),
+ "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_process_slice_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0),
+ "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = mpi_size;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret>= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid_0,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) suceeded");
+ }
+
+
+ /* write the initial value of the large data set to file */
+ ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid_0,
+ xfer_plist, large_ds_buf_0);
+ if ( ret < 0 ) H5Eprint(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
+
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_large_ds_sid,
+ full_file_large_ds_sid,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+
+ /* verify that the correct data was written to the large data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "large ds init data good.");
+
+ /***********************************/
+ /***** INITIALIZATION COMPLETE *****/
+ /***********************************/
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5S_select_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a (small_rank - 1)-D slice from this process's slice
+ * of the on disk large data set, and verifying that the data read is
+ * correct. Verify that H5S_select_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+ * in memory small data set slice.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[small_ds_offset] = mpi_rank;
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board(mpi_rank,
+ small_ds_slice_sid,
+ small_rank - 1,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ sel_start);
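+
+ /* small_ds_slice_sid now holds a checker board selection covering the
+ * (small_rank - 1)-D slice dataspace; the same pattern will be selected
+ * in slices of the on disk large data set below, so that
+ * H5S_select_shape_same() sees the memory and file selections as having
+ * the same shape.
+ */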
+
+ /* zero out the buffer we will be reading into */
+
+ ptr_0 = small_ds_slice_buf;
+
+ for ( i = 0; i < (int)small_ds_slice_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ",
+ fcnName, mpi_rank);
+ ptr_0 = small_ds_slice_buf;
+ for ( i = 0; i < (int)small_ds_slice_size; i++ ) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_0));
+ ptr_0++;
+ }
+ HDfprintf(stdout, "\n");
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s:%d: reading slice from big ds on disk into small ds slice.\n",
+ fcnName, mpi_rank);
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG */
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+ * mpi_rank, and don't iterate over it.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major reorganization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ HDassert( ( start[0] == 0 ) || ( 0 < small_ds_offset + 1 ) );
+ HDassert( ( start[1] == 0 ) || ( 1 < small_ds_offset + 1 ) );
+ HDassert( ( start[2] == 0 ) || ( 2 < small_ds_offset + 1 ) );
+ HDassert( ( start[3] == 0 ) || ( 3 < small_ds_offset + 1 ) );
+ HDassert( ( start[4] == 0 ) || ( 4 < small_ds_offset + 1 ) );
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board
+ (
+ mpi_rank,
+ file_large_ds_sid_0,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ start
+ );
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(small_ds_slice_sid,
+ file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName,
+ mpi_rank, (int)start[0], (int)start[1], (int)start[2],
+ (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
+ fcnName,
+ H5Sget_simple_extent_ndims(small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(file_large_ds_sid_0));
+#endif /* CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ small_ds_slice_sid,
+ file_large_ds_sid_0,
+ xfer_plist,
+ small_ds_slice_buf);
+ VRFY((ret >= 0), "H5Sread() slice from large ds succeeded.");
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
+ fcnName, mpi_rank);
+#endif
+
+ /* verify that expected data is retrieved */
+
+ expected_value = (uint32_t)
+ ((i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size));
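+
+ /* i.e. the value at the origin of the selected slice of the
+ * large data set, which was initialized with consecutive
+ * integers along the fastest changing axis.
+ */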
+
+ data_ok = checker_board_hyperslab_dr_pio_test__verify_data
+ (
+ small_ds_slice_buf,
+ mpi_rank,
+ small_rank - 1,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+
+ VRFY((data_ok == TRUE),
+ "small slice read from large ds data good.");
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[small_ds_offset] = mpi_rank;
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board(mpi_rank,
+ file_small_ds_sid_0,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ sel_start);
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s reading slices of on disk small data set into slices of big data set.\n",
+ fcnName);
+#endif
+
+ /* zero out the buffer we will be reading into */
+ ptr_0 = large_ds_buf_1;
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)0;
+ ptr_0++;
+ }
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read the slice of the small data set
+ * into different slices of the process slice of the large data
+ * set.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major reorganization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ HDassert( ( start[0] == 0 ) || ( 0 < small_ds_offset + 1 ) );
+ HDassert( ( start[1] == 0 ) || ( 1 < small_ds_offset + 1 ) );
+ HDassert( ( start[2] == 0 ) || ( 2 < small_ds_offset + 1 ) );
+ HDassert( ( start[3] == 0 ) || ( 3 < small_ds_offset + 1 ) );
+ HDassert( ( start[4] == 0 ) || ( 4 < small_ds_offset + 1 ) );
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board
+ (
+ mpi_rank,
+ mem_large_ds_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ start
+ );
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_ds_sid_0,
+ mem_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(large_ds_slice_sid),
+ H5Sget_simple_extent_ndims(file_small_ds_sid_0));
+#endif
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_small_ds_sid_0,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Sread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ data_ok = TRUE;
+ ptr_1 = large_ds_buf_1;
+ expected_value = mpi_rank * small_ds_slice_size;
+ start_index =
+ (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+ stop_index = start_index + (int)small_ds_slice_size - 1;
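+
+ /* the slice of the small data set just read should land in
+ * [start_index, stop_index] of large_ds_buf_1; everything
+ * outside that range must still be zero.
+ */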
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
+ fcnName, mpi_rank, expected_value);
+ HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n",
+ fcnName, mpi_rank, start_index, stop_index);
+ n = 0;
+ for ( m = 0; m < (int)large_ds_size; m++ ) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_1));
+ ptr_1++;
+ n++;
+ if ( n >= edge_size ) {
+ HDfprintf(stdout, "\n");
+ n = 0;
+ }
+ }
+ HDfprintf(stdout, "\n");
+ HDfflush(stdout);
+ ptr_1 = large_ds_buf_1;
+#endif
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)large_ds_size );
+
+ for ( n = 0; n < (int)start_index; n++ ) {
+
+ if ( *ptr_1 != 0 ) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE),
+ "slice read from small to large ds data good(1).");
+
+ data_ok = checker_board_hyperslab_dr_pio_test__verify_data
+ (
+ ptr_1,
+ mpi_rank,
+ small_rank - 1,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+
+ VRFY((data_ok == TRUE),
+ "slice read from small to large ds data good(2).");
+
+
+ ptr_1 = large_ds_buf_1 + stop_index + 1;
+ for ( n = stop_index + 1; n < (int)large_ds_size; n++ ) {
+
+ if ( *ptr_1 != 0 ) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE),
+ "slice read from small to large ds data good(3).");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5S_select_shape_same() views as being of the same shape.
+ *
+ * Start by writing (small_rank - 1)-D slices from the in memory large data
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5S_select_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid_0,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[small_ds_offset] = mpi_rank;
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board(mpi_rank,
+ file_small_ds_sid_1,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ sel_start);
+
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to select the slices of the in memory large
+ * cube from which we write.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+ /* zero out the in memory small ds */
+ ptr_1 = small_ds_buf_1;
+ for ( n = 0; n < (int)small_ds_size; n++ ) {
+
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
+ fcnName);
+#endif
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major reorganization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_small_ds_sid_0,
+ xfer_plist,
+ small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ HDassert( ( start[0] == 0 ) || ( 0 < small_ds_offset + 1 ) );
+ HDassert( ( start[1] == 0 ) || ( 1 < small_ds_offset + 1 ) );
+ HDassert( ( start[2] == 0 ) || ( 2 < small_ds_offset + 1 ) );
+ HDassert( ( start[3] == 0 ) || ( 3 < small_ds_offset + 1 ) );
+ HDassert( ( start[4] == 0 ) || ( 4 < small_ds_offset + 1 ) );
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board
+ (
+ mpi_rank,
+ mem_large_ds_sid,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ start
+ );
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory checkerboard selection of the slice through the
+ * large dataset and the checkerboard selection of the process
+ * slice of the small data set as having the same shape.
+ */
+ check = H5S_select_shape_same_test(file_small_ds_sid_1,
+ mem_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
+
+
+ /* write the checker board selection of the slice from the in
+ * memory large data set to the slice of the on disk small
+ * dataset.
+ */
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(file_small_ds_sid_1));
+#endif
+ ret = H5Dwrite(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_small_ds_sid_1,
+ xfer_plist,
+ large_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
+
+
+ /* read the on disk process slice of the small dataset into memory */
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_small_ds_sid_0,
+ xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ expected_value =
+ (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+
+ start_index = mpi_rank * small_ds_slice_size;
+ stop_index = start_index + small_ds_slice_size - 1;
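+
+ /* only this process's slice of the small data set buffer, i.e.
+ * [start_index, stop_index] of small_ds_buf_1, should have been
+ * touched by the write just performed; the rest of the buffer
+ * must still be zero.
+ */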
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)small_ds_size );
+
+ data_ok = TRUE;
+
+ for ( n = 0; n < start_index; n++ ) {
+
+ if ( *(ptr_1 + n) != 0 ) {
+
+ data_ok = FALSE;
+ *(ptr_1 + n) = 0;
+ }
+ }
+
+ data_ok &= checker_board_hyperslab_dr_pio_test__verify_data
+ (
+ ptr_1 + start_index,
+ mpi_rank,
+ small_rank - 1,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+
+
+ for ( n = stop_index; n < (int)small_ds_size; n++ ) {
+
+ if ( *(ptr_1 + n) != 0 ) {
+
+ data_ok = FALSE;
+ *(ptr_1 + n) = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE),
+ "large slice write slice to small slice data good.");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5S_select_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ start[0] = mpi_rank;
+ stride[0] = 2 * (mpi_size + 1);
+ count[0] = 1;
+ block[0] = 1;
+
+ for ( i = 1; i < large_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid_0,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+
+ /* setup a checkerboard selection of the slice of the in memory small
+ * data set associated with the process's mpi rank.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[small_ds_offset] = mpi_rank;
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board(mpi_rank,
+ mem_small_ds_sid,
+ small_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ sel_start);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write checkerboard selections of slices
+ * of the small data set to slices of the large data set.
+ */
+ for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
+
+ start[i] = 0;
+ stride[i] = 2 * edge_size;
+ count[i] = 1;
+ if ( (PAR_SS_DR_MAX_RANK - i) > (small_rank - 1) ) {
+
+ block[i] = 1;
+
+ } else {
+
+ block[i] = edge_size;
+ }
+ }
+
+ /* zero out the in memory large ds */
+ ptr_1 = large_ds_buf_1;
+ for ( n = 0; n < (int)large_ds_size; n++ ) {
+
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout,
+ "%s writing process checkerboard selections of slices of small ds to process slices of large ds on disk.\n",
+ fcnName);
+#endif
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 0 ) {
+
+ i = mpi_rank;
+
+ } else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 1 ) {
+
+ j = mpi_rank;
+
+ } else {
+
+ j = 0;
+ }
+
+ do {
+ if ( PAR_SS_DR_MAX_RANK - large_rank == 2 ) {
+
+ k = mpi_rank;
+
+ } else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major reorganization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_large_ds_sid_0,
+ xfer_plist,
+ large_ds_buf_2);
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded");
+
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ start[0] = i;
+ start[1] = j;
+ start[2] = k;
+ start[3] = l;
+ start[4] = 0;
+
+ HDassert( ( start[0] == 0 ) || ( 0 < small_ds_offset + 1 ) );
+ HDassert( ( start[1] == 0 ) || ( 1 < small_ds_offset + 1 ) );
+ HDassert( ( start[2] == 0 ) || ( 2 < small_ds_offset + 1 ) );
+ HDassert( ( start[3] == 0 ) || ( 3 < small_ds_offset + 1 ) );
+ HDassert( ( start[4] == 0 ) || ( 4 < small_ds_offset + 1 ) );
+
+ checker_board_hyperslab_dr_pio_test__select_checker_board
+ (
+ mpi_rank,
+ file_large_ds_sid_1,
+ large_rank,
+ edge_size,
+ checker_edge_size,
+ small_rank - 1,
+ start
+ );
+
+
+ /* verify that H5S_select_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5S_select_shape_same_test(mem_small_ds_sid,
+ file_large_ds_sid_1);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CHECKER_BOARD_HYPERSLAB_DR_PIO_TEST__RUN_TEST__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(file_large_ds_sid_1));
+#endif
+ ret = H5Dwrite(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_large_ds_sid_1,
+ xfer_plist,
+ small_ds_buf_0);
+ VRFY((ret != FAIL),
+ "H5Dwrite of small ds slice to large ds succeeded");
+
+
+ /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_large_ds_sid_0,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret != FAIL),
+ "H5Dread() of process slice of large ds succeeded");
+
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = large_ds_buf_1;
+ expected_value = (uint32_t)(mpi_rank) * small_ds_slice_size;
+
+
+ start_index = (i * edge_size * edge_size * edge_size * edge_size) +
+ (j * edge_size * edge_size * edge_size) +
+ (k * edge_size * edge_size) +
+ (l * edge_size);
+ stop_index = start_index + (int)small_ds_slice_size - 1;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index < (int)large_ds_size );
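+
+                    /* Illustrative arithmetic (not executed, values assumed):
+                     * with edge_size == 10 and a 10 x 10 slice (i.e.
+                     * small_ds_slice_size == 100), the indices
+                     * (i, j, k, l) == (2, 0, 0, 0) give
+                     * start_index == 20000 and stop_index == 20099.
+                     */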
+
+
+ mis_match = FALSE;
+
+ data_ok = TRUE;
+
+ for ( n = 0; n < start_index; n++ ) {
+
+ if ( *(ptr_1 + n) != 0 ) {
+
+ data_ok = FALSE;
+ *(ptr_1 + n) = 0;
+ }
+ }
+
+ data_ok &= checker_board_hyperslab_dr_pio_test__verify_data
+ (
+ ptr_1 + start_index,
+ mpi_rank,
+ small_rank - 1,
+ edge_size,
+ checker_edge_size,
+ expected_value,
+ (hbool_t)TRUE
+ );
+
+
+ for ( n = stop_index; n < small_ds_size; n++ ) {
+
+ if ( *(ptr_1 + n) != 0 ) {
+
+ data_ok = FALSE;
+ *(ptr_1 + n) = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE),
+ "small ds cb slice write to large ds slice data good.");
+
+ l++;
+
+ } while ( ( large_rank > 2 ) &&
+ ( (small_rank - 1) <= 1 ) &&
+ ( l < edge_size ) );
+ k++;
+ } while ( ( large_rank > 3 ) &&
+ ( (small_rank - 1) <= 2 ) &&
+ ( k < edge_size ) );
+ j++;
+ } while ( ( large_rank > 4 ) &&
+ ( (small_rank - 1) <= 3 ) &&
+ ( j < edge_size ) );
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid_0);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid_1);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded");
+
+ ret = H5Sclose(small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded");
+
+ ret = H5Sclose(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid_0);
+    VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_0) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid_1);
+    VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded");
+
+ ret = H5Sclose(mem_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded");
+
+
+ /* Close Datasets */
+ ret = H5Dclose(small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+ if ( small_ds_buf_0 != NULL ) HDfree(small_ds_buf_0);
+ if ( small_ds_buf_1 != NULL ) HDfree(small_ds_buf_1);
+ if ( small_ds_buf_2 != NULL ) HDfree(small_ds_buf_2);
+ if ( small_ds_slice_buf != NULL ) HDfree(small_ds_slice_buf);
+
+ if ( large_ds_buf_0 != NULL ) HDfree(large_ds_buf_0);
+ if ( large_ds_buf_1 != NULL ) HDfree(large_ds_buf_1);
+ if ( large_ds_buf_2 != NULL ) HDfree(large_ds_buf_2);
+ if ( large_ds_slice_buf != NULL ) HDfree(large_ds_slice_buf);
+
+ return;
+
+} /* checker_board_hyperslab_dr_pio_test__run_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: checker_board_hyperslab_dr_pio_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+checker_board_hyperslab_dr_pio_test(void)
+{
+ const char *fcnName = "checker_board_hyperslab_dr_pio_test()";
+ int test_num = 0;
+ int edge_size = 10;
+ int checker_edge_size = 3;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int use_collective_io = 1;
+ hid_t dset_type = H5T_STD_U32LE;
+#if 0
+ int DebugWait = 1;
+
+ while (DebugWait) ;
+#endif
+
+ for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
+
+ for ( small_rank = 2; small_rank < large_rank; small_rank++ ) {
+
+ for ( use_collective_io = 0;
+ use_collective_io <= 1;
+ use_collective_io++ ) {
+
+ chunk_edge_size = 0;
+ checker_board_hyperslab_dr_pio_test__run_test(test_num,
+ edge_size,
+ checker_edge_size,
+ chunk_edge_size,
+ small_rank,
+ large_rank,
+ (hbool_t)use_collective_io,
+ dset_type);
+ test_num++;
+
+ chunk_edge_size = 5;
+ checker_board_hyperslab_dr_pio_test__run_test(test_num,
+ edge_size,
+ checker_edge_size,
+ chunk_edge_size,
+ small_rank,
+ large_rank,
+ (hbool_t)use_collective_io,
+ dset_type);
+ test_num++;
+
+ }
+ }
+ }
+
+ return;
+
+} /* checker_board_hyperslab_dr_pio_test() */
+
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 667872c..5425377 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -934,3 +934,1930 @@ coll_read_test(int chunk_factor)
return ;
}
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__select_checker_board():
+**
+** Given a data space of tgt_rank, and dimensions:
+**
+**	(mpi_size + 1), edge_size, ... , edge_size
+**
+** and a checker_edge_size, select a checker board
+** selection of a sel_rank (sel_rank <= tgt_rank)
+** dimensional slice through the data space parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function is hard coded to presume a
+** maximum data space rank of 5.
+**
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 11/11/09
+**
+****************************************************************/
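+
+/* Illustration only (values assumed, not taken from a particular call):
+ * with tgt_rank == 5, dims == {mpi_size + 1, 10, 10, 10, 10},
+ * checker_edge_size == 3, sel_rank == 2, and sel_start == {3, 5, 0, 0, 0},
+ * the function selects a checker board pattern of 3 x 3 blocks (clipped
+ * at the upper edges) in the 10 x 10 plane at indices (3, 5, 0) of the
+ * three slowest changing dimensions.
+ */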
+
+#define LDSCT_DS_RANK 5
+#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
+
+#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+lower_dim_size_comp_test__select_checker_board(
+ const int mpi_rank,
+ const hid_t tgt_sid,
+ const int tgt_rank,
+ const hsize_t dims[LDSCT_DS_RANK],
+ const int checker_edge_size,
+ const int sel_rank,
+ hsize_t sel_start[])
+{
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName =
+ "lower_dim_size_comp_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n",
+ fcnName, mpi_rank, (int)dims[0], (int)dims[1], (int)dims[2],
+ (int)dims[3], (int)dims[4], checker_edge_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ HDassert( 0 < checker_edge_size );
+ HDassert( 0 < sel_rank );
+ HDassert( sel_rank <= tgt_rank );
+ HDassert( tgt_rank <= test_max_rank );
+ HDassert( test_max_rank <= LDSCT_DS_RANK );
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert( sel_offset >= 0 );
+
+ ds_offset = test_max_rank - tgt_rank;
+ HDassert( ds_offset >= 0 );
+ HDassert( ds_offset <= sel_offset );
+
+ HDassert( (hsize_t)checker_edge_size <= dims[sel_offset] );
+ HDassert( dims[sel_offset] == 10 );
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
+ fcnName, mpi_rank, tgt_rank, ds_offset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count = dims[sel_offset] / (checker_edge_size * 2);
+
+    if ( (dims[sel_offset] % (checker_edge_size * 2)) > 0 ) {
+
+ base_count++;
+ }
+
+ offset_count =
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
+ ((hsize_t)(checker_edge_size * 2)));
+
+    if ( ((dims[sel_offset] - (hsize_t)checker_edge_size) %
+ ((hsize_t)(checker_edge_size * 2))) > 0 ) {
+
+ offset_count++;
+ }
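+
+    /* Worked example (illustrative values): with dims[sel_offset] == 10
+     * and checker_edge_size == 3, base_count = 10 / 6 = 1, bumped to 2
+     * by the remainder of 4, and offset_count = (10 - 3) / 6 = 1,
+     * bumped to 2 by the remainder of 1.
+     */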
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
+ fcnName, mpi_rank, base_count, offset_count);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while ( i < ds_offset ) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while ( i < sel_offset ) {
+
+ start[i] = sel_start[i];
+ stride[i] = 2 * dims[i];
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while ( i < test_max_rank ) {
+
+ stride[i] = (hsize_t)(2 * checker_edge_size);
+ block[i] = (hsize_t)checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if ( 0 >= sel_offset ) {
+
+ if ( i == 0 ) {
+
+ start[0] = 0;
+ count[0] = base_count;
+
+ } else {
+
+ start[0] = (hsize_t)checker_edge_size;
+ count[0] = offset_count;
+
+ }
+ }
+
+ j = 0;
+ do {
+ if ( 1 >= sel_offset ) {
+
+ if ( j == 0 ) {
+
+ start[1] = 0;
+ count[1] = base_count;
+
+ } else {
+
+ start[1] = (hsize_t)checker_edge_size;
+ count[1] = offset_count;
+
+ }
+ }
+
+ k = 0;
+ do {
+ if ( 2 >= sel_offset ) {
+
+ if ( k == 0 ) {
+
+ start[2] = 0;
+ count[2] = base_count;
+
+ } else {
+
+ start[2] = (hsize_t)checker_edge_size;
+ count[2] = offset_count;
+
+ }
+ }
+
+ l = 0;
+ do {
+ if ( 3 >= sel_offset ) {
+
+ if ( l == 0 ) {
+
+ start[3] = 0;
+ count[3] = base_count;
+
+ } else {
+
+ start[3] = (hsize_t)checker_edge_size;
+ count[3] = offset_count;
+
+ }
+ }
+
+ m = 0;
+ do {
+ if ( 4 >= sel_offset ) {
+
+ if ( m == 0 ) {
+
+ start[4] = 0;
+ count[4] = base_count;
+
+ } else {
+
+ start[4] = (hsize_t)checker_edge_size;
+ count[4] = offset_count;
+
+ }
+ }
+
+ if ( ((i + j + k + l + m) % 2) == 0 ) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank ==
+ LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+
+ HDfprintf(stdout,
+ "%s%d: *** first_selection = %d ***\n",
+ fcnName, mpi_rank, (int)first_selection);
+ HDfprintf(stdout,
+ "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
+ fcnName, mpi_rank, i, j, k, l, m);
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout,
+ "%s:%d: n-cube extent dims = %d.\n",
+ fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout,
+ "%s:%d: selection rank = %d.\n",
+ fcnName, mpi_rank, sel_rank);
+ }
+#endif
+
+ if ( first_selection ) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab
+ (
+ tgt_sid,
+ H5S_SELECT_SET,
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
+ &(block[ds_offset])
+ );
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+
+ } else {
+
+ ret = H5Sselect_hyperslab
+ (
+ tgt_sid,
+ H5S_SELECT_OR,
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
+ &(block[ds_offset])
+ );
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+
+ }
+ }
+
+ m++;
+
+ } while ( ( m <= 1 ) &&
+ ( 4 >= sel_offset ) );
+
+ l++;
+
+ } while ( ( l <= 1 ) &&
+ ( 3 >= sel_offset ) );
+
+ k++;
+
+ } while ( ( k <= 1 ) &&
+ ( 2 >= sel_offset ) );
+
+ j++;
+
+ } while ( ( j <= 1 ) &&
+ ( 1 >= sel_offset ) );
+
+
+ i++;
+
+ } while ( ( i <= 1 ) &&
+ ( 0 >= sel_offset ) );
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
+ fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the data space proper. */
+
+ for ( i = 0; i < test_max_rank; i++ ) {
+
+ start[i] = 0;
+ stride[i] = dims[i];
+ count[i] = 1;
+ block[i] = dims[i];
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
+ fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* lower_dim_size_comp_test__select_checker_board() */
+
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from this process's slice of another
+** data set of a different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides either at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
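+
+/* As an illustration (values assumed): with edge_size == 10,
+ * checker_edge_size == 3, and buf_starts_in_checker == TRUE, a row of
+ * the buffer that starts in a checker is checked as: elements 0-2
+ * selected, 3-5 unselected, 6-8 selected, and element 9 unselected
+ * (a partial checker clipped at the far edge).
+ */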
+
+#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const int mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ const int rank,
+ const int edge_size,
+ const int checker_edge_size,
+ uint32_t first_expected_val,
+ hbool_t buf_starts_in_checker)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const char * fcnName =
+ "lower_dim_size_comp_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t * val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert( buf_ptr != NULL );
+ HDassert( 0 < rank );
+ HDassert( rank <= test_max_rank );
+ HDassert( edge_size >= 6 );
+ HDassert( 0 < checker_edge_size );
+ HDassert( checker_edge_size <= edge_size );
+ HDassert( test_max_rank <= LDSCT_DS_RANK );
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+ HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n",
+ fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n",
+ fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n",
+ fcnName, (int)buf_starts_in_checker);
+ }
+#endif
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do
+ {
+ if ( v >= checker_edge_size ) {
+
+ start_in_checker[0] = ! start_in_checker[0];
+ v = 0;
+ }
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do
+ {
+ if ( w >= checker_edge_size ) {
+
+ start_in_checker[1] = ! start_in_checker[1];
+ w = 0;
+ }
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do
+ {
+ if ( x >= checker_edge_size ) {
+
+ start_in_checker[2] = ! start_in_checker[2];
+ x = 0;
+ }
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do
+ {
+ if ( y >= checker_edge_size ) {
+
+ start_in_checker[3] = ! start_in_checker[3];
+ y = 0;
+ }
+
+ m = 0;
+ z = 0;
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
+ LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+ }
+#endif
+ in_checker = start_in_checker[3];
+ do
+ {
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
+ LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
+ }
+#endif
+ if ( z >= checker_edge_size ) {
+
+ in_checker = ! in_checker;
+ z = 0;
+ }
+
+ if ( in_checker ) {
+
+ if ( *val_ptr != expected_value ) {
+
+ good_data = FALSE;
+ }
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+
+ } else if ( *val_ptr != 0 ) {
+
+ good_data = FALSE;
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+
+ }
+
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ( ( rank >= (test_max_rank - 4) ) &&
+ ( m < edge_size ) );
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
+ LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "\n");
+ }
+#endif
+ l++;
+ y++;
+ } while ( ( rank >= (test_max_rank - 3) ) &&
+ ( l < edge_size ) );
+ k++;
+ x++;
+ } while ( ( rank >= (test_max_rank - 2) ) &&
+ ( k < edge_size ) );
+ j++;
+ w++;
+ } while ( ( rank >= (test_max_rank - 1) ) &&
+ ( j < edge_size ) );
+ i++;
+ v++;
+ } while ( ( rank >= test_max_rank ) &&
+ ( i < edge_size ) );
+
+ return(good_data);
+
+} /* lower_dim_size_comp_test__verify_data() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test__run_test()
+ *
+ * Purpose: Verify that a bug in the computation of the size of the
+ * lower dimensions of a data space in H5S_obtain_datatype()
+ * has been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define LDSCT_DS_RANK 5
+#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
+
+void
+lower_dim_size_comp_test__run_test(const int chunk_edge_size,
+ const hbool_t use_collective_io,
+ const hid_t dset_type)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__run_test()";
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ const char *filename;
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i;
+ int start_index;
+ int stop_index;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+ size_t large_ds_slice_size;
+ uint32_t expected_value;
+ uint32_t * small_ds_buf_0 = NULL;
+ uint32_t * small_ds_buf_1 = NULL;
+ uint32_t * large_ds_buf_0 = NULL;
+ uint32_t * large_ds_buf_1 = NULL;
+ uint32_t * ptr_0;
+ uint32_t * ptr_1;
+ hsize_t small_chunk_dims[LDSCT_DS_RANK];
+ hsize_t large_chunk_dims[LDSCT_DS_RANK];
+ hsize_t small_dims[LDSCT_DS_RANK];
+ hsize_t large_dims[LDSCT_DS_RANK];
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ hsize_t small_sel_start[LDSCT_DS_RANK];
+ hsize_t large_sel_start[LDSCT_DS_RANK];
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert( mpi_size >= 1 );
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n",
+ fcnName, mpi_rank, (int)chunk_edge_size);
+ HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n",
+ fcnName, mpi_rank, (int)use_collective_io);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+
+ small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
+ small_ds_slice_size = (size_t) ( 1 * 1 * 10 * 10);
+ large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
+ large_ds_slice_size = (size_t) (10 * 10 * 10 * 10);
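+
+    /* for example, assuming mpi_size == 4: small_ds_size == 500,
+     * small_ds_slice_size == 100, large_ds_size == 50000, and
+     * large_ds_slice_size == 10000.
+     */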
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n",
+ fcnName, mpi_rank,
+ (int)small_ds_size, (int)small_ds_slice_size);
+ HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n",
+ fcnName, mpi_rank,
+ (int)large_ds_size, (int)large_ds_slice_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* Allocate buffers */
+ small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+
+ /* initialize the buffers */
+
+ ptr_0 = small_ds_buf_0;
+ ptr_1 = small_ds_buf_1;
+
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+ ptr_0 = large_ds_buf_0;
+ ptr_1 = large_ds_buf_1;
+
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+
+ /* get the file name */
+
+ filename = (const char *)GetTestParameters();
+ HDassert( filename != NULL );
+
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+
+ /* setup dims: */
+ small_dims[0] = (hsize_t)(mpi_size + 1);
+ small_dims[1] = 1;
+ small_dims[2] = 1;
+ small_dims[3] = 10;
+ small_dims[4] = 10;
+
+ large_dims[0] = (hsize_t)(mpi_size + 1);
+ large_dims[1] = 10;
+ large_dims[2] = 10;
+ large_dims[3] = 10;
+ large_dims[4] = 10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)small_dims[0], (int)small_dims[1],
+ (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
+ HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)large_dims[0], (int)large_dims[1],
+ (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
+ }
+#endif
+
+ /* create data spaces */
+
+ full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_mem_small_ds_sid != 0),
+ "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_file_small_ds_sid != 0),
+ "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((mem_small_ds_sid != 0),
+ "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((file_small_ds_sid != 0),
+ "H5Screate_simple() file_small_ds_sid succeeded");
+
+
+ full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_mem_large_ds_sid != 0),
+ "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_file_large_ds_sid != 0),
+ "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((mem_large_ds_sid != 0),
+ "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((file_large_ds_sid != 0),
+ "H5Screate_simple() file_large_ds_sid succeeded");
+
+
+ /* Select the entire extent of the full small ds dataspaces */
+ ret = H5Sselect_all(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
+
+
+ /* Select the entire extent of the full large ds dataspaces */
+ ret = H5Sselect_all(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
+
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if ( chunk_edge_size > 0 ) {
+
+ small_chunk_dims[0] = (hsize_t)(1);
+ small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
+ small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)small_chunk_dims[0],
+ (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ (int)small_chunk_dims[3], (int)small_chunk_dims[4]);
+ }
+#endif
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((small_ds_dcpl_id >= 0), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ large_chunk_dims[0] = (hsize_t)(1);
+ large_chunk_dims[1] = large_chunk_dims[2] =
+ large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
+
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)large_chunk_dims[0],
+ (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ (int)large_chunk_dims[3], (int)large_chunk_dims[4]);
+ }
+#endif
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((large_ds_dcpl_id >= 0), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+
+ /* create the small dataset */
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type,
+ file_small_ds_sid, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+    VRFY((small_dataset >= 0), "H5Dcreate2() small_dataset succeeded");
+
+
+ /* create the large dataset */
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type,
+ file_large_ds_sid, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+    VRFY((large_dataset >= 0), "H5Dcreate2() large_dataset succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: small/large ds id = %d / %d.\n",
+ fcnName, mpi_rank, (int)small_dataset,
+ (int)large_dataset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ if ( ! use_collective_io ) {
+
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,
+ H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
+ }
+
+
+ /* setup selection to write initial data to the small data sets */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = 0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = 2;
+ stride[3] = stride[4] = 2 * 10;
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = 1;
+
+ block[0] = block[1] = block[2] = 1;
+ block[3] = block[4] = 10;
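+
+    /* With these settings, each process selects the single 10 x 10 slice
+     * at index (mpi_rank + 1) in dimension 0 of the small data set -- the
+     * counts of 1 reduce the hyperslab to a single block, so the strides
+     * have no further effect.
+     */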
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: settings for small data set initialization.\n",
+ fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = 0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
+ fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
+ }
+
+ check = H5Sselect_valid(mem_small_ds_sid);
+ VRFY((check == TRUE),"H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_small_ds_sid);
+ VRFY((check == TRUE),"H5Sselect_valid(file_small_ds_sid) returns TRUE");
+
+
+ /* write the initial value of the small data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n",
+ fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ ret = H5Dwrite(small_dataset,
+ dset_type,
+ mem_small_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_small_ds_sid,
+ full_file_small_ds_sid,
+ xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
+
+
+ /* verify that the correct data was written to the small data set,
+ * and reset the buffer to zero in passing.
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)small_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "small ds init data good.");
+
+
+
+ /* setup selections for writing initial data to the large data set */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)1;
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
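+
+    /* As with the small data set, each process selects the single
+     * 10 x 10 x 10 x 10 block at index (mpi_rank + 1) in dimension 0
+     * of the large data set.
+     */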
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: settings for large data set initialization.\n",
+ fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
+ fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout,
+ "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
+ fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ if ( MAINPROCESS ) { /* add an additional slice to the selections */
+
+ start[0] = (hsize_t)0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
+ fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid,
+ H5S_SELECT_OR,
+ start,
+ stride,
+ count,
+ block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout,
+ "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
+ fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout,
+ "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
+ fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ }
+
+ /* try clipping the selection back to the large data space proper */
+ start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)(mpi_size + 1);
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+ VRFY((ret != FAIL),"H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND,
+ start, stride, count, block);
+ VRFY((ret != FAIL),"H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+
+ rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout,
+ "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ (int)dims[2], (int)dims[3], (int)dims[4]);
+
+ rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout,
+ "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n",
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ (int)dims[2], (int)dims[3], (int)dims[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ check = H5Sselect_valid(mem_large_ds_sid);
+ VRFY((check == TRUE),"H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_large_ds_sid);
+ VRFY((check == TRUE),"H5Sselect_valid(file_large_ds_sid) returns TRUE");
+
+
+ /* write the initial value of the large data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n",
+ fcnName, mpi_rank);
+ HDfprintf(stdout,
+ "%s:%d: large_dataset = %d.\n",
+ fcnName, mpi_rank,
+ (int)large_dataset);
+ HDfprintf(stdout,
+ "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n",
+ fcnName, mpi_rank,
+ (int)mem_large_ds_sid, (int)file_large_ds_sid);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Dwrite(large_dataset,
+ dset_type,
+ mem_large_ds_sid,
+ file_large_ds_sid,
+ xfer_plist,
+ large_ds_buf_0);
+
+ if ( ret < 0 ) H5Eprint(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ full_mem_large_ds_sid,
+ full_file_large_ds_sid,
+ xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+
+ /* verify that the correct data was written to the large data set.
+ * in passing, reset the buffer to zeros
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
+
+ i = 0;
+ for ( i = 0; i < (int)large_ds_size; i++ ) {
+
+ if ( *ptr_1 != expected_value ) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY( (mis_match == FALSE), "large ds init data good.");
+
+ /***********************************/
+ /***** INITIALIZATION COMPLETE *****/
+ /***********************************/
+
+
+ /* read a checkerboard selection of the process slice of the
+ * small on disk data set into the process slice of the large
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank,
+ file_small_ds_sid,
+ /* tgt_rank = */ 5,
+ small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2,
+ small_sel_start);
+
+ expected_value = (uint32_t)
+ ((small_sel_start[0] * small_dims[1] * small_dims[2] *
+ small_dims[3] * small_dims[4]) +
+ (small_sel_start[1] * small_dims[2] * small_dims[3] *
+ small_dims[4]) +
+ (small_sel_start[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[3] * small_dims[4]) +
+ (small_sel_start[4]));
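+
+    /* Worked example (illustrative, assuming mpi_rank == 0): with
+     * small_dims == {mpi_size + 1, 1, 1, 10, 10} and
+     * small_sel_start == {1, 0, 0, 0, 0}, expected_value ==
+     * 1 * (1 * 1 * 10 * 10) == 100 -- the linear offset of the first
+     * element of this process's slice of the small data set.
+     */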
+
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank,
+ mem_large_ds_sid,
+ /* tgt_rank = */ 5,
+ large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2,
+ large_sel_start);
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(mem_large_ds_sid,
+ file_small_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed (1)");
+
+
+ ret = H5Dread(small_dataset,
+ H5T_NATIVE_UINT32,
+ mem_large_ds_sid,
+ file_small_ds_sid,
+ xfer_plist,
+ large_ds_buf_1);
+
+    VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) +
+ (large_sel_start[4]));
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)large_ds_size );
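+
+    /* Illustrative arithmetic (assuming mpi_rank == 0): with
+     * large_dims == {mpi_size + 1, 10, 10, 10, 10} and
+     * large_sel_start == {1, 5, 0, 0, 0}, start_index ==
+     * 1 * 10000 + 5 * 1000 == 15000 and stop_index == 15100.
+     */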
+
+ ptr_1 = large_ds_buf_1;
+
+ for ( i = 0; i < start_index; i++ ) {
+
+ if ( *ptr_1 != (uint32_t)0 ) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
+
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3,
+ expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+
+ for ( i = stop_index; i < (int)large_ds_size; i++ ) {
+
+ if ( *ptr_1 != (uint32_t)0 ) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
+
+
+
+
+
+ /* read a checkerboard selection of a slice of the process slice of
+ * the large on disk data set into the process slice of the small
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank,
+ mem_small_ds_sid,
+ /* tgt_rank = */ 5,
+ small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2,
+ small_sel_start);
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank,
+ file_large_ds_sid,
+ /* tgt_rank = */ 5,
+ large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2,
+ large_sel_start);
+
+
+ /* verify that H5S_select_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5S_select_shape_same_test(mem_small_ds_sid,
+ file_large_ds_sid);
+ VRFY((check == TRUE), "H5S_select_shape_same_test passed (2)");
+
+
+ ret = H5Dread(large_dataset,
+ H5T_NATIVE_UINT32,
+ mem_small_ds_sid,
+ file_large_ds_sid,
+ xfer_plist,
+ small_ds_buf_1);
+
+    VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
+ expected_value = (uint32_t)
+ ((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) +
+ (large_sel_start[4]));
+
+ start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size;
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert( 0 <= start_index );
+ HDassert( start_index < stop_index );
+ HDassert( stop_index <= (int)small_ds_size );
+
+ ptr_1 = small_ds_buf_1;
+
+ for ( i = 0; i < start_index; i++ ) {
+
+ if ( *ptr_1 != (uint32_t)0 ) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
+
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3,
+ expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+
+ for ( i = stop_index; i < (int)small_ds_size; i++ ) {
+
+ if ( *ptr_1 != (uint32_t)0 ) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
+ fcnName, mpi_rank, (int)i, (int)(*ptr_1));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
+
+
+ /* Close dataspaces */
+ ret = H5Sclose(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
+
+
+ ret = H5Sclose(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
+
+
+ /* Close Datasets */
+ ret = H5Dclose(small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+ if ( small_ds_buf_0 != NULL ) HDfree(small_ds_buf_0);
+ if ( small_ds_buf_1 != NULL ) HDfree(small_ds_buf_1);
+
+ if ( large_ds_buf_0 != NULL ) HDfree(large_ds_buf_0);
+ if ( large_ds_buf_1 != NULL ) HDfree(large_ds_buf_1);
+
+ return;
+
+} /* lower_dim_size_comp_test__run_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test()
+ *
+ * Purpose: Test to see if an error in the computation of the size
+ * of the lower dimensions in H5S_obtain_datatype() has
+ * been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+lower_dim_size_comp_test(void)
+{
+ /* const char *fcnName = "lower_dim_size_comp_test()"; */
+ int chunk_edge_size = 0;
+ int use_collective_io = 1;
+ hid_t dset_type = H5T_STD_U32LE;
+#if 0
+ sleep(60);
+#endif
+    for ( use_collective_io = 0;
+          use_collective_io <= 1;
+          use_collective_io++ ) {
+
+ chunk_edge_size = 0;
+ lower_dim_size_comp_test__run_test(chunk_edge_size,
+ (hbool_t)use_collective_io,
+ dset_type);
+
+
+ chunk_edge_size = 5;
+ lower_dim_size_comp_test__run_test(chunk_edge_size,
+ (hbool_t)use_collective_io,
+ dset_type);
+ }
+
+ return;
+
+} /* lower_dim_size_comp_test() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: link_chunk_collective_io_test()
+ *
+ * Purpose: Test to verify that an error in MPI type management in
+ * H5D_link_chunk_collective_io() has been corrected.
+ * In this bug, we used to free MPI types regardless of
+ * whether they were basic or derived.
+ *
+ * This test is based on a bug report kindly provided by
+ * Rob Latham of the MPICH team and ANL.
+ *
+ * The basic thrust of the test is to cause a process
+ * to participate in a collective I/O in which it:
+ *
+ * 1) Reads or writes exactly one chunk,
+ *
+ * 2) Has no in memory buffer for any other chunk.
+ *
+ *                  The test differs from Rob Latham's bug report in
+ *                  that it runs with an arbitrary number of processes,
+ *                  and uses a 1 dimensional dataset.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 12/16/09
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+
+void
+link_chunk_collective_io_test(void)
+{
+ /* const char *fcnName = "link_chunk_collective_io_test()"; */
+ const char *filename;
+ hbool_t mis_match = FALSE;
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ int i;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_WORLD;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hsize_t count[1] = {1};
+ hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t start[1];
+ hsize_t dims[1];
+ hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ herr_t ret; /* Generic return value */
+ hid_t file_id;
+ hid_t acc_tpl;
+ hid_t dset_id;
+ hid_t file_ds_sid;
+ hid_t write_mem_ds_sid;
+ hid_t read_mem_ds_sid;
+ hid_t ds_dcpl_id;
+ hid_t xfer_plist;
+ double diff;
+ double expected_value;
+ double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+ double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert( mpi_size > 0 );
+
+ /* get the file name */
+ filename = (const char *)GetTestParameters();
+ HDassert( filename != NULL );
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims */
+ dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE));
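+
+    /* For example, with mpi_size == 4 and the chunk size of 16, the data
+     * set holds 64 doubles; each process writes and then reads back
+     * exactly the one chunk that starts at element mpi_rank * 16.
+     */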
+
+ /* setup mem and file data spaces */
+ write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((write_mem_ds_sid != 0),
+ "H5Screate_simple() write_mem_ds_sid succeeded");
+
+ read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((read_mem_ds_sid != 0),
+ "H5Screate_simple() read_mem_ds_sid succeeded");
+
+ file_ds_sid = H5Screate_simple(1, dims, NULL);
+ VRFY((file_ds_sid != 0),
+ "H5Screate_simple() file_ds_sid succeeded");
+
+ /* setup data set creation property list */
+ ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims);
+    VRFY((ret != FAIL), "H5Pset_chunk() ds_dcpl_id succeeded");
+
+ /* create the data set */
+ dset_id = H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE,
+ file_ds_sid, H5P_DEFAULT,
+ ds_dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* close the dataset creation property list */
+ ret = H5Pclose(ds_dcpl_id);
+ VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded");
+
+ /* setup local data */
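+    /* each rank writes the consecutive values
+     *   mpi_rank * chunk_size, mpi_rank * chunk_size + 1, ...
+     * so every element of the file dataset receives a distinct value
+     */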
+ expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) *
+ (double)(mpi_rank);
+ for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
+
+ local_data_written[i] = expected_value;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
+ }
+
+ /* select the file and mem spaces */
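+    /* each rank selects exactly the one chunk it owns: a single block of
+     * chunk_size elements starting at mpi_rank * chunk_size.  With count
+     * fixed at 1, the stride value has no effect on the selection.
+     */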
+ start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
+ ret = H5Sselect_hyperslab(file_ds_sid,
+ H5S_SELECT_SET,
+ start,
+ stride,
+ count,
+ block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded");
+
+ ret = H5Sselect_all(write_mem_ds_sid);
+    VRFY((ret != FAIL), "H5Sselect_all(write_mem_ds_sid) succeeded");
+
+ /* Note that we use NO SELECTION on the read memory dataspace */
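+    /* (a dataspace returned by H5Screate_simple() has all elements
+     * selected by default, so the read below still transfers the
+     * full chunk into local_data_read)
+     */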
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write the data set */
+ ret = H5Dwrite(dset_id,
+ H5T_NATIVE_DOUBLE,
+ write_mem_ds_sid,
+ file_ds_sid,
+ xfer_plist,
+ local_data_written);
+
+ VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc==MPI_SUCCESS), "Sync after dataset write");
+
+    /* read this process's slice of the dataset back in */
+ ret = H5Dread(dset_id,
+ H5T_NATIVE_DOUBLE,
+ read_mem_ds_sid,
+ file_ds_sid,
+ xfer_plist,
+ local_data_read);
+ VRFY((ret >= 0), "H5Dread() dataset read succeeded");
+
+ /* close the xfer property list */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
+
+ /* verify the data */
+ mis_match = FALSE;
+ for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
+
+ diff = local_data_written[i] - local_data_read[i];
+ diff = fabs(diff);
+
+ if ( diff >= 0.001 ) {
+
+ mis_match = TRUE;
+ }
+ }
+ VRFY( (mis_match == FALSE), "dataset data good.");
+
+ /* Close dataspaces */
+ ret = H5Sclose(write_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(read_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(file_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset_id);
+ VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded");
+
+ /* close the file collectively */
+ ret = H5Fclose(file_id);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ return;
+
+} /* link_chunk_collective_io_test() */
+
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 4dabada..0864b11 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -462,7 +462,6 @@ int main(int argc, char **argv)
coll_irregular_complex_chunk_read,NULL,
"collective irregular complex chunk read",PARATESTFILE);
-
AddTest("null", null_dataset, NULL,
"null dataset test", PARATESTFILE);
@@ -473,6 +472,26 @@ int main(int argc, char **argv)
"I/O mode confusion test -- hangs quickly on failure",
&io_mode_confusion_params);
+ AddTest("tldsc",
+ lower_dim_size_comp_test, NULL,
+ "test lower dim size comp in span tree to mpi derived type",
+ PARATESTFILE);
+
+ AddTest("lccio",
+ link_chunk_collective_io_test, NULL,
+ "test mpi derived type management",
+ PARATESTFILE);
+
+ /* rank projections / shape same tests */
+
+ AddTest("chsssdrpio",
+ contig_hyperslab_dr_pio_test, NULL,
+ "contiguous hyperslab shape same different rank PIO",PARATESTFILE);
+
+ AddTest("cbhsssdrpio",
+ checker_board_hyperslab_dr_pio_test, NULL,
+ "checker board hyperslab shape same different rank PIO",PARATESTFILE);
+
/* Display testing information */
TestInfo(argv[0]);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 24c4432..ba46e4d 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -237,6 +237,10 @@ void coll_irregular_simple_chunk_write(void);
void coll_irregular_complex_chunk_read(void);
void coll_irregular_complex_chunk_write(void);
void io_mode_confusion(void);
+void lower_dim_size_comp_test(void);
+void link_chunk_collective_io_test(void);
+void contig_hyperslab_dr_pio_test(void);
+void checker_board_hyperslab_dr_pio_test(void);
#ifdef H5_HAVE_FILTER_DEFLATE
void compress_readAll(void);
#endif /* H5_HAVE_FILTER_DEFLATE */