summaryrefslogtreecommitdiffstats
path: root/hl
diff options
context:
space:
mode:
authorScot Breitenfeld <brtnfld@hdfgroup.org>2018-06-15 18:53:36 (GMT)
committerScot Breitenfeld <brtnfld@hdfgroup.org>2018-06-15 18:53:36 (GMT)
commit87829e06189cd9b29583b5ca8065b52b1f4cd523 (patch)
tree495ea989045018667409ee2e9a9d1ffded52091f /hl
parent413bc90ec95524c72d0576bc9f1fc5356e541473 (diff)
parent57f64b92d19fed2879ee9bafe1d29bfac865d54c (diff)
downloadhdf5-87829e06189cd9b29583b5ca8065b52b1f4cd523.zip
hdf5-87829e06189cd9b29583b5ca8065b52b1f4cd523.tar.gz
hdf5-87829e06189cd9b29583b5ca8065b52b1f4cd523.tar.bz2
Merge pull request #1111 in HDFFV/hdf5 from hdf5_1_10.sync to hdf5_1_10
* commit '57f64b92d19fed2879ee9bafe1d29bfac865d54c': (30 commits) HDFFV-10405: Using h5fget_obj_count_f with a file id of H5F_OBJ_ALL_F does not work properly HDFFV-10405: Using h5fget_obj_count_f with a file id of H5F_OBJ_ALL_F does not work properly HDFFV-10405: Using h5fget_obj_count_f with a file id of H5F_OBJ_ALL_F does not work properly Cleaned up H5Fmount/unmount code. Normalization with vol_integration branch. Add fortran MPI to test and example Add mpi include folders for fortran C objects Normalization with the vol_integration branch. Fixed MANIFEST Fix usage of compression lib in shared tests Fix jni function call version Fix the error found after earlier checkin. H5O_info fixes for java and examples Added a RELASE.txt entry for HDFFV-10505. Changed 'deprecated' to indicate 'no longer supported' in the --enable-debug/production configure flags. (1) Made the change according to the pull request feedback. (2) Removed the performance test form test/th5o.c: will decide on what needs to be done to show speedup via HDFFV-10463. Normalize with vol_integration. Removed unused H5MF functions and updated FUNC_ENTER macros and naming in H5MFsection.c. Restored some unused #defines to the deprecated section of H5Dpublic.h. Changes made based on feedback from pull request #1039. ...
Diffstat (limited to 'hl')
-rw-r--r--hl/fortran/src/CMakeLists.txt4
-rw-r--r--hl/src/H5DO.c233
-rw-r--r--hl/src/H5DOpublic.h23
-rw-r--r--hl/src/H5DS.c28
-rw-r--r--hl/test/CMakeLists.txt2
-rw-r--r--hl/test/CMakeTests.cmake4
-rw-r--r--hl/test/Makefile.am8
-rw-r--r--hl/test/dectris_hl_perf.c691
-rw-r--r--hl/test/test_dset_opt.c2171
-rw-r--r--hl/test/test_h5do_compat.c286
10 files changed, 393 insertions, 3057 deletions
diff --git a/hl/fortran/src/CMakeLists.txt b/hl/fortran/src/CMakeLists.txt
index fddfcf8..7ec3b63 100644
--- a/hl/fortran/src/CMakeLists.txt
+++ b/hl/fortran/src/CMakeLists.txt
@@ -56,7 +56,7 @@ set (HDF5_HL_F90_HEADERS ${HDF5_HL_F90_SRC_SOURCE_DIR}/H5LTf90proto.h)
add_library (${HDF5_HL_F90_C_LIB_TARGET} STATIC ${HDF5_HL_F90_C_SOURCES} ${HDF5_HL_F90_HEADERS})
target_include_directories(${HDF5_HL_F90_C_LIB_TARGET}
- PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static"
+ PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_F90_BINARY_DIR}/static;$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
INTERFACE "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>"
)
TARGET_C_PROPERTIES (${HDF5_HL_F90_C_LIB_TARGET} STATIC)
@@ -72,7 +72,7 @@ set (install_targets ${HDF5_HL_F90_C_LIB_TARGET})
if (BUILD_SHARED_LIBS)
add_library (${HDF5_HL_F90_C_LIBSH_TARGET} SHARED ${HDF5_HL_F90_C_SOURCES} ${HDF5_HL_F90_HEADERS})
target_include_directories(${HDF5_HL_F90_C_LIBSH_TARGET}
- PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_F90_BINARY_DIR}/shared"
+ PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};${HDF5_F90_BINARY_DIR}/shared;$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
INTERFACE "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>"
)
target_compile_definitions(${HDF5_LIBSH_TARGET}
diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c
index 99cf2f7..057c43b 100644
--- a/hl/src/H5DO.c
+++ b/hl/src/H5DO.c
@@ -11,90 +11,37 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <stdio.h>
-
/* High-level library internal header file */
#include "H5HLprivate2.h"
/* public LT prototypes */
#include "H5DOpublic.h"
-
+#ifndef H5_NO_DEPRECATED_SYMBOLS
/*-------------------------------------------------------------------------
- * Function: H5DOwrite_chunk
+ * Function: H5DOwrite_chunk
*
- * Purpose: Writes an entire chunk to the file directly.
+ * Purpose: Writes an entire chunk to the file directly.
*
- * Return: Non-negative on success/Negative on failure
+ * The H5DOwrite_chunk() call was moved to H5Dwrite_chunk. This
+ * simple wrapper remains so that people can still link to the
+ * high-level library without changing their code.
*
- * Programmer: Raymond Lu
- * 30 July 2012
+ * Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
herr_t
-H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
+H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *offset,
size_t data_size, const void *buf)
{
- hbool_t created_dxpl = FALSE; /* Whether we created a DXPL */
- hbool_t do_direct_write = TRUE; /* Flag for direct writes */
- uint32_t data_size_32; /* Chunk data size (limited to 32-bits currently) */
- herr_t ret_value = FAIL; /* Return value */
-
- /* Check arguments */
- if(dset_id < 0)
- goto done;
- if(!buf)
- goto done;
- if(!offset)
- goto done;
- if(!data_size)
- goto done;
- data_size_32 = (uint32_t)data_size;
- if(data_size != (size_t)data_size_32)
- goto done;
-
- /* If the user passed in a default DXPL, create one to pass to H5Dwrite() */
- if(H5P_DEFAULT == dxpl_id) {
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto done;
- created_dxpl = TRUE;
- } /* end if */
-
- /* Set direct write parameters */
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
- goto done;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &filters) < 0)
- goto done;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME, &offset) < 0)
- goto done;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &data_size_32) < 0)
- goto done;
+ /* Call underlying H5D function */
+ if (H5Dwrite_chunk(dset_id, dxpl_id, filters, offset, data_size, buf) < 0)
+ return FAIL;
+ else
+ return SUCCEED;
- /* Write chunk */
- if(H5Dwrite(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0)
- goto done;
-
- /* Indicate success */
- ret_value = SUCCEED;
-
-done:
- if(created_dxpl) {
- if(H5Pclose(dxpl_id) < 0)
- ret_value = FAIL;
- } /* end if */
- else {
- /* Reset the direct write flag on user DXPL */
- do_direct_write = FALSE;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
- ret_value = FAIL;
- }
-
- return ret_value;
} /* end H5DOwrite_chunk() */
@@ -103,10 +50,11 @@ done:
*
* Purpose: Reads an entire chunk from the file directly.
*
- * Return: Non-negative on success/Negative on failure
+ * The H5DOread_chunk() call was moved to H5Dread_chunk. This
+ * simple wrapper remains so that people can still link to the
+ * high-level library without changing their code.
*
- * Programmer: Matthew Strong (GE Healthcare)
- * 14 February 2016
+ * Return: Non-negative on success/Negative on failure
*
*---------------------------------------------------------------------------
*/
@@ -114,71 +62,29 @@ herr_t
H5DOread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, uint32_t *filters,
void *buf)
{
- hbool_t created_dxpl = FALSE; /* Whether we created a DXPL */
- hbool_t do_direct_read = TRUE; /* Flag for direct writes */
- herr_t ret_value = FAIL; /* Return value */
-
- /* Check arguments */
- if(dset_id < 0)
- goto done;
- if(!buf)
- goto done;
- if(!offset)
- goto done;
- if(!filters)
- goto done;
+ /* Call underlying H5D function */
+ if (H5Dread_chunk(dset_id, dxpl_id, offset, filters, buf) < 0)
+ return FAIL;
+ else
+ return SUCCEED;
+ } /* end H5DOread_chunk() */
- /* If the user passed in a default DXPL, create one to pass to H5Dwrite() */
- if(H5P_DEFAULT == dxpl_id) {
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto done;
- created_dxpl = TRUE;
- } /* end if */
-
- /* Set direct write parameters */
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0)
- goto done;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &offset) < 0)
- goto done;
-
- /* Read chunk */
- if(H5Dread(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0)
- goto done;
- /* Get the filter mask */
- if(H5Pget(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, filters) < 0)
- goto done;
-
- /* Indicate success */
- ret_value = SUCCEED;
-
-done:
- if(created_dxpl) {
- if(H5Pclose(dxpl_id) < 0)
- ret_value = FAIL;
- } /* end if */
- else {
- /* Reset the direct read flag on user DXPL */
- do_direct_read = FALSE;
- if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0)
- ret_value = FAIL;
- }
-
- return ret_value;
-} /* end H5DOread_chunk() */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
/*-------------------------------------------------------------------------
- * Function: H5DOappend()
+ * Function: H5DOappend()
*
* Purpose: To append elements to a dataset.
- * axis: the dataset dimension (zero-based) for the append
- * extension: the # of elements to append for the axis-th dimension
- * memtype: the datatype
- * buf: buffer with data for the append
*
- * Return: Non-negative on success/Negative on failure
+ * axis: the dataset dimension (zero-based) for the append
+ * extension: the # of elements to append for the axis-th dimension
+ * memtype: the datatype
+ * buf: buffer with data for the append
*
- * Programmer: Vailin Choi; Jan 2014
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Jan 2014
*
* Note:
* This routine is copied from the fast forward feature branch: features/hdf5_ff
@@ -227,7 +133,7 @@ H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension,
/* check arguments */
if(H5I_DATASET != H5Iget_type(dset_id))
- goto done;
+ goto done;
/* If the user passed in a default DXPL, create one to pass to H5Dwrite() */
if(H5P_DEFAULT == dxpl_id) {
@@ -236,39 +142,40 @@ H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension,
created_dxpl = TRUE;
} /* end if */
else if(TRUE != H5Pisa_class(dxpl_id, H5P_DATASET_XFER))
- goto done;
+ goto done;
/* Get the dataspace of the dataset */
if(FAIL == (space_id = H5Dget_space(dset_id)))
- goto done;
+ goto done;
/* Get the rank of this dataspace */
if((sndims = H5Sget_simple_extent_ndims(space_id)) < 0)
- goto done;
+ goto done;
ndims = (unsigned)sndims;
/* Verify correct axis */
if(axis >= ndims)
- goto done;
+ goto done;
/* Get the dimensions sizes of the dataspace */
if(H5Sget_simple_extent_dims(space_id, size, NULL) < 0)
- goto done;
+ goto done;
/* Adjust the dimension size of the requested dimension,
- but first record the old dimension size */
+ * but first record the old dimension size
+ */
old_size = size[axis];
size[axis] += extension;
if(size[axis] < old_size)
- goto done;
+ goto done;
/* Set the extent of the dataset to the new dimension */
if(H5Dset_extent(dset_id, size) < 0)
- goto done;
+ goto done;
/* Get the new dataspace of the dataset */
if(FAIL == (new_space_id = H5Dget_space(dset_id)))
- goto done;
+ goto done;
/* Select a hyperslab corresponding to the append operation */
for(u = 0 ; u < ndims ; u++) {
@@ -282,51 +189,51 @@ H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension,
} /* end if */
} /* end for */
if(FAIL == H5Sselect_hyperslab(new_space_id, H5S_SELECT_SET, start, stride, count, block))
- goto done;
+ goto done;
/* The # of elemnts in the new extended dataspace */
if((snelmts = H5Sget_select_npoints(new_space_id)) < 0)
- goto done;
+ goto done;
nelmts = (hsize_t)snelmts;
/* create a memory space */
if(FAIL == (mem_space_id = H5Screate_simple(1, &nelmts, NULL)))
- goto done;
+ goto done;
/* Write the data */
if(H5Dwrite(dset_id, memtype, mem_space_id, new_space_id, dxpl_id, buf) < 0)
- goto done;
+ goto done;
/* Obtain the dataset's access property list */
if((dapl = H5Dget_access_plist(dset_id)) < 0)
- goto done;
+ goto done;
/* Allocate the boundary array */
boundary = (hsize_t *)HDmalloc(ndims * sizeof(hsize_t));
/* Retrieve the append flush property */
if(H5Pget_append_flush(dapl, ndims, boundary, &append_cb, &udata) < 0)
- goto done;
+ goto done;
/* No boundary for this axis */
if(boundary[axis] != 0) {
- /* Determine whether a boundary is hit or not */
- for(k = start[axis]; k < size[axis]; k++)
- if(!((k + 1) % boundary[axis])) {
- hit = TRUE;
- break;
- }
-
- if(hit) { /* Hit the boundary */
- /* Invoke callback if there is one */
- if(append_cb && append_cb(dset_id, size, udata) < 0)
- goto done;
-
- /* Do a dataset flush */
- if(H5Dflush(dset_id) < 0)
- goto done;
- } /* end if */
+ /* Determine whether a boundary is hit or not */
+ for(k = start[axis]; k < size[axis]; k++)
+ if(!((k + 1) % boundary[axis])) {
+ hit = TRUE;
+ break;
+ }
+
+ if(hit) { /* Hit the boundary */
+ /* Invoke callback if there is one */
+ if(append_cb && append_cb(dset_id, size, udata) < 0)
+ goto done;
+
+ /* Do a dataset flush */
+ if(H5Dflush(dset_id) < 0)
+ goto done;
+ } /* end if */
} /* end if */
/* Indicate success */
@@ -341,22 +248,22 @@ done:
/* Close old dataspace */
if(space_id != FAIL && H5Sclose(space_id) < 0)
- ret_value = FAIL;
+ ret_value = FAIL;
/* Close new dataspace */
if(new_space_id != FAIL && H5Sclose(new_space_id) < 0)
- ret_value = FAIL;
+ ret_value = FAIL;
/* Close memory dataspace */
if(mem_space_id != FAIL && H5Sclose(mem_space_id) < 0)
- ret_value = FAIL;
+ ret_value = FAIL;
/* Close the dataset access property list */
if(dapl != FAIL && H5Pclose(dapl) < 0)
- ret_value = FAIL;
+ ret_value = FAIL;
if(boundary)
- HDfree(boundary);
+ HDfree(boundary);
return ret_value;
} /* H5DOappend() */
diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h
index d5c8de4..e09ebca 100644
--- a/hl/src/H5DOpublic.h
+++ b/hl/src/H5DOpublic.h
@@ -25,18 +25,23 @@ extern "C" {
*-------------------------------------------------------------------------
*/
-H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
- const hsize_t *offset, size_t data_size, const void *buf);
-
-H5_HLDLL herr_t H5DOread_chunk(hid_t dset_id, /*in*/
- hid_t dxpl_id, /*in*/
- const hsize_t *offset, /*in*/
- uint32_t *filters, /*out*/
- void *buf); /*out*/
-
H5_HLDLL herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis,
size_t extension, hid_t memtype, const void *buf);
+/* Symbols defined for compatibility with previous versions of the HDF5 API.
+ *
+ * Use of these symbols is deprecated.
+ */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/* Compatibility wrappers for functionality moved to H5D */
+H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
+ const hsize_t *offset, size_t data_size, const void *buf);
+H5_HLDLL herr_t H5DOread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset,
+ uint32_t *filters /*out*/, void *buf /*out*/);
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
#ifdef __cplusplus
}
#endif
diff --git a/hl/src/H5DS.c b/hl/src/H5DS.c
index 527c92b..d86cc98 100644
--- a/hl/src/H5DS.c
+++ b/hl/src/H5DS.c
@@ -160,11 +160,11 @@ herr_t H5DSattach_scale(hid_t did,
return FAIL;
/* get info for the dataset in the parameter list */
- if(H5Oget_info(did, &oi1) < 0)
+ if(H5Oget_info2(did, &oi1, H5O_INFO_BASIC) < 0)
return FAIL;
/* get info for the scale in the parameter list */
- if(H5Oget_info(dsid, &oi2) < 0)
+ if(H5Oget_info2(dsid, &oi2, H5O_INFO_BASIC) < 0)
return FAIL;
/* same object, not valid */
@@ -324,11 +324,11 @@ herr_t H5DSattach_scale(hid_t did,
goto out;
/* get info for DS in the parameter list */
- if(H5Oget_info(dsid, &oi1) < 0)
+ if(H5Oget_info2(dsid, &oi1, H5O_INFO_BASIC) < 0)
goto out;
/* get info for this DS */
- if(H5Oget_info(dsid_j, &oi2) < 0)
+ if(H5Oget_info2(dsid_j, &oi2, H5O_INFO_BASIC) < 0)
goto out;
/* same object, so this DS scale is already in this DIM IDX */
@@ -609,11 +609,11 @@ herr_t H5DSdetach_scale(hid_t did,
return FAIL;
/* get info for the dataset in the parameter list */
- if(H5Oget_info(did, &did_oi) < 0)
+ if(H5Oget_info2(did, &did_oi, H5O_INFO_BASIC) < 0)
return FAIL;
/* get info for the scale in the parameter list */
- if(H5Oget_info(dsid, &dsid_oi) < 0)
+ if(H5Oget_info2(dsid, &dsid_oi, H5O_INFO_BASIC) < 0)
return FAIL;
/* same object, not valid */
@@ -696,7 +696,7 @@ herr_t H5DSdetach_scale(hid_t did,
goto out;
/* get info for this DS */
- if(H5Oget_info(dsid_j, &tmp_oi) < 0)
+ if(H5Oget_info2(dsid_j, &tmp_oi, H5O_INFO_BASIC) < 0)
goto out;
/* Close the dereferenced dataset */
@@ -806,7 +806,7 @@ herr_t H5DSdetach_scale(hid_t did,
goto out;
/* get info for this dataset */
- if(H5Oget_info(did_i, &tmp_oi) < 0)
+ if(H5Oget_info2(did_i, &tmp_oi, H5O_INFO_BASIC) < 0)
goto out;
/* close the dereferenced dataset */
@@ -973,11 +973,11 @@ htri_t H5DSis_attached(hid_t did,
return FAIL;
/* get info for the dataset in the parameter list */
- if(H5Oget_info(did, &oi1) < 0)
+ if(H5Oget_info2(did, &oi1, H5O_INFO_BASIC) < 0)
return FAIL;
/* get info for the scale in the parameter list */
- if(H5Oget_info(dsid, &oi2) < 0)
+ if(H5Oget_info2(dsid, &oi2, H5O_INFO_BASIC) < 0)
return FAIL;
/* same object, not valid */
@@ -1054,11 +1054,11 @@ htri_t H5DSis_attached(hid_t did,
goto out;
/* get info for DS in the parameter list */
- if(H5Oget_info(dsid, &oi1) < 0)
+ if(H5Oget_info2(dsid, &oi1, H5O_INFO_BASIC) < 0)
goto out;
/* get info for this DS */
- if(H5Oget_info(dsid_j, &oi2) < 0)
+ if(H5Oget_info2(dsid_j, &oi2, H5O_INFO_BASIC) < 0)
goto out;
/* same object */
@@ -1144,11 +1144,11 @@ htri_t H5DSis_attached(hid_t did,
goto out;
/* get info for dataset in the parameter list */
- if(H5Oget_info(did, &oi3) < 0)
+ if(H5Oget_info2(did, &oi3, H5O_INFO_BASIC) < 0)
goto out;
/* get info for this dataset */
- if(H5Oget_info(did_i, &oi4) < 0)
+ if(H5Oget_info2(did_i, &oi4, H5O_INFO_BASIC) < 0)
goto out;
/* same object */
diff --git a/hl/test/CMakeLists.txt b/hl/test/CMakeLists.txt
index 44f286b..238b5e1 100644
--- a/hl/test/CMakeLists.txt
+++ b/hl/test/CMakeLists.txt
@@ -46,9 +46,9 @@ HL_ADD_EXE (test_image)
HL_ADD_EXE (test_file_image)
HL_ADD_EXE (test_table)
HL_ADD_EXE (test_ds)
-HL_ADD_EXE (test_dset_opt)
HL_ADD_EXE (test_ld)
HL_ADD_EXE (test_dset_append)
+HL_ADD_EXE (test_h5do_compat)
# test_packet has two source files
add_executable (hl_test_packet test_packet.c test_packet_vlen.c)
diff --git a/hl/test/CMakeTests.cmake b/hl/test/CMakeTests.cmake
index e5eb58e..4e945bc 100644
--- a/hl/test/CMakeTests.cmake
+++ b/hl/test/CMakeTests.cmake
@@ -79,6 +79,7 @@ add_test (
file_img1.h5
file_img2.h5
test_append.h5
+ h5do_compat.h5
test_detach.h5
test_ds1.h5
test_ds2.h5
@@ -90,7 +91,6 @@ add_test (
test_ds8.h5
test_ds9.h5
test_ds10.h5
- test_dectris.h5
test_image1.h5
test_image2.h5
test_image3.h5
@@ -115,7 +115,7 @@ HL_add_test (test_file_image)
HL_add_test (test_table)
HL_add_test (test_ds)
HL_add_test (test_packet)
-HL_add_test (test_dset_opt)
HL_add_test (test_ld)
HL_add_test (test_dset_append)
+HL_add_test (test_h5do_compat)
diff --git a/hl/test/Makefile.am b/hl/test/Makefile.am
index e16550f..2e63438 100644
--- a/hl/test/Makefile.am
+++ b/hl/test/Makefile.am
@@ -26,8 +26,8 @@ LDADD=$(LIBH5_HL) $(LIBH5TEST) $(LIBHDF5)
# Test programs. These are our main targets. They should be listed in the
# order to be executed, generally most specific tests to least specific tests.
-TEST_PROG=test_lite test_image test_file_image test_table test_ds test_packet test_dset_opt \
- test_ld test_dset_append
+TEST_PROG=test_lite test_image test_file_image test_table test_ds test_packet \
+ test_ld test_dset_append test_h5do_compat
check_PROGRAMS=$(TEST_PROG)
# These programs generate test files for the tests. They don't need to be
@@ -45,8 +45,8 @@ endif
CHECK_CLEANFILES+=combine_tables[1-2].h5 test_ds[1-9].h5 test_ds10.h5 \
test_image[1-3].h5 file_img[1-2].h5 test_lite[1-4].h5 test_table.h5 \
test_packet_table.h5 test_packet_compress.h5 test_detach.h5 \
- test_packet_table_vlen.h5 testfl_packet_table_vlen.h5 \
- test_dectris.h5 test_append.h5
+ test_packet_table_vlen.h5 testfl_packet_table_vlen.h5 test_append.h5 \
+ h5do_compat.h5
# Sources for test_packet executable
test_packet_SOURCES=test_packet.c test_packet_vlen.c
diff --git a/hl/test/dectris_hl_perf.c b/hl/test/dectris_hl_perf.c
deleted file mode 100644
index 13cfac8..0000000
--- a/hl/test/dectris_hl_perf.c
+++ /dev/null
@@ -1,691 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * This test is for the DECTRIS project to the H5DOwrite_chunk function
- *
- */
-
-#include "hdf5_hl.h"
-
-#ifdef H5_HAVE_FILTER_DEFLATE
-#include <zlib.h>
-
-#if !defined(WIN32) && !defined(__MINGW32__)
-
-#include <math.h>
-
-#ifdef H5_STDC_HEADERS
-# include <errno.h>
-# include <fcntl.h>
-# include <stdio.h>
-# include <stdlib.h>
-#endif
-
-#ifdef H5_HAVE_UNISTD_H
-# include <sys/types.h>
-# include <unistd.h>
-#endif
-
-#ifdef H5_HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-
-#if defined(H5_TIME_WITH_SYS_TIME)
-# include <sys/time.h>
-# include <time.h>
-#elif defined(H5_HAVE_SYS_TIME_H)
-# include <sys/time.h>
-#else
-# include <time.h>
-#endif
-
-const char *FILENAME[] = {
- "dectris_perf",
- "unix.raw",
- NULL
-};
-
-/*
- * Print the current location on the standard output stream.
- */
-#define FUNC __func__
-#define AT() printf (" at %s:%d in %s()...\n", \
- __FILE__, __LINE__, FUNC);
-#define H5_FAILED() {puts("*FAILED*");fflush(stdout);}
-#define TEST_ERROR {H5_FAILED(); AT(); goto error;}
-#define TESTING(WHAT) {printf("Testing %-62s",WHAT); fflush(stdout);}
-#define PASSED() {puts(" PASSED");fflush(stdout);}
-
-#define DIRECT_UNCOMPRESSED_DSET "direct_uncompressed_dset"
-#define DIRECT_COMPRESSED_DSET "direct_compressed_dset"
-#define REG_COMPRESSED_DSET "reg_compressed_dset"
-#define REG_NO_COMPRESS_DSET "reg_no_compress_dset"
-#define RANK 3
-#define NX 100
-#define NY 1000
-#define NZ 250
-#define CHUNK_NX 1
-#define CHUNK_NY 1000
-#define CHUNK_NZ 250
-
-#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001)+12)
-char filename[1024];
-unsigned int *outbuf[NX];
-size_t data_size[NX];
-double total_size = 0.0;
-unsigned int *direct_buf[NX];
-double MB = 1048576.0;
-
-/*--------------------------------------------------
- * Function to report IO rate
- *--------------------------------------------------
- */
-void reportTime(struct timeval start, double mbytes)
-{
- struct timeval timeval_stop,timeval_diff;
-
- /*end timing*/
- gettimeofday(&timeval_stop,NULL);
-
- /* Calculate the elapsed gettimeofday time */
- timeval_diff.tv_usec=timeval_stop.tv_usec-start.tv_usec;
- timeval_diff.tv_sec=timeval_stop.tv_sec-start.tv_sec;
-
- if(timeval_diff.tv_usec<0) {
- timeval_diff.tv_usec+=1000000;
- timeval_diff.tv_sec--;
- } /* end if */
-
-/*printf("mbytes=%lf, sec=%lf, usec=%lf\n", mbytes, (double)timeval_diff.tv_sec, (double)timeval_diff.tv_usec);*/
- printf("MBytes/second: %lf\n", (double)mbytes/((double)timeval_diff.tv_sec+((double)timeval_diff.tv_usec/(double)1000000.0)));
-}
-
-/*--------------------------------------------------
- * Create file, datasets, and initialize data
- *--------------------------------------------------
- */
-int create_file(hid_t fapl_id)
-{
- hid_t file; /* handles */
- hid_t fapl;
- hid_t cparms;
- hid_t dataspace, dataset;
- hsize_t dims[RANK] = {NX, NY, NZ};
- hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
- unsigned int aggression = 9; /* Compression aggression setting */
- int ret;
- int i, j, n;
-
- int flag;
- int unix_file;
-
- unsigned int *p;
- size_t buf_size = CHUNK_NY*CHUNK_NZ*sizeof(unsigned int);
-
- const Bytef *z_src;
- Bytef *z_dst; /*destination buffer */
- uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
- uLong z_src_nbytes = (uLong)buf_size;
-
- TESTING("Create a file and dataset");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, NULL)) < 0)
- TEST_ERROR;
-
- /*
- * Create a new file. If file exists its contents will be overwritten.
- */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
- TEST_ERROR;
-
- /*
- * Modify dataset creation properties, i.e. enable chunking and compression
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- TEST_ERROR;
-
- if(H5Pset_chunk( cparms, RANK, chunk_dims) < 0)
- TEST_ERROR;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DIRECT_UNCOMPRESSED_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if(H5Dclose(dataset) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dcreate2(file, REG_NO_COMPRESS_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if(H5Dclose(dataset) < 0)
- TEST_ERROR;
-
- /* Set compression */
- if(H5Pset_deflate( cparms, aggression) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dcreate2(file, DIRECT_COMPRESSED_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if(H5Dclose(dataset) < 0)
- TEST_ERROR;
-
-
- if((dataset = H5Dcreate2(file, REG_COMPRESSED_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if(H5Dclose(dataset) < 0)
- TEST_ERROR;
-
- if(H5Fclose(file) < 0)
- TEST_ERROR;
-
- if(H5Sclose(dataspace) < 0)
- TEST_ERROR;
-
- if(H5Pclose(cparms) < 0)
- TEST_ERROR;
-
- /* create a unix file*/
- flag = O_CREAT|O_TRUNC|O_WRONLY;
-
- if ((unix_file=open(FILENAME[1],flag,S_IRWXU))== -1)
- TEST_ERROR;
-
- if (close(unix_file) < 0)
- {
- printf(" unable to close the file\n");
- TEST_ERROR;
- }
-
-
- /* Initialize data for chunks */
- for(i = 0; i < NX; i++) {
- p = direct_buf[i] = (unsigned int*)malloc(CHUNK_NY*CHUNK_NZ*sizeof(unsigned int));
-
- for(j=0; j < CHUNK_NY*CHUNK_NZ; j++, p++)
- *p = rand() % 65000;
-
- z_src = (const Bytef*)direct_buf[i];
-
- z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
- /* Allocate output (compressed) buffer */
- outbuf[i] = (unsigned int*)malloc((size_t)z_dst_nbytes);
- z_dst = (Bytef *)outbuf[i];
-
- /* Perform compression from the source to the destination buffer */
- ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
-
- data_size[i] = (size_t)z_dst_nbytes;
- total_size += data_size[i];
-
- /* Check for various zlib errors */
- if(Z_BUF_ERROR == ret) {
- fprintf(stderr, "overflow");
- TEST_ERROR;
- } else if(Z_MEM_ERROR == ret) {
- fprintf(stderr, "deflate memory error");
- TEST_ERROR;
- } else if(Z_OK != ret) {
- fprintf(stderr, "other deflate error");
- TEST_ERROR;
- }
- }
-
-
- PASSED();
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Fclose(file);
- } H5E_END_TRY;
- return 1;
-}
-
-/*--------------------------------------------------
- * Benchmark the performance of the new function
- * with precompressed data.
- *--------------------------------------------------
- */
-int
-test_direct_write_uncompressed_data(hid_t fapl_id)
-{
- hid_t file; /* handles */
- hid_t dataspace, dataset;
- hid_t dxpl;
- herr_t status;
- int i;
-
- unsigned filter_mask = 0;
- hsize_t offset[RANK] = {0, 0, 0};
-
- struct timeval timeval_start;
-
- TESTING("H5DOwrite_chunk for uncompressed data");
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- TEST_ERROR;
-
- /* Start the timer */
- gettimeofday(&timeval_start,NULL);
-
- /* Reopen the file and dataset */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dopen2(file, DIRECT_UNCOMPRESSED_DSET, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
-
- /* Write the compressed chunk data repeatedly to cover all the chunks in the
- * dataset, using the direct writing function. */
- for(i=0; i<NX; i++) {
- status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, CHUNK_NY*CHUNK_NZ*sizeof(unsigned int), direct_buf[i]);
- (offset[0])++;
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Pclose(dxpl);
- H5Fclose(file);
-
- /* Report the performance */
- reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB));
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Pclose(dxpl);
- H5Fclose(file);
- } H5E_END_TRY;
- return 1;
-}
-
-
-/*--------------------------------------------------
- * Benchmark the performance of the new function
- * with precompressed data.
- *--------------------------------------------------
- */
-int
-test_direct_write_compressed_data(hid_t fapl_id)
-{
- hid_t file; /* handles */
- hid_t dataspace, dataset;
- hid_t dxpl;
- herr_t status;
- int i;
-
- unsigned filter_mask = 0;
- hsize_t offset[RANK] = {0, 0, 0};
-
- struct timeval timeval_start;
-
- TESTING("H5DOwrite_chunk for pre-compressed data");
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- TEST_ERROR;
-
- /* Start the timer */
- gettimeofday(&timeval_start,NULL);
-
- /* Reopen the file and dataset */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dopen2(file, DIRECT_COMPRESSED_DSET, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
-
- /* Write the compressed chunk data repeatedly to cover all the chunks in the
- * dataset, using the direct writing function. */
- for(i=0; i<NX; i++) {
- status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, data_size[i], outbuf[i]);
- (offset[0])++;
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Pclose(dxpl);
- H5Fclose(file);
-
- /* Report the performance */
- reportTime(timeval_start, (double)(total_size/MB));
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Pclose(dxpl);
- H5Fclose(file);
- } H5E_END_TRY;
- return 1;
-}
-
-/*--------------------------------------------------
- * Benchmark the performance of the regular H5Dwrite
- * with compression filter enabled.
- *--------------------------------------------------
- */
-int
-test_compressed_write(hid_t fapl_id)
-{
- hid_t file; /* handles */
- hid_t dataspace, dataset;
- hid_t mem_space;
- hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
- hid_t dxpl;
- herr_t status;
- int i;
-
- hsize_t start[RANK]; /* Start of hyperslab */
- hsize_t stride[RANK]; /* Stride of hyperslab */
- hsize_t count[RANK]; /* Block count */
- hsize_t block[RANK]; /* Block sizes */
-
- struct timeval timeval_start;
-
- TESTING("H5Dwrite with compression enabled");
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- TEST_ERROR;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- TEST_ERROR;
-
- /* Start the timer */
- gettimeofday(&timeval_start,NULL);
-
- /* Reopen the file and dataset */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dopen2(file, REG_COMPRESSED_DSET, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if((dataspace = H5Dget_space(dataset)) < 0)
- TEST_ERROR;
-
- start[0] = start[1] = start[2] = 0;
- stride[0] = stride[1] = stride[2] = 1;
- count[0] = count[1] = count[2] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ;
-
- for(i=0; i<NX; i++) {
- /*
- * Select hyperslab for one chunk in the file
- */
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- TEST_ERROR;
- (start[0])++;
-
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace,
- H5P_DEFAULT, direct_buf[i])) < 0)
- TEST_ERROR;
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(dataspace);
- H5Sclose(mem_space);
- H5Pclose(dxpl);
- H5Fclose(file);
-
- /* Report the performance */
- reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB));
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(dataspace);
- H5Sclose(mem_space);
- H5Pclose(dxpl);
- H5Fclose(file);
- } H5E_END_TRY;
- return 1;
-}
-
-/*--------------------------------------------------
- * Benchmark the performance of the regular H5Dwrite
- * with compression
- *--------------------------------------------------
- */
-int
-test_no_compress_write(hid_t fapl_id)
-{
- hid_t file; /* handles */
- hid_t dataspace, dataset;
- hid_t mem_space;
- hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ};
- hid_t dxpl;
- herr_t status;
- int i;
-
- hsize_t start[RANK]; /* Start of hyperslab */
- hsize_t stride[RANK]; /* Stride of hyperslab */
- hsize_t count[RANK]; /* Block count */
- hsize_t block[RANK]; /* Block sizes */
-
- struct timeval timeval_start;
-
- TESTING("H5Dwrite without compression");
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- TEST_ERROR;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- TEST_ERROR;
-
- /* Start the timer */
- gettimeofday(&timeval_start,NULL);
-
- /* Reopen the file and dataset */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
- TEST_ERROR;
-
- if((dataset = H5Dopen2(file, REG_NO_COMPRESS_DSET, H5P_DEFAULT)) < 0)
- TEST_ERROR;
-
- if((dataspace = H5Dget_space(dataset)) < 0)
- TEST_ERROR;
-
- start[0] = start[1] = start[2] = 0;
- stride[0] = stride[1] = stride[2] = 1;
- count[0] = count[1] = count[2] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ;
-
- for(i=0; i<NX; i++) {
- /*
- * Select hyperslab for one chunk in the file
- */
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- TEST_ERROR;
- (start[0])++;
-
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace,
- H5P_DEFAULT, direct_buf[i])) < 0)
- TEST_ERROR;
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(dataspace);
- H5Sclose(mem_space);
- H5Pclose(dxpl);
- H5Fclose(file);
-
- /* Report the performance */
- reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB));
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(dataspace);
- H5Sclose(mem_space);
- H5Pclose(dxpl);
- H5Fclose(file);
- } H5E_END_TRY;
- return 1;
-}
-
-/*--------------------------------------------------
- * Benchmark the performance for writing compressed
- * data to a Unix file
- *--------------------------------------------------
- */
-int
-test_unix_write(void)
-{
- int file, flag;
- ssize_t op_size;
- int i;
- struct timeval timeval_start;
-
- TESTING("Write compressed data to a Unix file");
-
- /* create file*/
- flag = O_WRONLY;
-
- /* Start the timer */
- gettimeofday(&timeval_start,NULL);
-
- if ((file=open(FILENAME[1],flag))== -1)
- TEST_ERROR;
-
- /* Write the compressed chunk data repeatedly to cover all the chunks in the
- * dataset, using the direct writing function. */
- for(i=0; i<NX; i++) {
- op_size = write(file, outbuf[i],data_size[i]);
- if (op_size < 0)
- {
- printf(" Error in writing data to file because %s \n", strerror(errno));
- TEST_ERROR;
- }
- else if (op_size == 0)
- {
- printf(" unable to write sufficent data to file because %s \n", strerror(errno));
- TEST_ERROR;
- }
- }
-
- if (close(file) < 0)
- {
- printf(" unable to close the file\n");
- TEST_ERROR;
- }
-
- /* Report the performance */
- reportTime(timeval_start, (double)(total_size/MB));
-
- PASSED();
- return 0;
-
-error:
- return 1;
-}
-
-/*--------------------------------------------------
- * Main function
- *--------------------------------------------------
- */
-int
-main (void)
-{
- hid_t fapl = H5P_DEFAULT;
- int i;
-
- /* Testing setup */
-/* h5_reset();
- fapl = h5_fileaccess();
-
- h5_fixname(FILENAME[0], fapl, filename, sizeof filename);*/
-
- sprintf(filename, "%s.h5", FILENAME[0]);
-
- create_file(fapl);
- test_direct_write_uncompressed_data(fapl);
- test_direct_write_compressed_data(fapl);
- test_no_compress_write(fapl);
- test_compressed_write(fapl);
- test_unix_write();
-
- for(i=0; i<NX; i++) {
- free(outbuf[i]);
- free(direct_buf[i]);
- }
-
-/* h5_cleanup(FILENAME, fapl);*/
- return 0;
-}
-
-#else /* WIN32 / MINGW32 */
-
-int
-main(void)
-{
- printf("Non-POSIX platform. Exiting.\n");
- return EXIT_FAILURE;
-} /* end main() */
-
-#endif /* WIN32 / MINGW32 */
-
-#else /* !H5_HAVE_FILTER_DEFLATE */
-
-/*
- * Function: main
- * Purpose: Dummy main() function for if HDF5 was configured without
- * zlib stuff.
- * Return: EXIT_SUCCESS
- */
-int
-main(void)
-{
- HDfprintf(stdout, "No compression IO performance because zlib was not configured\n");
- return EXIT_SUCCESS;
-}
-
-#endif /* !H5_HAVE_FILTER_DEFLATE */
-
diff --git a/hl/test/test_dset_opt.c b/hl/test/test_dset_opt.c
deleted file mode 100644
index ef4cf13..0000000
--- a/hl/test/test_dset_opt.c
+++ /dev/null
@@ -1,2171 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-* Copyright by The HDF Group. *
-* Copyright by the Board of Trustees of the University of Illinois. *
-* All rights reserved. *
-* *
-* This file is part of HDF5. The full HDF5 copyright notice, including *
-* terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
-* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-#include <stdlib.h>
-#include <string.h>
-#include "h5hltest.h"
-#include "H5DOpublic.h"
-#include <math.h>
-
-#if defined(H5_HAVE_ZLIB_H) && !defined(H5_ZLIB_HEADER)
-# define H5_ZLIB_HEADER "zlib.h"
-#endif
-#if defined(H5_ZLIB_HEADER)
-# include H5_ZLIB_HEADER /* "zlib.h" */
-#endif
-
-#define FILE_NAME "test_dectris.h5"
-
-/* Datasets for Direct Write tests */
-#define DATASETNAME1 "direct_write"
-#define DATASETNAME2 "skip_one_filter"
-#define DATASETNAME3 "skip_two_filters"
-#define DATASETNAME4 "data_conv"
-#define DATASETNAME5 "contiguous_dset"
-#define DATASETNAME6 "invalid_argue"
-#define DATASETNAME7 "overwrite_chunk"
-/* Datasets for Direct Read tests */
-#define DATASETNAME8 "disabled_chunk_cache"
-#define DATASETNAME9 "flush_chunk_cache"
-#define DATASETNAME10 "read_w_valid_cache"
-#define DATASETNAME11 "unallocated_chunk"
-#define DATASETNAME12 "unfiltered_data"
-
-#define RANK 2
-#define NX 16
-#define NY 16
-#define CHUNK_NX 4
-#define CHUNK_NY 4
-
-#define DEFLATE_SIZE_ADJUST(s) (HDceil(((double)(s))*H5_DOUBLE(1.001))+H5_DOUBLE(12.0))
-
-/* Temporary filter IDs used for testing */
-#define H5Z_FILTER_BOGUS1 305
-#define H5Z_FILTER_BOGUS2 306
-#define ADD_ON 7
-#define FACTOR 3
-
-/* Constants for the overwrite test */
-#define OVERWRITE_NDIMS 3
-#define OVERWRITE_CHUNK_NX 3
-#define OVERWRITE_CHUNK_2NX 6
-#define OVERWRITE_CHUNK_NY 2
-#define OVERWRITE_VALUE 42
-
-/* Defines used in test_single_chunk_latest() */
-#define FILE "single_latest.h5"
-#define DATASET "dataset"
-#define DIM0 4
-#define DIM1 32
-#define CHUNK0 DIM0
-#define CHUNK1 DIM1
-
-/* Local prototypes for filter functions */
-static size_t filter_bogus1(unsigned int flags, size_t cd_nelmts,
- const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
-static size_t filter_bogus2(unsigned int flags, size_t cd_nelmts,
- const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
-
-/* This message derives from H5Z */
-const H5Z_class2_t H5Z_BOGUS1[1] = {{
- H5Z_CLASS_T_VERS, /* H5Z_class_t version */
- H5Z_FILTER_BOGUS1, /* Filter id number */
- 1, 1, /* Encoding and decoding enabled */
- "bogus1", /* Filter name for debugging */
- NULL, /* The "can apply" callback */
- NULL, /* The "set local" callback */
- filter_bogus1, /* The actual filter function */
-}};
-
-const H5Z_class2_t H5Z_BOGUS2[1] = {{
- H5Z_CLASS_T_VERS, /* H5Z_class_t version */
- H5Z_FILTER_BOGUS2, /* Filter id number */
- 1, 1, /* Encoding and decoding enabled */
- "bogus2", /* Filter name for debugging */
- NULL, /* The "can apply" callback */
- NULL, /* The "set local" callback */
- filter_bogus2, /* The actual filter function */
-}};
-
-/*-------------------------------------------------------------------------
- * Function: test_direct_chunk_write
- *
- * Purpose: Test the basic functionality of H5DOwrite_chunk
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
-static int
-test_direct_chunk_write (hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int ret;
- int data[NX][NY];
- int i, j, n;
-
- unsigned filter_mask = 0;
- int direct_buf[CHUNK_NX][CHUNK_NY];
- int check_chunk[CHUNK_NX][CHUNK_NY];
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
-
- const Bytef *z_src = (const Bytef*)(direct_buf);
- Bytef *z_dst = NULL; /*destination buffer */
- uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
- uLong z_src_nbytes = (uLong)buf_size;
- int aggression = 9; /* Compression aggression setting */
- void *outbuf = NULL; /* Pointer to new buffer */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("basic functionality of H5DOwrite_chunk");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /*
- * Modify dataset creation properties, i.e. enable chunking and compression
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
-
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
- goto error;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DATASETNAME1, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Initialize the dataset */
- for(i = n = 0; i < NX; i++)
- for(j = 0; j < NY; j++)
- data[i][j] = n++;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /*
- * Write the data for the dataset. It should stay in the chunk cache.
- * It will be evicted from the cache by the H5DOwrite_chunk calls.
- */
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- dxpl, data)) < 0)
- goto error;
-
- /* Initialize data for one chunk */
- for(i = n = 0; i < CHUNK_NX; i++)
- for(j = 0; j < CHUNK_NY; j++)
- direct_buf[i][j] = n++;
-
- /* Allocate output (compressed) buffer */
- outbuf = HDmalloc(z_dst_nbytes);
- z_dst = (Bytef *)outbuf;
-
- /* Perform compression from the source to the destination buffer */
- ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
-
- /* Check for various zlib errors */
- if(Z_BUF_ERROR == ret) {
- HDfprintf(stderr, "overflow");
- goto error;
- } else if(Z_MEM_ERROR == ret) {
- HDfprintf(stderr, "deflate memory error");
- goto error;
- } else if(Z_OK != ret) {
- HDfprintf(stderr, "other deflate error");
- goto error;
- }
-
- /* Write the compressed chunk data repeatedly to cover all the chunks in the
- * dataset, using the direct writing function. */
- for(i=0; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
- status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf);
- offset[1] += CHUNK_NY;
- }
- offset[0] += CHUNK_NX;
- offset[1] = 0;
- }
-
- if(outbuf)
- HDfree(outbuf);
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- if((dataset = H5Dopen2(file, DATASETNAME1, H5P_DEFAULT)) < 0)
- goto error;
-
- /*
- * Select hyperslab for one chunk in the file
- */
- start[0] = CHUNK_NX; start[1] = CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(direct_buf[i][j] != check_chunk[i][j]) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
- goto error;
- }
- }
- }
-
- /* Reinitialize different data for one chunk */
- for(i = 0; i < CHUNK_NX; i++)
- for(j = 0; j < CHUNK_NY; j++)
- direct_buf[i][j] = i + j;
-
- /* Allocate output (compressed) buffer */
- outbuf = HDmalloc(z_dst_nbytes);
- z_dst = (Bytef *)outbuf;
-
- /* Perform compression from the source to the destination buffer */
- ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
-
- /* Check for various zlib errors */
- if(Z_BUF_ERROR == ret) {
- HDfprintf(stderr, "overflow");
- goto error;
- } else if(Z_MEM_ERROR == ret) {
- HDfprintf(stderr, "deflate memory error");
- goto error;
- } else if(Z_OK != ret) {
- HDfprintf(stderr, "other deflate error");
- goto error;
- }
-
- /* Rewrite the compressed chunk data repeatedly to cover all the chunks in the
- * dataset, using the direct writing function. */
- offset[0] = offset[1] = 0;
- for(i=0; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
- status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf);
- offset[1] += CHUNK_NY;
- }
- offset[0] += CHUNK_NX;
- offset[1] = 0;
- }
-
- if(outbuf)
- HDfree(outbuf);
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- if((dataset = H5Dopen2(file, DATASETNAME1, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(direct_buf[i][j] != check_chunk[i][j]) {
- HDprintf(" 2. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
- goto error;
- }
- }
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- if(outbuf)
- HDfree(outbuf);
-
- H5_FAILED();
- return 1;
-} /* test_direct_chunk_write() */
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
-/*-------------------------------------------------------------------------
- * Function: test_direct_chunk_overwrite_data
- *
- * Purpose: Test overwriting a chunk with new data.
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Dana Robinson
- * Spring 2017
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_direct_chunk_overwrite_data(hid_t fid)
-{
- size_t buf_size = OVERWRITE_CHUNK_NX * OVERWRITE_CHUNK_NY * sizeof(int16_t);
- int16_t data_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
- int16_t overwrite_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
- uint32_t filter_mask = 0;
- hid_t tid = H5T_NATIVE_UINT16;
- hid_t dcpl_id = -1;
- hid_t sid = -1;
- hid_t did = -1;
- uint16_t fill_value = 0;
- hsize_t dset_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
- hsize_t dset_max_dims[] = {H5S_UNLIMITED, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
- hsize_t chunk_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_NX};
- hsize_t offset[] = {0, 0, 0};
- hsize_t i, j;
- int16_t n;
- int16_t read_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_2NX];
-
- TESTING("overwriting existing data with H5DOwrite_chunk");
-
- /* Create the dataset's data space */
- if ((sid = H5Screate_simple(OVERWRITE_NDIMS, dset_dims, dset_max_dims)) < 0)
- FAIL_STACK_ERROR
-
- /* Set chunk size and filll value */
- if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- FAIL_STACK_ERROR
- if (H5Pset_fill_value(dcpl_id, tid, &fill_value) < 0)
- FAIL_STACK_ERROR
- if (H5Pset_chunk(dcpl_id, OVERWRITE_NDIMS, chunk_dims) < 0)
- FAIL_STACK_ERROR
-
- /* Create dataset */
- if ((did = H5Dcreate2(fid, DATASETNAME7, tid, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
- FAIL_STACK_ERROR
-
- /* Initialize data buffers */
- n = 0;
- for (i = 0; i < OVERWRITE_CHUNK_NY; i++) {
- for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
- data_buf[i][j] = n++;
- overwrite_buf[i][j] = OVERWRITE_VALUE;
- }
- }
-
- /* Write chunk data using the direct write function. */
- if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
- FAIL_STACK_ERROR
-
- /* Write second chunk. */
- offset[2] = OVERWRITE_CHUNK_NX;
- if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
- FAIL_STACK_ERROR
-
- /* Overwrite first chunk. */
- offset[2] = 0;
- if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, overwrite_buf) < 0)
- FAIL_STACK_ERROR
-
- /* Read the data back out */
- if (H5Dread(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) < 0)
- FAIL_STACK_ERROR
-
- /* Ensure that the data are correct in chunk 1 */
- for (i = 0; i < OVERWRITE_CHUNK_NY; i++)
- for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
- if (read_buf[i][j] != OVERWRITE_VALUE)
- TEST_ERROR
- }
-
- if (H5Pclose(dcpl_id) < 0)
- FAIL_STACK_ERROR
- if (H5Sclose(sid) < 0)
- FAIL_STACK_ERROR
- if (H5Dclose(did) < 0)
- FAIL_STACK_ERROR
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Pclose(dcpl_id);
- H5Sclose(sid);
- H5Dclose(did);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* end test_direct_chunk_overwrite_data() */
-
-/*-------------------------------------------------------------------------
- * Function: test_skip_compress_write1
- *
- * Purpose: Test skipping compression filter when it is the only filter
- * for the dataset
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_skip_compress_write1(hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int i, j, n;
-
- unsigned filter_mask = 0;
- int direct_buf[CHUNK_NX][CHUNK_NY];
- int check_chunk[CHUNK_NX][CHUNK_NY];
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- int aggression = 9; /* Compression aggression setting */
-
- unsigned read_filter_mask = 0; /* filter mask after direct read */
- int read_direct_buf[CHUNK_NX][CHUNK_NY];
- hsize_t read_buf_size = 0; /* buf size */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("skipping compression filter for H5DOwrite_chunk/H5DOread_chunk");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /*
- * Modify dataset creation properties, i.e. enable chunking and compression
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
-
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- if((status = H5Pset_deflate( cparms, (unsigned ) aggression)) < 0)
- goto error;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DATASETNAME2, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Initialize data for one chunk */
- for(i = n = 0; i < CHUNK_NX; i++)
- for(j = 0; j < CHUNK_NY; j++) {
- direct_buf[i][j] = n++;
- }
-
- /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
- * Indicate skipping the compression filter. */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY;
-
- filter_mask = 0x00000001;
-
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) < 0)
- goto error;
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- if((dataset = H5Dopen2(file, DATASETNAME2, H5P_DEFAULT)) < 0)
- goto error;
-
- /*
- * Select hyperslab for the chunk just written in the file
- */
- start[0] = CHUNK_NX; start[1] = CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(direct_buf[i][j] != check_chunk[i][j]) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
- goto error;
- }
- }
- }
-
- /* Query chunk storage size */
- if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
- goto error;
- if(read_buf_size != buf_size)
- goto error;
-
- /* Read the raw chunk back */
- HDmemset(&read_direct_buf, 0, sizeof(read_direct_buf));
- if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, read_direct_buf)) < 0)
- goto error;
- if(read_filter_mask != filter_mask)
- goto error;
-
- /* Check that the direct chunk read is the same as the chunk written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(direct_buf[i][j] != read_direct_buf[i][j]) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" direct_buf=%d, read_direct_buf=%d\n", direct_buf[i][j], read_direct_buf[i][j]);
- goto error;
- }
- }
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_skip_compress_write1() */
-
-/*-------------------------------------------------------------------------
- * Function: filter_bogus1
- *
- * Purpose: A bogus filter that adds ADD_ON to the original value
- *
- * Return: Success: Data chunk size
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-static size_t
-filter_bogus1(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
- const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes,
- size_t *buf_size, void **buf)
-{
- int *int_ptr=(int *)*buf; /* Pointer to the data values */
- ssize_t buf_left=(ssize_t)*buf_size; /* Amount of data buffer left to process */
-
- if(flags & H5Z_FLAG_REVERSE) { /* read */
- /* Substract the "add on" value to all the data values */
- while(buf_left>0) {
- *int_ptr++ -= (int)ADD_ON;
- buf_left -= (ssize_t)sizeof(int);
- } /* end while */
- } /* end if */
- else { /* write */
- /* Add the "add on" value to all the data values */
- while(buf_left>0) {
- *int_ptr++ += (int)ADD_ON;
- buf_left -= (ssize_t)sizeof(int);
- } /* end while */
- } /* end else */
-
- return nbytes;
-} /* filter_bogus1() */
-
-/*-------------------------------------------------------------------------
- * Function: filter_bogus2
- *
- * Purpose: A bogus filter that multiplies the original value by FACTOR.
- *
- * Return: Success: Data chunk size
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *-------------------------------------------------------------------------
- */
-static size_t
-filter_bogus2(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
- const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes,
- size_t *buf_size, void **buf)
-{
- int *int_ptr=(int *)*buf; /* Pointer to the data values */
- ssize_t buf_left=(ssize_t)*buf_size; /* Amount of data buffer left to process */
-
- if(flags & H5Z_FLAG_REVERSE) { /* read */
- /* Substract the "add on" value to all the data values */
- while(buf_left>0) {
- *int_ptr++ /= (int)FACTOR;
- buf_left -= (ssize_t)sizeof(int);
- } /* end while */
- } /* end if */
- else { /* write */
- /* Add the "add on" value to all the data values */
- while(buf_left>0) {
- *int_ptr++ *= (int)FACTOR;
- buf_left -= (ssize_t)sizeof(int);
- } /* end while */
- } /* end else */
-
- return nbytes;
-} /* filter_bogus2() */
-
-/*-------------------------------------------------------------------------
- * Function: test_skip_compress_write2
- *
- * Purpose: Test skipping compression filter when there are three filters
- * for the dataset
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_skip_compress_write2(hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int i, j, n;
-
- unsigned filter_mask = 0; /* orig filter mask */
- int origin_direct_buf[CHUNK_NX][CHUNK_NY];
- int direct_buf[CHUNK_NX][CHUNK_NY];
- int check_chunk[CHUNK_NX][CHUNK_NY];
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- int aggression = 9; /* Compression aggression setting */
-
- unsigned read_filter_mask = 0; /* filter mask after direct read */
- int read_direct_buf[CHUNK_NX][CHUNK_NY];
- hsize_t read_buf_size = 0; /* buf size */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("skipping compression filters but keep two other filters");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /*
- * Modify dataset creation properties, i.e. enable chunking and compression.
- * The order of filters is bogus 1 + deflate + bogus 2.
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
-
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- /* Register and enable first bogus filter */
- if(H5Zregister (H5Z_BOGUS1) < 0)
- goto error;
-
- if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS1, 0, (size_t)0, NULL) < 0)
- goto error;
-
- /* Enable compression filter */
- if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
- goto error;
-
- /* Register and enable second bogus filter */
- if(H5Zregister (H5Z_BOGUS2) < 0)
- goto error;
-
- if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS2, 0, (size_t)0, NULL) < 0)
- goto error;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DATASETNAME3, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Initialize data for one chunk. Apply operations of two bogus filters to the chunk */
- for(i = n = 0; i < CHUNK_NX; i++)
- for(j = 0; j < CHUNK_NY; j++) {
- origin_direct_buf[i][j] = n++;
- direct_buf[i][j] = (origin_direct_buf[i][j] + ADD_ON) * FACTOR;
- }
-
- /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
- * Indicate skipping the compression filter but keep the other two bogus filters */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY;
-
- /* compression filter is the middle one to be skipped */
- filter_mask = 0x00000002;
-
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) < 0)
- goto error;
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- if((dataset = H5Dopen2(file, DATASETNAME3, H5P_DEFAULT)) < 0)
- goto error;
-
- /*
- * Select hyperslab for one chunk in the file
- */
- start[0] = CHUNK_NX; start[1] = CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(origin_direct_buf[i][j] != check_chunk[i][j]) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" origin_direct_buf=%d, check_chunk=%d\n", origin_direct_buf[i][j], check_chunk[i][j]);
- goto error;
- }
- }
- }
-
- /* Query chunk storage size */
- if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
- goto error;
- if(read_buf_size != buf_size)
- goto error;
-
- /* Read the raw chunk back */
- HDmemset(&read_direct_buf, 0, sizeof(read_direct_buf));
- if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, read_direct_buf)) < 0)
- goto error;
- if(read_filter_mask != filter_mask)
- goto error;
-
- /* Check that the direct chunk read is the same as the chunk written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if(direct_buf[i][j] != read_direct_buf[i][j]) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" direct_buf=%d, read_direct_buf=%d\n", direct_buf[i][j], read_direct_buf[i][j]);
- goto error;
- }
- }
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_skip_compress_write2() */
-
-/*-------------------------------------------------------------------------
- * Function: test_data_conv
- *
- * Purpose: Test data conversion
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_data_conv(hid_t file)
-{
- typedef struct {
- int a, b, c[4], d, e;
- } src_type_t;
- typedef struct {
- int a, c[4], e;
- } dst_type_t;
-
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int i, j, n;
- const hsize_t four = 4;
- hid_t st=-1, dt=-1;
- hid_t array_dt;
-
- unsigned filter_mask = 0;
- src_type_t direct_buf[CHUNK_NX][CHUNK_NY];
- dst_type_t check_chunk[CHUNK_NX][CHUNK_NY];
- src_type_t read_chunk[CHUNK_NX][CHUNK_NY]; /* For H5DOread_chunk */
-
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(src_type_t);
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("data conversion for H5DOwrite_chunk/H5DOread_chunk");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /*
- * Modify dataset creation properties, i.e. enable chunking
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
-
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- /* Build hdf5 datatypes */
- array_dt = H5Tarray_create2(H5T_NATIVE_INT, 1, &four);
- if((st = H5Tcreate(H5T_COMPOUND, sizeof(src_type_t))) < 0 ||
- H5Tinsert(st, "a", HOFFSET(src_type_t, a), H5T_NATIVE_INT) < 0 ||
- H5Tinsert(st, "b", HOFFSET(src_type_t, b), H5T_NATIVE_INT) < 0 ||
- H5Tinsert(st, "c", HOFFSET(src_type_t, c), array_dt) < 0 ||
- H5Tinsert(st, "d", HOFFSET(src_type_t, d), H5T_NATIVE_INT) < 0 ||
- H5Tinsert(st, "e", HOFFSET(src_type_t, e), H5T_NATIVE_INT) < 0)
- goto error;
-
- if(H5Tclose(array_dt) < 0)
- goto error;
-
- array_dt = H5Tarray_create2(H5T_NATIVE_INT, 1, &four);
- if((dt = H5Tcreate(H5T_COMPOUND, sizeof(dst_type_t))) < 0 ||
- H5Tinsert(dt, "a", HOFFSET(dst_type_t, a), H5T_NATIVE_INT) < 0 ||
- H5Tinsert(dt, "c", HOFFSET(dst_type_t, c), array_dt) < 0 ||
- H5Tinsert(dt, "e", HOFFSET(dst_type_t, e), H5T_NATIVE_INT) < 0)
- goto error;
-
- if(H5Tclose(array_dt) < 0)
- goto error;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DATASETNAME4, st, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Initialize data for one chunk */
- for(i = n = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- (direct_buf[i][j]).a = i*j+0;
- (direct_buf[i][j]).b = i*j+1;
- (direct_buf[i][j]).c[0] = i*j+2;
- (direct_buf[i][j]).c[1] = i*j+3;
- (direct_buf[i][j]).c[2] = i*j+4;
- (direct_buf[i][j]).c[3] = i*j+5;
- (direct_buf[i][j]).d = i*j+6;
- (direct_buf[i][j]).e = i*j+7;
- }
- }
-
- /* write the chunk data to dataset, using the direct writing function.
- * There should be no data conversion involved. */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY;
-
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) < 0)
- goto error;
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- if((dataset = H5Dopen2(file, DATASETNAME4, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Use H5DOread_chunk() to read the uncompressed data */
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, read_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if ((direct_buf[i][j]).a != (read_chunk[i][j]).a ||
- (direct_buf[i][j]).b != (read_chunk[i][j]).b ||
- (direct_buf[i][j]).c[0] != (read_chunk[i][j]).c[0] ||
- (direct_buf[i][j]).c[1] != (read_chunk[i][j]).c[1] ||
- (direct_buf[i][j]).c[2] != (read_chunk[i][j]).c[2] ||
- (direct_buf[i][j]).c[3] != (read_chunk[i][j]).c[3] ||
- (direct_buf[i][j]).d != (read_chunk[i][j]).d ||
- (direct_buf[i][j]).e != (read_chunk[i][j]).e) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
- (direct_buf[i][j]).a, (direct_buf[i][j]).b, (direct_buf[i][j]).c[0], (direct_buf[i][j]).c[1],
- (direct_buf[i][j]).c[2], (direct_buf[i][j]).c[3], (direct_buf[i][j]).d, (direct_buf[i][j]).e);
- HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
- (read_chunk[i][j]).a, (read_chunk[i][j]).b, (read_chunk[i][j]).c[0], (read_chunk[i][j]).c[1],
- (read_chunk[i][j]).c[2], (read_chunk[i][j]).c[3], (read_chunk[i][j]).d, (read_chunk[i][j]).e);
-
- goto error;
- }
- }
- }
-
- /*
- * Select hyperslab for the chunk just written in the file
- */
- start[0] = CHUNK_NX; start[1] = CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back. Data should be converted */
- if((status = H5Dread(dataset, dt, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for(i = 0; i < CHUNK_NX; i++) {
- for(j = 0; j < CHUNK_NY; j++) {
- if ((direct_buf[i][j]).a != (check_chunk[i][j]).a ||
- (direct_buf[i][j]).c[0] != (check_chunk[i][j]).c[0] ||
- (direct_buf[i][j]).c[1] != (check_chunk[i][j]).c[1] ||
- (direct_buf[i][j]).c[2] != (check_chunk[i][j]).c[2] ||
- (direct_buf[i][j]).c[3] != (check_chunk[i][j]).c[3] ||
- (direct_buf[i][j]).e != (check_chunk[i][j]).e) {
- HDprintf(" 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", i, j);
- HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
- (direct_buf[i][j]).a, (direct_buf[i][j]).b, (direct_buf[i][j]).c[0], (direct_buf[i][j]).c[1],
- (direct_buf[i][j]).c[2], (direct_buf[i][j]).c[3], (direct_buf[i][j]).d, (direct_buf[i][j]).e);
- HDprintf(" dst={a=%d, c=[%d,%d,%d,%d], e=%d\n",
- (check_chunk[i][j]).a, (check_chunk[i][j]).c[0], (check_chunk[i][j]).c[1], (check_chunk[i][j]).c[2],
- (check_chunk[i][j]).c[3], (check_chunk[i][j]).e);
-
- goto error;
- }
- }
- }
-
- /*
- * Close/release resources.
- */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- H5Tclose(st);
- H5Tclose(dt);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- H5Tclose(st);
- H5Tclose(dt);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_data_conv() */
-
-/*-------------------------------------------------------------------------
- * Function: test_invalid_parameters
- *
- * Purpose: Test invalid parameters for H5DOwrite_chunk and H5DOread_chunk
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_invalid_parameters(hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int i, j, n;
-
- unsigned filter_mask = 0;
- int direct_buf[CHUNK_NX][CHUNK_NY];
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- int aggression = 9; /* Compression aggression setting */
-
- hsize_t chunk_nbytes; /* Chunk size */
-
- TESTING("invalid parameters for H5DOwrite_chunk/H5DOread_chunk");
-
- /*
- * Create the data space with unlimited dimensions.
- */
- if((dataspace = H5Screate_simple(RANK, dims, NULL)) < 0)
- goto error;
-
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /*
- * Modify dataset creation properties
- */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
-
- /*
- * Create a new contiguous dataset to verify H5DOwrite_chunk/H5DOread_chunk doesn't work
- */
- if((dataset = H5Dcreate2(file, DATASETNAME5, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Initialize data for one chunk */
- for(i = n = 0; i < CHUNK_NX; i++)
- for(j = 0; j < CHUNK_NY; j++) {
- direct_buf[i][j] = n++;
- }
-
- /* Try to write the chunk data to contiguous dataset. It should fail */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Try to get chunk size for a contiguous dataset. It should fail */
- H5E_BEGIN_TRY {
- if((status = H5Dget_chunk_storage_size(dataset, offset, &chunk_nbytes)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Try to H5DOread_chunk from the contiguous dataset. It should fail */
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
-
- /* Create a chunked dataset with compression filter */
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- if((status = H5Pset_deflate( cparms, (unsigned ) aggression)) < 0)
- goto error;
-
- /*
- * Create a new dataset within the file using cparms
- * creation properties.
- */
- if((dataset = H5Dcreate2(file, DATASETNAME6, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Check invalid dataset ID for H5DOwrite_chunk and H5DOread_chunk */
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk((hid_t)-1, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk((hid_t)-1, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check invalid DXPL ID for H5DOwrite_chunk and H5DOread_chunk */
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, (hid_t)-1, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, (hid_t)-1, offset, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check invalid OFFSET for H5DOwrite_chunk and H5DOread_chunk */
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, NULL, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, dxpl, NULL, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check when OFFSET is out of dataset range for H5DOwrite_chunk and H5DOread_chunk */
- offset[0] = NX + 1;
- offset[1] = NY;
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check when OFFSET is not on chunk boundary for H5DOwrite_chunk and H5DOread_chunk */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY + 1;
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check invalid buffer size for H5DOwrite_chunk only */
- offset[0] = CHUNK_NX;
- offset[1] = CHUNK_NY;
- buf_size = 0;
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- /* Check invalid data buffer for H5DOwrite_chunk and H5DOread_chunk */
- buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- H5E_BEGIN_TRY {
- if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, NULL)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- H5E_BEGIN_TRY {
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, NULL)) != FAIL)
- goto error;
- } H5E_END_TRY;
-
- if(H5Dclose(dataset) < 0)
- goto error;
-
- /*
- * Close/release resources.
- */
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_invalid_parameters() */
-
-/*-------------------------------------------------------------------------
- * Function: test_direct_chunk_read_no_cache
- *
- * Purpose: Test the basic functionality of H5DOread_chunk with the
- * chunk cache diabled.
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Matthew Strong (GE Healthcare)
- * 14 February 2016
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
-static int
-test_direct_chunk_read_no_cache (hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1, dapl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
- herr_t status; /* status from H5 function calls */
- int ret; /* deflate return status */
- int data[NX][NY];
- int i, j, k, l, n; /* local index variables */
-
- unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
- int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread and manually decompressed */
- int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
- hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
-
- Bytef *z_src = NULL; /* source buffer */
- uLongf z_src_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
- Bytef *z_dst = (Bytef*)(direct_buf);
- uLong z_dst_nbytes = (uLong)buf_size;
- int aggression = 9; /* Compression aggression setting */
- void *outbuf = NULL; /* Pointer to new buffer */
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("basic functionality of H5DOread_chunk (chunk cache disabled)");
-
- /* Create the data space with unlimited dimensions. */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /* Modify dataset creation properties, i.e. enable chunking and compression */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
- if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
- goto error;
- if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
- goto error;
-
- /* Disable chunk cache by setting number of slots to 0 */
- if((status = H5Pset_chunk_cache(dapl, 0, H5D_CHUNK_CACHE_NBYTES_DEFAULT, H5D_CHUNK_CACHE_W0_DEFAULT)) < 0)
- goto error;
-
- /* Create a new dataset within the file using cparms creation properties. */
- if((dataset = H5Dcreate2(file, DATASETNAME8, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, dapl)) < 0)
- goto error;
-
- /* Initialize the dataset */
- for(i = n = 0; i < NX; i++)
- for(j = 0; j < NY; j++)
- data[i][j] = n++;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Write the data for the dataset.
- * Data will skip chunk cache and go directly to disk. */
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- dxpl, data)) < 0)
- goto error;
-
- /* Allocate output (compressed) buffer */
- outbuf = HDmalloc(z_src_nbytes);
- z_src = (Bytef *)outbuf;
-
- /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
- for(i=0; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
- /* Select hyperslab for one chunk in the file */
- start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
-
- /* Hyperslab selection equals single chunk */
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
- /* Read the compressed chunk back using the direct read function. */
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, outbuf)) < 0)
- goto error;
-
- /* Check filter mask return value */
- if(filter_mask != 0)
- goto error;
-
- /* Perform decompression from the source to the destination buffer */
- ret = uncompress(z_dst, &z_dst_nbytes, z_src, z_src_nbytes);
-
- /* Check for various zlib errors */
- if(Z_BUF_ERROR == ret) {
- HDfprintf(stderr, "overflow\n");
- goto error;
- } else if(Z_MEM_ERROR == ret) {
- HDfprintf(stderr, "deflate memory error\n");
- goto error;
- } else if(Z_DATA_ERROR == ret) {
- HDfprintf(stderr, "corrupted data\n");
- goto error;
- } else if(Z_OK != ret) {
- HDfprintf(stderr, "other deflate error\n");
- goto error;
- }
-
- /* Check that the decompressed values match those read from H5Dread */
- for(k = 0; k < CHUNK_NX; k++) {
- for(l = 0; l < CHUNK_NY; l++) {
- if(direct_buf[k][l] != check_chunk[k][l]) {
- HDprintf("\n 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", k, l);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
- goto error;
- }
- }
- }
- }
- }
-
- /* Close/release resources. */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- H5Pclose(dapl);
-
- if(outbuf)
- HDfree(outbuf);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- H5Pclose(dapl);
- } H5E_END_TRY;
-
- if(outbuf)
- HDfree(outbuf);
-
- H5_FAILED();
- return 1;
-} /* test_direct_chunk_read_no_cache() */
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
-#ifdef H5_HAVE_FILTER_DEFLATE
-static int
-test_direct_chunk_read_cache (hid_t file, hbool_t flush)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
- herr_t status; /* status from H5 function calls */
- int ret; /* deflate return status */
- int data[NX][NY];
- int i, j, k, l, n; /* local index variables */
-
- unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
- int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread and manually decompressed */
- int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
- hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
-
- Bytef *z_src = NULL; /* source buffer */
- uLongf z_src_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
- Bytef *z_dst = (Bytef*)(direct_buf);
- uLong z_dst_nbytes = (uLong)buf_size;
- int aggression = 9; /* Compression aggression setting */
- void *outbuf = NULL; /* Pointer to new buffer */
- hsize_t read_buf_size = 0;
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- if(flush) {
- TESTING("basic functionality of H5DOread_chunk (flush chunk cache)");
- } else {
- TESTING("basic functionality of H5DOread_chunk (does not flush chunk cache)");
- }
-
- /* Create the data space with unlimited dimensions. */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /* Modify dataset creation properties, i.e. enable chunking and compression */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
- if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
- goto error;
-
- /* Create a new dataset within the file using cparms creation properties. */
- if((dataset = H5Dcreate2(file, flush?DATASETNAME9:DATASETNAME10, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Initialize the dataset */
- for(i = n = 0; i < NX; i++)
- for(j = 0; j < NY; j++)
- data[i][j] = n++;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Write the data for the dataset.
- * It should stay in the chunk cache. */
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- dxpl, data)) < 0)
- goto error;
-
- if(flush) {
- /* Flush the chunk cache to disk. Cache entry is not evicted. */
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
- }
-
- /* Allocate output (compressed) buffer */
- outbuf = HDmalloc(z_src_nbytes);
- z_src = (Bytef *)outbuf;
-
- /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
- for(i=0; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
- /* Select hyperslab for one chunk in the file */
- start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
-
- /* Hyperslab selection equals single chunk */
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
-
- /* Query chunk storage size */
- if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
- goto error;
- if(read_buf_size == 0)
- goto error;
-
- /* Read the compressed chunk back using the direct read function. */
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, outbuf)) < 0)
- goto error;
-
- /* Check filter mask return value */
- if(filter_mask != 0)
- goto error;
-
- /* Perform decompression from the source to the destination buffer */
- ret = uncompress(z_dst, &z_dst_nbytes, z_src, z_src_nbytes);
-
- /* Check for various zlib errors */
- if(Z_BUF_ERROR == ret) {
- HDfprintf(stderr, "overflow\n");
- goto error;
- } else if(Z_MEM_ERROR == ret) {
- HDfprintf(stderr, "deflate memory error\n");
- goto error;
- } else if(Z_DATA_ERROR == ret) {
- HDfprintf(stderr, "corrupted data\n");
- goto error;
- } else if(Z_OK != ret) {
- HDfprintf(stderr, "other deflate error\n");
- goto error;
- }
-
- /* Check that the decompressed values match those read from H5Dread */
- for(k = 0; k < CHUNK_NX; k++) {
- for(l = 0; l < CHUNK_NY; l++) {
- if(direct_buf[k][l] != check_chunk[k][l]) {
- HDprintf("\n 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", k, l);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
- goto error;
- }
- }
- }
- }
- }
-
- /* Close/release resources. */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- if(outbuf)
- HDfree(outbuf);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- if(outbuf)
- HDfree(outbuf);
-
- H5_FAILED();
- return 1;
-} /* test_direct_chunk_read_cache() */
-#endif /* H5_HAVE_FILTER_DEFLATE */
-
-/*-------------------------------------------------------------------------
- * Function: test_read_unfiltered_dset
- *
- * Purpose: Test the basic functionality of H5DOread_chunk on a dataset
- * without no filters applied.
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Matthew Strong (GE Healthcare)
- * 30 November 2016
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_read_unfiltered_dset(hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
- herr_t status;
- int data[NX][NY];
- int i, j, k, l, n;
-
- unsigned filter_mask = 0;
- int direct_buf[CHUNK_NX][CHUNK_NY];
- int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
- hsize_t offset[2] = {0, 0};
- size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- hsize_t read_buf_size = 0;
-
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
-
- TESTING("basic functionality of H5DOread_chunk on unfiltered datasets");
-
- /* Create the data space with unlimited dimensions. */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /* Modify dataset creation properties, i.e. enable chunking, no compression */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- /* Create a new dataset within the file using cparms creation properties. */
- if((dataset = H5Dcreate2(file, DATASETNAME12, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- /* Initialize the dataset */
- for(i = n = 0; i < NX; i++)
- for(j = 0; j < NY; j++)
- data[i][j] = n++;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Write the data for the dataset.
- * It should stay in the chunk cache and will be evicted/flushed by
- * the H5DOread_chunk function call. */
- if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- dxpl, data)) < 0)
- goto error;
-
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
- goto error;
-
- /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
- for(i=0; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
- /* Select hyperslab for one chunk in the file */
- start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
- stride[0] = 1; stride[1] = 1;
- count[0] = 1; count[1] = 1;
- block[0] = CHUNK_NX; block[1] = CHUNK_NY;
-
- /* Hyperslab selection equals single chunk */
- if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
- goto error;
-
- /* Read the chunk back */
- if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
- goto error;
-
- /* Query chunk storage size */
- if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
- goto error;
-
- if(read_buf_size != buf_size )
- goto error;
-
- offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
- /* Read the raw chunk back */
- HDmemset(&direct_buf, 0, sizeof(direct_buf));
- filter_mask = UINT_MAX;
- if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) < 0)
- goto error;
-
- /* Check filter mask return value */
- if(filter_mask != 0)
- goto error;
-
- /* Check that the decompressed values match those read from H5Dread */
- for(k = 0; k < CHUNK_NX; k++) {
- for(l = 0; l < CHUNK_NY; l++) {
- if(direct_buf[k][l] != check_chunk[k][l]) {
- HDprintf("\n 1. Read different values than written.");
- HDprintf(" At index %d,%d\n", k, l);
- HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
- goto error;
- }
- }
- }
- }
- }
-
- /* Close/release resources. */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_read_unfiltered_dset() */
-
-/*-------------------------------------------------------------------------
- * Function: test_read_unallocated_chunk
- *
- * Purpose: Tests the H5DOread_chunk and H5Dget_chunk_storage_size with valid
- * offets to chunks that have not been written to the dataset and are
- * not allocated in the chunk storage on disk.
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Matthew Strong (GE Healthcare)
- * 30 November 2016
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_read_unallocated_chunk (hid_t file)
-{
- hid_t dataspace = -1, dataset = -1;
- hid_t mem_space = -1;
- hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
- hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
- hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
- hsize_t chunk_nbytes = CHUNK_NX*CHUNK_NY*sizeof(int);
- hsize_t direct_chunk_nbytes = 0; /* size (bytes) of the on-disk chunk */
- herr_t status; /* status from H5 function calls */
- hsize_t i, j; /* local index variables */
-
- unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
- int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread and manually decompressed */
- hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
-
- TESTING("H5DOread_chunk with unallocated chunks");
-
- /* Create the data space with unlimited dimensions. */
- if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
- goto error;
- if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
- goto error;
-
- /* Modify dataset creation properties, i.e. enable chunking, no compression */
- if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
- goto error;
- if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
- goto error;
-
- /* Create a new dataset within the file using cparms creation properties. */
- if((dataset = H5Dcreate2(file, DATASETNAME11, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
- cparms, H5P_DEFAULT)) < 0)
- goto error;
-
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
- goto error;
-
- /* Write a single chunk to intialize the chunk storage */
- HDmemset(&chunk_dims, 0, sizeof(chunk_dims));
- offset[0] = 0; offset[1] = 0;
-
- if(H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, chunk_nbytes, &chunk_dims) < 0)
- goto error;
-
- /* Attempt to read each chunk in the dataset. Chunks are not allocated,
- * therefore we expect the result of H5DOread_chunk to fail. Chunk idx starts
- * at 1, since one chunk was written to init the chunk storage. */
- for(i=1; i<NX/CHUNK_NX; i++) {
- for(j=0; j<NY/CHUNK_NY; j++) {
-
- offset[0] = i * CHUNK_NX;
- offset[1] = j * CHUNK_NY;
-
- /* Read a non-existant chunk using the direct read function. */
- H5E_BEGIN_TRY {
- status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, &direct_buf);
- } H5E_END_TRY;
-
- /* Check that the chunk read call does not succeed. */
- if(status != -1)
- goto error;
-
- /* Query the size of the non-existant chunk */
- direct_chunk_nbytes = ULONG_MAX;
- H5E_BEGIN_TRY {
- status = H5Dget_chunk_storage_size(dataset, offset, &direct_chunk_nbytes);
- } H5E_END_TRY;
-
- /* Check that the chunk storage size call does not succeed. */
- if(status != -1 )
- goto error;
- if(direct_chunk_nbytes != 0 )
- goto error;
-
- }
- }
-
- /* Close/release resources. */
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
-
- PASSED();
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- H5Dclose(dataset);
- H5Sclose(mem_space);
- H5Sclose(dataspace);
- H5Pclose(cparms);
- H5Pclose(dxpl);
- } H5E_END_TRY;
-
- H5_FAILED();
- return 1;
-} /* test_read_unallocated_chunk() */
-
-/*-------------------------------------------------------------------------
- * Function: test_single_chunk_latest
- *
- * Purpose: This is to verify the fix for jira issue HDFFV-10425.
- * The problem was due to a bug in the internal library routine
- * H5D__chunk_direct_write() which passed a null dataset
- * pointer to the insert callback for the chunk index type.
- * Currently, the single chunk index is the only one that
- * used the dataset pointer in the insert callback.
- *
- * This routine is based on the test program attached to
- * this jira issue:
- * Create a file with the latest format and a chunked dataset
- * with one single chunk. The library will use single chunk
- * index for the dataset.
- * Verify that the data read is the same as the written data.
- *
- * Return: Success: 0
- * Failure: 1
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_single_chunk_latest(void)
-{
-    hid_t fid;                              /* File ID */
-    hid_t fapl;                             /* File access property list ID */
-    hid_t sid;                              /* Dataspace ID */
-    hid_t did;                              /* Dataset ID */
-    hid_t dcpl;                             /* Dataset creation property list */
-    hsize_t dims[2]   = {DIM0, DIM1};       /* Dimension sizes */
-    hsize_t chunk[2]  = {CHUNK0, CHUNK1};   /* Chunk dimension sizes */
-    hsize_t offset[2] = {0,0};              /* Offset for writing */
-    int wdata[DIM0][DIM1];                  /* Write buffer */
-    int rdata[DIM0][DIM1];                  /* Read buffer */
-    int i, j;                               /* Local index variables */
-
-    TESTING("H5DOwrite_chunk with single chunk and latest format");
-
-    /* Initialize data */
-    for (i = 0; i < DIM0; i++) {
-        for (j = 0; j < DIM1; j++)
-            wdata[i][j] = j/CHUNK0;
-    }
-
-    /* Create a new file with the latest format */
-    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
-        goto error;
-    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
-        goto error;
-    if((fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
-        goto error;
-
-    /* Create dataspace */
-    if((sid = H5Screate_simple(2, dims, NULL)) < 0)
-        goto error;
-
-    /* Create the dataset creation property list and set the chunk size.
-     * With the latest format and a single chunk, the library will use the
-     * Single Chunk index, which is the code path under test (HDFFV-10425). */
-    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
-        goto error;
-    if(H5Pset_chunk(dcpl, 2, chunk) < 0)
-        goto error;
-
-    /* Create the dataset */
-    if((did = H5Dcreate2(fid, DATASET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
-        goto error;
-
-    /* Write the data directly to the dataset.
-     * Use sizeof(int) rather than a hard-coded 4 so the byte count is
-     * correct on platforms where int is not 4 bytes wide. */
-    if(H5DOwrite_chunk(did, H5P_DEFAULT, 0, offset, CHUNK0 * CHUNK1 * sizeof(int), (void *)wdata) < 0)
-        goto error;
-
-    /*
-     * Close and release resources.
-     */
-    if(H5Pclose(dcpl) < 0)
-        goto error;
-    if(H5Dclose(did) < 0)
-        goto error;
-    if(H5Sclose(sid) < 0)
-        goto error;
-    if(H5Pclose(fapl) < 0)
-        goto error;
-    if(H5Fclose(fid) < 0)
-        goto error;
-
-    /* Open the file and dataset with default properties */
-    if((fid = H5Fopen(FILE, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
-        goto error;
-    if((did = H5Dopen2(fid, DATASET, H5P_DEFAULT)) < 0)
-        goto error;
-
-    /* Retrieve dataset creation property list */
-    if((dcpl = H5Dget_create_plist(did)) < 0)
-        goto error;
-
-    /* Read the data back through the regular read path */
-    if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0)
-        goto error;
-
-    /* Verify that the data read was correct. */
-    for (i = 0; i < DIM0; i++) {
-        for (j = 0; j < DIM1; j++) {
-            if(rdata[i][j] != wdata[i][j])
-                goto error;
-        }
-    }
-
-    /*
-     * Close and release resources
-     */
-    if(H5Pclose(dcpl) < 0)
-        goto error;
-    if(H5Dclose(did) < 0)
-        goto error;
-    if(H5Fclose(fid) < 0)
-        goto error;
-
-    PASSED();
-    return 0;
-
-error:
-    H5E_BEGIN_TRY {
-        H5Dclose(did);
-        H5Sclose(sid);
-        H5Pclose(dcpl);
-        H5Pclose(fapl);
-        H5Fclose(fid);
-    } H5E_END_TRY;
-
-    H5_FAILED();
-    return 1;
-} /* test_single_chunk_latest() */
-
-/*-------------------------------------------------------------------------
- * Function: Main function
- *
- * Purpose: Test direct chunk write function H5DOwrite_chunk and
- * chunk direct read function H5DOread_chunk
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Raymond Lu
- * 30 November 2012
- *
- *-------------------------------------------------------------------------
- */
-int main( void )
-{
-    hid_t fid     = -1;     /* File shared by all the direct chunk I/O tests */
-    int   nerrors = 0;      /* Cumulative error count */
-
-    /* Create (truncating) the test file; its contents are overwritten. */
-    if((fid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
-        goto error;
-
-    /* Direct chunk write tests (deflate-dependent ones only when the
-     * deflate filter was configured in). */
-#ifdef H5_HAVE_FILTER_DEFLATE
-    nerrors += test_direct_chunk_write(fid);
-#endif /* H5_HAVE_FILTER_DEFLATE */
-    nerrors += test_direct_chunk_overwrite_data(fid);
-    nerrors += test_skip_compress_write1(fid);
-    nerrors += test_skip_compress_write2(fid);
-    nerrors += test_data_conv(fid);
-    nerrors += test_invalid_parameters(fid);
-
-    /* Direct chunk read tests */
-#ifdef H5_HAVE_FILTER_DEFLATE
-    nerrors += test_direct_chunk_read_no_cache(fid);
-    nerrors += test_direct_chunk_read_cache(fid, TRUE);
-    nerrors += test_direct_chunk_read_cache(fid, FALSE);
-#endif /* H5_HAVE_FILTER_DEFLATE */
-    nerrors += test_read_unfiltered_dset(fid);
-    nerrors += test_read_unallocated_chunk(fid);
-
-    /* Single-chunk/latest-format test creates its own file */
-    nerrors += test_single_chunk_latest();
-
-    if(H5Fclose(fid) < 0)
-        goto error;
-
-    /* Fail the program if any sub-test failed */
-    if (nerrors)
-        goto error;
-
-    HDputs("All direct chunk read/write tests passed.");
-    return EXIT_SUCCESS;
-
-error:
-    HDputs("*** TESTS FAILED ***");
-    return EXIT_FAILURE;
-}
diff --git a/hl/test/test_h5do_compat.c b/hl/test/test_h5do_compat.c
new file mode 100644
index 0000000..4df5eef
--- /dev/null
+++ b/hl/test/test_h5do_compat.c
@@ -0,0 +1,286 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5hltest.h"
+#include "H5DOpublic.h"
+
+/* This test is a minimal test to ensure that the H5DO compatibility wrappers
+ * work correctly.
+ */
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+#define FILE_NAME "h5do_compat.h5"
+#define DATASET_NAME "direct_chunk_io"
+
+#define NX 8
+#define CHUNK_NX 4
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_dataset
+ *
+ * Purpose: Create the 1-D chunked, extensible dataset shared by the
+ * H5DO wrapper tests and seed it with known integer data
+ * (data[i] == i) through the normal H5Dwrite path.
+ *
+ * Return: Success: An identifier for the dataset used in the tests
+ * Failure: H5I_INVALID_HID
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+create_dataset(hid_t fid)
+{
+ hid_t did = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hsize_t dims[1] = {NX};
+ hsize_t maxdims[1] = {H5S_UNLIMITED};
+ hsize_t chunk_dims[1] = {CHUNK_NX};
+ int data[NX];
+ int i;
+
+ /* Create a dataspace for the new dataset */
+ if ((sid = H5Screate_simple(1, dims, maxdims)) < 0)
+ goto error;
+
+ /* Set up dataset creation parameters */
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if (H5Pset_chunk(dcpl_id, 1, chunk_dims) < 0)
+ goto error;
+
+ /* Create a new dataset */
+ if ((did = H5Dcreate2(fid, DATASET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Initialize the data */
+ for (i = 0; i < NX; i++)
+ data[i] = i;
+
+ /* Write the initialized data */
+ if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ goto error;
+
+ /* Close everything except the dataset, which is returned to the caller
+ * (the caller owns did and must H5Dclose it) */
+ if (H5Sclose(sid) < 0)
+ goto error;
+ if (H5Pclose(dcpl_id) < 0)
+ goto error;
+
+ return did;
+
+ error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ H5Sclose(sid);
+ H5Pclose(dcpl_id);
+ } H5E_END_TRY;
+
+ return H5I_INVALID_HID;
+
+} /* end create_dataset() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_direct_chunk_write
+ *
+ * Purpose: Exercise the H5DOwrite_chunk compatibility wrapper by
+ * writing the same raw chunk at every chunk offset of the
+ * dataset.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_direct_chunk_write(hid_t did)
+{
+    int      wbuf[CHUNK_NX];                        /* Raw chunk to write */
+    size_t   wbuf_size = CHUNK_NX * sizeof(int);    /* Size of one chunk, in bytes */
+    unsigned filter_mask = 0;                       /* No filters applied to the chunk */
+    hsize_t  offset[1] = {0};                       /* Logical offset of the chunk being written */
+    int      u;                                     /* Loop counter */
+
+    TESTING("H5DOwrite_chunk wrapper");
+
+    /* Fill the chunk buffer with recognizable values */
+    for (u = 0; u < CHUNK_NX; u++)
+        wbuf[u] = (u * 10) + u;
+
+    /* Write the raw chunk repeatedly, once per chunk in the dataset,
+     * advancing the offset by one chunk each iteration.
+     */
+    for (u = 0; u < NX/CHUNK_NX; u++) {
+        if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, wbuf_size, wbuf) < 0)
+            TEST_ERROR
+        offset[0] += CHUNK_NX;
+    }
+
+    PASSED();
+    return 0;
+
+error:
+    H5_FAILED();
+    return 1;
+} /* test_direct_chunk_write() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_direct_chunk_read
+ *
+ * Purpose: Test the basic functionality of the H5DOread_chunk wrapper
+ * by reading each raw chunk of the dataset and comparing it,
+ * element by element, to the same region read with H5Dread.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_direct_chunk_read(hid_t did)
+{
+    hid_t mem_sid = H5I_INVALID_HID;
+    hid_t file_sid = H5I_INVALID_HID;
+    hsize_t dims[1] = {NX};
+    hsize_t chunk_dims[1] = {CHUNK_NX};
+
+    unsigned filter_mask;
+    int chunk_data[CHUNK_NX];   /* Chunk read with H5DOread_chunk */
+    int check[CHUNK_NX];        /* Chunk read with H5Dread */
+    hsize_t offset[1];
+
+    hsize_t start[1];           /* Start of hyperslab */
+    hsize_t stride[1];          /* Stride of hyperslab */
+    hsize_t count[1];           /* Block count */
+    hsize_t block[1];           /* Block sizes */
+
+    int i, j;
+
+    TESTING("H5DOread_chunk wrapper");
+
+    /* Create dataspaces for reading */
+    if ((mem_sid = H5Screate_simple(1, chunk_dims, NULL)) < 0)
+        TEST_ERROR
+    if ((file_sid = H5Screate_simple(1, dims, NULL)) < 0)
+        TEST_ERROR
+
+    /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
+    for (i = 0; i < NX/CHUNK_NX; i++) {
+
+        /* Select hyperslab for one chunk in the file */
+        start[0] = (hsize_t)i * CHUNK_NX;
+        stride[0] = 1;
+        count[0] = 1;
+        block[0] = CHUNK_NX;
+
+        /* Hyperslab selection equals single chunk */
+        if (H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, stride, count, block) < 0)
+            TEST_ERROR
+
+        /* Read the chunk back through the regular read path */
+        if (H5Dread(did, H5T_NATIVE_INT, mem_sid, file_sid, H5P_DEFAULT, check) < 0)
+            TEST_ERROR
+
+        /* Read the raw chunk back; pre-poison the buffer and filter mask
+         * so stale values cannot mask a failed read */
+        HDmemset(chunk_data, 0, CHUNK_NX * sizeof(int));
+        filter_mask = UINT_MAX;
+        offset[0] = (hsize_t)i * CHUNK_NX;
+        if (H5DOread_chunk(did, H5P_DEFAULT, offset, &filter_mask, chunk_data) < 0)
+            TEST_ERROR
+
+        /* Check filter mask return value (no filters on this dataset) */
+        if (filter_mask != 0)
+            TEST_ERROR
+
+        /* Check that the values are correct.
+         * BUG FIX: index with the inner loop variable j; the previous code
+         * compared chunk_data[i] != check[i], which re-checked the same
+         * single element CHUNK_NX times and left the rest of each chunk
+         * unverified. */
+        for (j = 0; j < CHUNK_NX; j++)
+            if (chunk_data[j] != check[j])
+                TEST_ERROR
+    }
+
+    /* Close */
+    if (H5Sclose(mem_sid) < 0)
+        TEST_ERROR
+    if (H5Sclose(file_sid) < 0)
+        TEST_ERROR
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Sclose(mem_sid);
+        H5Sclose(file_sid);
+    } H5E_END_TRY;
+
+    H5_FAILED();
+    return 1;
+} /* test_direct_chunk_read() */
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Test direct chunk write function H5DOwrite_chunk and
+ * chunk direct read function H5DOread_chunk
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ *-------------------------------------------------------------------------
+ */
+int main( void )
+{
+#ifdef H5_NO_DEPRECATED_SYMBOLS
+
+    /* The H5DO wrappers only exist when deprecated symbols are enabled */
+    HDputs("Direct chunk read/write wrapper tests SKIPPED.");
+    HDputs("(Backward compatibility not configured)");
+    return EXIT_SUCCESS;
+
+#else
+
+    hid_t file_id = H5I_INVALID_HID;    /* Test file */
+    hid_t dset_id = H5I_INVALID_HID;    /* Chunked dataset shared by the tests */
+    int   nerrors = 0;                  /* Cumulative error count */
+
+    /* Create the test file and the chunked dataset the wrappers operate on */
+    if ((file_id = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+    if ((dset_id = create_dataset(file_id)) < 0)
+        goto error;
+
+    /* Run the write test first so the read test has raw chunks to verify */
+    nerrors += test_direct_chunk_write(dset_id);
+    nerrors += test_direct_chunk_read(dset_id);
+
+    if (H5Dclose(dset_id) < 0)
+        goto error;
+    if (H5Fclose(file_id) < 0)
+        goto error;
+
+    /* Fail the program if any sub-test failed */
+    if (nerrors)
+        goto error;
+
+    HDputs("All direct chunk read/write wrapper tests passed.");
+    return EXIT_SUCCESS;
+
+error:
+    HDputs("*** TESTS FAILED ***");
+    return EXIT_FAILURE;
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+} /* end main() */