author     Quincey Koziol <koziol@lbl.gov>    2021-06-02 20:29:46 (GMT)
committer  GitHub <noreply@github.com>        2021-06-02 20:29:46 (GMT)
commit     4b7f34acc16c7d071a5c1d4519ba2a01354e51db (patch)
tree       944c634b581e30979afd91ef05fcef4db4a062da /src/H5VLnative_dataset.c
parent     88a83d56c07f7c462d1866a2a2642a9f2e37dd22 (diff)
Combo set of async and other changes (#161)
* Update API tracing for new H5VL_request_status_t typedef
* Finish converting internal event set operations to use list iterator callbacks, instead of directly accessing the list structure
* Add H5VL_REQUEST_GET_ERR_STACK operation to request subclass, for retrieving a copy of the error stack for a failed asynchronous operation
* Remove 'canceled' event status from Java constants
* Be safer about releasing resources when inserting a newly opened/created object or file into an event set
* Remove H5EStest, add H5ES_WAIT_NONE for a 0 timeout, and revise the parameters to H5ESwait to make it more "aggregate"
* Remove H5ES_STATUS_CANCELED from the Java wrappers as well
* Apply patch for dynamically registering optional VOL operations
* (a) Add async APIs for the H5O module as listed in JIRA issue ID-283. (b) Remove verification of the name parameter in async-related routines for the H5A and H5L modules, because it is checked in the H5VL_setup* routines. (c) Modify h5dump expected output due to the async changes.
* Corrections based on PR feedback
* Further changes based on PR feedback
* Remove H5Dwait and H5Fwait (moved to the async connector); add the H5atclose routine; update 'optional op' operations
* Fix missed merge marker and reformat line
* Update API tracing infrastructure for the H5atclose callback
* Clean up some warnings
* Normalize against the develop branch
* Correct level of indirection
* Add doxygen info for H5is_library_terminating and regression tests for it and H5atclose
* Relocate prototype (and doxygen info) for H5Aclose
* Align with changes on develop
* Move group package initialization code to H5Gint.c and update tracing macros
* Change non-static function declarations to be static
* Correct GCC diagnostic macro
* Ensure that the H5TSpublic.h header gets installed (#129)
* Finish moving API routines that invoke the VOL framework to the main source files
* Fix position of H5Fmount and H5Funmount
* Add 'wrapper' versions of async calls, to allow language wrappers and layers on top of HDF5 to pass in their application information
* Add wrappers for dynamically registered optional operations
* Fix typo
* Update doxygen comment for H5atclose with additional detail
* Add H5VL*_vararg versions of H5VL routines that use va_list parameters
* Implement and test H5S_BLOCK
* Switch H5Aexists*_async and H5Lexists*_async to use a flag to return status, instead of the return value; make the corresponding changes through most of the v1 and v2 B-tree code; clean up warnings in H5public.h and cmpd_dtransform.c
* Add H5Iregister_future routine and tests
* Correct return value for H5Lexists_async
* Add H5_DLL macro to public H5ES API routines
* Update supported -> flags parameter for the introspect_query callback
* Remove my email address; update passthrough VOL connector ID
* Fix comment for post_open_api_common
* Remove unused non-blocking VOL connector
* Minor cleanup in the async branch in preparation for merge to develop
* Update CMake and the Autotools to use the new pass-through VOL ID
* Finish another iteration on the public H5ES routines, along with running the code reformatter
* Another round of reformatting
* Fix for SWMR daily test failures (#160): the H5I_register_using_existing_id() call did not initialize the future ID callbacks, causing the library to segfault when it tried to resolve those function pointers
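The reworked, "aggregate" H5ESwait above reports progress for the whole event set in one call: the number of operations still in progress plus an error flag. A minimal sketch of the intended usage, assuming the H5ES and H5D*_async signatures that shipped with this work (dset_id, mem_type_id, mem_space_id, file_space_id, and buf are placeholders):

    /* Minimal sketch (not from this commit): queue one asynchronous write,
     * then wait on the whole event set.  Error checks are omitted. */
    hid_t   es_id           = H5EScreate();
    size_t  num_in_progress = 0;
    hbool_t err_occurred    = 0;

    H5Dwrite_async(dset_id, mem_type_id, mem_space_id, file_space_id, H5P_DEFAULT, buf, es_id);

    /* Single "aggregate" wait for every operation in the set */
    H5ESwait(es_id, H5ES_WAIT_FOREVER, &num_in_progress, &err_occurred);
    H5ESclose(es_id);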
* Added selective async APIs (#150)
* Added selective async APIs. Description: added the following APIs: H5Ropen_attr_async, H5Ropen_object_async, H5Ropen_region_async, H5Mcreate_async, H5Mopen_async, H5Mput_async, H5Mget_async, H5Mclose_async, H5Tcommit_async, H5Topen_async, H5Tcopy_async, H5Tclose_async. Updated an expected output file to include a new internal function in the error stack for the failure case.
* Updated async APIs per reviews, including removing the async version of H5Tcopy
* Removed statements that were added by mistake in the previous commit
* Fix compile issues in H5M and warnings elsewhere
* Reformat code
* Bring VOL_LIST changes from develop (#163)
* Remove H5Dwait and H5Fwait calls, which were incorrectly brought back in
* Tiny cleanup of H5Lcreate_hard_async
* Run source formatter
* Allow for canceled operation in wait_cb
* Attempt to fix switch on string value
* Re-run source formatter
* Add H5S_BLOCK testfile to CMake clean target
* Add H5Pset_vol_async API routine and 'get_cap_flags' VOL introspection callback
* Clean up warnings
* Add H5P(set|get)_vol_implicit_async API routines to allow / disallow implicit asynchronous operations (default is disallowed)
* Run formatting script
* Remove H5VL_REQUEST_WAIT*
* Warning cleanup
* Eliminate strdup()s on statically allocated strings
* Warning cleanup
* Split H5VLrestore_lib_state into H5VLstart_lib_state and H5VLrestore_lib_state, and rename H5VLreset_lib_state to H5VLfinish_lib_state
* Duplicate strings when building err_info to return to the application
* Move connector author routines into separate header files, all included in the new hdf5dev.h header
* Run bin/trace to add TRACE macros
* Allow H5ES_NONE as a valid, but no-op, parameter to all H5ES API routines that accept an event set ID
* Clean up formatting
* Remove H5Pset/get_vol_implicit_async
* Clean up warning
* Remove H5Pget_vol_async and replace with the more generic H5Pget_vol_cap_flags
* Clean up warnings
* Add H5ESfree_err_info convenience routine
* Fix typo
* Correct matching for cached VOL plugins
* Add developer header file
* Update for C99 compatibility
* Add missing trace macro
* Stop clang-format from messing with the trace macros; don't set up VOL wrappers for 'infrastructure' objects like requests and blobs
* Fix warning about formatting a directory
* Clean up formatting for H5E_BEGIN_TRY / H5E_END_TRY
* Reduce scope of H5ES__close
* Enable CMake checks for various types on macOS
* Clean up properly when H5CX_retrieve_state() fails; also clean up many compiler warnings
* Committing clang-format changes
* Merge from develop
* Fix misplaced assert
* Remove commented-out code
* Re-add macro for unsetenv on Windows (I think it was accidentally merged out)
* Strengthen sanity check from error report to assertion
* Committing clang-format changes
* Add units to the comments for a few fields
* Switch the 'get execution time' operation for async request tokens to be an optional operation and query whether the connector supports the operation before retrieving it
* Committing clang-format changes
* Remove H5ESget_time_estimate
* Committing clang-format changes
* Create developer header for datatype routines and move the type conversion register/unregister routines there
* Simplify internal H5VL_setup_name_args and H5VL_setup_idx_args routines
* Add H5VLlink_optional_op, allowing dynamically registered optional operations for the link VOL subclass; also add an H5VL_loc_params argument to the link 'optional' callback so these operations work correctly
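The H5Pget_vol_cap_flags routine mentioned above replaces H5Pget_vol_async with a general capability query. A hedged sketch of how an application might test for asynchronous support, assuming the uint64_t flags word and the H5VL_CAP_FLAG_ASYNC bit introduced by the capability-flags work (fapl_id is a placeholder file access property list):

    uint64_t cap_flags = 0;

    /* Ask the VOL connector stack configured on this FAPL which
     * capabilities it advertises */
    if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
        /* ... handle error ... */ ;

    if (cap_flags & H5VL_CAP_FLAG_ASYNC)
        /* connector supports asynchronous operations */ ;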
* Run bin/format_source on current code
* Add H5VLobject_optional_op, allowing dynamically registered optional operations for the object VOL subclass; also add an H5VL_loc_params argument to the object 'optional' callback so these operations work correctly
* Run bin/format_source on current code
* Committing clang-format changes
* Revert "Switch 'get execution time' operation for async request tokens to be an optional operation and query if connector supports operation before retrieving it." This reverts commit 5ac92014da2682bdba62d7a2524b8d90e38f6b19.
* Committing clang-format changes
* Convert attribute 'get' operation to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Update tracing macros
* Convert attribute 'specific' operation to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Convert dataset 'get' and 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to attribute 'get' and 'specific' operation parameters.
* Convert datatype 'get' and 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to H5O_refresh_metadata arguments.
* Reduce warnings
* Reduce warnings
* Track change to datatype 'get' callback
* Fix bug with the file pointer getting invalidated when the object is closed
* Reformat source
* Convert file and group VOL classes' 'get' and 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also a small cleanup to the attribute 'get name' operation. Also moved the 'mount' and 'unmount' operations to be group-specific operations, instead of file-specific, to better align with their behavior (mounted files are on groups, so a group is what is operated on).
* Remove remainder of merge conflict marking
* Convert link VOL class 'create' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Remove some unused local variables
* Convert link VOL class 'get' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor the 'get name by idx' routines to return the actual length of the name through a parameter instead of the return value, and move some callback context structs for the link interface from the private header file into the source module, to reduce their visibility scope.
* Update tracing macros
* Convert link VOL class 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Convert object VOL class 'get' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Convert object VOL class 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor H5G_loc_exists, et al., to return the 'exists' flag through a parameter and errors through the function return value, instead of overloading both into the return value. Also correct a logic error in test/links.c around non-existent objects in a file.
* Convert request VOL class 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs
* Convert blob VOL class 'specific' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also remove the H5VL_BLOB_GETSIZE operation, as it's unused in the library and the blob ID size for a container is now returned with H5VL_FILE_GET_CONT_INFO.
* Add 'const' to several parameters that are only queried
* Convert all VOL classes' 'optional' operations to use the struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Convert several 'get' routines to return the length of an array through a parameter instead of combining it into the return value. Move several routines to a less public namespace. Correct the direct_chunk test to verify that parameters aren't modified on error.
* Switch get/specific/optional VOL callback argument structures to be 'async-friendly'. Also other minor cleanups and bug fixes.
* Add H5Pset_dataset_io_hyperslab_selection / H5S_PLIST feature, to allow skipping H5Dget_space + H5Sselect_hyperslab for async operation
* Add dynamic optional operations for request objects
* Update dynamic operation test for optional request operations
* Update a comment for an operation argument
* Run trace and format_source scripts
* Committing clang-format changes
* Committing clang-format changes

Co-authored-by: vchoi <vchoi@jelly.ad.hdfgroup.org>
Co-authored-by: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com>
Co-authored-by: jhendersonHDF <jhenderson@hdfgroup.org>
Co-authored-by: Dana Robinson <derobins@hdfgroup.org>
Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com>
Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com>
Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
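Most of the diff below follows from the struct-of-tagged-union conversion described above: each 'get'/'specific'/'optional' VOL callback now takes a single argument struct whose op_type tag selects a union member, rather than a va_list. A rough sketch of the calling side for H5Dget_space, assuming the internal H5VL_dataset_get wrapper and a resolved vol_obj (both outside this diff):

    H5VL_dataset_get_args_t vol_cb_args; /* Arguments for the VOL callback */

    vol_cb_args.op_type                 = H5VL_DATASET_GET_SPACE;
    vol_cb_args.args.get_space.space_id = H5I_INVALID_HID;

    /* The connector fills in the union member selected by op_type */
    if (H5VL_dataset_get(vol_obj, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, NULL) < 0)
        /* ... handle error ... */ ;

    space_id = vol_cb_args.args.get_space.space_id;

Because every member of the union is plain, addressable state rather than a consumed va_list, the same argument struct can be captured and replayed later by an asynchronous connector, which is the point of the 'async-friendly' reshuffle noted above.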
Diffstat (limited to 'src/H5VLnative_dataset.c')
-rw-r--r--  src/H5VLnative_dataset.c | 425
1 file changed, 269 insertions, 156 deletions
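The largest addition below is H5VL__native_dataset_io_setup, which implements the H5S_BLOCK and H5S_PLIST dataspace handles from the log above. A sketch of what they enable at the API level, assuming the H5Pset_dataset_io_hyperslab_selection parameter order (rank, op, start, stride, count, block) added by this feature and a pre-existing dset_id:

    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {1, 1};
    hsize_t count[2]  = {64, 64};
    hsize_t block[2]  = {1, 1};
    int     buf[64 * 64];
    hid_t   dxpl_id   = H5Pcreate(H5P_DATASET_XFER);

    /* Stash the file selection in the transfer property list ... */
    H5Pset_dataset_io_hyperslab_selection(dxpl_id, 2, H5S_SELECT_SET, start, stride, count, block);

    /* ... then read with H5S_PLIST as the file space and H5S_BLOCK as the
     * memory space: the library applies the stashed selection to the file
     * dataspace and builds a temporary 1-D memory dataspace with the same
     * number of elements, so no H5Dget_space / H5Sselect_hyperslab calls
     * are needed. */
    H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, H5S_PLIST, dxpl_id, buf);
    H5Pclose(dxpl_id);

Note the asymmetry enforced by the new helper: H5S_BLOCK is rejected for the file dataspace and H5S_PLIST for the memory dataspace.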
diff --git a/src/H5VLnative_dataset.c b/src/H5VLnative_dataset.c
index 21491e7..1375344 100644
--- a/src/H5VLnative_dataset.c
+++ b/src/H5VLnative_dataset.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5D_FRIEND /* Suppress error about including H5Dpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5CXprivate.h" /* API Contexts */
#include "H5Dpkg.h" /* Datasets */
@@ -30,6 +37,128 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Helper routines for read/write API calls */
+static herr_t H5VL__native_dataset_io_setup(H5D_t *dset, hid_t dxpl_id, hid_t file_space_id,
+ hid_t mem_space_id, H5S_t **file_space, H5S_t **mem_space);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5VL__native_dataset_io_setup
+ *
+ * Purpose: Set up file and memory dataspaces for dataset I/O operation
+ *
+ * Return: SUCCEED/FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5VL__native_dataset_io_setup(H5D_t *dset, hid_t dxpl_id, hid_t file_space_id, hid_t mem_space_id,
+ H5S_t **file_space, H5S_t **mem_space)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(dset);
+ HDassert(file_space && NULL == *file_space);
+ HDassert(mem_space && NULL == *mem_space);
+
+ /* Set up file dataspace */
+ if (H5S_ALL == file_space_id)
+ /* Use dataspace for dataset */
+ *file_space = dset->shared->space;
+ else if (H5S_BLOCK == file_space_id)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "H5S_BLOCK is not allowed for file dataspace")
+ else if (H5S_PLIST == file_space_id) {
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5S_t * space; /* Dataspace to hold selection */
+
+ /* Get the plist structure */
+ if (NULL == (plist = H5P_object_verify(dxpl_id, H5P_DATASET_XFER)))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADID, FAIL, "bad dataset transfer property list")
+
+ /* See if a dataset I/O selection is already set, and free it if it is */
+ if (H5P_peek(plist, H5D_XFER_DSET_IO_SEL_NAME, &space) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error getting dataset I/O selection")
+
+ /* Use dataspace for dataset */
+ *file_space = dset->shared->space;
+
+ /* Copy, but share, selection from property list to dataset's dataspace */
+ if (H5S_SELECT_COPY(*file_space, space, TRUE) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dataset I/O selection")
+ } /* end else-if */
+ else {
+ /* Get the dataspace pointer */
+ if (NULL == (*file_space = (H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "file_space_id is not a dataspace ID")
+ } /* end else */
+
+ /* Get dataspace for memory buffer */
+ if (H5S_ALL == mem_space_id)
+ *mem_space = *file_space;
+ else if (H5S_BLOCK == mem_space_id) {
+ hsize_t nelmts; /* # of selected elements in file */
+
+ /* Get the # of elements selected */
+ nelmts = H5S_GET_SELECT_NPOINTS(*file_space);
+
+ /* Check for any elements */
+ if (nelmts > 0) {
+ /* Create a 1-D dataspace of the same # of elements */
+ if (NULL == (*mem_space = H5S_create_simple(1, &nelmts, NULL)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "unable to create simple memory dataspace")
+ } /* end if */
+ else {
+ /* Create a NULL dataspace of the same # of elements */
+ if (NULL == (*mem_space = H5S_create(H5S_NULL)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "unable to create NULL memory dataspace")
+ } /* end else */
+ } /* end if */
+ else if (H5S_PLIST == mem_space_id)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "H5S_PLIST is not allowed for memory dataspace")
+ else {
+ /* Get the dataspace pointer */
+ if (NULL == (*mem_space = (H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "mem_space_id is not a dataspace ID")
+ } /* end else */
+
+ /* Check for valid selections */
+ if (H5S_SELECT_VALID(*file_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL,
+ "selection + offset not within extent for file dataspace")
+ if (H5S_SELECT_VALID(*mem_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL,
+ "selection + offset not within extent for memory dataspace")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5VL__native_dataset_io_setup() */
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_dataset_create
*
@@ -141,10 +270,10 @@ herr_t
H5VL__native_dataset_read(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id,
hid_t dxpl_id, void *buf, void H5_ATTR_UNUSED **req)
{
- H5D_t * dset = (H5D_t *)obj;
- const H5S_t *mem_space = NULL;
- const H5S_t *file_space = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_t *dset = (H5D_t *)obj;
+ H5S_t *mem_space = NULL;
+ H5S_t *file_space = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -152,11 +281,10 @@ H5VL__native_dataset_read(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid_
if (NULL == dset->oloc.file)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset is not associated with a file")
- /* Get validated dataspace pointers */
- if (H5S_get_validated_dataspace(mem_space_id, &mem_space) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "could not get a validated dataspace from mem_space_id")
- if (H5S_get_validated_dataspace(file_space_id, &file_space) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "could not get a validated dataspace from file_space_id")
+ /* Get file & memory dataspaces */
+ if (H5VL__native_dataset_io_setup(dset, dxpl_id, file_space_id, mem_space_id, &file_space, &mem_space) <
+ 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up file and memory dataspaces")
/* Set DXPL for operation */
H5CX_set_dxpl(dxpl_id);
@@ -166,6 +294,17 @@ H5VL__native_dataset_read(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid_
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
done:
+ /* Clean up */
+ if (H5S_BLOCK == mem_space_id && mem_space) {
+ if (H5S_close(mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL,
+ "unable to release temporary memory dataspace for H5S_BLOCK")
+ } /* end if */
+ else if (H5S_PLIST == file_space_id && file_space)
+ if (H5S_select_all(file_space, TRUE) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL,
+ "unable to release file dataspace selection for H5S_PLIST")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5VL__native_dataset_read() */
@@ -182,10 +321,10 @@ herr_t
H5VL__native_dataset_write(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id,
hid_t dxpl_id, const void *buf, void H5_ATTR_UNUSED **req)
{
- H5D_t * dset = (H5D_t *)obj;
- const H5S_t *mem_space = NULL;
- const H5S_t *file_space = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_t *dset = (H5D_t *)obj;
+ H5S_t *mem_space = NULL;
+ H5S_t *file_space = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -193,11 +332,10 @@ H5VL__native_dataset_write(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid
if (NULL == dset->oloc.file)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset is not associated with a file")
- /* Get validated dataspace pointers */
- if (H5S_get_validated_dataspace(mem_space_id, &mem_space) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "could not get a validated dataspace from mem_space_id")
- if (H5S_get_validated_dataspace(file_space_id, &file_space) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "could not get a validated dataspace from file_space_id")
+ /* Get file & memory dataspaces */
+ if (H5VL__native_dataset_io_setup(dset, dxpl_id, file_space_id, mem_space_id, &file_space, &mem_space) <
+ 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up file and memory dataspaces")
/* Set DXPL for operation */
H5CX_set_dxpl(dxpl_id);
@@ -207,6 +345,17 @@ H5VL__native_dataset_write(void *obj, hid_t mem_type_id, hid_t mem_space_id, hid
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
done:
+ /* Clean up */
+ if (H5S_BLOCK == mem_space_id && mem_space) {
+ if (H5S_close(mem_space) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL,
+ "unable to release temporary memory dataspace for H5S_BLOCK")
+ } /* end if */
+ else if (H5S_PLIST == file_space_id && file_space)
+ if (H5S_select_all(file_space, TRUE) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL,
+ "unable to release file dataspace selection for H5S_PLIST")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5VL__native_dataset_write() */
@@ -220,31 +369,26 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5VL__native_dataset_get(void *obj, H5VL_dataset_get_t get_type, hid_t H5_ATTR_UNUSED dxpl_id,
- void H5_ATTR_UNUSED **req, va_list arguments)
+H5VL__native_dataset_get(void *obj, H5VL_dataset_get_args_t *args, hid_t H5_ATTR_UNUSED dxpl_id,
+ void H5_ATTR_UNUSED **req)
{
H5D_t *dset = (H5D_t *)obj;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
- switch (get_type) {
+ switch (args->op_type) {
/* H5Dget_space */
case H5VL_DATASET_GET_SPACE: {
- hid_t *ret_id = HDva_arg(arguments, hid_t *);
-
- if ((*ret_id = H5D__get_space(dset)) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_CANTGET, FAIL, "can't get space ID of dataset")
+ if ((args->args.get_space.space_id = H5D__get_space(dset)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get space ID of dataset")
break;
}
/* H5Dget_space_status */
case H5VL_DATASET_GET_SPACE_STATUS: {
- H5D_space_status_t *allocation = HDva_arg(arguments, H5D_space_status_t *);
-
- /* Read data space address and return */
- if (H5D__get_space_status(dset, allocation) < 0)
+ if (H5D__get_space_status(dset, args->args.get_space_status.status) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get space status")
break;
@@ -252,40 +396,31 @@ H5VL__native_dataset_get(void *obj, H5VL_dataset_get_t get_type, hid_t H5_ATTR_U
/* H5Dget_type */
case H5VL_DATASET_GET_TYPE: {
- hid_t *ret_id = HDva_arg(arguments, hid_t *);
-
- if ((*ret_id = H5D__get_type(dset)) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_CANTGET, FAIL, "can't get datatype ID of dataset")
+ if ((args->args.get_type.type_id = H5D__get_type(dset)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get datatype ID of dataset")
break;
}
/* H5Dget_create_plist */
case H5VL_DATASET_GET_DCPL: {
- hid_t *ret_id = HDva_arg(arguments, hid_t *);
-
- if ((*ret_id = H5D_get_create_plist(dset)) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_CANTGET, FAIL, "can't get creation property list for dataset")
+ if ((args->args.get_dcpl.dcpl_id = H5D_get_create_plist(dset)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get creation property list for dataset")
break;
}
/* H5Dget_access_plist */
case H5VL_DATASET_GET_DAPL: {
- hid_t *ret_id = HDva_arg(arguments, hid_t *);
-
- if ((*ret_id = H5D_get_access_plist(dset)) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_CANTGET, FAIL, "can't get access property list for dataset")
+ if ((args->args.get_dapl.dapl_id = H5D_get_access_plist(dset)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get access property list for dataset")
break;
}
/* H5Dget_storage_size */
case H5VL_DATASET_GET_STORAGE_SIZE: {
- hsize_t *ret = HDva_arg(arguments, hsize_t *);
-
- /* Set return value */
- if (H5D__get_storage_size(dset, ret) < 0)
+ if (H5D__get_storage_size(dset, args->args.get_storage_size.storage_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get size of dataset's storage")
break;
}
@@ -308,69 +443,38 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5VL__native_dataset_specific(void *obj, H5VL_dataset_specific_t specific_type, hid_t H5_ATTR_UNUSED dxpl_id,
- void H5_ATTR_UNUSED **req, va_list arguments)
+H5VL__native_dataset_specific(void *obj, H5VL_dataset_specific_args_t *args, hid_t H5_ATTR_UNUSED dxpl_id,
+ void H5_ATTR_UNUSED **req)
{
H5D_t *dset = (H5D_t *)obj;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
- switch (specific_type) {
- /* H5Dspecific_space */
- case H5VL_DATASET_SET_EXTENT: { /* H5Dset_extent (H5Dextend - deprecated) */
- const hsize_t *size = HDva_arg(arguments, const hsize_t *);
-
- if (H5D__set_extent(dset, size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set extent of dataset")
+ switch (args->op_type) {
+ /* H5Dset_extent (H5Dextend - deprecated) */
+ case H5VL_DATASET_SET_EXTENT: {
+ if (H5D__set_extent(dset, args->args.set_extent.size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set extent of dataset")
break;
}
- case H5VL_DATASET_FLUSH: { /* H5Dflush */
- hid_t dset_id = HDva_arg(arguments, hid_t);
-
- /* Flush the dataset */
- if (H5D__flush(dset, dset_id) < 0)
+ /* H5Dflush */
+ case H5VL_DATASET_FLUSH: {
+ if (H5D__flush(dset, args->args.flush.dset_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush dataset")
break;
}
- case H5VL_DATASET_REFRESH: { /* H5Drefresh */
- hid_t dset_id = HDva_arg(arguments, hid_t);
-
- /* Refresh the dataset */
- if ((H5D__refresh(dset_id, dset)) < 0)
+ /* H5Drefresh */
+ case H5VL_DATASET_REFRESH: {
+ if (H5D__refresh(dset, args->args.refresh.dset_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTLOAD, FAIL, "unable to refresh dataset")
break;
}
- case H5VL_DATASET_WAIT: { /* H5Dwait */
- /* The native VOL connector doesn't support asynchronous
- * operations, so this is a no-op.
- */
- break;
- }
-
- case H5VL_DATASET_CHUNK_ITER: { /* H5Dchunk_iter */
- H5D_chunk_iter_op_t cb = HDva_arg(arguments, H5D_chunk_iter_op_t);
- void * op_data = HDva_arg(arguments, void *);
-
- HDassert(dset->shared);
-
- /* Make sure the dataset is chunked */
- if (H5D_CHUNKED != dset->shared->layout.type) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
- }
-
- /* Call private function */
- if (H5D__chunk_iter(dset, cb, op_data) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't iterate over chunks")
-
- break;
- }
-
default:
HGOTO_ERROR(H5E_VOL, H5E_UNSUPPORTED, FAIL, "invalid specific operation")
} /* end switch */
@@ -389,11 +493,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type, hid_t dxpl_id,
- void H5_ATTR_UNUSED **req, va_list arguments)
+H5VL__native_dataset_optional(void *obj, H5VL_optional_args_t *args, hid_t dxpl_id, void H5_ATTR_UNUSED **req)
{
- H5D_t *dset = (H5D_t *)obj; /* Dataset */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_t * dset = (H5D_t *)obj; /* Dataset */
+ H5VL_native_dataset_optional_args_t *opt_args = args->args; /* Pointer to native operation's arguments */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -403,13 +507,14 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
/* Set DXPL for operation */
H5CX_set_dxpl(dxpl_id);
- switch (optional_type) {
- case H5VL_NATIVE_DATASET_FORMAT_CONVERT: { /* H5Dformat_convert */
+ switch (args->op_type) {
+ /* H5Dformat_convert */
+ case H5VL_NATIVE_DATASET_FORMAT_CONVERT: {
switch (dset->shared->layout.type) {
case H5D_CHUNKED:
/* Convert the chunk indexing type to version 1 B-tree if not */
if (dset->shared->layout.u.chunk.idx_type != H5D_CHUNK_IDX_BTREE)
- if ((H5D__format_convert(dset)) < 0)
+ if (H5D__format_convert(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTLOAD, FAIL,
"unable to downgrade chunk indexing type for dataset")
break;
@@ -418,7 +523,7 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
case H5D_COMPACT:
/* Downgrade the layout version to 3 if greater than 3 */
if (dset->shared->layout.version > H5O_LAYOUT_VERSION_DEFAULT)
- if ((H5D__format_convert(dset)) < 0)
+ if (H5D__format_convert(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTLOAD, FAIL,
"unable to downgrade layout version for dataset")
break;
@@ -438,47 +543,46 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
break;
}
- case H5VL_NATIVE_DATASET_GET_CHUNK_INDEX_TYPE: { /* H5Dget_chunk_index_type */
- H5D_chunk_index_t *idx_type = HDva_arg(arguments, H5D_chunk_index_t *);
-
+ /* H5Dget_chunk_index_type */
+ case H5VL_NATIVE_DATASET_GET_CHUNK_INDEX_TYPE: {
/* Make sure the dataset is chunked */
if (H5D_CHUNKED != dset->shared->layout.type)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Get the chunk indexing type */
- *idx_type = dset->shared->layout.u.chunk.idx_type;
+ *opt_args->get_chunk_idx_type.idx_type = dset->shared->layout.u.chunk.idx_type;
break;
}
- case H5VL_NATIVE_DATASET_GET_CHUNK_STORAGE_SIZE: { /* H5Dget_chunk_storage_size */
- hsize_t *offset = HDva_arg(arguments, hsize_t *);
- hsize_t *chunk_nbytes = HDva_arg(arguments, hsize_t *);
+ /* H5Dget_chunk_storage_size */
+ case H5VL_NATIVE_DATASET_GET_CHUNK_STORAGE_SIZE: {
+ H5VL_native_dataset_get_chunk_storage_size_t *gcss_args = &opt_args->get_chunk_storage_size;
/* Make sure the dataset is chunked */
if (H5D_CHUNKED != dset->shared->layout.type)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Call private function */
- if (H5D__get_chunk_storage_size(dset, offset, chunk_nbytes) < 0)
+ if (H5D__get_chunk_storage_size(dset, gcss_args->offset, gcss_args->size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get storage size of chunk")
break;
}
- case H5VL_NATIVE_DATASET_GET_NUM_CHUNKS: { /* H5Dget_num_chunks */
- const H5S_t *space = NULL;
- hid_t space_id = HDva_arg(arguments, hid_t);
- hsize_t * nchunks = HDva_arg(arguments, hsize_t *);
+ /* H5Dget_num_chunks */
+ case H5VL_NATIVE_DATASET_GET_NUM_CHUNKS: {
+ H5VL_native_dataset_get_num_chunks_t *gnc_args = &opt_args->get_num_chunks;
+ const H5S_t * space = NULL;
HDassert(dset->shared);
HDassert(dset->shared->space);
/* When default dataspace is given, use the dataset's dataspace */
- if (space_id == H5S_ALL)
+ if (gnc_args->space_id == H5S_ALL)
space = dset->shared->space;
else /* otherwise, use the given space ID */
- if (NULL == (space = (const H5S_t *)H5I_object_verify(space_id, H5I_DATASPACE)))
+ if (NULL == (space = (const H5S_t *)H5I_object_verify(gnc_args->space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a valid dataspace ID")
/* Make sure the dataset is chunked */
@@ -486,29 +590,25 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Call private function */
- if (H5D__get_num_chunks(dset, space, nchunks) < 0)
+ if (H5D__get_num_chunks(dset, space, gnc_args->nchunks) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get number of chunks")
break;
}
- case H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_IDX: { /* H5Dget_chunk_info */
- const H5S_t *space = NULL;
- hid_t space_id = HDva_arg(arguments, hid_t);
- hsize_t chk_index = HDva_arg(arguments, hsize_t);
- hsize_t * offset = HDva_arg(arguments, hsize_t *);
- unsigned * filter_mask = HDva_arg(arguments, unsigned *);
- haddr_t * addr = HDva_arg(arguments, haddr_t *);
- hsize_t * size = HDva_arg(arguments, hsize_t *);
+ /* H5Dget_chunk_info */
+ case H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_IDX: {
+ H5VL_native_dataset_get_chunk_info_by_idx_t *gcibi_args = &opt_args->get_chunk_info_by_idx;
+ const H5S_t * space;
HDassert(dset->shared);
HDassert(dset->shared->space);
/* When default dataspace is given, use the dataset's dataspace */
- if (space_id == H5S_ALL)
+ if (gcibi_args->space_id == H5S_ALL)
space = dset->shared->space;
else /* otherwise, use the given space ID */
- if (NULL == (space = (const H5S_t *)H5I_object_verify(space_id, H5I_DATASPACE)))
+ if (NULL == (space = (const H5S_t *)H5I_object_verify(gcibi_args->space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a valid dataspace ID")
/* Make sure the dataset is chunked */
@@ -516,16 +616,16 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Call private function */
- if (H5D__get_chunk_info(dset, space, chk_index, offset, filter_mask, addr, size) < 0)
+ if (H5D__get_chunk_info(dset, space, gcibi_args->chk_index, gcibi_args->offset,
+ gcibi_args->filter_mask, gcibi_args->addr, gcibi_args->size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info by index")
+
break;
}
- case H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_COORD: { /* H5Dget_chunk_info_by_coord */
- hsize_t * offset = HDva_arg(arguments, hsize_t *);
- unsigned *filter_mask = HDva_arg(arguments, unsigned *);
- haddr_t * addr = HDva_arg(arguments, haddr_t *);
- hsize_t * size = HDva_arg(arguments, hsize_t *);
+ /* H5Dget_chunk_info_by_coord */
+ case H5VL_NATIVE_DATASET_GET_CHUNK_INFO_BY_COORD: {
+ H5VL_native_dataset_get_chunk_info_by_coord_t *gcibc_args = &opt_args->get_chunk_info_by_coord;
HDassert(dset->shared);
@@ -534,17 +634,17 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Call private function */
- if (H5D__get_chunk_info_by_coord(dset, offset, filter_mask, addr, size) < 0)
+ if (H5D__get_chunk_info_by_coord(dset, gcibc_args->offset, gcibc_args->filter_mask,
+ gcibc_args->addr, gcibc_args->size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info by its logical coordinates")
break;
}
- case H5VL_NATIVE_DATASET_CHUNK_READ: { /* H5Dread_chunk */
- const hsize_t *offset = HDva_arg(arguments, hsize_t *);
- uint32_t * filters = HDva_arg(arguments, uint32_t *);
- void * buf = HDva_arg(arguments, void *);
- hsize_t offset_copy[H5O_LAYOUT_NDIMS]; /* Internal copy of chunk offset */
+ /* H5Dread_chunk */
+ case H5VL_NATIVE_DATASET_CHUNK_READ: {
+ H5VL_native_dataset_chunk_read_t *chunk_read_args = &opt_args->chunk_read;
+ hsize_t offset_copy[H5O_LAYOUT_NDIMS]; /* Internal copy of chunk offset */
/* Check arguments */
if (NULL == dset->oloc.file)
@@ -555,22 +655,21 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
/* Copy the user's offset array so we can be sure it's terminated properly.
* (we don't want to mess with the user's buffer).
*/
- if (H5D__get_offset_copy(dset, offset, offset_copy) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "failure to copy offset array")
+ if (H5D__chunk_get_offset_copy(dset, chunk_read_args->offset, offset_copy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "failure to copy offset array")
/* Read the raw chunk */
- if (H5D__chunk_direct_read(dset, offset_copy, filters, buf) < 0)
+ if (H5D__chunk_direct_read(dset, offset_copy, &chunk_read_args->filters, chunk_read_args->buf) <
+ 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read unprocessed chunk data")
break;
}
- case H5VL_NATIVE_DATASET_CHUNK_WRITE: { /* H5Dwrite_chunk */
- uint32_t filters = HDva_arg(arguments, uint32_t);
- const hsize_t *offset = HDva_arg(arguments, const hsize_t *);
- uint32_t data_size_32 = HDva_arg(arguments, uint32_t);
- const void * buf = HDva_arg(arguments, const void *);
- hsize_t offset_copy[H5O_LAYOUT_NDIMS]; /* Internal copy of chunk offset */
+ /* H5Dwrite_chunk */
+ case H5VL_NATIVE_DATASET_CHUNK_WRITE: {
+ H5VL_native_dataset_chunk_write_t *chunk_write_args = &opt_args->chunk_write;
+ hsize_t offset_copy[H5O_LAYOUT_NDIMS]; /* Internal copy of chunk offset */
/* Check arguments */
if (NULL == dset->oloc.file)
@@ -581,34 +680,48 @@ H5VL__native_dataset_optional(void *obj, H5VL_dataset_optional_t optional_type,
/* Copy the user's offset array so we can be sure it's terminated properly.
* (we don't want to mess with the user's buffer).
*/
- if (H5D__get_offset_copy(dset, offset, offset_copy) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "failure to copy offset array")
+ if (H5D__chunk_get_offset_copy(dset, chunk_write_args->offset, offset_copy) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "failure to copy offset array")
/* Write chunk */
- if (H5D__chunk_direct_write(dset, filters, offset_copy, data_size_32, buf) < 0)
+ if (H5D__chunk_direct_write(dset, chunk_write_args->filters, offset_copy, chunk_write_args->size,
+ chunk_write_args->buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write unprocessed chunk data")
break;
}
- case H5VL_NATIVE_DATASET_GET_VLEN_BUF_SIZE: { /* H5Dvlen_get_buf_size */
- hid_t type_id = HDva_arg(arguments, hid_t);
- hid_t space_id = HDva_arg(arguments, hid_t);
- hsize_t *size = HDva_arg(arguments, hsize_t *);
+ /* H5Dvlen_get_buf_size */
+ case H5VL_NATIVE_DATASET_GET_VLEN_BUF_SIZE: {
+ H5VL_native_dataset_get_vlen_buf_size_t *gvbs_args = &opt_args->get_vlen_buf_size;
- if (H5D__vlen_get_buf_size(dset, type_id, space_id, size) < 0)
+ if (H5D__vlen_get_buf_size(dset, gvbs_args->type_id, gvbs_args->space_id, gvbs_args->size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get size of vlen buf needed")
break;
}
/* H5Dget_offset */
case H5VL_NATIVE_DATASET_GET_OFFSET: {
- haddr_t *ret = HDva_arg(arguments, haddr_t *);
+ /* Get offset */
+ *opt_args->get_offset.offset = H5D__get_offset(dset);
+
+ break;
+ }
+
+ /* H5Dchunk_iter */
+ case H5VL_NATIVE_DATASET_CHUNK_ITER: {
+ /* Sanity check */
+ HDassert(dset->shared);
+
+ /* Make sure the dataset is chunked */
+ if (H5D_CHUNKED != dset->shared->layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+
+ /* Call private function */
+ if ((ret_value = H5D__chunk_iter(dset, opt_args->chunk_iter.op, opt_args->chunk_iter.op_data)) <
+ 0)
+ HERROR(H5E_DATASET, H5E_BADITER, "chunk iteration failed");
- /* Set return value */
- *ret = H5D__get_offset(dset);
- if (!H5F_addr_defined(*ret))
- *ret = HADDR_UNDEF;
break;
}
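For the last hunk shown, optional native operations are now driven through a generic H5VL_optional_args_t whose args pointer refers to the native-specific union. A sketch of the calling side for the H5Dget_offset route, using the member names visible in this diff and assuming the internal H5VL_dataset_optional wrapper and a resolved vol_obj:

    H5VL_native_dataset_optional_args_t dset_opt_args; /* Native-specific arguments */
    H5VL_optional_args_t                vol_cb_args;   /* Generic optional-op arguments */
    haddr_t                             dset_offset = HADDR_UNDEF;

    dset_opt_args.get_offset.offset = &dset_offset;
    vol_cb_args.op_type             = H5VL_NATIVE_DATASET_GET_OFFSET;
    vol_cb_args.args                = &dset_opt_args;

    if (H5VL_dataset_optional(vol_obj, &vol_cb_args, H5P_DATASET_XFER_DEFAULT, NULL) < 0)
        /* ... handle error ... */ ;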