author     Allen Byrne <byrn@hdfgroup.org>    2020-09-30 14:27:10 (GMT)
committer  Allen Byrne <byrn@hdfgroup.org>    2020-09-30 14:27:10 (GMT)
commit     b2d661b508a7fc7a2592c13bc6bdc175551f075d (patch)
tree       13baeb0d83a7c2a4c6299993c182b1227c2f6114 /src/H5Dint.c
parent     29ab58b58dce556639ea3154e262895773a8a8df (diff)
Clang-format of source files
Diffstat (limited to 'src/H5Dint.c')
-rw-r--r--  src/H5Dint.c  1890
1 file changed, 944 insertions, 946 deletions
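
All of the changes in this commit are whitespace-only clang-format adjustments: a space is inserted after control keywords (so "if(" becomes "if ("), pointer declarators and trailing comments are column-aligned within declaration blocks, and lines that exceed the column limit are re-wrapped. As a minimal sketch of the style, assuming settings equivalent to what this commit applies (the project's actual .clang-format file is not part of this page), the first struct in the diff is reformatted roughly like this:

    /* Before: unaligned declarators and trailing comments */
    typedef struct {
        void *fl_tbuf;       /* Ptr to the temporary buffer we are using for fixed-length data */
        void *vl_tbuf;       /* Ptr to the temporary buffer we are using for VL data */
        size_t vl_tbuf_size; /* Current size of the temp. buffer for VL data */
        hsize_t size;        /* Accumulated number of bytes for the selection */
    } H5D_vlen_bufsize_common_t;

    /* After: the '*' and the trailing comments are padded so names and comments line up in columns */
    typedef struct {
        void *  fl_tbuf;       /* Ptr to the temporary buffer we are using for fixed-length data */
        void *  vl_tbuf;       /* Ptr to the temporary buffer we are using for VL data */
        size_t  vl_tbuf_size;  /* Current size of the temp. buffer for VL data */
        hsize_t size;          /* Accumulated number of bytes for the selection */
    } H5D_vlen_bufsize_common_t;
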
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 6e247fc..7a2b93b 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -15,30 +15,27 @@
/* Module Setup */
/****************/
-#include "H5Dmodule.h" /* This source code file is part of the H5D module */
-
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5CXprivate.h" /* API Contexts */
-#include "H5Dpkg.h" /* Datasets */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fprivate.h" /* Files */
-#include "H5FLprivate.h" /* Free Lists */
-#include "H5FOprivate.h" /* File objects */
-#include "H5Iprivate.h" /* IDs */
-#include "H5Lprivate.h" /* Links */
-#include "H5MMprivate.h" /* Memory management */
-#include "H5VLprivate.h" /* Virtual Object Layer */
-
+#include "H5private.h" /* Generic Functions */
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fprivate.h" /* Files */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5FOprivate.h" /* File objects */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Lprivate.h" /* Links */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5VLprivate.h" /* Virtual Object Layer */
/****************/
/* Local Macros */
/****************/
-
/******************/
/* Local Typedefs */
/******************/
@@ -46,60 +43,57 @@
/* Shared data structure for computing variable-length dataset's total size */
/* (Used for both native and generic 'get vlen buf size' operation) */
typedef struct {
- void *fl_tbuf; /* Ptr to the temporary buffer we are using for fixed-length data */
- void *vl_tbuf; /* Ptr to the temporary buffer we are using for VL data */
- size_t vl_tbuf_size; /* Current size of the temp. buffer for VL data */
- hsize_t size; /* Accumulated number of bytes for the selection */
+ void * fl_tbuf; /* Ptr to the temporary buffer we are using for fixed-length data */
+ void * vl_tbuf; /* Ptr to the temporary buffer we are using for VL data */
+ size_t vl_tbuf_size; /* Current size of the temp. buffer for VL data */
+ hsize_t size; /* Accumulated number of bytes for the selection */
} H5D_vlen_bufsize_common_t;
/* Internal data structure for computing variable-length dataset's total size */
/* (Used for native 'get vlen buf size' operation) */
typedef struct {
- H5D_t *dset; /* Dataset for operation */
- H5S_t *fspace; /* Dataset's dataspace for operation */
- H5S_t *mspace; /* Memory dataspace for operation */
- H5D_vlen_bufsize_common_t common; /* VL data buffers & accumulatd size */
+ H5D_t * dset; /* Dataset for operation */
+ H5S_t * fspace; /* Dataset's dataspace for operation */
+ H5S_t * mspace; /* Memory dataspace for operation */
+ H5D_vlen_bufsize_common_t common; /* VL data buffers & accumulatd size */
} H5D_vlen_bufsize_native_t;
/* Internal data structure for computing variable-length dataset's total size */
/* (Used for generic 'get vlen buf size' operation) */
typedef struct {
- H5VL_object_t *dset_vol_obj; /* VOL object for the dataset */
- hid_t fspace_id; /* Dataset dataspace ID of the dataset we are working on */
- H5S_t *fspace; /* Dataset's dataspace for operation */
- hid_t mspace_id; /* Memory dataspace ID of the dataset we are working on */
- hid_t dxpl_id; /* Dataset transfer property list to pass to dataset read */
- H5D_vlen_bufsize_common_t common; /* VL data buffers & accumulatd size */
+ H5VL_object_t * dset_vol_obj; /* VOL object for the dataset */
+ hid_t fspace_id; /* Dataset dataspace ID of the dataset we are working on */
+ H5S_t * fspace; /* Dataset's dataspace for operation */
+ hid_t mspace_id; /* Memory dataspace ID of the dataset we are working on */
+ hid_t dxpl_id; /* Dataset transfer property list to pass to dataset read */
+ H5D_vlen_bufsize_common_t common; /* VL data buffers & accumulatd size */
} H5D_vlen_bufsize_generic_t;
-
/********************/
/* Local Prototypes */
/********************/
/* General stuff */
static H5D_shared_t *H5D__new(hid_t dcpl_id, hid_t dapl_id, hbool_t creating, hbool_t vl_type);
-static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type);
-static herr_t H5D__cache_dataspace_info(const H5D_t *dset);
-static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space);
-static herr_t H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id);
+static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type);
+static herr_t H5D__cache_dataspace_info(const H5D_t *dset);
+static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space);
+static herr_t H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id);
static herr_t H5D__build_file_prefix(const H5D_t *dset, H5F_prefix_open_t prefix_type, char **file_prefix);
static herr_t H5D__open_oid(H5D_t *dataset, hid_t dapl_id);
-static herr_t H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite,
- hsize_t old_dim[]);
+static herr_t H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t old_dim[]);
static herr_t H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id);
static herr_t H5D__close_cb(H5VL_object_t *dset_vol_obj);
static herr_t H5D__use_minimized_dset_headers(H5F_t *file, hbool_t *minimize);
static herr_t H5D__prepare_minimized_oh(H5F_t *file, H5D_t *dset, H5O_loc_t *oloc);
static size_t H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr);
-static void *H5D__vlen_get_buf_size_alloc(size_t size, void *info);
-static herr_t H5D__vlen_get_buf_size_cb(void *elem, hid_t type_id, unsigned ndim,
- const hsize_t *point, void *op_data);
-static herr_t H5D__vlen_get_buf_size_gen_cb(void *elem, hid_t type_id, unsigned ndim,
- const hsize_t *point, void *op_data);
+static void * H5D__vlen_get_buf_size_alloc(size_t size, void *info);
+static herr_t H5D__vlen_get_buf_size_cb(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *op_data);
+static herr_t H5D__vlen_get_buf_size_gen_cb(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *op_data);
static herr_t H5D__check_filters(H5D_t *dataset);
-
/*********************/
/* Package Variables */
/*********************/
@@ -110,12 +104,10 @@ H5FL_BLK_DEFINE(vlen_vl_buf);
/* Declare a free list to manage other blocks of VL data */
H5FL_BLK_DEFINE(vlen_fl_buf);
-
/*****************************/
/* Library Private Variables */
/*****************************/
-
/*******************/
/* Local Variables */
/*******************/
@@ -141,10 +133,10 @@ H5_GCC_DIAG_ON("larger-than=")
/* Dataset ID class */
static const H5I_class_t H5I_DATASET_CLS[1] = {{
- H5I_DATASET, /* ID class value */
- 0, /* Class flags */
- 0, /* # of reserved IDs for class */
- (H5I_free_t)H5D__close_cb /* Callback routine for closing objects of this class */
+ H5I_DATASET, /* ID class value */
+ 0, /* Class flags */
+ 0, /* # of reserved IDs for class */
+ (H5I_free_t)H5D__close_cb /* Callback routine for closing objects of this class */
}};
/* Flag indicating "top" of interface has been initialized */
@@ -155,7 +147,6 @@ static hbool_t H5D_top_package_initialize_s = FALSE;
static const char *H5D_prefix_ext_env = NULL;
static const char *H5D_prefix_vds_env = NULL;
-
/*-------------------------------------------------------------------------
* Function: H5D_init
*
@@ -169,7 +160,7 @@ static const char *H5D_prefix_vds_env = NULL;
herr_t
H5D_init(void)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* FUNC_ENTER() does all the work */
@@ -178,7 +169,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_init() */
-
/*--------------------------------------------------------------------------
NAME
H5D__init_package -- Initialize interface-specific information
@@ -197,13 +187,13 @@ NOTES
herr_t
H5D__init_package(void)
{
- H5P_genplist_t *def_dcpl; /* Default Dataset Creation Property list */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5P_genplist_t *def_dcpl; /* Default Dataset Creation Property list */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Initialize the atom group for the dataset IDs */
- if(H5I_register_type(H5I_DATASET_CLS) < 0)
+ if (H5I_register_type(H5I_DATASET_CLS) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize interface")
/* Reset the "default dataset" information */
@@ -215,19 +205,19 @@ H5D__init_package(void)
/* Get the default dataset creation property list values and initialize the
* default dataset with them.
*/
- if(NULL == (def_dcpl = (H5P_genplist_t *)H5I_object(H5P_LST_DATASET_CREATE_ID_g)))
+ if (NULL == (def_dcpl = (H5P_genplist_t *)H5I_object(H5P_LST_DATASET_CREATE_ID_g)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "can't get default dataset creation property list")
/* Get the default data storage layout */
- if(H5P_get(def_dcpl, H5D_CRT_LAYOUT_NAME, &H5D_def_dset.layout) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve layout")
+ if (H5P_get(def_dcpl, H5D_CRT_LAYOUT_NAME, &H5D_def_dset.layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve layout")
/* Get the default dataset creation properties */
- if(H5P_get(def_dcpl, H5D_CRT_EXT_FILE_LIST_NAME, &H5D_def_dset.dcpl_cache.efl) < 0)
+ if (H5P_get(def_dcpl, H5D_CRT_EXT_FILE_LIST_NAME, &H5D_def_dset.dcpl_cache.efl) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve external file list")
- if(H5P_get(def_dcpl, H5D_CRT_FILL_VALUE_NAME, &H5D_def_dset.dcpl_cache.fill) < 0)
+ if (H5P_get(def_dcpl, H5D_CRT_FILL_VALUE_NAME, &H5D_def_dset.dcpl_cache.fill) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve fill value")
- if(H5P_get(def_dcpl, H5O_CRT_PIPELINE_NAME, &H5D_def_dset.dcpl_cache.pline) < 0)
+ if (H5P_get(def_dcpl, H5O_CRT_PIPELINE_NAME, &H5D_def_dset.dcpl_cache.pline) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve pipeline filter")
/* Mark "top" of interface as initialized, too */
@@ -241,7 +231,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_package() */
-
/*-------------------------------------------------------------------------
* Function: H5D_top_term_package
*
@@ -255,12 +244,12 @@ done:
int
H5D_top_term_package(void)
{
- int n = 0;
+ int n = 0;
FUNC_ENTER_NOAPI_NOINIT_NOERR
- if(H5D_top_package_initialize_s) {
- if(H5I_nmembers(H5I_DATASET) > 0) {
+ if (H5D_top_package_initialize_s) {
+ if (H5I_nmembers(H5I_DATASET) > 0) {
/* The dataset API uses the "force" flag set to true because it
* is using the "file objects" (H5FO) API functions to track open
* objects in the file. Using the H5FO code means that dataset
@@ -285,17 +274,16 @@ H5D_top_term_package(void)
*/
(void)H5I_clear_type(H5I_DATASET, TRUE, FALSE);
n++; /*H5I*/
- } /* end if */
+ } /* end if */
/* Mark closed */
- if(0 == n)
+ if (0 == n)
H5D_top_package_initialize_s = FALSE;
} /* end if */
FUNC_LEAVE_NOAPI(n)
} /* end H5D_top_term_package() */
-
/*-------------------------------------------------------------------------
* Function: H5D_term_package
*
@@ -312,11 +300,11 @@ H5D_top_term_package(void)
int
H5D_term_package(void)
{
- int n = 0;
+ int n = 0;
FUNC_ENTER_NOAPI_NOINIT_NOERR
- if(H5_PKG_INIT_VAR) {
+ if (H5_PKG_INIT_VAR) {
/* Sanity checks */
HDassert(0 == H5I_nmembers(H5I_DATASET));
HDassert(FALSE == H5D_top_package_initialize_s);
@@ -325,14 +313,13 @@ H5D_term_package(void)
n += (H5I_dec_type_ref(H5I_DATASET) > 0);
/* Mark closed */
- if(0 == n)
+ if (0 == n)
H5_PKG_INIT_VAR = FALSE;
} /* end if */
FUNC_LEAVE_NOAPI(n)
} /* end H5D_term_package() */
-
/*-------------------------------------------------------------------------
* Function: H5D__close_cb
*
@@ -345,7 +332,7 @@ H5D_term_package(void)
static herr_t
H5D__close_cb(H5VL_object_t *dset_vol_obj)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -353,7 +340,7 @@ H5D__close_cb(H5VL_object_t *dset_vol_obj)
HDassert(dset_vol_obj);
/* Close the dataset */
- if(H5VL_dataset_close(dset_vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0)
+ if (H5VL_dataset_close(dset_vol_obj, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to close dataset");
done:
@@ -363,13 +350,12 @@ done:
*/
/* Free the VOL object */
- if(H5VL_free_object(dset_vol_obj) < 0)
+ if (H5VL_free_object(dset_vol_obj) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to free VOL object");
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__close_cb() */
-
/*-------------------------------------------------------------------------
* Function: H5D__create_named
*
@@ -381,12 +367,12 @@ done:
*-------------------------------------------------------------------------
*/
H5D_t *
-H5D__create_named(const H5G_loc_t *loc, const char *name, hid_t type_id,
- const H5S_t *space, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id)
+H5D__create_named(const H5G_loc_t *loc, const char *name, hid_t type_id, const H5S_t *space, hid_t lcpl_id,
+ hid_t dcpl_id, hid_t dapl_id)
{
- H5O_obj_create_t ocrt_info; /* Information for object creation */
- H5D_obj_create_t dcrt_info; /* Information for dataset creation */
- H5D_t *ret_value = NULL; /* Return value */
+ H5O_obj_create_t ocrt_info; /* Information for object creation */
+ H5D_obj_create_t dcrt_info; /* Information for dataset creation */
+ H5D_t * ret_value = NULL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -401,17 +387,17 @@ H5D__create_named(const H5G_loc_t *loc, const char *name, hid_t type_id,
/* Set up dataset creation info */
dcrt_info.type_id = type_id;
- dcrt_info.space = space;
+ dcrt_info.space = space;
dcrt_info.dcpl_id = dcpl_id;
dcrt_info.dapl_id = dapl_id;
/* Set up object creation information */
ocrt_info.obj_type = H5O_TYPE_DATASET;
ocrt_info.crt_info = &dcrt_info;
- ocrt_info.new_obj = NULL;
+ ocrt_info.new_obj = NULL;
/* Create the new dataset and link it to its parent group */
- if(H5L_link_object(loc, name, &ocrt_info, lcpl_id) < 0)
+ if (H5L_link_object(loc, name, &ocrt_info, lcpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to create and link to dataset")
HDassert(ocrt_info.new_obj);
@@ -422,7 +408,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__create_named() */
-
/*-------------------------------------------------------------------------
* Function: H5D__get_space_status
*
@@ -436,48 +421,48 @@ done:
herr_t
H5D__get_space_status(const H5D_t *dset, H5D_space_status_t *allocation)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_PACKAGE
HDassert(dset);
/* Check for chunked layout */
- if(dset->shared->layout.type == H5D_CHUNKED) {
- hsize_t space_allocated; /* The number of bytes allocated for chunks */
- hssize_t snelmts; /* Temporary holder for number of elements in dataspace */
- hsize_t nelmts; /* Number of elements in dataspace */
- size_t dt_size; /* Size of datatype */
- hsize_t full_size; /* The number of bytes in the dataset when fully populated */
+ if (dset->shared->layout.type == H5D_CHUNKED) {
+ hsize_t space_allocated; /* The number of bytes allocated for chunks */
+ hssize_t snelmts; /* Temporary holder for number of elements in dataspace */
+ hsize_t nelmts; /* Number of elements in dataspace */
+ size_t dt_size; /* Size of datatype */
+ hsize_t full_size; /* The number of bytes in the dataset when fully populated */
/* For chunked layout set the space status by the storage size */
/* Get the dataset's dataspace */
HDassert(dset->shared->space);
/* Get the total number of elements in dataset's dataspace */
- if((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
+ if ((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve number of elements in dataspace")
nelmts = (hsize_t)snelmts;
/* Get the size of the dataset's datatype */
- if(0 == (dt_size = H5T_GET_SIZE(dset->shared->type)))
+ if (0 == (dt_size = H5T_GET_SIZE(dset->shared->type)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of datatype")
/* Compute the maximum size of the dataset in bytes */
full_size = nelmts * dt_size;
/* Check for overflow during multiplication */
- if(nelmts != (full_size / dt_size))
+ if (nelmts != (full_size / dt_size))
HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed")
/* Difficult to error check, since the error value is 0 and 0 is a valid value... :-/ */
- if(H5D__get_storage_size(dset, &space_allocated) < 0)
+ if (H5D__get_storage_size(dset, &space_allocated) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get size of dataset's storage")
/* Decide on how much of the space is allocated */
- if(space_allocated == 0)
+ if (space_allocated == 0)
*allocation = H5D_SPACE_STATUS_NOT_ALLOCATED;
- else if(space_allocated == full_size)
+ else if (space_allocated == full_size)
*allocation = H5D_SPACE_STATUS_ALLOCATED;
else
*allocation = H5D_SPACE_STATUS_PART_ALLOCATED;
@@ -485,7 +470,7 @@ H5D__get_space_status(const H5D_t *dset, H5D_space_status_t *allocation)
else {
/* For non-chunked layouts set space status by result of is_space_alloc
* function */
- if(dset->shared->layout.ops->is_space_alloc(&dset->shared->layout.storage))
+ if (dset->shared->layout.ops->is_space_alloc(&dset->shared->layout.storage))
*allocation = H5D_SPACE_STATUS_ALLOCATED;
else
*allocation = H5D_SPACE_STATUS_NOT_ALLOCATED;
@@ -495,7 +480,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_space_status() */
-
/*-------------------------------------------------------------------------
* Function: H5D__new
*
@@ -508,14 +492,14 @@ done:
static H5D_shared_t *
H5D__new(hid_t dcpl_id, hid_t dapl_id, hbool_t creating, hbool_t vl_type)
{
- H5D_shared_t *new_dset = NULL; /* New dataset object */
- H5P_genplist_t *plist; /* Property list created */
- H5D_shared_t *ret_value = NULL; /* Return value */
+ H5D_shared_t * new_dset = NULL; /* New dataset object */
+ H5P_genplist_t *plist; /* Property list created */
+ H5D_shared_t * ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
/* Allocate new shared dataset structure */
- if(NULL == (new_dset = H5FL_MALLOC(H5D_shared_t)))
+ if (NULL == (new_dset = H5FL_MALLOC(H5D_shared_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Copy the default dataset information */
@@ -524,27 +508,27 @@ H5D__new(hid_t dcpl_id, hid_t dapl_id, hbool_t creating, hbool_t vl_type)
/* If we are using the default dataset creation property list, during creation
* don't bother to copy it, just increment the reference count
*/
- if(!vl_type && creating && dcpl_id == H5P_DATASET_CREATE_DEFAULT) {
- if(H5I_inc_ref(dcpl_id, FALSE) < 0)
+ if (!vl_type && creating && dcpl_id == H5P_DATASET_CREATE_DEFAULT) {
+ if (H5I_inc_ref(dcpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment default DCPL ID")
new_dset->dcpl_id = dcpl_id;
} /* end if */
else {
/* Get the property list */
- if(NULL == (plist = (H5P_genplist_t *)H5I_object(dcpl_id)))
+ if (NULL == (plist = (H5P_genplist_t *)H5I_object(dcpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list")
new_dset->dcpl_id = H5P_copy_plist(plist, FALSE);
} /* end else */
- if(!vl_type && creating && dapl_id == H5P_DATASET_ACCESS_DEFAULT) {
- if(H5I_inc_ref(dapl_id, FALSE) < 0)
+ if (!vl_type && creating && dapl_id == H5P_DATASET_ACCESS_DEFAULT) {
+ if (H5I_inc_ref(dapl_id, FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment default DAPL ID")
new_dset->dapl_id = dapl_id;
} /* end if */
else {
/* Get the property list */
- if(NULL == (plist = (H5P_genplist_t *)H5I_object(dapl_id)))
+ if (NULL == (plist = (H5P_genplist_t *)H5I_object(dapl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list")
new_dset->dapl_id = H5P_copy_plist(plist, FALSE);
@@ -554,11 +538,11 @@ H5D__new(hid_t dcpl_id, hid_t dapl_id, hbool_t creating, hbool_t vl_type)
ret_value = new_dset;
done:
- if(ret_value == NULL)
- if(new_dset != NULL) {
- if(new_dset->dcpl_id != 0 && H5I_dec_ref(new_dset->dcpl_id) < 0)
+ if (ret_value == NULL)
+ if (new_dset != NULL) {
+ if (new_dset->dcpl_id != 0 && H5I_dec_ref(new_dset->dcpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "can't decrement temporary datatype ID")
- if(new_dset->dapl_id != 0 && H5I_dec_ref(new_dset->dapl_id) < 0)
+ if (new_dset->dapl_id != 0 && H5I_dec_ref(new_dset->dapl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "can't decrement temporary datatype ID")
new_dset = H5FL_FREE(H5D_shared_t, new_dset);
} /* end if */
@@ -566,7 +550,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__new() */
-
/*-------------------------------------------------------------------------
* Function: H5D__init_type
*
@@ -580,10 +563,10 @@ done:
static herr_t
H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type)
{
- htri_t relocatable; /* Flag whether the type is relocatable */
- htri_t immutable; /* Flag whether the type is immutable */
- hbool_t use_at_least_v18; /* Flag indicating to use at least v18 format versions */
- herr_t ret_value = SUCCEED; /* Return value */
+ htri_t relocatable; /* Flag whether the type is relocatable */
+ htri_t immutable; /* Flag whether the type is immutable */
+ hbool_t use_at_least_v18; /* Flag indicating to use at least v18 format versions */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -593,55 +576,54 @@ H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type)
HDassert(type);
/* Check whether the datatype is relocatable */
- if((relocatable = H5T_is_relocatable(type)) < 0)
+ if ((relocatable = H5T_is_relocatable(type)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?")
/* Check whether the datatype is immutable */
- if((immutable = H5T_is_immutable(type)) < 0)
+ if ((immutable = H5T_is_immutable(type)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?")
/* To use at least v18 format versions or not */
use_at_least_v18 = (H5F_LOW_BOUND(file) >= H5F_LIBVER_V18);
/* Copy the datatype if it's a custom datatype or if it'll change when its location is changed */
- if(!immutable || relocatable || use_at_least_v18) {
+ if (!immutable || relocatable || use_at_least_v18) {
/* Copy datatype for dataset */
- if((dset->shared->type = H5T_copy(type, H5T_COPY_ALL)) == NULL)
+ if ((dset->shared->type = H5T_copy(type, H5T_COPY_ALL)) == NULL)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy datatype")
/* Convert a datatype (if committed) to a transient type if the committed datatype's file
* location is different from the file location where the dataset will be created.
*/
- if(H5T_convert_committed_datatype(dset->shared->type, file) < 0)
+ if (H5T_convert_committed_datatype(dset->shared->type, file) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get shared datatype info")
/* Mark any datatypes as being on disk now */
- if(H5T_set_loc(dset->shared->type, H5F_VOL_OBJ(file), H5T_LOC_DISK) < 0)
+ if (H5T_set_loc(dset->shared->type, H5F_VOL_OBJ(file), H5T_LOC_DISK) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't set datatype location")
/* Set the version for datatype */
- if(H5T_set_version(file, dset->shared->type) < 0)
+ if (H5T_set_version(file, dset->shared->type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set version of datatype")
/* Get a datatype ID for the dataset's datatype */
- if((dset->shared->type_id = H5I_register(H5I_DATATYPE, dset->shared->type, FALSE)) < 0)
+ if ((dset->shared->type_id = H5I_register(H5I_DATATYPE, dset->shared->type, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")
} /* end if */
/* Not a custom datatype, just use it directly */
else {
- if(H5I_inc_ref(type_id, FALSE) < 0)
+ if (H5I_inc_ref(type_id, FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, FAIL, "Can't increment datatype ID")
/* Use existing datatype */
dset->shared->type_id = type_id;
- dset->shared->type = (H5T_t *)type; /* (Cast away const OK - QAK) */
- } /* end else */
+ dset->shared->type = (H5T_t *)type; /* (Cast away const OK - QAK) */
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_type() */
-
/*-------------------------------------------------------------------------
* Function: H5D__cache_dataspace_info
*
@@ -654,9 +636,9 @@ done:
static herr_t
H5D__cache_dataspace_info(const H5D_t *dset)
{
- int sndims; /* Signed number of dimensions of dataspace rank */
- unsigned u; /* Local index value */
- herr_t ret_value = SUCCEED; /* Return value */
+ int sndims; /* Signed number of dimensions of dataspace rank */
+ unsigned u; /* Local index value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -664,15 +646,16 @@ H5D__cache_dataspace_info(const H5D_t *dset)
HDassert(dset);
/* Cache info for dataset's dataspace */
- if((sndims = H5S_get_simple_extent_dims(dset->shared->space, dset->shared->curr_dims, dset->shared->max_dims)) < 0)
+ if ((sndims = H5S_get_simple_extent_dims(dset->shared->space, dset->shared->curr_dims,
+ dset->shared->max_dims)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions")
dset->shared->ndims = (unsigned)sndims;
/* Compute the initial 'power2up' values */
- for(u = 0; u < dset->shared->ndims; u++) {
- hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
+ for (u = 0; u < dset->shared->ndims; u++) {
+ hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
- if(!(scaled_power2up = H5VM_power2up(dset->shared->curr_dims[u])))
+ if (!(scaled_power2up = H5VM_power2up(dset->shared->curr_dims[u])))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
dset->shared->curr_power2up[u] = scaled_power2up;
}
@@ -681,7 +664,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__cache_dataspace_info() */
-
/*-------------------------------------------------------------------------
* Function: H5D__init_space
*
@@ -695,7 +677,7 @@ done:
static herr_t
H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -705,26 +687,25 @@ H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
HDassert(space);
/* Copy dataspace for dataset */
- if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
+ if (NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dataspace")
/* Cache the dataset's dataspace info */
- if(H5D__cache_dataspace_info(dset) < 0)
+ if (H5D__cache_dataspace_info(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
/* Set the version for dataspace */
- if(H5S_set_version(file, dset->shared->space) < 0)
+ if (H5S_set_version(file, dset->shared->space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest version of datatype")
/* Set the dataset's dataspace to 'all' selection */
- if(H5S_select_all(dset->shared->space, TRUE) < 0)
+ if (H5S_select_all(dset->shared->space, TRUE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set all selection")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_space() */
-
/*-------------------------------------------------------------------------
* Function: H5D__use_minimized_dset_headers
*
@@ -742,7 +723,7 @@ done:
static herr_t
H5D__use_minimized_dset_headers(H5F_t *file, hbool_t *minimize)
{
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -750,19 +731,19 @@ H5D__use_minimized_dset_headers(H5F_t *file, hbool_t *minimize)
HDassert(minimize);
/* Get the dataset object header minimize flag for this call */
- if(H5CX_get_dset_min_ohdr_flag(minimize) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset object header minimize flag from API context")
+ if (H5CX_get_dset_min_ohdr_flag(minimize) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "can't get dataset object header minimize flag from API context")
- if(FALSE == *minimize)
+ if (FALSE == *minimize)
*minimize = H5F_get_min_dset_ohdr(file);
done:
- if(FAIL == ret_value)
+ if (FAIL == ret_value)
*minimize = FALSE;
FUNC_LEAVE_NOAPI(ret_value);
} /* H5D__use_minimized_dset_headers */
-
/*-------------------------------------------------------------------------
* Function: H5D__calculate_minimium_header_size
*
@@ -778,7 +759,7 @@ done:
static size_t
H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
{
- H5T_t *type = NULL;
+ H5T_t * type = NULL;
H5O_fill_t *fill_prop = NULL;
hbool_t use_at_least_v18 = FALSE;
const char continuation[1] = ""; /* requred for work-around */
@@ -791,8 +772,8 @@ H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
HDassert(dset);
HDassert(ohdr);
- type = dset->shared->type;
- fill_prop = &(dset->shared->dcpl_cache.fill);
+ type = dset->shared->type;
+ fill_prop = &(dset->shared->dcpl_cache.fill);
use_at_least_v18 = (H5F_LOW_BOUND(file) >= H5F_LIBVER_V18);
/* Datatype message size */
@@ -829,7 +810,7 @@ H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
ret_value += get_value;
/* Fill Value (backwards compatability) message size */
- if(fill_prop->buf && !use_at_least_v18) {
+ if (fill_prop->buf && !use_at_least_v18) {
H5O_fill_t old_fill_prop; /* Copy for writing "old" fill value */
/* Shallow copy the fill value property */
@@ -841,14 +822,15 @@ H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
get_value = H5O_msg_size_oh(file, ohdr, H5O_FILL_ID, &old_fill_prop, 0);
if (get_value == 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't get size of fill value (backwards compat) message")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0,
+ "can't get size of fill value (backwards compat) message")
ret_value += get_value;
}
/* Filter/Pipeline message size */
- if(H5D_CHUNKED == dset->shared->layout.type) {
+ if (H5D_CHUNKED == dset->shared->layout.type) {
H5O_pline_t *pline = &dset->shared->dcpl_cache.pline;
- if(pline->nused > 0) {
+ if (pline->nused > 0) {
get_value = H5O_msg_size_oh(file, ohdr, H5O_PLINE_ID, pline, 0);
if (get_value == 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't get size of filter message")
@@ -857,7 +839,7 @@ H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
}
/* External File Link message size */
- if(dset->shared->dcpl_cache.efl.nused > 0) {
+ if (dset->shared->dcpl_cache.efl.nused > 0) {
get_value = H5O_msg_size_oh(file, ohdr, H5O_EFL_ID, &dset->shared->dcpl_cache.efl, 0);
if (get_value == 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't get size of external file link message")
@@ -865,10 +847,10 @@ H5D__calculate_minimum_header_size(H5F_t *file, H5D_t *dset, H5O_t *ohdr)
}
/* Modification Time message size */
- if(H5O_HDR_STORE_TIMES & H5O_OH_GET_FLAGS(ohdr)) {
+ if (H5O_HDR_STORE_TIMES & H5O_OH_GET_FLAGS(ohdr)) {
HDassert(H5O_OH_GET_VERSION(ohdr) >= 1); /* 1 :: H5O_VERSION_1 (H5Opkg.h) */
- if(H5O_OH_GET_VERSION(ohdr) == 1) {
+ if (H5O_OH_GET_VERSION(ohdr) == 1) {
/* v1 object headers store modification time as a message */
time_t mtime;
get_value = H5O_msg_size_oh(file, ohdr, H5O_MTIME_NEW_ID, &mtime, 0);
@@ -882,7 +864,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value);
} /* H5D__calculate_minimum_header_size */
-
/*-------------------------------------------------------------------------
* Function: H5D__prepare_minimized_oh
*
@@ -899,9 +880,9 @@ done:
static herr_t
H5D__prepare_minimized_oh(H5F_t *file, H5D_t *dset, H5O_loc_t *oloc)
{
- H5O_t *oh = NULL;
- size_t ohdr_size = 0;
- herr_t ret_value = SUCCEED;
+ H5O_t *oh = NULL;
+ size_t ohdr_size = 0;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -910,22 +891,21 @@ H5D__prepare_minimized_oh(H5F_t *file, H5D_t *dset, H5O_loc_t *oloc)
HDassert(oloc);
oh = H5O_create_ohdr(file, dset->shared->dcpl_id);
- if(NULL == oh)
+ if (NULL == oh)
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "can't instantiate object header")
ohdr_size = H5D__calculate_minimum_header_size(file, dset, oh);
if (ohdr_size == 0)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "computed header size is invalid")
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "computed header size is invalid")
/* Special allocation of space for compact datsets is handled by the call here. */
- if(H5O_apply_ohdr(file, oh, dset->shared->dcpl_id, ohdr_size, (size_t)1, oloc) == FAIL)
+ if (H5O_apply_ohdr(file, oh, dset->shared->dcpl_id, ohdr_size, (size_t)1, oloc) == FAIL)
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "can't apply object header to file")
done:
FUNC_LEAVE_NOAPI(ret_value);
} /* H5D__prepare_minimized_oh */
-
/*-------------------------------------------------------------------------
* Function: H5D__update_oh_info
*
@@ -938,18 +918,18 @@ done:
static herr_t
H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id)
{
- H5O_t *oh = NULL; /* Pointer to dataset's object header */
- size_t ohdr_size = H5D_MINHDR_SIZE; /* Size of dataset's object header */
- H5O_loc_t *oloc = NULL; /* Dataset's object location */
- H5O_layout_t *layout; /* Dataset's layout information */
- H5T_t *type; /* Dataset's datatype */
- H5O_fill_t *fill_prop; /* Pointer to dataset's fill value information */
- H5D_fill_value_t fill_status; /* Fill value status */
- hbool_t fill_changed = FALSE; /* Flag indicating the fill value was changed */
- hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
- hbool_t use_at_least_v18; /* Flag indicating to use at least v18 format versions */
- hbool_t use_minimized_header = FALSE; /* Flag to use minimized dataset object headers */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_t * oh = NULL; /* Pointer to dataset's object header */
+ size_t ohdr_size = H5D_MINHDR_SIZE; /* Size of dataset's object header */
+ H5O_loc_t * oloc = NULL; /* Dataset's object location */
+ H5O_layout_t * layout; /* Dataset's layout information */
+ H5T_t * type; /* Dataset's datatype */
+ H5O_fill_t * fill_prop; /* Pointer to dataset's fill value information */
+ H5D_fill_value_t fill_status; /* Fill value status */
+ hbool_t fill_changed = FALSE; /* Flag indicating the fill value was changed */
+ hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
+ hbool_t use_at_least_v18; /* Flag indicating to use at least v18 format versions */
+ hbool_t use_minimized_header = FALSE; /* Flag to use minimized dataset object headers */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -958,22 +938,22 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id)
HDassert(dset);
/* Set some local variables, for convenience */
- oloc = &dset->oloc;
- layout = &dset->shared->layout;
- type = dset->shared->type;
+ oloc = &dset->oloc;
+ layout = &dset->shared->layout;
+ type = dset->shared->type;
fill_prop = &dset->shared->dcpl_cache.fill;
/* To use at least v18 format versions or not */
use_at_least_v18 = (H5F_LOW_BOUND(file) >= H5F_LIBVER_V18);
/* Retrieve "defined" status of fill value */
- if(H5P_is_fill_value_defined(fill_prop, &fill_status) < 0)
+ if (H5P_is_fill_value_defined(fill_prop, &fill_status) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")
/* Special case handling for variable-length types */
- if(H5T_detect_class(type, H5T_VLEN, FALSE)) {
+ if (H5T_detect_class(type, H5T_VLEN, FALSE)) {
/* If the default fill value is chosen for variable-length types, always write it */
- if(fill_prop->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_DEFAULT) {
+ if (fill_prop->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_DEFAULT) {
/* Update dataset creation property */
fill_prop->fill_time = H5D_FILL_TIME_ALLOC;
@@ -982,82 +962,84 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id)
} /* end if */
/* Don't allow never writing fill values with variable-length types */
- if(fill_prop->fill_time == H5D_FILL_TIME_NEVER)
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Dataset doesn't support VL datatype when fill value is not defined")
+ if (fill_prop->fill_time == H5D_FILL_TIME_NEVER)
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL,
+ "Dataset doesn't support VL datatype when fill value is not defined")
} /* end if */
/* Determine whether fill value is defined or not */
- if(fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
+ if (fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
/* Convert fill value buffer to dataset's datatype */
- if(fill_prop->buf && fill_prop->size > 0 && H5O_fill_convert(fill_prop, type, &fill_changed) < 0)
+ if (fill_prop->buf && fill_prop->size > 0 && H5O_fill_convert(fill_prop, type, &fill_changed) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to convert fill value to dataset type")
fill_prop->fill_defined = TRUE;
}
- else if(fill_status == H5D_FILL_VALUE_UNDEFINED)
+ else if (fill_status == H5D_FILL_VALUE_UNDEFINED)
fill_prop->fill_defined = FALSE;
else
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to determine if fill value is defined")
/* Check for invalid fill & allocation time setting */
- if(fill_prop->fill_defined == FALSE && fill_prop->fill_time == H5D_FILL_TIME_ALLOC)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "fill value writing on allocation set, but no fill value defined")
+ if (fill_prop->fill_defined == FALSE && fill_prop->fill_time == H5D_FILL_TIME_ALLOC)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "fill value writing on allocation set, but no fill value defined")
/* Check if the fill value info changed */
- if(fill_changed) {
- H5P_genplist_t *dc_plist; /* Dataset's creation property list */
+ if (fill_changed) {
+ H5P_genplist_t *dc_plist; /* Dataset's creation property list */
/* Get dataset's property list object */
HDassert(dset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT);
- if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
+ if (NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")
/* Update dataset creation property */
- if(H5P_set(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
+ if (H5P_set(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set fill value info")
} /* end if */
- if(H5D__use_minimized_dset_headers(file, &use_minimized_header) == FAIL)
+ if (H5D__use_minimized_dset_headers(file, &use_minimized_header) == FAIL)
HGOTO_ERROR(H5E_ARGS, H5E_CANTGET, FAIL, "can't get minimize settings")
- if(TRUE == use_minimized_header) {
- if(H5D__prepare_minimized_oh(file, dset, oloc) == FAIL)
+ if (TRUE == use_minimized_header) {
+ if (H5D__prepare_minimized_oh(file, dset, oloc) == FAIL)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create minimized dataset object header")
} /* end if */
else {
/* Add the dataset's raw data size to the size of the header, if the
* raw data will be stored as compact
*/
- if(H5D_COMPACT == layout->type)
+ if (H5D_COMPACT == layout->type)
ohdr_size += layout->storage.u.compact.size;
/* Create an object header for the dataset */
- if(H5O_create(file, ohdr_size, (size_t)1, dset->shared->dcpl_id, oloc/*out*/) < 0)
+ if (H5O_create(file, ohdr_size, (size_t)1, dset->shared->dcpl_id, oloc /*out*/) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset object header")
} /* if using default/minimized object headers */
HDassert(file == dset->oloc.file);
/* Pin the object header */
- if(NULL == (oh = H5O_pin(oloc)))
+ if (NULL == (oh = H5O_pin(oloc)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
/* Write the dataspace header message */
- if(H5S_append(file, oh, dset->shared->space) < 0)
+ if (H5S_append(file, oh, dset->shared->space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update dataspace header message")
/* Write the datatype header message */
- if(H5O_msg_append_oh(file, oh, H5O_DTYPE_ID, H5O_MSG_FLAG_CONSTANT, 0, type) < 0)
+ if (H5O_msg_append_oh(file, oh, H5O_DTYPE_ID, H5O_MSG_FLAG_CONSTANT, 0, type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update datatype header message")
/* Write new fill value message */
- if(H5O_msg_append_oh(file, oh, H5O_FILL_NEW_ID, H5O_MSG_FLAG_CONSTANT, 0, fill_prop) < 0)
+ if (H5O_msg_append_oh(file, oh, H5O_FILL_NEW_ID, H5O_MSG_FLAG_CONSTANT, 0, fill_prop) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update new fill value header message")
/* If there is valid information for the old fill value struct, add it */
/* (only if we aren't using v18 format versions and above */
- if(fill_prop->buf && !use_at_least_v18) {
- H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */
+ if (fill_prop->buf && !use_at_least_v18) {
+ H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */
/* Shallow copy the fill value property */
/* (we only want to make certain that the shared component isn't modified) */
@@ -1067,72 +1049,70 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id)
H5O_msg_reset_share(H5O_FILL_ID, &old_fill_prop);
/* Write old fill value */
- if(H5O_msg_append_oh(file, oh, H5O_FILL_ID, H5O_MSG_FLAG_CONSTANT, 0, &old_fill_prop) < 0)
+ if (H5O_msg_append_oh(file, oh, H5O_FILL_ID, H5O_MSG_FLAG_CONSTANT, 0, &old_fill_prop) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update old fill value header message")
} /* end if */
/* Update/create the layout (and I/O pipeline & EFL) messages */
- if(H5D__layout_oh_create(file, oh, dset, dapl_id) < 0)
+ if (H5D__layout_oh_create(file, oh, dset, dapl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout/pline/efl header message")
/* Indicate that the layout information was initialized */
layout_init = TRUE;
#ifdef H5O_ENABLE_BOGUS
-{
- H5P_genplist_t *dc_plist; /* Dataset's creation property list */
+ {
+ H5P_genplist_t *dc_plist; /* Dataset's creation property list */
- /* Get dataset's property list object */
- if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")
+ /* Get dataset's property list object */
+ if (NULL == (dc_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")
- /* Check whether to add a "bogus" message */
- if( (H5P_exist_plist(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME) > 0) &&
- (H5P_exist_plist(dc_plist, H5O_BOGUS_MSG_ID_NAME) > 0) ) {
+ /* Check whether to add a "bogus" message */
+ if ((H5P_exist_plist(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME) > 0) &&
+ (H5P_exist_plist(dc_plist, H5O_BOGUS_MSG_ID_NAME) > 0)) {
- uint8_t bogus_flags = 0; /* Flags for creating "bogus" message */
- unsigned bogus_id; /* "bogus" ID */
+ uint8_t bogus_flags = 0; /* Flags for creating "bogus" message */
+ unsigned bogus_id; /* "bogus" ID */
- /* Retrieve "bogus" message ID */
- if(H5P_get(dc_plist, H5O_BOGUS_MSG_ID_NAME, &bogus_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get bogus ID options")
- /* Retrieve "bogus" message flags */
- if(H5P_get(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME, &bogus_flags) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get bogus message options")
+ /* Retrieve "bogus" message ID */
+ if (H5P_get(dc_plist, H5O_BOGUS_MSG_ID_NAME, &bogus_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get bogus ID options")
+ /* Retrieve "bogus" message flags */
+ if (H5P_get(dc_plist, H5O_BOGUS_MSG_FLAGS_NAME, &bogus_flags) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get bogus message options")
- /* Add a "bogus" message (for error testing). */
- if(H5O_bogus_oh(file, oh, bogus_id, (unsigned)bogus_flags) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create 'bogus' message")
- } /* end if */
-}
+ /* Add a "bogus" message (for error testing). */
+ if (H5O_bogus_oh(file, oh, bogus_id, (unsigned)bogus_flags) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create 'bogus' message")
+ } /* end if */
+ }
#endif /* H5O_ENABLE_BOGUS */
/* Add a modification time message, if using older format. */
/* (If using v18 format versions and above, the the modification time is part of the object
* header and doesn't use a separate message -QAK)
*/
- if(!use_at_least_v18)
- if(H5O_touch_oh(file, oh, TRUE) < 0)
+ if (!use_at_least_v18)
+ if (H5O_touch_oh(file, oh, TRUE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update modification time message")
done:
/* Release pointer to object header itself */
- if(oh != NULL)
- if(H5O_unpin(oh) < 0)
+ if (oh != NULL)
+ if (H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
/* Error cleanup */
- if(ret_value < 0)
- if(layout_init)
+ if (ret_value < 0)
+ if (layout_init)
/* Destroy the layout information for the dataset */
- if(dset->shared->layout.ops->dest && (dset->shared->layout.ops->dest)(dset) < 0)
+ if (dset->shared->layout.ops->dest && (dset->shared->layout.ops->dest)(dset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy layout info")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__update_oh_info() */
-
-
/*--------------------------------------------------------------------------
* Function: H5D__build_file_prefix
*
@@ -1146,12 +1126,12 @@ done:
static herr_t
H5D__build_file_prefix(const H5D_t *dset, H5F_prefix_open_t prefix_type, char **file_prefix /*out*/)
{
- const char *prefix = NULL; /* prefix used to look for the file */
- char *filepath = NULL; /* absolute path of directory the HDF5 file is in */
- size_t filepath_len; /* length of file path */
- size_t prefix_len; /* length of prefix */
- size_t file_prefix_len; /* length of expanded prefix */
- herr_t ret_value = SUCCEED; /* Return value */
+ const char *prefix = NULL; /* prefix used to look for the file */
+ char * filepath = NULL; /* absolute path of directory the HDF5 file is in */
+ size_t filepath_len; /* length of file path */
+ size_t prefix_len; /* length of prefix */
+ size_t file_prefix_len; /* length of expanded prefix */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1164,19 +1144,19 @@ H5D__build_file_prefix(const H5D_t *dset, H5F_prefix_open_t prefix_type, char **
/* XXX: Future thread-safety note - getenv is not required
* to be reentrant.
*/
- if(H5F_PREFIX_VDS == prefix_type) {
+ if (H5F_PREFIX_VDS == prefix_type) {
prefix = H5D_prefix_vds_env;
- if(prefix == NULL || *prefix == '\0') {
- if(H5CX_get_vds_prefix(&prefix) < 0)
+ if (prefix == NULL || *prefix == '\0') {
+ if (H5CX_get_vds_prefix(&prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get the prefix for vds file")
}
}
- else if(H5F_PREFIX_EFILE == prefix_type) {
+ else if (H5F_PREFIX_EFILE == prefix_type) {
prefix = H5D_prefix_ext_env;
- if(prefix == NULL || *prefix == '\0') {
- if(H5CX_get_ext_file_prefix(&prefix) < 0)
+ if (prefix == NULL || *prefix == '\0') {
+ if (H5CX_get_ext_file_prefix(&prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get the prefix for the external file")
}
}
@@ -1186,34 +1166,33 @@ H5D__build_file_prefix(const H5D_t *dset, H5F_prefix_open_t prefix_type, char **
/* Prefix has to be checked for NULL / empty string again because the
* code above might have updated it.
*/
- if(prefix == NULL || *prefix == '\0' || HDstrcmp(prefix, ".") == 0) {
+ if (prefix == NULL || *prefix == '\0' || HDstrcmp(prefix, ".") == 0) {
/* filename is interpreted as relative to the current directory,
* does not need to be expanded
*/
*file_prefix = NULL;
} /* end if */
else {
- if(HDstrncmp(prefix, "${ORIGIN}", HDstrlen("${ORIGIN}")) == 0) {
+ if (HDstrncmp(prefix, "${ORIGIN}", HDstrlen("${ORIGIN}")) == 0) {
/* Replace ${ORIGIN} at beginning of prefix by directory of HDF5 file */
- filepath_len = HDstrlen(filepath);
- prefix_len = HDstrlen(prefix);
+ filepath_len = HDstrlen(filepath);
+ prefix_len = HDstrlen(prefix);
file_prefix_len = filepath_len + prefix_len - HDstrlen("${ORIGIN}") + 1;
- if(NULL == (*file_prefix = (char *)H5MM_malloc(file_prefix_len)))
+ if (NULL == (*file_prefix = (char *)H5MM_malloc(file_prefix_len)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate buffer")
HDsnprintf(*file_prefix, file_prefix_len, "%s%s", filepath, prefix + HDstrlen("${ORIGIN}"));
} /* end if */
else {
- if(NULL == (*file_prefix = (char *)H5MM_strdup(prefix)))
+ if (NULL == (*file_prefix = (char *)H5MM_strdup(prefix)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
} /* end else */
- } /* end else */
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__build_file_prefix() */
-
/*-------------------------------------------------------------------------
* Function: H5D__create
*
@@ -1230,21 +1209,20 @@ done:
*-------------------------------------------------------------------------
*/
H5D_t *
-H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
- hid_t dapl_id)
+H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, hid_t dapl_id)
{
- H5T_t *type = NULL; /* Datatype for dataset (VOL pointer) */
- H5T_t *dt = NULL; /* Datatype for dataset (non-VOL pointer) */
- H5D_t *new_dset = NULL;
- H5P_genplist_t *dc_plist = NULL; /* New Property list */
- hbool_t has_vl_type = FALSE; /* Flag to indicate a VL-type for dataset */
- hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
- hbool_t layout_copied = FALSE; /* Flag to indicate that layout message was copied */
- hbool_t fill_copied = FALSE; /* Flag to indicate that fill-value message was copied */
- hbool_t pline_copied = FALSE; /* Flag to indicate that pipeline message was copied */
- hbool_t efl_copied = FALSE; /* Flag to indicate that external file list message was copied */
- H5G_loc_t dset_loc; /* Dataset location */
- H5D_t *ret_value = NULL; /* Return value */
+ H5T_t * type = NULL; /* Datatype for dataset (VOL pointer) */
+ H5T_t * dt = NULL; /* Datatype for dataset (non-VOL pointer) */
+ H5D_t * new_dset = NULL;
+ H5P_genplist_t *dc_plist = NULL; /* New Property list */
+ hbool_t has_vl_type = FALSE; /* Flag to indicate a VL-type for dataset */
+ hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
+ hbool_t layout_copied = FALSE; /* Flag to indicate that layout message was copied */
+ hbool_t fill_copied = FALSE; /* Flag to indicate that fill-value message was copied */
+ hbool_t pline_copied = FALSE; /* Flag to indicate that pipeline message was copied */
+ hbool_t efl_copied = FALSE; /* Flag to indicate that external file list message was copied */
+ H5G_loc_t dset_loc; /* Dataset location */
+ H5D_t * ret_value = NULL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1255,26 +1233,26 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
HDassert(H5I_GENPROP_LST == H5I_get_type(dcpl_id));
/* Get the dataset's datatype */
- if(NULL == (dt = (H5T_t *)H5I_object(type_id)))
+ if (NULL == (dt = (H5T_t *)H5I_object(type_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a datatype")
/* If this is a named datatype, get the pointer via the VOL plugin */
type = H5T_get_actual_type(dt);
/* Check if the datatype is "sensible" for use in a dataset */
- if(H5T_is_sensible(type) != TRUE)
+ if (H5T_is_sensible(type) != TRUE)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "datatype is not sensible")
/* Check if the datatype is/contains a VL-type */
- if(H5T_detect_class(type, H5T_VLEN, FALSE))
+ if (H5T_detect_class(type, H5T_VLEN, FALSE))
has_vl_type = TRUE;
/* Check if the dataspace has an extent set (or is NULL) */
- if(!H5S_has_extent(space))
+ if (!H5S_has_extent(space))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "dataspace extent has not been set.")
/* Initialize the dataset object */
- if(NULL == (new_dset = H5FL_CALLOC(H5D_t)))
+ if (NULL == (new_dset = H5FL_CALLOC(H5D_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Set up & reset dataset location */
@@ -1283,131 +1261,133 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
H5G_loc_reset(&dset_loc);
/* Initialize the shared dataset space */
- if(NULL == (new_dset->shared = H5D__new(dcpl_id, dapl_id, TRUE, has_vl_type)))
+ if (NULL == (new_dset->shared = H5D__new(dcpl_id, dapl_id, TRUE, has_vl_type)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Copy & initialize datatype for dataset */
- if(H5D__init_type(file, new_dset, type_id, type) < 0)
+ if (H5D__init_type(file, new_dset, type_id, type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't copy datatype")
/* Copy & initialize dataspace for dataset */
- if(H5D__init_space(file, new_dset, space) < 0)
+ if (H5D__init_space(file, new_dset, space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't copy dataspace")
/* Set the dataset's checked_filters flag to enable writing */
new_dset->shared->checked_filters = TRUE;
/* Check if the dataset has a non-default DCPL & get important values, if so */
- if(new_dset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT) {
- H5O_layout_t *layout; /* Dataset's layout information */
- H5O_pline_t *pline; /* Dataset's I/O pipeline information */
- H5O_fill_t *fill; /* Dataset's fill value info */
- H5O_efl_t *efl; /* Dataset's external file list info */
- htri_t ignore_filters = FALSE; /* Ignore optional filters or not */
-
- if((ignore_filters = H5Z_ignore_filters(new_dset->shared->dcpl_id, dt, space))<0)
+ if (new_dset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT) {
+ H5O_layout_t *layout; /* Dataset's layout information */
+ H5O_pline_t * pline; /* Dataset's I/O pipeline information */
+ H5O_fill_t * fill; /* Dataset's fill value info */
+ H5O_efl_t * efl; /* Dataset's external file list info */
+ htri_t ignore_filters = FALSE; /* Ignore optional filters or not */
+
+ if ((ignore_filters = H5Z_ignore_filters(new_dset->shared->dcpl_id, dt, space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, NULL, "H5Z_has_optional_filter() failed")
- if(FALSE == ignore_filters) {
+ if (FALSE == ignore_filters) {
/* Check if the filters in the DCPL can be applied to this dataset */
- if(H5Z_can_apply(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
+ if (H5Z_can_apply(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, NULL, "I/O filters can't operate on this dataset")
/* Make the "set local" filter callbacks for this dataset */
- if(H5Z_set_local(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
+ if (H5Z_set_local(new_dset->shared->dcpl_id, new_dset->shared->type_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to set local filter parameters")
} /* ignore_filters */
/* Get new dataset's property list object */
- if(NULL == (dc_plist = (H5P_genplist_t *)H5I_object(new_dset->shared->dcpl_id)))
+ if (NULL == (dc_plist = (H5P_genplist_t *)H5I_object(new_dset->shared->dcpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "can't get dataset creation property list")
/* Retrieve the properties we need */
pline = &new_dset->shared->dcpl_cache.pline;
- if(H5P_get(dc_plist, H5O_CRT_PIPELINE_NAME, pline) < 0)
+ if (H5P_get(dc_plist, H5O_CRT_PIPELINE_NAME, pline) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't retrieve pipeline filter")
pline_copied = TRUE;
- layout = &new_dset->shared->layout;
- if(H5P_get(dc_plist, H5D_CRT_LAYOUT_NAME, layout) < 0)
+ layout = &new_dset->shared->layout;
+ if (H5P_get(dc_plist, H5D_CRT_LAYOUT_NAME, layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't retrieve layout")
layout_copied = TRUE;
- fill = &new_dset->shared->dcpl_cache.fill;
- if(H5P_get(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill) < 0)
+ fill = &new_dset->shared->dcpl_cache.fill;
+ if (H5P_get(dc_plist, H5D_CRT_FILL_VALUE_NAME, fill) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't retrieve fill value info")
fill_copied = TRUE;
- efl = &new_dset->shared->dcpl_cache.efl;
- if(H5P_get(dc_plist, H5D_CRT_EXT_FILE_LIST_NAME, efl) < 0)
+ efl = &new_dset->shared->dcpl_cache.efl;
+ if (H5P_get(dc_plist, H5D_CRT_EXT_FILE_LIST_NAME, efl) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't retrieve external file list")
efl_copied = TRUE;
- if(FALSE == ignore_filters) {
+ if (FALSE == ignore_filters) {
/* Check that chunked layout is used if filters are enabled */
- if(pline->nused > 0 && H5D_CHUNKED != layout->type)
+ if (pline->nused > 0 && H5D_CHUNKED != layout->type)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "filters can only be used with chunked layout")
}
/* Check if the alloc_time is the default and error out */
- if(fill->alloc_time == H5D_ALLOC_TIME_DEFAULT)
+ if (fill->alloc_time == H5D_ALLOC_TIME_DEFAULT)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "invalid space allocation state")
/* Don't allow compact datasets to allocate space later */
- if(layout->type == H5D_COMPACT && fill->alloc_time != H5D_ALLOC_TIME_EARLY)
+ if (layout->type == H5D_COMPACT && fill->alloc_time != H5D_ALLOC_TIME_EARLY)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "compact dataset must have early space allocation")
} /* end if */
/* Set the version for the I/O pipeline message */
- if(H5O_pline_set_version(file, &new_dset->shared->dcpl_cache.pline) < 0)
+ if (H5O_pline_set_version(file, &new_dset->shared->dcpl_cache.pline) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline")
/* Set the version for the fill message */
- if(H5O_fill_set_version(file, &new_dset->shared->dcpl_cache.fill) < 0)
+ if (H5O_fill_set_version(file, &new_dset->shared->dcpl_cache.fill) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value")
/* Set the latest version for the layout message */
- if(H5D__layout_set_version(file, &new_dset->shared->layout) < 0)
+ if (H5D__layout_set_version(file, &new_dset->shared->layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of layout")
- if(new_dset->shared->layout.version >= H5O_LAYOUT_VERSION_4) {
+ if (new_dset->shared->layout.version >= H5O_LAYOUT_VERSION_4) {
/* Use latest indexing type for layout message version >= 4 */
- if(H5D__layout_set_latest_indexing(&new_dset->shared->layout, new_dset->shared->space, &new_dset->shared->dcpl_cache) < 0)
+ if (H5D__layout_set_latest_indexing(&new_dset->shared->layout, new_dset->shared->space,
+ &new_dset->shared->dcpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest indexing")
} /* end if */
/* Check if this dataset is going into a parallel file and set space allocation time */
- if(H5F_HAS_FEATURE(file, H5FD_FEAT_ALLOCATE_EARLY))
+ if (H5F_HAS_FEATURE(file, H5FD_FEAT_ALLOCATE_EARLY))
new_dset->shared->dcpl_cache.fill.alloc_time = H5D_ALLOC_TIME_EARLY;
/* Set the dataset's I/O operations */
- if(H5D__layout_set_io_ops(new_dset) < 0)
+ if (H5D__layout_set_io_ops(new_dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize I/O operations")
/* Create the layout information for the new dataset */
- if(new_dset->shared->layout.ops->construct && (new_dset->shared->layout.ops->construct)(file, new_dset) < 0)
+ if (new_dset->shared->layout.ops->construct &&
+ (new_dset->shared->layout.ops->construct)(file, new_dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to construct layout information")
/* Update the dataset's object header info. */
- if(H5D__update_oh_info(file, new_dset, new_dset->shared->dapl_id) < 0)
+ if (H5D__update_oh_info(file, new_dset, new_dset->shared->dapl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't update the metadata cache")
/* Indicate that the layout information was initialized */
layout_init = TRUE;
/* Set up append flush parameters for the dataset */
- if(H5D__append_flush_setup(new_dset, new_dset->shared->dapl_id) < 0)
+ if (H5D__append_flush_setup(new_dset, new_dset->shared->dapl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to set up flush append property")
/* Set the external file prefix */
- if(H5D__build_file_prefix(new_dset, H5F_PREFIX_EFILE, &new_dset->shared->extfile_prefix) < 0)
+ if (H5D__build_file_prefix(new_dset, H5F_PREFIX_EFILE, &new_dset->shared->extfile_prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize external file prefix")
/* Set the VDS file prefix */
- if(H5D__build_file_prefix(new_dset, H5F_PREFIX_VDS, &new_dset->shared->vds_prefix) < 0)
+ if (H5D__build_file_prefix(new_dset, H5F_PREFIX_VDS, &new_dset->shared->vds_prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize VDS prefix")
/* Add the dataset to the list of opened objects in the file */
- if(H5FO_top_incr(new_dset->oloc.file, new_dset->oloc.addr) < 0)
+ if (H5FO_top_incr(new_dset->oloc.file, new_dset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't incr object ref. count")
- if(H5FO_insert(new_dset->oloc.file, new_dset->oloc.addr, new_dset->shared, TRUE) < 0)
+ if (H5FO_insert(new_dset->oloc.file, new_dset->oloc.addr, new_dset->shared, TRUE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert dataset into list of open objects")
new_dset->shared->fo_count = 1;
@@ -1415,63 +1395,63 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
ret_value = new_dset;
done:
- if(!ret_value && new_dset) {
- if(new_dset->shared) {
- if(layout_init)
- if(new_dset->shared->layout.ops->dest && (new_dset->shared->layout.ops->dest)(new_dset) < 0)
+ if (!ret_value && new_dset) {
+ if (new_dset->shared) {
+ if (layout_init)
+ if (new_dset->shared->layout.ops->dest && (new_dset->shared->layout.ops->dest)(new_dset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "unable to destroy layout info")
- if(pline_copied)
- if(H5O_msg_reset(H5O_PLINE_ID, &new_dset->shared->dcpl_cache.pline) < 0)
+ if (pline_copied)
+ if (H5O_msg_reset(H5O_PLINE_ID, &new_dset->shared->dcpl_cache.pline) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, NULL, "unable to reset I/O pipeline info")
- if(layout_copied)
- if(H5O_msg_reset(H5O_LAYOUT_ID, &new_dset->shared->layout) < 0)
+ if (layout_copied)
+ if (H5O_msg_reset(H5O_LAYOUT_ID, &new_dset->shared->layout) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, NULL, "unable to reset layout info")
- if(fill_copied)
- if(H5O_msg_reset(H5O_FILL_ID, &new_dset->shared->dcpl_cache.fill) < 0)
+ if (fill_copied)
+ if (H5O_msg_reset(H5O_FILL_ID, &new_dset->shared->dcpl_cache.fill) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, NULL, "unable to reset fill-value info")
- if(efl_copied)
- if(H5O_msg_reset(H5O_EFL_ID, &new_dset->shared->dcpl_cache.efl) < 0)
+ if (efl_copied)
+ if (H5O_msg_reset(H5O_EFL_ID, &new_dset->shared->dcpl_cache.efl) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, NULL, "unable to reset external file list info")
- if(new_dset->shared->space && H5S_close(new_dset->shared->space) < 0)
+ if (new_dset->shared->space && H5S_close(new_dset->shared->space) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release dataspace")
- if(new_dset->shared->type) {
- if(new_dset->shared->type_id > 0) {
- if(H5I_dec_ref(new_dset->shared->type_id) < 0)
+ if (new_dset->shared->type) {
+ if (new_dset->shared->type_id > 0) {
+ if (H5I_dec_ref(new_dset->shared->type_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype")
} /* end if */
else {
- if(H5T_close_real(new_dset->shared->type) < 0)
+ if (H5T_close_real(new_dset->shared->type) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release datatype")
} /* end else */
- } /* end if */
+ } /* end if */
- if(H5F_addr_defined(new_dset->oloc.addr)) {
- if(H5O_dec_rc_by_loc(&(new_dset->oloc)) < 0)
- HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement refcount on newly created object")
- if(H5O_close(&(new_dset->oloc), NULL) < 0)
+ if (H5F_addr_defined(new_dset->oloc.addr)) {
+ if (H5O_dec_rc_by_loc(&(new_dset->oloc)) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL,
+ "unable to decrement refcount on newly created object")
+ if (H5O_close(&(new_dset->oloc), NULL) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, NULL, "unable to release object header")
- if(file) {
- if(H5O_delete(file, new_dset->oloc.addr) < 0)
+ if (file) {
+ if (H5O_delete(file, new_dset->oloc.addr) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDELETE, NULL, "unable to delete object header")
} /* end if */
- } /* end if */
- if(new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id) < 0)
+ } /* end if */
+ if (new_dset->shared->dcpl_id != 0 && H5I_dec_ref(new_dset->shared->dcpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement ref count on property list")
- if(new_dset->shared->dapl_id != 0 && H5I_dec_ref(new_dset->shared->dapl_id) < 0)
+ if (new_dset->shared->dapl_id != 0 && H5I_dec_ref(new_dset->shared->dapl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, NULL, "unable to decrement ref count on property list")
new_dset->shared->extfile_prefix = (char *)H5MM_xfree(new_dset->shared->extfile_prefix);
- new_dset->shared->vds_prefix = (char *)H5MM_xfree(new_dset->shared->vds_prefix);
- new_dset->shared = H5FL_FREE(H5D_shared_t, new_dset->shared);
+ new_dset->shared->vds_prefix = (char *)H5MM_xfree(new_dset->shared->vds_prefix);
+ new_dset->shared = H5FL_FREE(H5D_shared_t, new_dset->shared);
} /* end if */
new_dset->oloc.file = NULL;
- new_dset = H5FL_FREE(H5D_t, new_dset);
+ new_dset = H5FL_FREE(H5D_t, new_dset);
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__create() */
-
/*-------------------------------------------------------------------------
* Function: H5D__open_name
*
@@ -1484,13 +1464,13 @@ done:
H5D_t *
H5D__open_name(const H5G_loc_t *loc, const char *name, hid_t dapl_id)
{
- H5D_t *dset = NULL;
- H5G_loc_t dset_loc; /* Object location of dataset */
- H5G_name_t path; /* Dataset group hier. path */
- H5O_loc_t oloc; /* Dataset object location */
- H5O_type_t obj_type; /* Type of object at location */
- hbool_t loc_found = FALSE; /* Location at 'name' found */
- H5D_t *ret_value = NULL; /* Return value */
+ H5D_t * dset = NULL;
+ H5G_loc_t dset_loc; /* Object location of dataset */
+ H5G_name_t path; /* Dataset group hier. path */
+ H5O_loc_t oloc; /* Dataset object location */
+ H5O_type_t obj_type; /* Type of object at location */
+ hbool_t loc_found = FALSE; /* Location at 'name' found */
+ H5D_t * ret_value = NULL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1504,32 +1484,31 @@ H5D__open_name(const H5G_loc_t *loc, const char *name, hid_t dapl_id)
H5G_loc_reset(&dset_loc);
/* Find the dataset object */
- if(H5G_loc_find(loc, name, &dset_loc) < 0)
+ if (H5G_loc_find(loc, name, &dset_loc) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, NULL, "not found")
loc_found = TRUE;
/* Check that the object found is the correct type */
- if(H5O_obj_type(&oloc, &obj_type) < 0)
+ if (H5O_obj_type(&oloc, &obj_type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get object type")
- if(obj_type != H5O_TYPE_DATASET)
+ if (obj_type != H5O_TYPE_DATASET)
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, NULL, "not a dataset")
/* Open the dataset */
- if(NULL == (dset = H5D_open(&dset_loc, dapl_id)))
+ if (NULL == (dset = H5D_open(&dset_loc, dapl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't open dataset")
/* Set return value */
ret_value = dset;
done:
- if(!ret_value)
- if(loc_found && H5G_loc_free(&dset_loc) < 0)
+ if (!ret_value)
+ if (loc_found && H5G_loc_free(&dset_loc) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, NULL, "can't free location")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__open_name() */
-
/*
*-------------------------------------------------------------------------
* Function: H5D_open
@@ -1544,11 +1523,11 @@ done:
H5D_t *
H5D_open(const H5G_loc_t *loc, hid_t dapl_id)
{
- H5D_shared_t *shared_fo = NULL;
- H5D_t *dataset = NULL;
- char *extfile_prefix = NULL; /* Expanded external file prefix */
- char *vds_prefix = NULL; /* Expanded vds prefix */
- H5D_t *ret_value = NULL; /* Return value */
+ H5D_shared_t *shared_fo = NULL;
+ H5D_t * dataset = NULL;
+ char * extfile_prefix = NULL; /* Expanded external file prefix */
+ char * vds_prefix = NULL; /* Expanded vds prefix */
+ H5D_t * ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI(NULL)
@@ -1556,40 +1535,40 @@ H5D_open(const H5G_loc_t *loc, hid_t dapl_id)
HDassert(loc);
/* Allocate the dataset structure */
- if(NULL == (dataset = H5FL_CALLOC(H5D_t)))
+ if (NULL == (dataset = H5FL_CALLOC(H5D_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Shallow copy (take ownership) of the object location object */
- if(H5O_loc_copy_shallow(&(dataset->oloc), loc->oloc) < 0)
+ if (H5O_loc_copy_shallow(&(dataset->oloc), loc->oloc) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, NULL, "can't copy object location")
/* Shallow copy (take ownership) of the group hier. path */
- if(H5G_name_copy(&(dataset->path), loc->path, H5_COPY_SHALLOW) < 0)
+ if (H5G_name_copy(&(dataset->path), loc->path, H5_COPY_SHALLOW) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, NULL, "can't copy path")
/* Get the external file prefix */
- if(H5D__build_file_prefix(dataset, H5F_PREFIX_EFILE, &extfile_prefix) < 0)
+ if (H5D__build_file_prefix(dataset, H5F_PREFIX_EFILE, &extfile_prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize external file prefix")
/* Get the VDS prefix */
- if(H5D__build_file_prefix(dataset, H5F_PREFIX_VDS, &vds_prefix) < 0)
+ if (H5D__build_file_prefix(dataset, H5F_PREFIX_VDS, &vds_prefix) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize VDS prefix")
/* Check if dataset was already open */
- if(NULL == (shared_fo = (H5D_shared_t *)H5FO_opened(dataset->oloc.file, dataset->oloc.addr))) {
+ if (NULL == (shared_fo = (H5D_shared_t *)H5FO_opened(dataset->oloc.file, dataset->oloc.addr))) {
/* Clear any errors from H5FO_opened() */
H5E_clear_stack(NULL);
/* Open the dataset object */
- if(H5D__open_oid(dataset, dapl_id) < 0)
+ if (H5D__open_oid(dataset, dapl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, NULL, "not found")
/* Add the dataset to the list of opened objects in the file */
- if(H5FO_insert(dataset->oloc.file, dataset->oloc.addr, dataset->shared, FALSE) < 0)
+ if (H5FO_insert(dataset->oloc.file, dataset->oloc.addr, dataset->shared, FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert dataset into list of open objects")
/* Increment object count for the object in the top file */
- if(H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment object count")
/* We're the first dataset to use the shared info */
@@ -1616,23 +1595,28 @@ H5D_open(const H5G_loc_t *loc, hid_t dapl_id)
/* Check whether the external file prefix of the already open dataset
* matches the new external file prefix
*/
- if(extfile_prefix && dataset->shared->extfile_prefix) {
- if(HDstrcmp(extfile_prefix, dataset->shared->extfile_prefix) != 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "new external file prefix does not match external file prefix of already open dataset")
- } else {
- if(extfile_prefix || dataset->shared->extfile_prefix)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "new external file prefix does not match external file prefix of already open dataset")
+ if (extfile_prefix && dataset->shared->extfile_prefix) {
+ if (HDstrcmp(extfile_prefix, dataset->shared->extfile_prefix) != 0)
+ HGOTO_ERROR(
+ H5E_DATASET, H5E_CANTOPENOBJ, NULL,
+ "new external file prefix does not match external file prefix of already open dataset")
+ }
+ else {
+ if (extfile_prefix || dataset->shared->extfile_prefix)
+ HGOTO_ERROR(
+ H5E_DATASET, H5E_CANTOPENOBJ, NULL,
+ "new external file prefix does not match external file prefix of already open dataset")
}
/* Check if the object has been opened through the top file yet */
- if(H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
+ if (H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
/* Open the object through this top file */
- if(H5O_open(&(dataset->oloc)) < 0)
+ if (H5O_open(&(dataset->oloc)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "unable to open object header")
} /* end if */
/* Increment object count for the object in the top file */
- if(H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (H5FO_top_incr(dataset->oloc.file, dataset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't increment object count")
} /* end else */
@@ -1641,15 +1625,15 @@ H5D_open(const H5G_loc_t *loc, hid_t dapl_id)
done:
extfile_prefix = (char *)H5MM_xfree(extfile_prefix);
- vds_prefix = (char *)H5MM_xfree(vds_prefix);
+ vds_prefix = (char *)H5MM_xfree(vds_prefix);
- if(ret_value == NULL) {
+ if (ret_value == NULL) {
/* Free the location--casting away const*/
- if(dataset) {
- if(shared_fo == NULL && dataset->shared) { /* Need to free shared fo */
+ if (dataset) {
+ if (shared_fo == NULL && dataset->shared) { /* Need to free shared fo */
dataset->shared->extfile_prefix = (char *)H5MM_xfree(dataset->shared->extfile_prefix);
- dataset->shared->vds_prefix = (char *)H5MM_xfree(dataset->shared->vds_prefix);
- dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);
+ dataset->shared->vds_prefix = (char *)H5MM_xfree(dataset->shared->vds_prefix);
+ dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);
}
H5O_loc_free(&(dataset->oloc));
@@ -1657,14 +1641,13 @@ done:
dataset = H5FL_FREE(H5D_t, dataset);
} /* end if */
- if(shared_fo)
+ if (shared_fo)
shared_fo->fo_count--;
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_open() */
-
/*
*-------------------------------------------------------------------------
 * Function: H5D__append_flush_setup
@@ -1677,7 +1660,7 @@ done:
static herr_t
H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id)
{
- herr_t ret_value = SUCCEED; /* return value */
+ herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_STATIC
@@ -1686,60 +1669,60 @@ H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id)
HDassert(dset->shared);
/* Set default append flush values */
- HDmemset(&dset->shared->append_flush, 0, sizeof(dset->shared->append_flush));
+ HDmemset(&dset->shared->append_flush, 0, sizeof(dset->shared->append_flush));
/* If the dataset is chunked and there is a non-default DAPL */
- if(dapl_id != H5P_DATASET_ACCESS_DEFAULT && dset->shared->layout.type == H5D_CHUNKED) {
- H5P_genplist_t *dapl; /* data access property list object pointer */
+ if (dapl_id != H5P_DATASET_ACCESS_DEFAULT && dset->shared->layout.type == H5D_CHUNKED) {
+ H5P_genplist_t *dapl; /* data access property list object pointer */
/* Get dataset access property list */
- if(NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
+ if (NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for dapl ID");
/* Check if append flush property exists */
- if(H5P_exist_plist(dapl, H5D_ACS_APPEND_FLUSH_NAME) > 0) {
+ if (H5P_exist_plist(dapl, H5D_ACS_APPEND_FLUSH_NAME) > 0) {
H5D_append_flush_t info;
/* Get append flush property */
- if(H5P_get(dapl, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
+ if (H5P_get(dapl, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get append flush info")
- if(info.ndims > 0) {
- hsize_t curr_dims[H5S_MAX_RANK]; /* current dimension sizes */
- hsize_t max_dims[H5S_MAX_RANK]; /* maximum dimension sizes */
- int rank; /* dataspace # of dimensions */
- unsigned u; /* local index variable */
+ if (info.ndims > 0) {
+ hsize_t curr_dims[H5S_MAX_RANK]; /* current dimension sizes */
+ hsize_t max_dims[H5S_MAX_RANK]; /* maximum dimension sizes */
+ int rank; /* dataspace # of dimensions */
+ unsigned u; /* local index variable */
/* Get dataset rank */
- if((rank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, max_dims)) < 0)
+ if ((rank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, max_dims)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
- if(info.ndims != (unsigned)rank)
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension rank does not match dataset rank")
+ if (info.ndims != (unsigned)rank)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL,
+ "boundary dimension rank does not match dataset rank")
/* Validate boundary sizes */
- for(u = 0; u < info.ndims; u++)
- if(info.boundary[u] != 0) /* when a non-zero boundary is set */
+ for (u = 0; u < info.ndims; u++)
+ if (info.boundary[u] != 0) /* when a non-zero boundary is set */
/* the dimension is extendible? */
- if(max_dims[u] != H5S_UNLIMITED && max_dims[u] == curr_dims[u])
+ if (max_dims[u] != H5S_UNLIMITED && max_dims[u] == curr_dims[u])
break;
/* At least one boundary dimension is not extendible */
- if(u != info.ndims)
+ if (u != info.ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension is not valid")
/* Copy append flush settings */
dset->shared->append_flush.ndims = info.ndims;
- dset->shared->append_flush.func = info.func;
+ dset->shared->append_flush.func = info.func;
dset->shared->append_flush.udata = info.udata;
H5MM_memcpy(dset->shared->append_flush.boundary, info.boundary, sizeof(info.boundary));
} /* end if */
- } /* end if */
- } /* end if */
+ } /* end if */
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__append_flush_setup() */
-
/*-------------------------------------------------------------------------
* Function: H5D__open_oid
*
@@ -1751,12 +1734,12 @@ done:
static herr_t
H5D__open_oid(H5D_t *dataset, hid_t dapl_id)
{
- H5P_genplist_t *plist; /* Property list */
- H5O_fill_t *fill_prop; /* Pointer to dataset's fill value info */
- unsigned alloc_time_state; /* Allocation time state */
- htri_t msg_exists; /* Whether a particular type of message exists */
- hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5P_genplist_t *plist; /* Property list */
+ H5O_fill_t * fill_prop; /* Pointer to dataset's fill value info */
+ unsigned alloc_time_state; /* Allocation time state */
+ htri_t msg_exists; /* Whether a particular type of message exists */
+ hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dataset->oloc.addr)
@@ -1764,67 +1747,67 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id)
HDassert(dataset);
/* (Set the 'vl_type' parameter to FALSE since it doesn't matter from here) */
- if(NULL == (dataset->shared = H5D__new(H5P_DATASET_CREATE_DEFAULT, dapl_id, FALSE, FALSE)))
+ if (NULL == (dataset->shared = H5D__new(H5P_DATASET_CREATE_DEFAULT, dapl_id, FALSE, FALSE)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
/* Open the dataset object */
- if(H5O_open(&(dataset->oloc)) < 0)
+ if (H5O_open(&(dataset->oloc)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open")
/* Get the type and space */
- if(NULL == (dataset->shared->type = (H5T_t *)H5O_msg_read(&(dataset->oloc), H5O_DTYPE_ID, NULL)))
+ if (NULL == (dataset->shared->type = (H5T_t *)H5O_msg_read(&(dataset->oloc), H5O_DTYPE_ID, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load type info from dataset header")
- if(H5T_set_loc(dataset->shared->type, H5F_VOL_OBJ(dataset->oloc.file), H5T_LOC_DISK) < 0)
+ if (H5T_set_loc(dataset->shared->type, H5F_VOL_OBJ(dataset->oloc.file), H5T_LOC_DISK) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "invalid datatype location")
- if(NULL == (dataset->shared->space = H5S_read(&(dataset->oloc))))
+ if (NULL == (dataset->shared->space = H5S_read(&(dataset->oloc))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load dataspace info from dataset header")
/* Cache the dataset's dataspace info */
- if(H5D__cache_dataspace_info(dataset) < 0)
+ if (H5D__cache_dataspace_info(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
/* Get a datatype ID for the dataset's datatype */
- if((dataset->shared->type_id = H5I_register(H5I_DATATYPE, dataset->shared->type, FALSE)) < 0)
+ if ((dataset->shared->type_id = H5I_register(H5I_DATATYPE, dataset->shared->type, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register type")
/* Get dataset creation property list object */
- if(NULL == (plist = (H5P_genplist_t *)H5I_object(dataset->shared->dcpl_id)))
+ if (NULL == (plist = (H5P_genplist_t *)H5I_object(dataset->shared->dcpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get dataset creation property list")
/* Get the layout/pline/efl message information */
- if(H5D__layout_oh_read(dataset, dapl_id, plist) < 0)
+ if (H5D__layout_oh_read(dataset, dapl_id, plist) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get layout/pline/efl info")
/* Indicate that the layout information was initialized */
layout_init = TRUE;
/* Set up flush append property */
- if(H5D__append_flush_setup(dataset, dapl_id))
+ if (H5D__append_flush_setup(dataset, dapl_id))
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set up flush append property")
/* Point at dataset's copy, to cache it for later */
fill_prop = &dataset->shared->dcpl_cache.fill;
/* Try to get the new fill value message from the object header */
- if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_NEW_ID)) < 0)
+ if ((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_NEW_ID)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists")
- if(msg_exists) {
- if(NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_NEW_ID, fill_prop))
+ if (msg_exists) {
+ if (NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_NEW_ID, fill_prop))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message")
} /* end if */
else {
- /* For backward compatibility, try to retrieve the old fill value message */
- if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_ID)) < 0)
+ /* For backward compatibility, try to retrieve the old fill value message */
+ if ((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_FILL_ID)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if message exists")
- if(msg_exists) {
- if(NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_ID, fill_prop))
+ if (msg_exists) {
+ if (NULL == H5O_msg_read(&(dataset->oloc), H5O_FILL_ID, fill_prop))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve message")
} /* end if */
else {
/* Set the space allocation time appropriately, based on the type of dataset storage */
- switch(dataset->shared->layout.type) {
+ switch (dataset->shared->layout.type) {
case H5D_COMPACT:
fill_prop->alloc_time = H5D_ALLOC_TIME_EARLY;
break;
@@ -1846,24 +1829,24 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id)
default:
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "not implemented yet")
} /* end switch */ /*lint !e788 All appropriate cases are covered */
- } /* end else */
+ } /* end else */
/* If "old" fill value size is 0 (undefined), map it to -1 */
- if(fill_prop->size == 0)
+ if (fill_prop->size == 0)
fill_prop->size = (ssize_t)-1;
} /* end if */
alloc_time_state = 0;
- if((dataset->shared->layout.type == H5D_COMPACT && fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY)
- || (dataset->shared->layout.type == H5D_CONTIGUOUS && fill_prop->alloc_time == H5D_ALLOC_TIME_LATE)
- || (dataset->shared->layout.type == H5D_CHUNKED && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR)
- || (dataset->shared->layout.type == H5D_VIRTUAL && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR))
+ if ((dataset->shared->layout.type == H5D_COMPACT && fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY) ||
+ (dataset->shared->layout.type == H5D_CONTIGUOUS && fill_prop->alloc_time == H5D_ALLOC_TIME_LATE) ||
+ (dataset->shared->layout.type == H5D_CHUNKED && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR) ||
+ (dataset->shared->layout.type == H5D_VIRTUAL && fill_prop->alloc_time == H5D_ALLOC_TIME_INCR))
alloc_time_state = 1;
/* Set revised fill value properties, if they are different from the defaults */
- if(H5P_fill_value_cmp(&H5D_def_dset.dcpl_cache.fill, fill_prop, sizeof(H5O_fill_t))) {
- if(H5P_set(plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
+ if (H5P_fill_value_cmp(&H5D_def_dset.dcpl_cache.fill, fill_prop, sizeof(H5O_fill_t))) {
+ if (H5P_set(plist, H5D_CRT_FILL_VALUE_NAME, fill_prop) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set fill value")
- if(H5P_set(plist, H5D_CRT_ALLOC_TIME_STATE_NAME, &alloc_time_state) < 0)
+ if (H5P_set(plist, H5D_CRT_ALLOC_TIME_STATE_NAME, &alloc_time_state) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set allocation time state")
} /* end if */
@@ -1872,44 +1855,43 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id)
* This is important only for parallel I/O where the space must
* be fully allocated before I/O can happen.
*/
- if((H5F_INTENT(dataset->oloc.file) & H5F_ACC_RDWR)
- && !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage)
- && H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_ALLOCATE_EARLY)) {
+ if ((H5F_INTENT(dataset->oloc.file) & H5F_ACC_RDWR) &&
+ !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage) &&
+ H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_ALLOCATE_EARLY)) {
H5D_io_info_t io_info;
io_info.dset = dataset;
- if(H5D__alloc_storage(&io_info, H5D_ALLOC_OPEN, FALSE, NULL) < 0)
+ if (H5D__alloc_storage(&io_info, H5D_ALLOC_OPEN, FALSE, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file storage")
} /* end if */
done:
- if(ret_value < 0) {
- if(H5F_addr_defined(dataset->oloc.addr) && H5O_close(&(dataset->oloc), NULL) < 0)
+ if (ret_value < 0) {
+ if (H5F_addr_defined(dataset->oloc.addr) && H5O_close(&(dataset->oloc), NULL) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release object header")
- if(dataset->shared) {
- if(layout_init)
- if(dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
+ if (dataset->shared) {
+ if (layout_init)
+ if (dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy layout info")
- if(dataset->shared->space && H5S_close(dataset->shared->space) < 0)
+ if (dataset->shared->space && H5S_close(dataset->shared->space) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace")
- if(dataset->shared->type) {
- if(dataset->shared->type_id > 0) {
- if(H5I_dec_ref(dataset->shared->type_id) < 0)
+ if (dataset->shared->type) {
+ if (dataset->shared->type_id > 0) {
+ if (H5I_dec_ref(dataset->shared->type_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
} /* end if */
else {
- if(H5T_close_real(dataset->shared->type) < 0)
+ if (H5T_close_real(dataset->shared->type) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
} /* end else */
- } /* end if */
- } /* end if */
- } /* end if */
+ } /* end if */
+ } /* end if */
+ } /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__open_oid() */
-
/*-------------------------------------------------------------------------
* Function: H5D_close
*
@@ -1923,10 +1905,10 @@ done:
herr_t
H5D_close(H5D_t *dataset)
{
- hbool_t free_failed = FALSE; /* Set if freeing sub-components failed */
- hbool_t corked; /* Whether the dataset is corked or not */
- hbool_t file_closed = TRUE; /* H5O_close also closed the file? */
- herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t free_failed = FALSE; /* Set if freeing sub-components failed */
+ hbool_t corked; /* Whether the dataset is corked or not */
+ hbool_t file_closed = TRUE; /* H5O_close also closed the file? */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1940,10 +1922,10 @@ H5D_close(H5D_t *dataset)
#endif /* H5D_CHUNK_DEBUG */
dataset->shared->fo_count--;
- if(dataset->shared->fo_count == 0) {
+ if (dataset->shared->fo_count == 0) {
/* Flush the dataset's information. Continue to close even if it fails. */
- if(H5D__flush_real(dataset) < 0)
+ if (H5D__flush_real(dataset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
/* Set a flag to indicate the dataset is closing, before we start freeing things */
@@ -1953,30 +1935,32 @@ H5D_close(H5D_t *dataset)
dataset->shared->closing = TRUE;
/* Free cached information for each kind of dataset */
- switch(dataset->shared->layout.type) {
+ switch (dataset->shared->layout.type) {
case H5D_CONTIGUOUS:
/* Free the data sieve buffer, if it's been allocated */
- if(dataset->shared->cache.contig.sieve_buf)
- dataset->shared->cache.contig.sieve_buf = (unsigned char *)H5FL_BLK_FREE(sieve_buf,dataset->shared->cache.contig.sieve_buf);
+ if (dataset->shared->cache.contig.sieve_buf)
+ dataset->shared->cache.contig.sieve_buf =
+ (unsigned char *)H5FL_BLK_FREE(sieve_buf, dataset->shared->cache.contig.sieve_buf);
break;
case H5D_CHUNKED:
/* Check for skip list for iterating over chunks during I/O to close */
- if(dataset->shared->cache.chunk.sel_chunks) {
+ if (dataset->shared->cache.chunk.sel_chunks) {
HDassert(H5SL_count(dataset->shared->cache.chunk.sel_chunks) == 0);
H5SL_close(dataset->shared->cache.chunk.sel_chunks);
dataset->shared->cache.chunk.sel_chunks = NULL;
} /* end if */
/* Check for cached single chunk dataspace */
- if(dataset->shared->cache.chunk.single_space) {
+ if (dataset->shared->cache.chunk.single_space) {
(void)H5S_close(dataset->shared->cache.chunk.single_space);
dataset->shared->cache.chunk.single_space = NULL;
} /* end if */
/* Check for cached single element chunk info */
- if(dataset->shared->cache.chunk.single_chunk_info) {
- dataset->shared->cache.chunk.single_chunk_info = H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info);
+ if (dataset->shared->cache.chunk.single_chunk_info) {
+ dataset->shared->cache.chunk.single_chunk_info =
+ H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info);
dataset->shared->cache.chunk.single_chunk_info = NULL;
} /* end if */
break;
@@ -1985,32 +1969,35 @@ H5D_close(H5D_t *dataset)
/* Nothing special to do (info freed in the layout destroy) */
break;
- case H5D_VIRTUAL:
- {
+ case H5D_VIRTUAL: {
size_t i, j;
- HDassert(dataset->shared->layout.storage.u.virt.list || (dataset->shared->layout.storage.u.virt.list_nused == 0));
+ HDassert(dataset->shared->layout.storage.u.virt.list ||
+ (dataset->shared->layout.storage.u.virt.list_nused == 0));
/* Close source datasets */
- for(i = 0; i < dataset->shared->layout.storage.u.virt.list_nused; i++) {
+ for (i = 0; i < dataset->shared->layout.storage.u.virt.list_nused; i++) {
/* Close source dataset */
- if(dataset->shared->layout.storage.u.virt.list[i].source_dset.dset) {
+ if (dataset->shared->layout.storage.u.virt.list[i].source_dset.dset) {
HDassert(dataset->shared->layout.storage.u.virt.list[i].source_dset.dset != dataset);
- if(H5D_close(dataset->shared->layout.storage.u.virt.list[i].source_dset.dset) < 0)
+ if (H5D_close(dataset->shared->layout.storage.u.virt.list[i].source_dset.dset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to close source dataset")
dataset->shared->layout.storage.u.virt.list[i].source_dset.dset = NULL;
} /* end if */
/* Close sub datasets */
- for(j = 0; j < dataset->shared->layout.storage.u.virt.list[i].sub_dset_nused; j++)
- if(dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset) {
- HDassert(dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset != dataset);
- if(H5D_close(dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset) < 0)
- HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to close source dataset")
+ for (j = 0; j < dataset->shared->layout.storage.u.virt.list[i].sub_dset_nused; j++)
+ if (dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset) {
+ HDassert(dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset !=
+ dataset);
+ if (H5D_close(dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset) <
+ 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL,
+ "unable to close source dataset")
dataset->shared->layout.storage.u.virt.list[i].sub_dset[j].dset = NULL;
} /* end if */
- } /* end for */
- } /* end block */
+ } /* end for */
+ } /* end block */
break;
case H5D_LAYOUT_ERROR:
@@ -2019,11 +2006,11 @@ H5D_close(H5D_t *dataset)
HDassert("not implemented yet" && 0);
#ifdef NDEBUG
HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
-#endif /* NDEBUG */
+#endif /* NDEBUG */
} /* end switch */ /*lint !e788 All appropriate cases are covered */
/* Destroy any cached layout information for the dataset */
- if(dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
+ if (dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy layout info")
/* Free the external file prefix */
@@ -2033,43 +2020,42 @@ H5D_close(H5D_t *dataset)
dataset->shared->vds_prefix = (char *)H5MM_xfree(dataset->shared->vds_prefix);
/* Release layout, fill-value, efl & pipeline messages */
- if(dataset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT)
+ if (dataset->shared->dcpl_id != H5P_DATASET_CREATE_DEFAULT)
free_failed |= (H5O_msg_reset(H5O_PLINE_ID, &dataset->shared->dcpl_cache.pline) < 0) ||
- (H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0) ||
- (H5O_msg_reset(H5O_FILL_ID, &dataset->shared->dcpl_cache.fill) < 0) ||
- (H5O_msg_reset(H5O_EFL_ID, &dataset->shared->dcpl_cache.efl) < 0);
+ (H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0) ||
+ (H5O_msg_reset(H5O_FILL_ID, &dataset->shared->dcpl_cache.fill) < 0) ||
+ (H5O_msg_reset(H5O_EFL_ID, &dataset->shared->dcpl_cache.efl) < 0);
/* Uncork cache entries with object address tag */
- if(H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
+ if (H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve an object's cork status")
- if(corked)
- if(H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__UNCORK, NULL) < 0)
+ if (corked)
+ if (H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__UNCORK, NULL) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTUNCORK, FAIL, "unable to uncork an object")
/* Release datatype, dataspace, and creation and access property lists -- there isn't
* much we can do if one of these fails, so we just continue.
*/
- free_failed |= (H5I_dec_ref(dataset->shared->type_id) < 0) ||
- (H5S_close(dataset->shared->space) < 0) ||
- (H5I_dec_ref(dataset->shared->dcpl_id) < 0) ||
- (H5I_dec_ref(dataset->shared->dapl_id) < 0);
+ free_failed |=
+ (H5I_dec_ref(dataset->shared->type_id) < 0) || (H5S_close(dataset->shared->space) < 0) ||
+ (H5I_dec_ref(dataset->shared->dcpl_id) < 0) || (H5I_dec_ref(dataset->shared->dapl_id) < 0);
/* Remove the dataset from the list of opened objects in the file */
- if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't decrement count for object")
- if(H5FO_delete(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (H5FO_delete(dataset->oloc.file, dataset->oloc.addr) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't remove dataset from list of open objects")
/* Close the dataset object */
/* (This closes the file, if this is the last object open) */
- if(H5O_close(&(dataset->oloc), &file_closed) < 0)
+ if (H5O_close(&(dataset->oloc), &file_closed) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release object header")
/* Evict dataset metadata if evicting on close */
- if(!file_closed && H5F_SHARED(dataset->oloc.file) && H5F_EVICT_ON_CLOSE(dataset->oloc.file)) {
- if(H5AC_flush_tagged_metadata(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (!file_closed && H5F_SHARED(dataset->oloc.file) && H5F_EVICT_ON_CLOSE(dataset->oloc.file)) {
+ if (H5AC_flush_tagged_metadata(dataset->oloc.file, dataset->oloc.addr) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata")
- if(H5AC_evict_tagged_metadata(dataset->oloc.file, dataset->oloc.addr, FALSE) < 0)
+ if (H5AC_evict_tagged_metadata(dataset->oloc.file, dataset->oloc.addr, FALSE) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict tagged metadata")
} /* end if */
@@ -2080,41 +2066,41 @@ H5D_close(H5D_t *dataset)
* above).
*/
dataset->oloc.file = NULL;
- dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);
+ dataset->shared = H5FL_FREE(H5D_shared_t, dataset->shared);
} /* end if */
else {
/* Decrement the ref. count for this object in the top file */
- if(H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
+ if (H5FO_top_decr(dataset->oloc.file, dataset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't decrement count for object")
/* Check reference count for this object in the top file */
- if(H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
- if(H5O_close(&(dataset->oloc), NULL) < 0)
+ if (H5FO_top_count(dataset->oloc.file, dataset->oloc.addr) == 0) {
+ if (H5O_close(&(dataset->oloc), NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to close")
} /* end if */
else
/* Free object location (i.e. "unhold" the file if appropriate) */
- if(H5O_loc_free(&(dataset->oloc)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "problem attempting to free location")
+ if (H5O_loc_free(&(dataset->oloc)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "problem attempting to free location")
} /* end else */
/* Release the dataset's path info */
- if(H5G_name_free(&(dataset->path)) < 0)
+ if (H5G_name_free(&(dataset->path)) < 0)
free_failed = TRUE;
/* Free the dataset's memory structure */
dataset = H5FL_FREE(H5D_t, dataset);
/* Check if anything failed in the middle... */
- if(free_failed)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't free a component of the dataset, but the dataset was freed anyway.")
+ if (free_failed)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "couldn't free a component of the dataset, but the dataset was freed anyway.")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_close() */
-
/*-------------------------------------------------------------------------
* Function: H5D_mult_refresh_close
*
@@ -2127,12 +2113,12 @@ done:
herr_t
H5D_mult_refresh_close(hid_t dset_id)
{
- H5D_t *dataset; /* Dataset to refresh */
- herr_t ret_value = SUCCEED; /* return value */
+ H5D_t *dataset; /* Dataset to refresh */
+ herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_NOAPI(FAIL)
- if(NULL == (dataset = (H5D_t *)H5VL_object_verify(dset_id, H5I_DATASET)))
+ if (NULL == (dataset = (H5D_t *)H5VL_object_verify(dset_id, H5I_DATASET)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
/* check args */
@@ -2141,32 +2127,34 @@ H5D_mult_refresh_close(hid_t dset_id)
HDassert(dataset->shared);
HDassert(dataset->shared->fo_count > 0);
- if(dataset->shared->fo_count > 1) {
+ if (dataset->shared->fo_count > 1) {
/* Free cached information for each kind of dataset */
- switch(dataset->shared->layout.type) {
+ switch (dataset->shared->layout.type) {
case H5D_CONTIGUOUS:
/* Free the data sieve buffer, if it's been allocated */
- if(dataset->shared->cache.contig.sieve_buf)
- dataset->shared->cache.contig.sieve_buf = (unsigned char *)H5FL_BLK_FREE(sieve_buf,dataset->shared->cache.contig.sieve_buf);
+ if (dataset->shared->cache.contig.sieve_buf)
+ dataset->shared->cache.contig.sieve_buf =
+ (unsigned char *)H5FL_BLK_FREE(sieve_buf, dataset->shared->cache.contig.sieve_buf);
break;
case H5D_CHUNKED:
/* Check for skip list for iterating over chunks during I/O to close */
- if(dataset->shared->cache.chunk.sel_chunks) {
+ if (dataset->shared->cache.chunk.sel_chunks) {
HDassert(H5SL_count(dataset->shared->cache.chunk.sel_chunks) == 0);
H5SL_close(dataset->shared->cache.chunk.sel_chunks);
dataset->shared->cache.chunk.sel_chunks = NULL;
} /* end if */
/* Check for cached single chunk dataspace */
- if(dataset->shared->cache.chunk.single_space) {
+ if (dataset->shared->cache.chunk.single_space) {
(void)H5S_close(dataset->shared->cache.chunk.single_space);
dataset->shared->cache.chunk.single_space = NULL;
} /* end if */
/* Check for cached single element chunk info */
- if(dataset->shared->cache.chunk.single_chunk_info) {
- dataset->shared->cache.chunk.single_chunk_info = H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info);
+ if (dataset->shared->cache.chunk.single_chunk_info) {
+ dataset->shared->cache.chunk.single_chunk_info =
+ H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info);
dataset->shared->cache.chunk.single_chunk_info = NULL;
} /* end if */
break;
@@ -2182,11 +2170,11 @@ H5D_mult_refresh_close(hid_t dset_id)
HDassert("not implemented yet" && 0);
#ifdef NDEBUG
HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
-#endif /* NDEBUG */
+#endif /* NDEBUG */
} /* end switch */ /*lint !e788 All appropriate cases are covered */
/* Destroy any cached layout information for the dataset */
- if(dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
+ if (dataset->shared->layout.ops->dest && (dataset->shared->layout.ops->dest)(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to destroy layout info")
} /* end if */
@@ -2194,7 +2182,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_mult_refresh_close() */
-
/*-------------------------------------------------------------------------
* Function: H5D_mult_refresh_reopen
*
@@ -2207,7 +2194,7 @@ done:
herr_t
H5D_mult_refresh_reopen(H5D_t *dataset)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -2215,25 +2202,25 @@ H5D_mult_refresh_reopen(H5D_t *dataset)
HDassert(dataset && dataset->oloc.file && dataset->shared);
HDassert(dataset->shared->fo_count > 0);
- if(dataset->shared->fo_count > 1) {
+ if (dataset->shared->fo_count > 1) {
/* Release dataspace info */
- if(H5S_close(dataset->shared->space) < 0)
+ if (H5S_close(dataset->shared->space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
/* Re-load dataspace info */
- if(NULL == (dataset->shared->space = H5S_read(&(dataset->oloc))))
+ if (NULL == (dataset->shared->space = H5S_read(&(dataset->oloc))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to load dataspace info from dataset header")
/* Cache the dataset's dataspace info */
- if(H5D__cache_dataspace_info(dataset) < 0)
+ if (H5D__cache_dataspace_info(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't cache dataspace info")
/* Release layout info */
- if(H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0)
+ if (H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset layout info")
/* Re-load layout message info */
- if(NULL == H5O_msg_read(&(dataset->oloc), H5O_LAYOUT_ID, &(dataset->shared->layout)))
+ if (NULL == H5O_msg_read(&(dataset->oloc), H5O_LAYOUT_ID, &(dataset->shared->layout)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to read data layout message")
} /* end if */
@@ -2241,7 +2228,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_mult_refresh_reopen() */
-
/*-------------------------------------------------------------------------
* Function: H5D_oloc
*
@@ -2260,7 +2246,6 @@ H5D_oloc(H5D_t *dataset)
FUNC_LEAVE_NOAPI(dataset ? &(dataset->oloc) : (H5O_loc_t *)NULL)
} /* end H5D_oloc() */
-
/*-------------------------------------------------------------------------
* Function: H5D_nameof
*
@@ -2279,7 +2264,6 @@ H5D_nameof(const H5D_t *dataset)
FUNC_LEAVE_NOAPI(dataset ? &(dataset->path) : NULL)
} /* end H5D_nameof() */
-
/*-------------------------------------------------------------------------
* Function: H5D__alloc_storage
*
@@ -2289,15 +2273,15 @@ H5D_nameof(const H5D_t *dataset)
*-------------------------------------------------------------------------
*/
herr_t
-H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
- hbool_t full_overwrite, hsize_t old_dim[])
+H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc, hbool_t full_overwrite,
+ hsize_t old_dim[])
{
- const H5D_t *dset = io_info->dset; /* The dataset object */
- H5F_t *f = dset->oloc.file; /* The dataset's file pointer */
- H5O_layout_t *layout; /* The dataset's layout information */
- hbool_t must_init_space = FALSE; /* Flag to indicate that space should be initialized */
- hbool_t addr_set = FALSE; /* Flag to indicate that the dataset's storage address was set */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5D_t * dset = io_info->dset; /* The dataset object */
+ H5F_t * f = dset->oloc.file; /* The dataset's file pointer */
+ H5O_layout_t *layout; /* The dataset's layout information */
+ hbool_t must_init_space = FALSE; /* Flag to indicate that space should be initialized */
+ hbool_t addr_set = FALSE; /* Flag to indicate that the dataset's storage address was set */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -2309,17 +2293,17 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
* We assume that external storage is already
* allocated by the caller, or at least will be before I/O is performed.
*/
- if(!(0 == H5S_GET_EXTENT_NPOINTS(dset->shared->space) || dset->shared->dcpl_cache.efl.nused > 0)) {
+ if (!(0 == H5S_GET_EXTENT_NPOINTS(dset->shared->space) || dset->shared->dcpl_cache.efl.nused > 0)) {
/* Get a pointer to the dataset's layout information */
layout = &(dset->shared->layout);
- switch(layout->type) {
+ switch (layout->type) {
case H5D_CONTIGUOUS:
- if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
+ if (!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
/* Check if we have a zero-sized dataset */
- if(layout->storage.u.contig.size > 0) {
+ if (layout->storage.u.contig.size > 0) {
/* Reserve space in the file for the entire array */
- if(H5D__contig_alloc(f, &layout->storage.u.contig/*out*/) < 0)
+ if (H5D__contig_alloc(f, &layout->storage.u.contig /*out*/) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
/* Indicate that we should initialize storage space */
@@ -2334,9 +2318,9 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
break;
case H5D_CHUNKED:
- if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
+ if (!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
/* Create the root of the index that manages chunked storage */
- if(H5D__chunk_create(dset /*in,out*/) < 0)
+ if (H5D__chunk_create(dset /*in,out*/) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
/* Indicate that we set the storage addr */
@@ -2350,21 +2334,24 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
* the dataset, indicate that space should be allocated, so the
* index gets expanded. -QAK
*/
- if(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY && time_alloc == H5D_ALLOC_EXTEND)
+ if (dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY &&
+ time_alloc == H5D_ALLOC_EXTEND)
must_init_space = TRUE;
break;
case H5D_COMPACT:
/* Check if space is already allocated */
- if(NULL == layout->storage.u.compact.buf) {
+ if (NULL == layout->storage.u.compact.buf) {
/* Reserve space in layout header message for the entire array.
* Starting from the 1.8.7 release, we allow dataspace to have
* zero dimension size. So the storage size can be zero.
* SLU 2011/4/4 */
- if(layout->storage.u.compact.size > 0) {
- if(NULL == (layout->storage.u.compact.buf = H5MM_malloc(layout->storage.u.compact.size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory for compact dataset")
- if(!full_overwrite)
+ if (layout->storage.u.compact.size > 0) {
+ if (NULL ==
+ (layout->storage.u.compact.buf = H5MM_malloc(layout->storage.u.compact.size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "unable to allocate memory for compact dataset")
+ if (!full_overwrite)
HDmemset(layout->storage.u.compact.buf, 0, layout->storage.u.compact.size);
layout->storage.u.compact.dirty = TRUE;
@@ -2373,7 +2360,7 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
}
else {
layout->storage.u.compact.dirty = FALSE;
- must_init_space = FALSE;
+ must_init_space = FALSE;
}
} /* end if */
break;
@@ -2393,12 +2380,12 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
HDassert("not implemented yet" && 0);
#ifdef NDEBUG
HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
-#endif /* NDEBUG */
+#endif /* NDEBUG */
} /* end switch */ /*lint !e788 All appropriate cases are covered */
/* Check if we need to initialize the space */
- if(must_init_space) {
- if(layout->type == H5D_CHUNKED) {
+ if (must_init_space) {
+ if (layout->type == H5D_CHUNKED) {
/* If we are doing incremental allocation and the index got
* created during a H5Dwrite call, don't initialize the storage
* now, wait for the actual writes to each block and let the
@@ -2408,25 +2395,29 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
* fill values to the chunks they allocate space for. Yes,
* this is icky. -QAK
*/
- if(!(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR && time_alloc == H5D_ALLOC_WRITE))
- if(H5D__init_storage(io_info, full_overwrite, old_dim) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
+ if (!(dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR &&
+ time_alloc == H5D_ALLOC_WRITE))
+ if (H5D__init_storage(io_info, full_overwrite, old_dim) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to initialize dataset with fill value")
} /* end if */
else {
- H5D_fill_value_t fill_status; /* The fill value status */
+ H5D_fill_value_t fill_status; /* The fill value status */
/* Check the dataset's fill-value status */
- if(H5P_is_fill_value_defined(&dset->shared->dcpl_cache.fill, &fill_status) < 0)
+ if (H5P_is_fill_value_defined(&dset->shared->dcpl_cache.fill, &fill_status) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")
/* If we are filling the dataset on allocation or "if set" and
* the fill value _is_ set, do that now */
- if(dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC ||
- (dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED))
- if(H5D__init_storage(io_info, full_overwrite, old_dim) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize dataset with fill value")
+ if (dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC ||
+ (dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET &&
+ fill_status == H5D_FILL_VALUE_USER_DEFINED))
+ if (H5D__init_storage(io_info, full_overwrite, old_dim) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to initialize dataset with fill value")
} /* end else */
- } /* end if */
+ } /* end if */
/* If we set the address (and aren't in the middle of creating the
* dataset), mark the layout header message for later writing to
@@ -2435,9 +2426,9 @@ H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
/* (The layout message is already in the dataset's object header, this
* operation just sets the address and makes it constant)
*/
- if(time_alloc != H5D_ALLOC_CREATE && addr_set)
+ if (time_alloc != H5D_ALLOC_CREATE && addr_set)
/* Mark the layout as dirty, for later writing to the file */
- if(H5D__mark(dset, H5D_MARK_LAYOUT) < 0)
+ if (H5D__mark(dset, H5D_MARK_LAYOUT) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to mark dataspace as dirty")
} /* end if */
@@ -2445,7 +2436,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__alloc_storage() */
-
/*-------------------------------------------------------------------------
* Function: H5D__init_storage
*
@@ -2459,8 +2449,8 @@ done:
static herr_t
H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t old_dim[])
{
- const H5D_t *dset = io_info->dset; /* dataset pointer */
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5D_t *dset = io_info->dset; /* dataset pointer */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2469,18 +2459,20 @@ H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t
switch (dset->shared->layout.type) {
case H5D_COMPACT:
/* If we will be immediately overwriting the values, don't bother to clear them */
- if(!full_overwrite) {
+ if (!full_overwrite) {
/* Fill the compact dataset storage */
- if(H5D__compact_fill(dset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize compact dataset storage")
+ if (H5D__compact_fill(dset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to initialize compact dataset storage")
} /* end if */
break;
case H5D_CONTIGUOUS:
/* Don't write default fill values to external files */
/* If we will be immediately overwriting the values, don't bother to clear them */
- if((dset->shared->dcpl_cache.efl.nused == 0 || dset->shared->dcpl_cache.fill.buf) && !full_overwrite)
- if(H5D__contig_fill(io_info) < 0)
+ if ((dset->shared->dcpl_cache.efl.nused == 0 || dset->shared->dcpl_cache.fill.buf) &&
+ !full_overwrite)
+ if (H5D__contig_fill(io_info) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize contiguous dataset storage")
break;
@@ -2490,13 +2482,13 @@ H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t
* for all chunks now and initialize each chunk with the fill value.
*/
{
- hsize_t zero_dim[H5O_LAYOUT_NDIMS] = {0};
+ hsize_t zero_dim[H5O_LAYOUT_NDIMS] = {0};
/* Use zeros for old dimensions if not specified */
- if(old_dim == NULL)
+ if (old_dim == NULL)
old_dim = zero_dim;
- if(H5D__chunk_allocate(io_info, full_overwrite, old_dim) < 0)
+ if (H5D__chunk_allocate(io_info, full_overwrite, old_dim) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
break;
} /* end block */
@@ -2510,14 +2502,13 @@ H5D__init_storage(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t
HDassert("not implemented yet" && 0);
#ifdef NDEBUG
HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
-#endif /* NDEBUG */
+#endif /* NDEBUG */
} /* end switch */ /*lint !e788 All appropriate cases are covered */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__init_storage() */
-
/*-------------------------------------------------------------------------
* Function: H5D__get_storage_size
*
@@ -2530,15 +2521,16 @@ done:
herr_t
H5D__get_storage_size(const H5D_t *dset, hsize_t *storage_size)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
- switch(dset->shared->layout.type) {
+ switch (dset->shared->layout.type) {
case H5D_CHUNKED:
- if((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
- if(H5D__chunk_allocated(dset, storage_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't retrieve chunked dataset allocated size")
+ if ((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
+ if (H5D__chunk_allocated(dset, storage_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "can't retrieve chunked dataset allocated size")
} /* end if */
else
*storage_size = 0;
@@ -2546,7 +2538,7 @@ H5D__get_storage_size(const H5D_t *dset, hsize_t *storage_size)
case H5D_CONTIGUOUS:
/* Datasets which are not allocated yet are using no space on disk */
- if((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
+ if ((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
*storage_size = dset->shared->layout.storage.u.contig.size;
else
*storage_size = 0;
@@ -2572,7 +2564,6 @@ done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__get_storage_size() */
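[Editor's sketch] H5D__get_storage_size() is the internal half of the public H5Dget_storage_size() query. A minimal sketch of the user-level call (dset_id assumed to be an open dataset; error checking omitted):

#include <stdio.h>
#include "hdf5.h"

/* Sketch: report the file space currently allocated for a dataset. For
 * chunked layouts this is the sum of the allocated chunks; 0 means either
 * an error or that no storage has been allocated yet. */
static void print_storage_size(hid_t dset_id)
{
    hsize_t nbytes = H5Dget_storage_size(dset_id);

    printf("allocated storage: %llu bytes\n", (unsigned long long)nbytes);
}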
-
/*-------------------------------------------------------------------------
* Function: H5D__get_offset
*
@@ -2588,13 +2579,13 @@ done:
haddr_t
H5D__get_offset(const H5D_t *dset)
{
- haddr_t ret_value = HADDR_UNDEF;
+ haddr_t ret_value = HADDR_UNDEF;
FUNC_ENTER_PACKAGE
HDassert(dset);
- switch(dset->shared->layout.type) {
+ switch (dset->shared->layout.type) {
case H5D_VIRTUAL:
case H5D_CHUNKED:
case H5D_COMPACT:
@@ -2604,7 +2595,8 @@ H5D__get_offset(const H5D_t *dset)
/* If dataspace hasn't been allocated or dataset is stored in
* an external file, the value will be HADDR_UNDEF.
*/
- if(dset->shared->dcpl_cache.efl.nused == 0 || H5F_addr_defined(dset->shared->layout.storage.u.contig.addr))
+ if (dset->shared->dcpl_cache.efl.nused == 0 ||
+ H5F_addr_defined(dset->shared->layout.storage.u.contig.addr))
/* Return the absolute dataset offset from the beginning of file. */
ret_value = dset->shared->layout.storage.u.contig.addr + H5F_BASE_ADDR(dset->oloc.file);
break;
@@ -2619,7 +2611,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_offset() */
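[Editor's sketch] The public counterpart of H5D__get_offset() is H5Dget_offset(), which only yields a real address for contiguous, non-external, already-allocated storage. A minimal sketch (hypothetical helper name):

#include <stdio.h>
#include "hdf5.h"

/* Sketch: locate a contiguous dataset's raw data within the HDF5 file.
 * HADDR_UNDEF comes back for chunked/compact/virtual layouts, external
 * storage, or storage that has not been allocated yet. */
static void print_raw_data_offset(hid_t dset_id)
{
    haddr_t offset = H5Dget_offset(dset_id);

    if (HADDR_UNDEF == offset)
        printf("dataset has no single raw data offset\n");
    else
        printf("raw data begins at file offset %llu\n", (unsigned long long)offset);
}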
-
/*-------------------------------------------------------------------------
* Function: H5D__vlen_get_buf_size_alloc
*
@@ -2636,13 +2627,14 @@ static void *
H5D__vlen_get_buf_size_alloc(size_t size, void *info)
{
H5D_vlen_bufsize_common_t *vlen_bufsize_com = (H5D_vlen_bufsize_common_t *)info;
- void *ret_value = NULL; /* Return value */
+ void * ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
/* Check for increasing the size of the temporary space for VL data */
- if(size > vlen_bufsize_com->vl_tbuf_size) {
- if(NULL == (vlen_bufsize_com->vl_tbuf = H5FL_BLK_REALLOC(vlen_vl_buf, vlen_bufsize_com->vl_tbuf, size)))
+ if (size > vlen_bufsize_com->vl_tbuf_size) {
+ if (NULL ==
+ (vlen_bufsize_com->vl_tbuf = H5FL_BLK_REALLOC(vlen_vl_buf, vlen_bufsize_com->vl_tbuf, size)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't reallocate temporary VL data buffer")
vlen_bufsize_com->vl_tbuf_size = size;
} /* end if */
@@ -2657,7 +2649,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size_alloc() */
-
/*-------------------------------------------------------------------------
* Function: H5D__vlen_get_buf_size_cb
*
@@ -2668,11 +2659,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__vlen_get_buf_size_cb(void H5_ATTR_UNUSED *elem, hid_t type_id,
- unsigned H5_ATTR_UNUSED ndim, const hsize_t *point, void *op_data)
+H5D__vlen_get_buf_size_cb(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t *point, void *op_data)
{
H5D_vlen_bufsize_native_t *vlen_bufsize = (H5D_vlen_bufsize_native_t *)op_data;
- herr_t ret_value = H5_ITER_CONT; /* Return value */
+ herr_t ret_value = H5_ITER_CONT; /* Return value */
FUNC_ENTER_STATIC
@@ -2682,18 +2673,18 @@ H5D__vlen_get_buf_size_cb(void H5_ATTR_UNUSED *elem, hid_t type_id,
HDassert(op_data);
/* Select point to read in */
- if(H5S_select_elements(vlen_bufsize->fspace, H5S_SELECT_SET, (size_t)1, point) < 0)
+ if (H5S_select_elements(vlen_bufsize->fspace, H5S_SELECT_SET, (size_t)1, point) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, H5_ITER_ERROR, "can't select point")
/* Read in the point (with the custom VL memory allocator) */
- if(H5D__read(vlen_bufsize->dset, type_id, vlen_bufsize->mspace, vlen_bufsize->fspace, vlen_bufsize->common.fl_tbuf) < 0)
+ if (H5D__read(vlen_bufsize->dset, type_id, vlen_bufsize->mspace, vlen_bufsize->fspace,
+ vlen_bufsize->common.fl_tbuf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, H5_ITER_ERROR, "can't read point")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size_cb() */
-
/*-------------------------------------------------------------------------
* Function: H5D__vlen_get_buf_size
*
@@ -2719,80 +2710,78 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__vlen_get_buf_size(H5D_t *dset, hid_t type_id, hid_t space_id,
- hsize_t *size)
+H5D__vlen_get_buf_size(H5D_t *dset, hid_t type_id, hid_t space_id, hsize_t *size)
{
H5D_vlen_bufsize_native_t vlen_bufsize = {NULL, NULL, NULL, {NULL, NULL, 0, 0}};
- H5S_t *fspace = NULL; /* Dataset's dataspace */
- H5S_t *mspace = NULL; /* Memory dataspace */
- char bogus; /* bogus value to pass to H5Diterate() */
- H5S_t *space; /* Dataspace for iteration */
- H5T_t *type; /* Datatype */
- H5S_sel_iter_op_t dset_op; /* Operator for iteration */
- herr_t ret_value = FAIL; /* Return value */
+ H5S_t * fspace = NULL; /* Dataset's dataspace */
+ H5S_t * mspace = NULL; /* Memory dataspace */
+ char bogus; /* bogus value to pass to H5Diterate() */
+ H5S_t * space; /* Dataspace for iteration */
+ H5T_t * type; /* Datatype */
+ H5S_sel_iter_op_t dset_op; /* Operator for iteration */
+ herr_t ret_value = FAIL; /* Return value */
FUNC_ENTER_PACKAGE
/* Check args */
- if(NULL == (type = (H5T_t *)H5I_object(type_id)))
+ if (NULL == (type = (H5T_t *)H5I_object(type_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a valid base datatype")
- if(NULL == (space = (H5S_t *)H5I_object(space_id)))
+ if (NULL == (space = (H5S_t *)H5I_object(space_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataspace")
- if(!(H5S_has_extent(space)))
+ if (!(H5S_has_extent(space)))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dataspace does not have extent set")
/* Save the dataset */
vlen_bufsize.dset = dset;
/* Get a copy of the dataset's dataspace */
- if(NULL == (fspace = H5S_copy(dset->shared->space, FALSE, TRUE)))
+ if (NULL == (fspace = H5S_copy(dset->shared->space, FALSE, TRUE)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to get dataspace")
vlen_bufsize.fspace = fspace;
/* Create a scalar for the memory dataspace */
- if(NULL == (mspace = H5S_create(H5S_SCALAR)))
+ if (NULL == (mspace = H5S_create(H5S_SCALAR)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create dataspace")
vlen_bufsize.mspace = mspace;
/* Grab the temporary buffers required */
- if(NULL == (vlen_bufsize.common.fl_tbuf = H5FL_BLK_MALLOC(vlen_fl_buf, H5T_get_size(type))))
+ if (NULL == (vlen_bufsize.common.fl_tbuf = H5FL_BLK_MALLOC(vlen_fl_buf, H5T_get_size(type))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "no temporary buffers available")
- if(NULL == (vlen_bufsize.common.vl_tbuf = H5FL_BLK_MALLOC(vlen_vl_buf, (size_t)1)))
+ if (NULL == (vlen_bufsize.common.vl_tbuf = H5FL_BLK_MALLOC(vlen_vl_buf, (size_t)1)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "no temporary buffers available")
vlen_bufsize.common.vl_tbuf_size = 1;
/* Set the memory manager to the special allocation routine */
- if(H5CX_set_vlen_alloc_info(H5D__vlen_get_buf_size_alloc, &vlen_bufsize.common, NULL, NULL) < 0)
+ if (H5CX_set_vlen_alloc_info(H5D__vlen_get_buf_size_alloc, &vlen_bufsize.common, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set VL data allocation routine")
/* Set the initial number of bytes required */
vlen_bufsize.common.size = 0;
/* Call H5S_select_iterate with args, etc. */
- dset_op.op_type = H5S_SEL_ITER_OP_APP;
- dset_op.u.app_op.op = H5D__vlen_get_buf_size_cb;
- dset_op.u.app_op.type_id = type_id;
+ dset_op.op_type = H5S_SEL_ITER_OP_APP;
+ dset_op.u.app_op.op = H5D__vlen_get_buf_size_cb;
+ dset_op.u.app_op.type_id = type_id;
ret_value = H5S_select_iterate(&bogus, type, space, &dset_op, &vlen_bufsize);
/* Get the size if we succeeded */
- if(ret_value >= 0)
+ if (ret_value >= 0)
*size = vlen_bufsize.common.size;
done:
- if(fspace && H5S_close(fspace) < 0)
+ if (fspace && H5S_close(fspace) < 0)
HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
- if(mspace && H5S_close(mspace) < 0)
+ if (mspace && H5S_close(mspace) < 0)
HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
- if(vlen_bufsize.common.fl_tbuf != NULL)
+ if (vlen_bufsize.common.fl_tbuf != NULL)
vlen_bufsize.common.fl_tbuf = H5FL_BLK_FREE(vlen_fl_buf, vlen_bufsize.common.fl_tbuf);
- if(vlen_bufsize.common.vl_tbuf != NULL)
+ if (vlen_bufsize.common.vl_tbuf != NULL)
vlen_bufsize.common.vl_tbuf = H5FL_BLK_FREE(vlen_vl_buf, vlen_bufsize.common.vl_tbuf);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size() */
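[Editor's sketch] This routine is the native implementation behind the public H5Dvlen_get_buf_size() call. A minimal sketch of how an application sizes variable-length data before reading it (IDs assumed valid, error checking omitted):

#include <stdio.h>
#include "hdf5.h"

/* Sketch: ask how much memory the VL data in a selection will occupy when
 * read; H5Dread() (or the DXPL's VL memory manager) still allocates the VL
 * elements themselves. */
static void report_vlen_size(hid_t dset_id, hid_t memtype_id, hid_t space_id)
{
    hsize_t nbytes = 0;

    if (H5Dvlen_get_buf_size(dset_id, memtype_id, space_id, &nbytes) >= 0)
        printf("VL data in selection needs %llu bytes\n", (unsigned long long)nbytes);
}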
-
/*-------------------------------------------------------------------------
* Function: H5D__vlen_get_buf_size_gen_cb
*
@@ -2815,12 +2804,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__vlen_get_buf_size_gen_cb(void H5_ATTR_UNUSED *elem, hid_t type_id,
- unsigned H5_ATTR_UNUSED ndim, const hsize_t *point, void *op_data)
+H5D__vlen_get_buf_size_gen_cb(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t *point, void *op_data)
{
H5D_vlen_bufsize_generic_t *vlen_bufsize = (H5D_vlen_bufsize_generic_t *)op_data;
- H5T_t *dt; /* Datatype for operation */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5T_t * dt; /* Datatype for operation */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2829,26 +2818,28 @@ H5D__vlen_get_buf_size_gen_cb(void H5_ATTR_UNUSED *elem, hid_t type_id,
HDassert(op_data);
/* Check args */
- if(NULL == (dt = (H5T_t *)H5I_object(type_id)))
+ if (NULL == (dt = (H5T_t *)H5I_object(type_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a datatype")
/* Make certain there is enough fixed-length buffer available */
- if(NULL == (vlen_bufsize->common.fl_tbuf = H5FL_BLK_REALLOC(vlen_fl_buf, vlen_bufsize->common.fl_tbuf, H5T_get_size(dt))))
+ if (NULL == (vlen_bufsize->common.fl_tbuf =
+ H5FL_BLK_REALLOC(vlen_fl_buf, vlen_bufsize->common.fl_tbuf, H5T_get_size(dt))))
HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, FAIL, "can't resize tbuf")
/* Select point to read in */
- if(H5S_select_elements(vlen_bufsize->fspace, H5S_SELECT_SET, (size_t)1, point) < 0)
+ if (H5S_select_elements(vlen_bufsize->fspace, H5S_SELECT_SET, (size_t)1, point) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't select point")
/* Read in the point (with the custom VL memory allocator) */
- if(H5VL_dataset_read(vlen_bufsize->dset_vol_obj, type_id, vlen_bufsize->mspace_id, vlen_bufsize->fspace_id, vlen_bufsize->dxpl_id, vlen_bufsize->common.fl_tbuf, H5_REQUEST_NULL) < 0)
+ if (H5VL_dataset_read(vlen_bufsize->dset_vol_obj, type_id, vlen_bufsize->mspace_id,
+ vlen_bufsize->fspace_id, vlen_bufsize->dxpl_id, vlen_bufsize->common.fl_tbuf,
+ H5_REQUEST_NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read point")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size_gen_cb() */
-
/*-------------------------------------------------------------------------
* Function: H5D__vlen_get_buf_size_gen
*
@@ -2863,105 +2854,105 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D__vlen_get_buf_size_gen(H5VL_object_t *vol_obj, hid_t type_id, hid_t space_id,
- hsize_t *size)
+H5D__vlen_get_buf_size_gen(H5VL_object_t *vol_obj, hid_t type_id, hid_t space_id, hsize_t *size)
{
- H5D_vlen_bufsize_generic_t vlen_bufsize = {NULL, H5I_INVALID_HID, NULL, H5I_INVALID_HID, H5I_INVALID_HID, {NULL, NULL, 0, 0}};
- H5P_genplist_t *dxpl = NULL; /* DXPL for operation */
- H5S_t *mspace = NULL; /* Memory dataspace */
- char bogus; /* Bogus value to pass to H5Diterate() */
- H5S_t *space; /* Dataspace for iteration */
- H5T_t *type; /* Datatype */
- H5S_sel_iter_op_t dset_op; /* Operator for iteration */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_vlen_bufsize_generic_t vlen_bufsize = {
+ NULL, H5I_INVALID_HID, NULL, H5I_INVALID_HID, H5I_INVALID_HID, {NULL, NULL, 0, 0}};
+ H5P_genplist_t * dxpl = NULL; /* DXPL for operation */
+ H5S_t * mspace = NULL; /* Memory dataspace */
+ char bogus; /* Bogus value to pass to H5Diterate() */
+ H5S_t * space; /* Dataspace for iteration */
+ H5T_t * type; /* Datatype */
+ H5S_sel_iter_op_t dset_op; /* Operator for iteration */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Check args */
- if(NULL == (type = (H5T_t *)H5I_object(type_id)))
+ if (NULL == (type = (H5T_t *)H5I_object(type_id)))
        HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a valid datatype")
- if(NULL == (space = (H5S_t *)H5I_object(space_id)))
+ if (NULL == (space = (H5S_t *)H5I_object(space_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "invalid dataspace")
- if(!(H5S_has_extent(space)))
+ if (!(H5S_has_extent(space)))
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dataspace does not have extent set")
/* Save the dataset */
vlen_bufsize.dset_vol_obj = vol_obj;
/* Get a copy of the dataset's dataspace */
- if(H5VL_dataset_get(vol_obj, H5VL_DATASET_GET_SPACE, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL, &vlen_bufsize.fspace_id) < 0)
+ if (H5VL_dataset_get(vol_obj, H5VL_DATASET_GET_SPACE, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL,
+ &vlen_bufsize.fspace_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace")
- if(NULL == (vlen_bufsize.fspace = (H5S_t *)H5I_object(vlen_bufsize.fspace_id)))
+ if (NULL == (vlen_bufsize.fspace = (H5S_t *)H5I_object(vlen_bufsize.fspace_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a dataspace")
/* Create a scalar for the memory dataspace */
- if(NULL == (mspace = H5S_create(H5S_SCALAR)))
+ if (NULL == (mspace = H5S_create(H5S_SCALAR)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create dataspace")
- if((vlen_bufsize.mspace_id = H5I_register(H5I_DATASPACE, mspace, TRUE)) < 0)
+ if ((vlen_bufsize.mspace_id = H5I_register(H5I_DATASPACE, mspace, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID")
/* Grab the temporary buffers required */
- if(NULL == (vlen_bufsize.common.fl_tbuf = H5FL_BLK_MALLOC(vlen_fl_buf, H5T_get_size(type))))
+ if (NULL == (vlen_bufsize.common.fl_tbuf = H5FL_BLK_MALLOC(vlen_fl_buf, H5T_get_size(type))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "no temporary buffers available")
- if(NULL == (vlen_bufsize.common.vl_tbuf = H5FL_BLK_MALLOC(vlen_vl_buf, (size_t)1)))
+ if (NULL == (vlen_bufsize.common.vl_tbuf = H5FL_BLK_MALLOC(vlen_vl_buf, (size_t)1)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "no temporary buffers available")
vlen_bufsize.common.vl_tbuf_size = 1;
/* Set the VL allocation callbacks on a DXPL */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(H5P_DATASET_XFER_DEFAULT)))
+ if (NULL == (dxpl = (H5P_genplist_t *)H5I_object(H5P_DATASET_XFER_DEFAULT)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get default DXPL")
- if((vlen_bufsize.dxpl_id = H5P_copy_plist(dxpl, TRUE)) < 0)
+ if ((vlen_bufsize.dxpl_id = H5P_copy_plist(dxpl, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy property list");
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(vlen_bufsize.dxpl_id)))
+ if (NULL == (dxpl = (H5P_genplist_t *)H5I_object(vlen_bufsize.dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get copied DXPL")
- if(H5P_set_vlen_mem_manager(dxpl, H5D__vlen_get_buf_size_alloc, &vlen_bufsize.common, NULL, NULL) < 0)
+ if (H5P_set_vlen_mem_manager(dxpl, H5D__vlen_get_buf_size_alloc, &vlen_bufsize.common, NULL, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set VL data allocation routine on DXPL")
/* Set the initial number of bytes required */
vlen_bufsize.common.size = 0;
/* Call H5S_select_iterate with args, etc. */
- dset_op.op_type = H5S_SEL_ITER_OP_APP;
- dset_op.u.app_op.op = H5D__vlen_get_buf_size_gen_cb;
- dset_op.u.app_op.type_id = type_id;
+ dset_op.op_type = H5S_SEL_ITER_OP_APP;
+ dset_op.u.app_op.op = H5D__vlen_get_buf_size_gen_cb;
+ dset_op.u.app_op.type_id = type_id;
ret_value = H5S_select_iterate(&bogus, type, space, &dset_op, &vlen_bufsize);
/* Get the size if we succeeded */
- if(ret_value >= 0)
+ if (ret_value >= 0)
*size = vlen_bufsize.common.size;
done:
- if(vlen_bufsize.fspace_id >= 0) {
- if(H5I_dec_app_ref(vlen_bufsize.fspace_id) < 0)
+ if (vlen_bufsize.fspace_id >= 0) {
+ if (H5I_dec_app_ref(vlen_bufsize.fspace_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "problem freeing id")
vlen_bufsize.fspace = NULL;
} /* end if */
- if(vlen_bufsize.fspace && H5S_close(vlen_bufsize.fspace) < 0)
+ if (vlen_bufsize.fspace && H5S_close(vlen_bufsize.fspace) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
- if(vlen_bufsize.mspace_id >= 0) {
- if(H5I_dec_app_ref(vlen_bufsize.mspace_id) < 0)
+ if (vlen_bufsize.mspace_id >= 0) {
+ if (H5I_dec_app_ref(vlen_bufsize.mspace_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "problem freeing id")
mspace = NULL;
} /* end if */
- if(mspace && H5S_close(mspace) < 0)
+ if (mspace && H5S_close(mspace) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to release dataspace")
- if(vlen_bufsize.common.fl_tbuf != NULL)
+ if (vlen_bufsize.common.fl_tbuf != NULL)
vlen_bufsize.common.fl_tbuf = H5FL_BLK_FREE(vlen_fl_buf, vlen_bufsize.common.fl_tbuf);
- if(vlen_bufsize.common.vl_tbuf != NULL)
+ if (vlen_bufsize.common.vl_tbuf != NULL)
vlen_bufsize.common.vl_tbuf = H5FL_BLK_FREE(vlen_vl_buf, vlen_bufsize.common.vl_tbuf);
- if(vlen_bufsize.dxpl_id != H5I_INVALID_HID) {
- if(H5I_dec_app_ref(vlen_bufsize.dxpl_id) < 0)
+ if (vlen_bufsize.dxpl_id != H5I_INVALID_HID) {
+ if (H5I_dec_app_ref(vlen_bufsize.dxpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't close property list")
dxpl = NULL;
} /* end if */
- if(dxpl && H5P_close(dxpl) < 0)
+ if (dxpl && H5P_close(dxpl) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to release DXPL")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__vlen_get_buf_size_gen() */
-
/*-------------------------------------------------------------------------
* Function: H5D__check_filters
*
@@ -2973,8 +2964,8 @@ done:
static herr_t
H5D__check_filters(H5D_t *dataset)
{
- H5O_fill_t *fill; /* Dataset's fill value */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_fill_t *fill; /* Dataset's fill value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2987,31 +2978,30 @@ H5D__check_filters(H5D_t *dataset)
* that requires writing on an extend.
*/
fill = &dataset->shared->dcpl_cache.fill;
- if(!dataset->shared->checked_filters) {
- H5D_fill_value_t fill_status; /* Whether the fill value is defined */
+ if (!dataset->shared->checked_filters) {
+ H5D_fill_value_t fill_status; /* Whether the fill value is defined */
/* Retrieve the "defined" status of the fill value */
- if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
+ if (H5P_is_fill_value_defined(fill, &fill_status) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Couldn't retrieve fill value from dataset.")
/* See if we can check the filter status */
- if(fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
- if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
- (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
+ if (fill_status == H5D_FILL_VALUE_DEFAULT || fill_status == H5D_FILL_VALUE_USER_DEFINED) {
+ if (fill->fill_time == H5D_FILL_TIME_ALLOC ||
+ (fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
/* Filters must have encoding enabled. Ensure that all filters can be applied */
- if(H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) < 0)
+ if (H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters")
dataset->shared->checked_filters = TRUE;
} /* end if */
- } /* end if */
- } /* end if */
+ } /* end if */
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__check_filters() */
-
/*-------------------------------------------------------------------------
* Function: H5D__set_extent
*
@@ -3024,11 +3014,11 @@ done:
herr_t
H5D__set_extent(H5D_t *dset, const hsize_t *size)
{
- hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */
- htri_t changed; /* Whether the dataspace changed size */
- size_t u, v; /* Local index variable */
- unsigned dim_idx; /* Dimension index */
- herr_t ret_value = SUCCEED; /* Return value */
+ hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */
+ htri_t changed; /* Whether the dataspace changed size */
+ size_t u, v; /* Local index variable */
+ unsigned dim_idx; /* Dimension index */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
@@ -3037,17 +3027,18 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size)
HDassert(size);
/* Check if we are allowed to modify this file */
- if(0 == (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR))
+ if (0 == (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR))
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "no write intent on file")
- /* Check if we are allowed to modify the space; only datasets with chunked and external storage are allowed to be modified */
- if(H5D_COMPACT == dset->shared->layout.type)
+ /* Check if we are allowed to modify the space; only datasets with chunked and external storage are
+ * allowed to be modified */
+ if (H5D_COMPACT == dset->shared->layout.type)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "dataset has compact storage")
- if(H5D_CONTIGUOUS == dset->shared->layout.type && 0 == dset->shared->dcpl_cache.efl.nused)
+ if (H5D_CONTIGUOUS == dset->shared->layout.type && 0 == dset->shared->dcpl_cache.efl.nused)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "dataset has contiguous storage")
/* Check if the filters in the DCPL will need to encode, and if so, can they? */
- if(H5D__check_filters(dset) < 0)
+ if (H5D__check_filters(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't apply filters")
/* Keep the current dataspace dimensions for later */
@@ -3055,61 +3046,63 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size)
H5MM_memcpy(curr_dims, dset->shared->curr_dims, H5S_MAX_RANK * sizeof(curr_dims[0]));
/* Modify the size of the dataspace */
- if((changed = H5S_set_extent(dset->shared->space, size)) < 0)
+ if ((changed = H5S_set_extent(dset->shared->space, size)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of dataspace")
/* Don't bother updating things, unless they've changed */
- if(changed) {
-        hbool_t shrink = FALSE; /* Flag to indicate a dimension has shrunk */
- hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
- hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */
+ if (changed) {
+        hbool_t shrink        = FALSE; /* Flag to indicate a dimension has shrunk */
+ hbool_t expand = FALSE; /* Flag to indicate a dimension has grown */
+ hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */
/* Determine if we are shrinking and/or expanding any dimensions */
- for(dim_idx = 0; dim_idx < dset->shared->ndims; dim_idx++) {
+ for (dim_idx = 0; dim_idx < dset->shared->ndims; dim_idx++) {
/* Check for various status changes */
- if(size[dim_idx] < curr_dims[dim_idx])
+ if (size[dim_idx] < curr_dims[dim_idx])
shrink = TRUE;
- if(size[dim_idx] > curr_dims[dim_idx])
+ if (size[dim_idx] > curr_dims[dim_idx])
expand = TRUE;
/* Chunked storage specific checks */
- if(H5D_CHUNKED == dset->shared->layout.type && dset->shared->ndims > 1) {
- hsize_t scaled; /* Scaled value */
+ if (H5D_CHUNKED == dset->shared->layout.type && dset->shared->ndims > 1) {
+ hsize_t scaled; /* Scaled value */
/* Compute the scaled dimension size value */
- if(dset->shared->layout.u.chunk.dim[dim_idx] == 0)
+ if (dset->shared->layout.u.chunk.dim[dim_idx] == 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", dim_idx)
scaled = size[dim_idx] / dset->shared->layout.u.chunk.dim[dim_idx];
/* Check if scaled dimension size changed */
- if(scaled != dset->shared->cache.chunk.scaled_dims[dim_idx]) {
- hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
+ if (scaled != dset->shared->cache.chunk.scaled_dims[dim_idx]) {
+ hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
/* Update the scaled dimension size value for the current dimension */
dset->shared->cache.chunk.scaled_dims[dim_idx] = scaled;
/* Check if algorithm for computing hash values will change */
- if((scaled > dset->shared->cache.chunk.nslots &&
- dset->shared->cache.chunk.scaled_dims[dim_idx] <= dset->shared->cache.chunk.nslots)
- || (scaled <= dset->shared->cache.chunk.nslots &&
- dset->shared->cache.chunk.scaled_dims[dim_idx] > dset->shared->cache.chunk.nslots))
+ if ((scaled > dset->shared->cache.chunk.nslots &&
+ dset->shared->cache.chunk.scaled_dims[dim_idx] <=
+ dset->shared->cache.chunk.nslots) ||
+ (scaled <= dset->shared->cache.chunk.nslots &&
+ dset->shared->cache.chunk.scaled_dims[dim_idx] > dset->shared->cache.chunk.nslots))
update_chunks = TRUE;
- if(!(scaled_power2up = H5VM_power2up(scaled)))
+ if (!(scaled_power2up = H5VM_power2up(scaled)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
/* Check if the number of bits required to encode the scaled size value changed */
- if(dset->shared->cache.chunk.scaled_power2up[dim_idx] != scaled_power2up) {
+ if (dset->shared->cache.chunk.scaled_power2up[dim_idx] != scaled_power2up) {
/* Update the 'power2up' & 'encode_bits' values for the current dimension */
dset->shared->cache.chunk.scaled_power2up[dim_idx] = scaled_power2up;
- dset->shared->cache.chunk.scaled_encode_bits[dim_idx] = H5VM_log2_gen(scaled_power2up);
+ dset->shared->cache.chunk.scaled_encode_bits[dim_idx] =
+ H5VM_log2_gen(scaled_power2up);
/* Indicate that the cached chunk indices need to be updated */
update_chunks = TRUE;
} /* end if */
- } /* end if */
- } /* end if */
+ } /* end if */
+ } /* end if */
/* Update the cached copy of the dataset's dimensions */
dset->shared->curr_dims[dim_idx] = size[dim_idx];
@@ -3120,34 +3113,38 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size)
*-------------------------------------------------------------------------
*/
/* Update the index values for the cached chunks for this dataset */
- if(H5D_CHUNKED == dset->shared->layout.type) {
+ if (H5D_CHUNKED == dset->shared->layout.type) {
/* Set the cached chunk info */
- if(H5D__chunk_set_info(dset) < 0)
+ if (H5D__chunk_set_info(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to update # of chunks")
/* Check if updating the chunk cache indices is necessary */
- if(update_chunks)
+ if (update_chunks)
/* Update the chunk cache indices */
- if(H5D__chunk_update_cache(dset) < 0)
+ if (H5D__chunk_update_cache(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices")
} /* end if */
/* Operations for virtual datasets */
- if(H5D_VIRTUAL == dset->shared->layout.type) {
+ if (H5D_VIRTUAL == dset->shared->layout.type) {
/* Check that the dimensions of the VDS are large enough */
- if(H5D_virtual_check_min_dims(dset) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "virtual dataset dimensions not large enough to contain all limited dimensions in all selections")
+ if (H5D_virtual_check_min_dims(dset) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "virtual dataset dimensions not large enough to contain all limited dimensions "
+ "in all selections")
/* Patch the virtual selection dataspaces */
- for(u = 0; u < dset->shared->layout.storage.u.virt.list_nused; u++) {
+ for (u = 0; u < dset->shared->layout.storage.u.virt.list_nused; u++) {
/* Patch extent */
- if(H5S_set_extent(dset->shared->layout.storage.u.virt.list[u].source_dset.virtual_select, size) < 0)
+ if (H5S_set_extent(dset->shared->layout.storage.u.virt.list[u].source_dset.virtual_select,
+ size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of dataspace")
dset->shared->layout.storage.u.virt.list[u].virtual_space_status = H5O_VIRTUAL_STATUS_CORRECT;
/* Patch sub-source datasets */
- for(v = 0; v < dset->shared->layout.storage.u.virt.list[u].sub_dset_nused; v++)
- if(H5S_set_extent(dset->shared->layout.storage.u.virt.list[u].sub_dset[v].virtual_select, size) < 0)
+ for (v = 0; v < dset->shared->layout.storage.u.virt.list[u].sub_dset_nused; v++)
+ if (H5S_set_extent(dset->shared->layout.storage.u.virt.list[u].sub_dset[v].virtual_select,
+ size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of dataspace")
} /* end for */
@@ -3157,12 +3154,12 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size)
} /* end if */
/* Allocate space for the new parts of the dataset, if appropriate */
- if(expand && dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY) {
+ if (expand && dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_EARLY) {
H5D_io_info_t io_info;
io_info.dset = dset;
- if(H5D__alloc_storage(&io_info, H5D_ALLOC_EXTEND, FALSE, curr_dims) < 0)
+ if (H5D__alloc_storage(&io_info, H5D_ALLOC_EXTEND, FALSE, curr_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to extend dataset storage")
}
/*-------------------------------------------------------------------------
@@ -3171,23 +3168,25 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size)
* and if the chunks are written
*-------------------------------------------------------------------------
*/
- if(H5D_CHUNKED == dset->shared->layout.type) {
- if(shrink && ((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)
- || (dset->shared->layout.ops->is_data_cached && (*dset->shared->layout.ops->is_data_cached)(dset->shared))))
+ if (H5D_CHUNKED == dset->shared->layout.type) {
+ if (shrink && ((*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage) ||
+ (dset->shared->layout.ops->is_data_cached &&
+ (*dset->shared->layout.ops->is_data_cached)(dset->shared))))
/* Remove excess chunks */
- if(H5D__chunk_prune_by_extent(dset, curr_dims) < 0)
+ if (H5D__chunk_prune_by_extent(dset, curr_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
/* Update chunks that are no longer edge chunks as a result of
* expansion */
- if(expand && (dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
- && (dset->shared->dcpl_cache.pline.nused > 0))
- if(H5D__chunk_update_old_edge_chunks(dset, curr_dims) < 0)
+ if (expand &&
+ (dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
+ (dset->shared->dcpl_cache.pline.nused > 0))
+ if (H5D__chunk_update_old_edge_chunks(dset, curr_dims) < 0)
                    HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update old edge chunks")
} /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
- if(H5D__mark(dset, H5D_MARK_SPACE) < 0)
+ if (H5D__mark(dset, H5D_MARK_SPACE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to mark dataspace as dirty")
} /* end if */
@@ -3195,7 +3194,6 @@ done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__set_extent() */
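[Editor's sketch] H5D__set_extent() is reached through the public H5Dset_extent(). A minimal sketch of growing a 2-D chunked dataset (assumes it was created with unlimited, or large enough, maximum dimensions; error checking omitted):

#include "hdf5.h"

/* Sketch: extend the slowest-varying dimension of a chunked dataset by ten
 * rows, exercising the "expand" path above (and early allocation, if the
 * DCPL requested it). */
static herr_t append_ten_rows(hid_t dset_id)
{
    hid_t   space = H5Dget_space(dset_id);
    hsize_t dims[2];

    H5Sget_simple_extent_dims(space, dims, NULL); /* current dimensions */
    H5Sclose(space);

    dims[0] += 10; /* new extent */
    return H5Dset_extent(dset_id, dims);
}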
-
/*-------------------------------------------------------------------------
* Function: H5D__flush_sieve_buf
*
@@ -3208,7 +3206,7 @@ done:
herr_t
H5D__flush_sieve_buf(H5D_t *dataset)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -3216,11 +3214,13 @@ H5D__flush_sieve_buf(H5D_t *dataset)
HDassert(dataset);
/* Flush the raw data buffer, if we have a dirty one */
- if(dataset->shared->cache.contig.sieve_buf && dataset->shared->cache.contig.sieve_dirty) {
- HDassert(dataset->shared->layout.type != H5D_COMPACT); /* We should never have a sieve buffer for compact storage */
+ if (dataset->shared->cache.contig.sieve_buf && dataset->shared->cache.contig.sieve_dirty) {
+ HDassert(dataset->shared->layout.type !=
+ H5D_COMPACT); /* We should never have a sieve buffer for compact storage */
/* Write dirty data sieve buffer to file */
- if(H5F_shared_block_write(H5F_SHARED(dataset->oloc.file), H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc,
+ if (H5F_shared_block_write(
+ H5F_SHARED(dataset->oloc.file), H5FD_MEM_DRAW, dataset->shared->cache.contig.sieve_loc,
dataset->shared->cache.contig.sieve_size, dataset->shared->cache.contig.sieve_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed")
@@ -3232,7 +3232,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__flush_sieve_buf() */
-
/*-------------------------------------------------------------------------
* Function: H5D__flush_real
*
@@ -3245,7 +3244,7 @@ done:
herr_t
H5D__flush_real(H5D_t *dataset)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dataset->oloc.addr)
@@ -3254,16 +3253,15 @@ H5D__flush_real(H5D_t *dataset)
HDassert(dataset->shared);
/* Avoid flushing the dataset (again) if it's closing */
- if(!dataset->shared->closing)
+ if (!dataset->shared->closing)
/* Flush cached raw data for each kind of dataset layout */
- if(dataset->shared->layout.ops->flush && (dataset->shared->layout.ops->flush)(dataset) < 0)
+ if (dataset->shared->layout.ops->flush && (dataset->shared->layout.ops->flush)(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush raw data")
done:
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__flush_real() */
-
/*-------------------------------------------------------------------------
* Function: H5D__flush
*
@@ -3276,7 +3274,7 @@ done:
herr_t
H5D__flush(H5D_t *dset, hid_t dset_id)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -3285,18 +3283,17 @@ H5D__flush(H5D_t *dset, hid_t dset_id)
HDassert(dset->shared);
/* Flush any dataset information still cached in memory */
- if(H5D__flush_real(dset) < 0)
+ if (H5D__flush_real(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush cached dataset info")
/* Flush object's metadata to file */
- if(H5O_flush_common(&dset->oloc, dset_id) < 0)
+ if (H5O_flush_common(&dset->oloc, dset_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush dataset and object flush callback")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__flush() */
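[Editor's sketch] The public entry point for this path is H5Dflush(). A minimal sketch (dset_id assumed to be an open dataset in a writable file):

#include "hdf5.h"

/* Sketch: push a dataset's cached raw data and metadata out to the file
 * without closing the dataset. */
static herr_t checkpoint_dataset(hid_t dset_id)
{
    return H5Dflush(dset_id);
}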
-
/*-------------------------------------------------------------------------
* Function: H5D__format_convert
*
@@ -3310,81 +3307,83 @@ done:
herr_t
H5D__format_convert(H5D_t *dataset)
{
- H5D_chk_idx_info_t new_idx_info; /* Index info for the new layout */
- H5D_chk_idx_info_t idx_info; /* Index info for the current layout */
- H5O_layout_t *newlayout = NULL; /* The new layout */
- hbool_t init_new_index = FALSE; /* Indicate that the new chunk index is initialized */
- hbool_t delete_old_layout = FALSE; /* Indicate that the old layout message is deleted */
- hbool_t add_new_layout = FALSE; /* Indicate that the new layout message is added */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_chk_idx_info_t new_idx_info; /* Index info for the new layout */
+ H5D_chk_idx_info_t idx_info; /* Index info for the current layout */
+ H5O_layout_t * newlayout = NULL; /* The new layout */
+ hbool_t init_new_index = FALSE; /* Indicate that the new chunk index is initialized */
+ hbool_t delete_old_layout = FALSE; /* Indicate that the old layout message is deleted */
+ hbool_t add_new_layout = FALSE; /* Indicate that the new layout message is added */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dataset->oloc.addr)
/* Check args */
HDassert(dataset);
- switch(dataset->shared->layout.type) {
+ switch (dataset->shared->layout.type) {
case H5D_CHUNKED:
HDassert(dataset->shared->layout.u.chunk.idx_type != H5D_CHUNK_IDX_BTREE);
- if(NULL == (newlayout = (H5O_layout_t *)H5MM_calloc(sizeof(H5O_layout_t))))
+ if (NULL == (newlayout = (H5O_layout_t *)H5MM_calloc(sizeof(H5O_layout_t))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate buffer")
/* Set up the current index info */
- idx_info.f = dataset->oloc.file;
- idx_info.pline = &dataset->shared->dcpl_cache.pline;
- idx_info.layout = &dataset->shared->layout.u.chunk;
+ idx_info.f = dataset->oloc.file;
+ idx_info.pline = &dataset->shared->dcpl_cache.pline;
+ idx_info.layout = &dataset->shared->layout.u.chunk;
idx_info.storage = &dataset->shared->layout.storage.u.chunk;
/* Copy the current layout info to the new layout */
H5MM_memcpy(newlayout, &dataset->shared->layout, sizeof(H5O_layout_t));
/* Set up info for version 1 B-tree in the new layout */
- newlayout->version = H5O_LAYOUT_VERSION_3;
- newlayout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
- newlayout->storage.u.chunk.idx_addr = HADDR_UNDEF;
- newlayout->storage.u.chunk.ops = H5D_COPS_BTREE;
+ newlayout->version = H5O_LAYOUT_VERSION_3;
+ newlayout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
+ newlayout->storage.u.chunk.idx_addr = HADDR_UNDEF;
+ newlayout->storage.u.chunk.ops = H5D_COPS_BTREE;
newlayout->storage.u.chunk.u.btree.shared = NULL;
/* Set up the index info to version 1 B-tree */
- new_idx_info.f = dataset->oloc.file;
- new_idx_info.pline = &dataset->shared->dcpl_cache.pline;
- new_idx_info.layout = &(newlayout->u).chunk;
+ new_idx_info.f = dataset->oloc.file;
+ new_idx_info.pline = &dataset->shared->dcpl_cache.pline;
+ new_idx_info.layout = &(newlayout->u).chunk;
new_idx_info.storage = &(newlayout->storage).u.chunk;
/* Initialize version 1 B-tree */
- if(new_idx_info.storage->ops->init && (new_idx_info.storage->ops->init)(&new_idx_info, dataset->shared->space, dataset->oloc.addr) < 0)
+ if (new_idx_info.storage->ops->init &&
+ (new_idx_info.storage->ops->init)(&new_idx_info, dataset->shared->space, dataset->oloc.addr) <
+ 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize indexing information")
init_new_index = TRUE;
/* If the current chunk index exists */
- if(H5F_addr_defined(idx_info.storage->idx_addr)) {
+ if (H5F_addr_defined(idx_info.storage->idx_addr)) {
/* Create v1 B-tree chunk index */
- if((new_idx_info.storage->ops->create)(&new_idx_info) < 0)
+ if ((new_idx_info.storage->ops->create)(&new_idx_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create chunk index")
/* Iterate over the chunks in the current index and insert the chunk addresses
* into the version 1 B-tree chunk index
*/
- if(H5D__chunk_format_convert(dataset, &idx_info, &new_idx_info) < 0)
+ if (H5D__chunk_format_convert(dataset, &idx_info, &new_idx_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate/convert chunk index")
} /* end if */
/* Delete the old "current" layout message */
- if(H5O_msg_remove(&dataset->oloc, H5O_LAYOUT_ID, H5O_ALL, FALSE) < 0)
+ if (H5O_msg_remove(&dataset->oloc, H5O_LAYOUT_ID, H5O_ALL, FALSE) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete layout message")
delete_old_layout = TRUE;
/* Append the new layout message to the object header */
- if(H5O_msg_create(&dataset->oloc, H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, newlayout) < 0)
+ if (H5O_msg_create(&dataset->oloc, H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, newlayout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout header message")
add_new_layout = TRUE;
/* Release the old (current) chunk index */
- if(idx_info.storage->ops->dest && (idx_info.storage->ops->dest)(&idx_info) < 0)
+ if (idx_info.storage->ops->dest && (idx_info.storage->ops->dest)(&idx_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info")
/* Copy the new layout to the dataset's layout */
@@ -3396,7 +3395,8 @@ H5D__format_convert(H5D_t *dataset)
case H5D_COMPACT:
HDassert(dataset->shared->layout.version > H5O_LAYOUT_VERSION_DEFAULT);
dataset->shared->layout.version = H5O_LAYOUT_VERSION_DEFAULT;
- if(H5O_msg_write(&(dataset->oloc), H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &(dataset->shared->layout)) < 0)
+ if (H5O_msg_write(&(dataset->oloc), H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME,
+ &(dataset->shared->layout)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message")
break;
@@ -3412,42 +3412,43 @@ H5D__format_convert(H5D_t *dataset)
} /* end switch */
done:
- if(ret_value < 0 && dataset->shared->layout.type == H5D_CHUNKED) {
+ if (ret_value < 0 && dataset->shared->layout.type == H5D_CHUNKED) {
/* Remove new layout message */
- if(add_new_layout)
- if(H5O_msg_remove(&dataset->oloc, H5O_LAYOUT_ID, H5O_ALL, FALSE) < 0)
+ if (add_new_layout)
+ if (H5O_msg_remove(&dataset->oloc, H5O_LAYOUT_ID, H5O_ALL, FALSE) < 0)
HDONE_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete layout message")
/* Add back old layout message */
- if(delete_old_layout)
- if(H5O_msg_create(&dataset->oloc, H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &dataset->shared->layout) < 0)
+ if (delete_old_layout)
+ if (H5O_msg_create(&dataset->oloc, H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &dataset->shared->layout) <
+ 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to add layout header message")
/* Clean up v1 b-tree chunk index */
- if(init_new_index) {
- if(H5F_addr_defined(new_idx_info.storage->idx_addr)) {
+ if (init_new_index) {
+ if (H5F_addr_defined(new_idx_info.storage->idx_addr)) {
/* Check for valid address i.e. tag */
- if(!H5F_addr_defined(dataset->oloc.addr))
+ if (!H5F_addr_defined(dataset->oloc.addr))
HDONE_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "address undefined")
/* Expunge from cache all v1 B-tree type entries associated with tag */
- if(H5AC_expunge_tag_type_metadata(dataset->oloc.file, dataset->oloc.addr, H5AC_BT_ID, H5AC__NO_FLAGS_SET))
+ if (H5AC_expunge_tag_type_metadata(dataset->oloc.file, dataset->oloc.addr, H5AC_BT_ID,
+ H5AC__NO_FLAGS_SET))
HDONE_ERROR(H5E_DATASET, H5E_CANTEXPUNGE, FAIL, "unable to expunge index metadata")
} /* end if */
/* Delete v1 B-tree chunk index */
- if(new_idx_info.storage->ops->dest && (new_idx_info.storage->ops->dest)(&new_idx_info) < 0)
+ if (new_idx_info.storage->ops->dest && (new_idx_info.storage->ops->dest)(&new_idx_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info")
} /* end if */
- } /* end if */
+ } /* end if */
- if(newlayout != NULL)
+ if (newlayout != NULL)
newlayout = (H5O_layout_t *)H5MM_xfree(newlayout);
FUNC_LEAVE_NOAPI_TAG(ret_value)
} /* end H5D__format_convert() */
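[Editor's sketch] This conversion is exposed as the public H5Dformat_convert() (and through the h5format_convert tool): it rewrites a newer chunk index as a version-1 B-tree so HDF5 1.8 readers can open the file. A minimal sketch (dset_id assumed open for read/write):

#include "hdf5.h"

/* Sketch: convert a dataset's chunk index back to the v1 B-tree form;
 * compact and contiguous layouts just get their layout message version
 * downgraded, as in the switch above. */
static herr_t downgrade_chunk_index(hid_t dset_id)
{
    return H5Dformat_convert(dset_id);
}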
-
/*-------------------------------------------------------------------------
* Function: H5D__mark
*
@@ -3460,8 +3461,8 @@ done:
herr_t
H5D__mark(const H5D_t *dataset, unsigned flags)
{
- H5O_t *oh = NULL; /* Pointer to dataset's object header */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_t *oh = NULL; /* Pointer to dataset's object header */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -3470,16 +3471,16 @@ H5D__mark(const H5D_t *dataset, unsigned flags)
HDassert(!(flags & (unsigned)~(H5D_MARK_SPACE | H5D_MARK_LAYOUT)));
/* Mark aspects of the dataset as dirty */
- if(flags) {
- unsigned update_flags = H5O_UPDATE_TIME; /* Modification time flag */
+ if (flags) {
+ unsigned update_flags = H5O_UPDATE_TIME; /* Modification time flag */
/* Pin the object header */
- if(NULL == (oh = H5O_pin(&dataset->oloc)))
+ if (NULL == (oh = H5O_pin(&dataset->oloc)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
/* Update the layout on disk, if it's been changed */
- if(flags & H5D_MARK_LAYOUT) {
- if(H5D__layout_oh_write(dataset, oh, update_flags) < 0)
+ if (flags & H5D_MARK_LAYOUT) {
+ if (H5D__layout_oh_write(dataset, oh, update_flags) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update layout info")
/* Reset the "update the modification time" flag, so we only do it once */
@@ -3487,8 +3488,8 @@ H5D__mark(const H5D_t *dataset, unsigned flags)
} /* end if */
/* Update the dataspace on disk, if it's been changed */
- if(flags & H5D_MARK_SPACE) {
- if(H5S_write(dataset->oloc.file, oh, update_flags, dataset->shared->space) < 0)
+ if (flags & H5D_MARK_SPACE) {
+ if (H5S_write(dataset->oloc.file, oh, update_flags, dataset->shared->space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace")
/* Reset the "update the modification time" flag, so we only do it once */
@@ -3501,14 +3502,13 @@ H5D__mark(const H5D_t *dataset, unsigned flags)
done:
/* Release pointer to object header */
- if(oh != NULL)
- if(H5O_unpin(oh) < 0)
+ if (oh != NULL)
+ if (H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__mark() */
-
/*-------------------------------------------------------------------------
* Function: H5D__flush_all_cb
*
@@ -3521,9 +3521,9 @@ done:
static int
H5D__flush_all_cb(void *_dataset, hid_t H5_ATTR_UNUSED id, void *_udata)
{
- H5D_t *dataset = (H5D_t *)_dataset; /* Dataset pointer */
- H5F_t *f = (H5F_t *)_udata; /* User data for callback */
- int ret_value = H5_ITER_CONT; /* Return value */
+ H5D_t *dataset = (H5D_t *)_dataset; /* Dataset pointer */
+ H5F_t *f = (H5F_t *)_udata; /* User data for callback */
+ int ret_value = H5_ITER_CONT; /* Return value */
FUNC_ENTER_STATIC
@@ -3532,16 +3532,15 @@ H5D__flush_all_cb(void *_dataset, hid_t H5_ATTR_UNUSED id, void *_udata)
HDassert(f);
/* Check for dataset in same file */
- if(f == dataset->oloc.file)
+ if (f == dataset->oloc.file)
/* Flush the dataset's information */
- if(H5D__flush_real(dataset) < 0)
+ if (H5D__flush_real(dataset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to flush cached dataset info")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__flush_all_cb() */
-
/*-------------------------------------------------------------------------
* Function: H5D_flush_all
*
@@ -3554,7 +3553,7 @@ done:
herr_t
H5D_flush_all(H5F_t *f)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -3562,14 +3561,13 @@ H5D_flush_all(H5F_t *f)
HDassert(f);
/* Iterate over all the open datasets */
- if(H5I_iterate(H5I_DATASET, H5D__flush_all_cb, f, FALSE) < 0) /* Casting away const OK -QAK */
+ if (H5I_iterate(H5I_DATASET, H5D__flush_all_cb, f, FALSE) < 0) /* Casting away const OK -QAK */
HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to flush cached dataset info")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_flush_all() */
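[Editor's sketch] Applications normally reach H5D_flush_all() indirectly, by flushing the containing file with H5Fflush(). A minimal sketch (file_id assumed to be a writable file):

#include "hdf5.h"

/* Sketch: flush a file, which in turn flushes every dataset open in it
 * (the iteration implemented by H5D_flush_all() above). */
static herr_t checkpoint_file(hid_t file_id)
{
    return H5Fflush(file_id, H5F_SCOPE_LOCAL);
}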
-
/*-------------------------------------------------------------------------
* Function: H5D_get_create_plist
*
@@ -3584,37 +3582,37 @@ done:
hid_t
H5D_get_create_plist(const H5D_t *dset)
{
- H5P_genplist_t *dcpl_plist; /* Dataset's DCPL */
- H5P_genplist_t *new_plist; /* Copy of dataset's DCPL */
- H5O_layout_t copied_layout; /* Layout to tweak */
- H5O_fill_t copied_fill; /* Fill value to tweak */
- H5O_efl_t copied_efl; /* External file list to tweak */
- hid_t new_dcpl_id = FAIL;
- hid_t ret_value = H5I_INVALID_HID; /* Return value */
+ H5P_genplist_t *dcpl_plist; /* Dataset's DCPL */
+ H5P_genplist_t *new_plist; /* Copy of dataset's DCPL */
+ H5O_layout_t copied_layout; /* Layout to tweak */
+ H5O_fill_t copied_fill; /* Fill value to tweak */
+ H5O_efl_t copied_efl; /* External file list to tweak */
+ hid_t new_dcpl_id = FAIL;
+ hid_t ret_value = H5I_INVALID_HID; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* Check args */
- if(NULL == (dcpl_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
+ if (NULL == (dcpl_plist = (H5P_genplist_t *)H5I_object(dset->shared->dcpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "can't get property list")
/* Copy the creation property list */
- if((new_dcpl_id = H5P_copy_plist(dcpl_plist, TRUE)) < 0)
+ if ((new_dcpl_id = H5P_copy_plist(dcpl_plist, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to copy the creation property list")
- if(NULL == (new_plist = (H5P_genplist_t *)H5I_object(new_dcpl_id)))
+ if (NULL == (new_plist = (H5P_genplist_t *)H5I_object(new_dcpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "can't get property list")
/* Retrieve any object creation properties */
- if(H5O_get_create_plist(&dset->oloc, new_plist) < 0)
+ if (H5O_get_create_plist(&dset->oloc, new_plist) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get object creation info")
/* Get the layout property */
- if(H5P_peek(new_plist, H5D_CRT_LAYOUT_NAME, &copied_layout) < 0)
+ if (H5P_peek(new_plist, H5D_CRT_LAYOUT_NAME, &copied_layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get layout")
/* Reset layout values set when dataset is created */
copied_layout.ops = NULL;
- switch(copied_layout.type) {
+ switch (copied_layout.type) {
case H5D_COMPACT:
copied_layout.storage.u.compact.buf = H5MM_xfree(copied_layout.storage.u.compact.buf);
HDmemset(&copied_layout.storage.u.compact, 0, sizeof(copied_layout.storage.u.compact));
@@ -3630,10 +3628,11 @@ H5D_get_create_plist(const H5D_t *dset)
copied_layout.u.chunk.size = 0;
/* Reset index info, if the chunk ops are set */
- if(copied_layout.storage.u.chunk.ops)
+ if (copied_layout.storage.u.chunk.ops)
/* Reset address and pointer of the array struct for the chunked storage index */
- if(H5D_chunk_idx_reset(&copied_layout.storage.u.chunk, TRUE) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to reset chunked storage index in dest")
+ if (H5D_chunk_idx_reset(&copied_layout.storage.u.chunk, TRUE) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to reset chunked storage index in dest")
/* Reset chunk index ops */
copied_layout.storage.u.chunk.ops = NULL;
@@ -3641,7 +3640,7 @@ H5D_get_create_plist(const H5D_t *dset)
case H5D_VIRTUAL:
copied_layout.storage.u.virt.serial_list_hobjid.addr = HADDR_UNDEF;
- copied_layout.storage.u.virt.serial_list_hobjid.idx = 0;
+ copied_layout.storage.u.virt.serial_list_hobjid.idx = 0;
break;
case H5D_LAYOUT_ERROR:
@@ -3651,102 +3650,103 @@ H5D_get_create_plist(const H5D_t *dset)
} /* end switch */
/* Set back the (possibly modified) layout property to property list */
- if(H5P_poke(new_plist, H5D_CRT_LAYOUT_NAME, &copied_layout) < 0)
+ if (H5P_poke(new_plist, H5D_CRT_LAYOUT_NAME, &copied_layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set layout")
/* Get the fill value property */
- if(H5P_peek(new_plist, H5D_CRT_FILL_VALUE_NAME, &copied_fill) < 0)
+ if (H5P_peek(new_plist, H5D_CRT_FILL_VALUE_NAME, &copied_fill) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get fill value")
/* Check if there is a fill value, but no type yet */
- if(copied_fill.buf != NULL && copied_fill.type == NULL) {
- H5T_path_t *tpath; /* Conversion information*/
+ if (copied_fill.buf != NULL && copied_fill.type == NULL) {
+ H5T_path_t *tpath; /* Conversion information*/
/* Copy the dataset type into the fill value message */
- if(NULL == (copied_fill.type = H5T_copy(dset->shared->type, H5T_COPY_TRANSIENT)))
+ if (NULL == (copied_fill.type = H5T_copy(dset->shared->type, H5T_COPY_TRANSIENT)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to copy dataset datatype for fill value")
/* Set up type conversion function */
- if(NULL == (tpath = H5T_path_find(dset->shared->type, copied_fill.type)))
- HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types")
+ if (NULL == (tpath = H5T_path_find(dset->shared->type, copied_fill.type)))
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL,
+ "unable to convert between src and dest data types")
/* Convert disk form of fill value into memory form */
- if(!H5T_path_noop(tpath)) {
- hid_t dst_id, src_id; /* Source & destination datatypes for type conversion */
- uint8_t *bkg_buf = NULL; /* Background conversion buffer */
- size_t bkg_size; /* Size of background buffer */
+ if (!H5T_path_noop(tpath)) {
+ hid_t dst_id, src_id; /* Source & destination datatypes for type conversion */
+ uint8_t *bkg_buf = NULL; /* Background conversion buffer */
+ size_t bkg_size; /* Size of background buffer */
/* Wrap copies of types to convert */
dst_id = H5I_register(H5I_DATATYPE, H5T_copy(copied_fill.type, H5T_COPY_TRANSIENT), FALSE);
- if(dst_id < 0)
+ if (dst_id < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy/register datatype")
src_id = H5I_register(H5I_DATATYPE, H5T_copy(dset->shared->type, H5T_COPY_ALL), FALSE);
- if(src_id < 0) {
+ if (src_id < 0) {
H5I_dec_ref(dst_id);
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to copy/register datatype")
} /* end if */
/* Allocate a background buffer */
bkg_size = MAX(H5T_GET_SIZE(copied_fill.type), H5T_GET_SIZE(dset->shared->type));
- if(H5T_path_bkg(tpath) && NULL == (bkg_buf = H5FL_BLK_CALLOC(type_conv, bkg_size))) {
+ if (H5T_path_bkg(tpath) && NULL == (bkg_buf = H5FL_BLK_CALLOC(type_conv, bkg_size))) {
H5I_dec_ref(src_id);
H5I_dec_ref(dst_id);
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed")
} /* end if */
/* Convert fill value */
- if(H5T_convert(tpath, src_id, dst_id, (size_t)1, (size_t)0, (size_t)0, copied_fill.buf, bkg_buf) < 0) {
+ if (H5T_convert(tpath, src_id, dst_id, (size_t)1, (size_t)0, (size_t)0, copied_fill.buf,
+ bkg_buf) < 0) {
H5I_dec_ref(src_id);
H5I_dec_ref(dst_id);
- if(bkg_buf)
+ if (bkg_buf)
bkg_buf = H5FL_BLK_FREE(type_conv, bkg_buf);
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "datatype conversion failed")
} /* end if */
/* Release local resources */
- if(H5I_dec_ref(src_id) < 0)
+ if (H5I_dec_ref(src_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to close temporary object")
- if(H5I_dec_ref(dst_id) < 0)
+ if (H5I_dec_ref(dst_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to close temporary object")
- if(bkg_buf)
+ if (bkg_buf)
bkg_buf = H5FL_BLK_FREE(type_conv, bkg_buf);
} /* end if */
- } /* end if */
+ } /* end if */
/* Set back the (possibly modified) fill value property to property list */
- if(H5P_poke(new_plist, H5D_CRT_FILL_VALUE_NAME, &copied_fill) < 0)
+ if (H5P_poke(new_plist, H5D_CRT_FILL_VALUE_NAME, &copied_fill) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set fill value")
 /* Get the external file list property */
- if(H5P_peek(new_plist, H5D_CRT_EXT_FILE_LIST_NAME, &copied_efl) < 0)
+ if (H5P_peek(new_plist, H5D_CRT_EXT_FILE_LIST_NAME, &copied_efl) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get external file list")
 /* Reset efl name_offset and heap_addr to the values they hold when the dataset is created */
- if(copied_efl.slot) {
+ if (copied_efl.slot) {
unsigned u;
copied_efl.heap_addr = HADDR_UNDEF;
- for(u = 0; u < copied_efl.nused; u++)
+ for (u = 0; u < copied_efl.nused; u++)
copied_efl.slot[u].name_offset = 0;
} /* end if */
/* Set back the (possibly modified) external file list property to property list */
- if(H5P_poke(new_plist, H5D_CRT_EXT_FILE_LIST_NAME, &copied_efl) < 0)
+ if (H5P_poke(new_plist, H5D_CRT_EXT_FILE_LIST_NAME, &copied_efl) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set external file list")
/* Set the return value */
ret_value = new_dcpl_id;
done:
- if(ret_value < 0)
- if(new_dcpl_id > 0)
- if(H5I_dec_app_ref(new_dcpl_id) < 0)
+ if (ret_value < 0)
+ if (new_dcpl_id > 0)
+ if (H5I_dec_app_ref(new_dcpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "unable to close temporary object")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_get_create_plist() */
-
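For context, H5D_get_create_plist() is the internal routine behind the public H5Dget_create_plist() call, and the H5T_convert() path above is what lets a caller read the stored fill value back in a memory type of its choosing. A minimal caller-side sketch, assuming an already-open dataset dset_id with a double fill value (the helper name and the native-double type choice are illustrative, not taken from the patch):

#include <stdio.h>
#include "hdf5.h"

/* Retrieve the dataset's DCPL and read the fill value in native double form */
static herr_t
print_fill_value(hid_t dset_id)
{
    hid_t  dcpl_id = H5I_INVALID_HID;
    double fill    = 0.0;
    herr_t status  = -1;

    /* Copy of the dataset creation property list; the caller must close it */
    if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0)
        goto done;

    /* The library converts the stored fill value into the requested
     * in-memory datatype, mirroring the conversion path shown above */
    if (H5Pget_fill_value(dcpl_id, H5T_NATIVE_DOUBLE, &fill) < 0)
        goto done;

    printf("fill value = %g\n", fill);
    status = 0;

done:
    if (dcpl_id >= 0)
        H5Pclose(dcpl_id);
    return status;
}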
/*-------------------------------------------------------------------------
* Function: H5D_get_access_plist
*
@@ -3760,90 +3760,92 @@ done:
hid_t
H5D_get_access_plist(const H5D_t *dset)
{
- H5P_genplist_t *old_plist; /* Stored DAPL from dset */
- H5P_genplist_t *new_plist; /* New DAPL */
- H5P_genplist_t *def_fapl; /* Default FAPL */
- H5D_append_flush_t def_append_flush_info = {0}; /* Default append flush property */
- H5D_rdcc_t def_chunk_info; /* Default chunk cache property */
- hid_t new_dapl_id = FAIL;
- hid_t ret_value = FAIL;
+ H5P_genplist_t * old_plist; /* Stored DAPL from dset */
+ H5P_genplist_t * new_plist; /* New DAPL */
+ H5P_genplist_t * def_fapl; /* Default FAPL */
+ H5D_append_flush_t def_append_flush_info = {0}; /* Default append flush property */
+ H5D_rdcc_t def_chunk_info; /* Default chunk cache property */
+ hid_t new_dapl_id = FAIL;
+ hid_t ret_value = FAIL;
FUNC_ENTER_NOAPI_NOINIT
/* Make a copy of the dataset's dataset access property list */
- if(NULL == (old_plist = (H5P_genplist_t *)H5I_object(dset->shared->dapl_id)))
+ if (NULL == (old_plist = (H5P_genplist_t *)H5I_object(dset->shared->dapl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "can't get property list")
- if((new_dapl_id = H5P_copy_plist(old_plist, TRUE)) < 0)
+ if ((new_dapl_id = H5P_copy_plist(old_plist, TRUE)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't copy dataset access property list")
- if(NULL == (new_plist = (H5P_genplist_t *)H5I_object(new_dapl_id)))
+ if (NULL == (new_plist = (H5P_genplist_t *)H5I_object(new_dapl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
/* If the dataset is chunked then copy the rdcc & append flush parameters.
* Otherwise, use the default values. */
- if(dset->shared->layout.type == H5D_CHUNKED) {
- if(H5P_set(new_plist, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &(dset->shared->cache.chunk.nslots)) < 0)
+ if (dset->shared->layout.type == H5D_CHUNKED) {
+ if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &(dset->shared->cache.chunk.nslots)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set data cache number of slots")
- if(H5P_set(new_plist, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &(dset->shared->cache.chunk.nbytes_max)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &(dset->shared->cache.chunk.nbytes_max)) <
+ 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set data cache byte size")
- if(H5P_set(new_plist, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &(dset->shared->cache.chunk.w0)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &(dset->shared->cache.chunk.w0)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set preempt read chunks")
- if(H5P_set(new_plist, H5D_ACS_APPEND_FLUSH_NAME, &dset->shared->append_flush) < 0)
+ if (H5P_set(new_plist, H5D_ACS_APPEND_FLUSH_NAME, &dset->shared->append_flush) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set append flush property")
- } else {
+ }
+ else {
/* Get the default FAPL */
- if(NULL == (def_fapl = (H5P_genplist_t *)H5I_object(H5P_LST_FILE_ACCESS_ID_g)))
+ if (NULL == (def_fapl = (H5P_genplist_t *)H5I_object(H5P_LST_FILE_ACCESS_ID_g)))
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a property list")
/* Set the data cache number of slots to the value of the default FAPL */
if (H5P_get(def_fapl, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &def_chunk_info.nslots) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET,FAIL, "can't get data number of slots");
- if(H5P_set(new_plist, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &def_chunk_info.nslots) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get data number of slots");
+ if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &def_chunk_info.nslots) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set data cache number of slots")
/* Set the data cache byte size to the value of the default FAPL */
if (H5P_get(def_fapl, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &def_chunk_info.nbytes_max) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET,FAIL, "can't get data cache byte size");
- if(H5P_set(new_plist, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &def_chunk_info.nbytes_max) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get data cache byte size");
+ if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &def_chunk_info.nbytes_max) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set data cache byte size")
/* Set the preempt read chunks property to the value of the default FAPL */
if (H5P_get(def_fapl, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &def_chunk_info.w0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET,FAIL, "can't get preempt read chunks");
- if(H5P_set(new_plist, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &def_chunk_info.w0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get preempt read chunks");
+ if (H5P_set(new_plist, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &def_chunk_info.w0) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set preempt read chunks")
/* Set the append flush property to its default value */
- if(H5P_set(new_plist, H5D_ACS_APPEND_FLUSH_NAME, &def_append_flush_info) < 0)
+ if (H5P_set(new_plist, H5D_ACS_APPEND_FLUSH_NAME, &def_append_flush_info) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set append flush property")
- }/* end if-else */
+ } /* end if-else */
/* Set the VDS view & printf gap options */
- if(H5P_set(new_plist, H5D_ACS_VDS_VIEW_NAME, &(dset->shared->layout.storage.u.virt.view)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_VDS_VIEW_NAME, &(dset->shared->layout.storage.u.virt.view)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set VDS view")
- if(H5P_set(new_plist, H5D_ACS_VDS_PRINTF_GAP_NAME, &(dset->shared->layout.storage.u.virt.printf_gap)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_VDS_PRINTF_GAP_NAME, &(dset->shared->layout.storage.u.virt.printf_gap)) <
+ 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set VDS printf gap")
/* Set the vds prefix option */
- if(H5P_set(new_plist, H5D_ACS_VDS_PREFIX_NAME, &(dset->shared->vds_prefix)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_VDS_PREFIX_NAME, &(dset->shared->vds_prefix)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set vds prefix")
/* Set the external file prefix option */
- if(H5P_set(new_plist, H5D_ACS_EFILE_PREFIX_NAME, &(dset->shared->extfile_prefix)) < 0)
+ if (H5P_set(new_plist, H5D_ACS_EFILE_PREFIX_NAME, &(dset->shared->extfile_prefix)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set external file prefix")
/* Set the return value */
ret_value = new_dapl_id;
done:
- if(ret_value < 0)
- if(new_dapl_id > 0)
- if(H5I_dec_app_ref(new_dapl_id) < 0)
+ if (ret_value < 0)
+ if (new_dapl_id > 0)
+ if (H5I_dec_app_ref(new_dapl_id) < 0)
HDONE_ERROR(H5E_SYM, H5E_CANTDEC, FAIL, "can't free")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_get_access_plist() */
-
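H5D_get_access_plist() in turn backs the public H5Dget_access_plist() call: for a chunked dataset the returned DAPL carries the dataset's own raw-data chunk cache settings, otherwise the file-access defaults copied in the else-branch above. A small caller-side sketch, assuming an open dataset dset_id (the helper name is illustrative):

#include <stdio.h>
#include "hdf5.h"

/* Query the effective chunk-cache parameters carried by the returned DAPL */
static herr_t
print_chunk_cache(hid_t dset_id)
{
    hid_t  dapl_id = H5I_INVALID_HID;
    size_t nslots  = 0;
    size_t nbytes  = 0;
    double w0      = 0.0;
    herr_t status  = -1;

    if ((dapl_id = H5Dget_access_plist(dset_id)) < 0)
        goto done;
    if (H5Pget_chunk_cache(dapl_id, &nslots, &nbytes, &w0) < 0)
        goto done;

    printf("rdcc: %zu slots, %zu bytes, w0 = %g\n", nslots, nbytes, w0);
    status = 0;

done:
    if (dapl_id >= 0)
        H5Pclose(dapl_id);
    return status;
}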
/*-------------------------------------------------------------------------
* Function: H5D__get_space
*
@@ -3856,34 +3858,33 @@ done:
hid_t
H5D__get_space(const H5D_t *dset)
{
- H5S_t *space = NULL;
- hid_t ret_value = H5I_INVALID_HID;
+ H5S_t *space = NULL;
+ hid_t ret_value = H5I_INVALID_HID;
FUNC_ENTER_PACKAGE
/* If the layout is virtual, update the extent */
- if(dset->shared->layout.type == H5D_VIRTUAL)
- if(H5D__virtual_set_extent_unlim(dset) < 0)
+ if (dset->shared->layout.type == H5D_VIRTUAL)
+ if (H5D__virtual_set_extent_unlim(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update virtual dataset extent")
/* Read the dataspace message and return a dataspace object */
- if(NULL == (space = H5S_copy(dset->shared->space, FALSE, TRUE)))
+ if (NULL == (space = H5S_copy(dset->shared->space, FALSE, TRUE)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get dataspace")
/* Create an atom */
- if((ret_value = H5I_register(H5I_DATASPACE, space, TRUE)) < 0)
+ if ((ret_value = H5I_register(H5I_DATASPACE, space, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register dataspace")
done:
- if(ret_value < 0)
- if(space != NULL)
- if(H5S_close(space) < 0)
+ if (ret_value < 0)
+ if (space != NULL)
+ if (H5S_close(space) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataspace")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_space() */
-
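H5D__get_space() serves the public H5Dget_space() call; note the virtual-layout branch above, which refreshes a VDS extent before the dataspace is copied. A caller-side sketch, assuming an open dataset dset_id (helper name illustrative):

#include <stdio.h>
#include "hdf5.h"

/* Print the current extent of the dataset's (possibly refreshed) dataspace */
static herr_t
print_extent(hid_t dset_id)
{
    hid_t   space_id = H5I_INVALID_HID;
    hsize_t dims[H5S_MAX_RANK];
    int     ndims;
    int     i;
    herr_t  status = -1;

    if ((space_id = H5Dget_space(dset_id)) < 0)
        goto done;
    if ((ndims = H5Sget_simple_extent_dims(space_id, dims, NULL)) < 0)
        goto done;

    for (i = 0; i < ndims; i++)
        printf("dim[%d] = %llu\n", i, (unsigned long long)dims[i]);
    status = 0;

done:
    if (space_id >= 0)
        H5Sclose(space_id);
    return status;
}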
/*-------------------------------------------------------------------------
* Function: H5D__get_type
*
@@ -3897,49 +3898,47 @@ done:
hid_t
H5D__get_type(const H5D_t *dset)
{
- H5T_t *dt = NULL;
- hid_t ret_value = FAIL;
+ H5T_t *dt = NULL;
+ hid_t ret_value = FAIL;
FUNC_ENTER_PACKAGE
/* Patch the datatype's "top level" file pointer */
- if(H5T_patch_file(dset->shared->type, dset->oloc.file) < 0)
+ if (H5T_patch_file(dset->shared->type, dset->oloc.file) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to patch datatype's file pointer")
/* Copy the dataset's datatype */
- if(NULL == (dt = H5T_copy_reopen(dset->shared->type)))
+ if (NULL == (dt = H5T_copy_reopen(dset->shared->type)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to copy datatype")
/* Mark any datatypes as being in memory now */
- if(H5T_set_loc(dt, NULL, H5T_LOC_MEMORY) < 0)
+ if (H5T_set_loc(dt, NULL, H5T_LOC_MEMORY) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "invalid datatype location")
/* Lock copied type */
- if(H5T_lock(dt, FALSE) < 0)
+ if (H5T_lock(dt, FALSE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to lock transient datatype")
/* Create an atom */
- if(H5T_is_named(dt)) {
+ if (H5T_is_named(dt)) {
/* If this is a committed datatype, we need to recreate the
* two-level IDs, where the VOL object is a copy of the
* returned datatype.
*/
- if((ret_value = H5VL_wrap_register(H5I_DATATYPE, dt, TRUE)) < 0)
+ if ((ret_value = H5VL_wrap_register(H5I_DATATYPE, dt, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register datatype")
} /* end if */
- else
- if((ret_value = H5I_register(H5I_DATATYPE, dt, TRUE)) < 0)
- HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register datatype")
+ else if ((ret_value = H5I_register(H5I_DATATYPE, dt, TRUE)) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register datatype")
done:
- if(ret_value < 0)
- if(dt && H5T_close(dt) < 0)
+ if (ret_value < 0)
+ if (dt && H5T_close(dt) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release datatype")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__get_type() */
-
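H5D__get_type() is the internal counterpart of H5Dget_type(); the copy it returns is transient and locked, so a caller can inspect it and must close it, but cannot modify it. A sketch, assuming an open dataset dset_id (helper name illustrative):

#include <stdio.h>
#include "hdf5.h"

/* Inspect the read-only copy of the dataset's datatype */
static herr_t
print_type_info(hid_t dset_id)
{
    hid_t  type_id = H5I_INVALID_HID;
    herr_t status  = -1;

    if ((type_id = H5Dget_type(dset_id)) < 0)
        goto done;

    printf("datatype: class %d, %zu bytes\n", (int)H5Tget_class(type_id), H5Tget_size(type_id));
    status = 0;

done:
    if (type_id >= 0)
        H5Tclose(type_id);
    return status;
}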
/*-------------------------------------------------------------------------
* Function: H5D__refresh
*
@@ -3951,9 +3950,9 @@ done:
herr_t
H5D__refresh(hid_t dset_id, H5D_t *dset)
{
- H5D_virtual_held_file_t *head = NULL; /* Pointer to list of files held open */
- hbool_t virt_dsets_held = FALSE; /* Whether virtual datasets' files are held open */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5D_virtual_held_file_t *head = NULL; /* Pointer to list of files held open */
+ hbool_t virt_dsets_held = FALSE; /* Whether virtual datasets' files are held open */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -3962,27 +3961,26 @@ H5D__refresh(hid_t dset_id, H5D_t *dset)
HDassert(dset->shared);
/* If the layout is virtual... */
- if(dset->shared->layout.type == H5D_VIRTUAL) {
+ if (dset->shared->layout.type == H5D_VIRTUAL) {
/* Hold open the source datasets' files */
- if(H5D__virtual_hold_source_dset_files(dset, &head) < 0)
+ if (H5D__virtual_hold_source_dset_files(dset, &head) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, FAIL, "unable to hold VDS source files open")
virt_dsets_held = TRUE;
/* Refresh source datasets for virtual dataset */
- if(H5D__virtual_refresh_source_dsets(dset) < 0)
+ if (H5D__virtual_refresh_source_dsets(dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh VDS source datasets")
} /* end if */
/* Refresh dataset object */
- if((H5O_refresh_metadata(dset_id, dset->oloc)) < 0)
+ if ((H5O_refresh_metadata(dset_id, dset->oloc)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh dataset")
done:
/* Release hold on (source) virtual datasets' files */
- if(virt_dsets_held)
- if(H5D__virtual_release_source_dset_files(head) < 0)
+ if (virt_dsets_held)
+ if (H5D__virtual_release_source_dset_files(head) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't release VDS source files held open")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__refresh() */
-
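H5D__refresh() does the work for the public H5Drefresh() call, which a SWMR reader uses to pick up metadata written by the producer; for a virtual dataset the source datasets are refreshed too, with their files held open across the operation as shown above. A minimal sketch, assuming dset_id was opened from a file with SWMR read access (helper name illustrative):

#include "hdf5.h"

/* Re-read the dataset's metadata so the reader sees the writer's latest extent */
static herr_t
poll_dataset(hid_t dset_id)
{
    if (H5Drefresh(dset_id) < 0)
        return -1;
    return 0;
}

A typical follow-up would be to call H5Dget_space() again after the refresh, since the dataset's extent may have grown since it was opened.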