author:    Bill Wendling <wendling@ncsa.uiuc.edu>  2002-10-23 22:09:33 (GMT)
committer: Bill Wendling <wendling@ncsa.uiuc.edu>  2002-10-23 22:09:33 (GMT)
commit:    d1f3de3a587d3fc740d076e012d188a19c3300d9 (patch)
tree:      b347a6c1adfa4684e66605e0f6e216aa92335dee
parent:    f5b0c68a53004cabf9b307bc50a00f4740448d9e (diff)
[svn-r6029] Purpose:
Oops
Description:
    The H5FPprivate.h header was being #included even when parallel
    (FPHDF5) support was turned off.
Solution:
    Moved the #include of H5FPprivate.h inside the #ifdef H5_HAVE_FPHDF5
    block so that it isn't pulled in when H5_HAVE_FPHDF5 isn't defined.
Platforms tested:
Linux...
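
For reference, a minimal sketch of the guarded-include pattern this change applies
(the header and macro names are taken from the patch below; the surrounding function
is hypothetical and only for illustration):

    #include "H5private.h"      /* Generic functions -- always needed */
    #include "H5Eprivate.h"     /* Error handling -- always needed    */

    #ifdef H5_HAVE_FPHDF5
    /* Only pull in the Flexible Parallel HDF5 header (and MPI) when the
     * feature is configured in; otherwise the compiler never sees these
     * declarations. */
    #include "H5FPprivate.h"    /* Flexible Parallel functions */
    #include "mpi.h"
    #endif /* H5_HAVE_FPHDF5 */

    void example_caller(void)   /* hypothetical, for illustration only */
    {
    #ifdef H5_HAVE_FPHDF5
        /* FPHDF5-specific calls would go here, also guarded. */
    #endif
    }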
-rw-r--r--   src/H5D.c        | 414
-rw-r--r--   src/H5FP.c       |   3
-rw-r--r--   src/H5FPclient.c |   3
-rw-r--r--   src/H5FPserver.c |   3

4 files changed, 409 insertions, 14 deletions
diff --git a/src/H5D.c b/src/H5D.c
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -1,14 +1,16 @@
-/****************************************************************************
-* NCSA HDF                                                                  *
-* Software Development Group                                                *
-* National Center for Supercomputing Applications                           *
-* University of Illinois at Urbana-Champaign                                *
-* 605 E. Springfield, Champaign IL 61820                                    *
-*                                                                           *
-* For conditions of distribution and use, see the accompanying              *
-* hdf/COPYING file.                                                         *
-*                                                                           *
-****************************************************************************/
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois.        *
+ * All rights reserved.                                                     *
+ *                                                                          *
+ * This file is part of HDF5.  The full HDF5 copyright notice, including    *
+ * terms governing use, modification, and redistribution, is contained in   *
+ * the files COPYING and Copyright.html.  COPYING can be found at the root  *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and  *
+ * is linked from the top-level documents page.  It can also be found at    *
+ * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html.  If you do not have    *
+ * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.*
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 /* $Id$ */
 
@@ -1459,6 +1461,396 @@
 done:
     FUNC_LEAVE(ret_value);
 }
+
+H5D_t *
+H5D_create_cache_update(H5G_entry_t *loc, const char *name, const H5T_t *type,
+                        const H5S_t *space, const H5P_genplist_t *plist)
+{
+    H5D_t *new_dset = NULL;
+    H5D_t *ret_value = NULL;
+    int i, ndims;
+    hsize_t comp_data_size;
+    unsigned u;
+    hsize_t max_dim[H5O_LAYOUT_NDIMS] = { 0 };
+    H5O_efl_t efl;
+    H5F_t *f = NULL;
+    H5O_pline_t dcpl_pline;
+    H5D_layout_t dcpl_layout;
+    int chunk_ndims = 0;
+    hsize_t chunk_size[32] = { 0 };
+    H5D_alloc_time_t alloc_time;
+    H5D_fill_time_t fill_time;
+    H5O_fill_t fill_prop = { NULL, 0, NULL };
+    H5O_fill_new_t fill = { NULL, 0, NULL, H5D_ALLOC_TIME_LATE, H5D_FILL_TIME_ALLOC, TRUE };
+    H5D_fill_value_t fill_status;
+    H5P_genplist_t *new_plist;              /* New Property list */
+    size_t ohdr_size = H5D_MINHDR_SIZE;     /* Size of dataset's object header */
+
+    FUNC_ENTER_NOAPI(H5D_create_cache_update, NULL);
+
+    /* check args */
+    assert (loc);
+    assert (name && *name);
+    assert (type);
+    assert (space);
+
+    if (H5P_get(plist, H5D_CRT_DATA_PIPELINE_NAME, &dcpl_pline) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve pipeline filter");
+
+    if (H5P_get(plist, H5D_CRT_LAYOUT_NAME, &dcpl_layout) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve layout");
+
+    if (dcpl_pline.nfilters > 0 && H5D_CHUNKED != dcpl_layout)
+        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL,
+                    "filters can only be used with chunked layout");
+
+    if (H5P_get(plist, H5D_CRT_ALLOC_TIME_NAME, &alloc_time) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve space allocation time");
+
+    /* Check if the alloc_time is the default and set it accordingly */
+    if (alloc_time == H5D_ALLOC_TIME_DEFAULT)
+        switch (dcpl_layout) {
+        case H5D_COMPACT:
+            alloc_time = H5D_ALLOC_TIME_EARLY;
+            break;
+        case H5D_CONTIGUOUS:
+            alloc_time = H5D_ALLOC_TIME_LATE;
+            break;
+        case H5D_CHUNKED:
+            alloc_time = H5D_ALLOC_TIME_INCR;
+            break;
+        default:
+            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "not implemented yet");
+        }
+
+    /* Don't allow compact datasets to allocate space later */
+    if (dcpl_layout == H5D_COMPACT && alloc_time != H5D_ALLOC_TIME_EARLY)
+        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL,
+                    "compact dataset doesn't support late space allocation");
+
+    /* What file is the dataset being added to? */
+    if ((f = H5G_insertion_file(loc, name)) == NULL)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to locate insertion point");
+
+    /* If MPIO or MPIPOSIX is used, no filter support yet. */
+    if ((IS_H5FD_MPIO(f) || IS_H5FD_MPIPOSIX(f)) && dcpl_pline.nfilters > 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL,
+                    "Parallel I/O does not support filters yet");
+
+    /* Check if this dataset is going into a parallel file and set space allocation time */
+    if (IS_H5FD_MPIO(f) || IS_H5FD_MPIPOSIX(f))
+        alloc_time = H5D_ALLOC_TIME_EARLY;
+
+    if (H5P_get(plist, H5D_CRT_FILL_TIME_NAME, &fill_time) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve fill time");
+
+    if (fill_time == H5D_FILL_TIME_NEVER && H5T_detect_class(type, H5T_VLEN))
+        HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL,
+                    "Dataset doesn't support VL datatype when fill value is not defined");
+
+    /* Initialize the dataset object */
+    if (NULL == (new_dset = H5D_new(dcpl_id)))
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+
+    /* Check if the datatype is "sensible" for use in a dataset */
+    if (H5T_is_sensible(type) != TRUE)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "datatype is not sensible");
+
+    /* Copy datatype for dataset */
+    if ((new_dset->type = H5T_copy(type, H5T_COPY_ALL)) == NULL)
+        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "can't copy datatype");
+
+    /* Mark any VL datatypes as being on disk now */
+    if (H5T_vlen_mark(new_dset->type, f, H5T_VLEN_DISK) < 0)
+        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "invalid VL location");
+
+    /* Get new dataset's property list object */
+    if ((new_plist = H5I_object(new_dset->dcpl_id)) == NULL)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL,
+                    "can't get dataset creation property list");
+
+    /* Set the alloc_time for the dataset, in case the default was used */
+    if (H5P_set(new_plist, H5D_CRT_ALLOC_TIME_NAME, &alloc_time) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "can't set allocation time");
+
+    if (H5P_get(new_plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve layout");
+
+    if (H5P_get(new_plist, H5D_CRT_EXT_FILE_LIST_NAME, &efl) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve external file list");
+
+    /* Total raw data size */
+    if (H5P_get(new_plist, H5D_CRT_LAYOUT_NAME, &new_dset->layout.type) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve layout");
+
+    new_dset->layout.ndims = H5S_get_simple_extent_ndims(space) + 1;
+    assert((unsigned)(new_dset->layout.ndims) <= NELMTS(new_dset->layout.dim));
+    new_dset->layout.dim[new_dset->layout.ndims - 1] = H5T_get_size(new_dset->type);
+    new_dset->layout.addr = HADDR_UNDEF;        /* Initialize to no address */
+
+/*************************************************************
+ * Begin Cache Update
+ *************************************************************/
+
+    switch (new_dset->layout.type) {
+    case H5D_CONTIGUOUS:
+        /*
+         * The maximum size of the dataset cannot exceed the storage size.
+         * Also, only the slowest varying dimension of a simple data space
+         * can be extendible.
+         */
+        if ((ndims = H5S_get_simple_extent_dims(space, new_dset->layout.dim, max_dim)) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to initialize contiguous storage");
+
+        for (i = 1; i < ndims; i++)
+            if (max_dim[i] > new_dset->layout.dim[i])
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                            "only the first dimension can be extendible");
+
+        if (efl.nused > 0) {
+            hsize_t max_points = H5S_get_npoints_max (space);
+            hsize_t max_storage = H5O_efl_total_size (&efl);
+
+            if (H5S_UNLIMITED == max_points) {
+                if (H5O_EFL_UNLIMITED!=max_storage)
+                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                                "unlimited data space but finite storage");
+            } else if (max_points * H5T_get_size (type) < max_points) {
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                            "data space * type size overflowed");
+            } else if (max_points * H5T_get_size (type) > max_storage) {
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                            "data space size exceeds external storage size");
+            }
+        } else if (ndims > 0 && max_dim[0] > new_dset->layout.dim[0]) {
+            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL,
+                        "extendible contiguous non-external dataset");
+        }
+
+        break;
+
+    case H5D_CHUNKED:
+        /*
+         * Chunked storage allows any type of data space extension, so we
+         * don't even bother checking.
+         */
+        if (chunk_ndims != H5S_get_simple_extent_ndims(space))
+            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL,
+                        "dimensionality of chunks doesn't match the data space");
+
+        if (efl.nused > 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL,
+                        "external storage not supported with chunked layout");
+
+        /*
+         * The chunk size of a dimension with a fixed size cannot exceed
+         * the maximum dimension size
+         */
+        if (H5P_get(new_plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0)
+            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size");
+
+        if (H5S_get_simple_extent_dims(space, NULL, max_dim)<0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to query maximum dimensions");
+
+        for (u = 0; u < new_dset->layout.ndims - 1; u++)
+            if (max_dim[u] != H5S_UNLIMITED && max_dim[u] < chunk_size[u])
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                            "chunk size must be <= maximum dimension size for fixed-sized dimensions");
+
+        /* Set the dataset's chunk sizes from the property list's chunk sizes */
+        for (u = 0; u < new_dset->layout.ndims - 1; u++)
+            new_dset->layout.dim[u] = chunk_size[u];
+
+        break;
+
+    case H5D_COMPACT:
+        /*
+         * Compact dataset is stored in dataset object header message of
+         * layout.
+         */
+        new_dset->layout.size = H5S_get_simple_extent_npoints(space) *
+                                H5T_get_size(type);
+
+        /*
+         * Verify data size is smaller than maximum header message size
+         * (64KB) minus other layout message fields.
+         */
+        comp_data_size = H5O_MAX_SIZE-H5O_layout_meta_size(f, &new_dset->layout);
+
+        if (new_dset->layout.size > comp_data_size)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "compact dataset size is bigger than header message maximum size");
+
+        if ((ndims = H5S_get_simple_extent_dims(space, new_dset->layout.dim, max_dim)) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to initialize dimension size of compact dataset storage");
+
+        /* remember to check if size is small enough to fit header message */
+        ohdr_size += new_dset->layout.size;
+        break;
+
+    default:
+        HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "not implemented yet");
+    } /* end switch */
+
+    /* Create (open for write access) an object header */
+    if (H5O_create(f, ohdr_size, &new_dset->ent) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                    "unable to create dataset object header");
+
+    /*
+     * Retrieve properties of fill value and others. Copy them into new
+     * fill value struct. Convert the fill value to the dataset type and
+     * write the message
+     */
+    if (H5P_get(new_plist, H5D_CRT_ALLOC_TIME_NAME, &alloc_time) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve space allocation time");
+
+    if (H5P_get(new_plist, H5D_CRT_FILL_TIME_NAME, &fill_time) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve fill time");
+
+    if (H5P_fill_value_defined(new_plist, &fill_status)<0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't tell if fill value defined");
+
+    if (fill_status== H5D_FILL_VALUE_DEFAULT || fill_status==H5D_FILL_VALUE_USER_DEFINED) {
+        if (H5P_get(new_plist, H5D_CRT_FILL_VALUE_NAME, &fill_prop) < 0)
+            HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve fill value");
+
+        if (H5O_copy(H5O_FILL, &fill_prop, &fill) == NULL)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT,NULL, "unable to copy fill value");
+
+        if (fill_prop.buf && fill_prop.size > 0 &&
+                H5O_fill_convert(&fill, new_dset->type) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to convert fill value to dataset type");
+
+        fill.fill_defined = TRUE;
+    } else if (fill_status == H5D_FILL_VALUE_UNDEFINED) {
+        fill.size = -1;
+        fill.type = fill.buf = NULL;
+        fill.fill_defined = FALSE;
+    } else {
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL,
+                    "unable to determine if fill value is defined");
+    } /* end else */
+
+    fill.alloc_time = alloc_time;
+    fill.fill_time = fill_time;
+
+    if (fill.fill_defined == FALSE && fill_time != H5D_FILL_TIME_NEVER)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT,NULL, "unable to create dataset");
+
+    /* Write new fill value message */
+    if (H5O_modify(&new_dset->ent, H5O_FILL_NEW, 0, H5O_FLAG_CONSTANT, &fill) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                    "unable to update fill value header message");
+
+    H5O_reset(H5O_FILL, &fill_prop);
+
+    if (fill.buf && (NULL==H5O_copy(H5O_FILL, &fill, &fill_prop)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to copy fill value");
+
+    H5O_reset(H5O_FILL_NEW, &fill);
+
+    /* Write old fill value */
+    if (fill_prop.buf &&
+            H5O_modify(&new_dset->ent, H5O_FILL, 0, H5O_FLAG_CONSTANT, &fill_prop) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                    "unable to update fill value header message");
+
+    if (H5P_set(new_plist, H5D_CRT_FILL_VALUE_NAME, &fill_prop) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, NULL, "can't set fill value");
+
+    /* Update the type and space header messages */
+    if (H5O_modify(&new_dset->ent, H5O_DTYPE, 0,
+                   H5O_FLAG_CONSTANT | H5O_FLAG_SHARED, new_dset->type) < 0 ||
+            H5S_modify(&new_dset->ent, space) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                    "unable to update type or space header messages");
+
+    /* Update the filters message */
+    if (H5P_get(new_plist, H5D_CRT_DATA_PIPELINE_NAME, &dcpl_pline) < 0)
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "Can't retrieve pipeline filter");
+
+    if (dcpl_pline.nfilters>0 &&
+            H5O_modify (&new_dset->ent, H5O_PLINE, 0, H5O_FLAG_CONSTANT, &dcpl_pline) < 0)
+        HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "unable to update filter header message");
+
+    /* Add a modification time message. */
+    if (H5O_touch(&new_dset->ent, TRUE) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                    "unable to update modification time message");
+
+    /* Give the dataset a name */
+    if (H5G_insert(loc, name, &new_dset->ent) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to name dataset");
+
+    /*
+     * Allocate storage if space allocate time is early; otherwise delay
+     * allocation until later.
+     */
+    if (alloc_time == H5D_ALLOC_TIME_EARLY)
+        if (H5D_alloc_storage(f, new_dset, H5D_ALLOC_CREATE) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize storage");
+
+    /* Update external storage message */
+    if (efl.nused > 0) {
+        size_t heap_size = H5HL_ALIGN(1);
+
+        for (i = 0; i < efl.nused; ++i)
+            heap_size += H5HL_ALIGN(HDstrlen(efl.slot[i].name) + 1);
+
+        if (H5HL_create(f, heap_size, &efl.heap_addr /*out*/) < 0 ||
+                (size_t)(-1) == H5HL_insert(f, efl.heap_addr, 1, ""))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to create external file list name heap");
+
+        for (i = 0; i < efl.nused; ++i) {
+            size_t offset = H5HL_insert(f, efl.heap_addr,
+                                        HDstrlen(efl.slot[i].name) + 1,
+                                        efl.slot[i].name);
+
+            assert(0 == efl.slot[i].name_offset);
+
+            if ((size_t)(-1) == offset)
+                HGOTO_ERROR(H5E_EFL, H5E_CANTINIT, NULL, "unable to insert URL into name heap");
+
+            efl.slot[i].name_offset = offset;
+        } /* end for */
+
+        if (H5O_modify(&new_dset->ent, H5O_EFL, 0, H5O_FLAG_CONSTANT, &efl) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL,
+                        "unable to update external file list message");
+    } /* end if */
+
+    /* Update layout message */
+    if (H5D_COMPACT != new_dset->layout.type &&
+            H5O_modify(&new_dset->ent, H5O_LAYOUT, 0, 0, &new_dset->layout) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to update layout");
+
+/*************************************************************
+ * Cache Updated
+ *************************************************************/
+
+    /* Success */
+    ret_value = new_dset;
+
+done:
+    if (!ret_value && new_dset) {
+        if (new_dset->type)
+            H5T_close(new_dset->type);
+
+        if (H5F_addr_defined(new_dset->ent.header))
+            H5O_close(&(new_dset->ent));
+
+        new_dset->ent.file = NULL;
+        H5FL_FREE(H5D_t,new_dset);
+    }
+
+    FUNC_LEAVE(ret_value);
+}
+
 /*-------------------------------------------------------------------------
  * Function:    H5D_create
diff --git a/src/H5FP.c b/src/H5FP.c
--- a/src/H5FP.c
+++ b/src/H5FP.c
@@ -17,12 +17,13 @@
 
 #include "H5private.h"          /* Generic Functions */
 #include "H5Eprivate.h"         /* Error Handling */
-#include "H5FPprivate.h"        /* Flexible Parallel Functions */
 #include "H5Oprivate.h"         /* Object Headers */
 #include "H5TBprivate.h"        /* Threaded, Balanced, Binary Trees */
 
 #ifdef H5_HAVE_FPHDF5
 
+#include "H5FPprivate.h"        /* Flexible Parallel Functions */
+
 #include "mpi.h"
 
 /* Interface initialization */
diff --git a/src/H5FPclient.c b/src/H5FPclient.c
index 366b22f..7926813 100644
--- a/src/H5FPclient.c
+++ b/src/H5FPclient.c
@@ -21,12 +21,13 @@
 
 #include "H5private.h"          /* Generic Functions */
 #include "H5Eprivate.h"         /* Error Handling */
-#include "H5FPprivate.h"        /* Flexible Parallel Functions */
 #include "H5Oprivate.h"         /* Object Headers */
 #include "H5TBprivate.h"        /* Threaded, Balanced, Binary Trees */
 
 #ifdef H5_HAVE_FPHDF5
 
+#include "H5FPprivate.h"        /* Flexible Parallel Functions */
+
 #include "mpi.h"
 
 /* Pablo mask */
diff --git a/src/H5FPserver.c b/src/H5FPserver.c
index aabd9ef..b4eb3bc 100644
--- a/src/H5FPserver.c
+++ b/src/H5FPserver.c
@@ -31,12 +31,13 @@
 
 #include "H5private.h"          /* Generic Functions */
 #include "H5Eprivate.h"         /* Error Handling */
-#include "H5FPprivate.h"        /* Flexible Parallel Functions */
 #include "H5Oprivate.h"         /* Object Headers */
 #include "H5TBprivate.h"        /* Threaded, Balanced, Binary Trees */
 
 #ifdef H5_HAVE_FPHDF5
 
+#include "H5FPprivate.h"        /* Flexible Parallel Functions */
+
 #include "mpi.h"
 
 /* Pablo mask */
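
For readers skimming the new H5D_create_cache_update routine above, its core
allocation-time logic can be distilled as follows (a simplified sketch using
stand-in enum and function names, not the actual HDF5 internals):

    typedef enum { ALLOC_DEFAULT, ALLOC_EARLY, ALLOC_INCR, ALLOC_LATE } alloc_time_t;
    typedef enum { LAYOUT_COMPACT, LAYOUT_CONTIGUOUS, LAYOUT_CHUNKED } layout_t;

    /* Default the space allocation time from the layout, then force early
     * allocation for parallel (MPIO/MPIPOSIX) files, mirroring the checks
     * in the new function. */
    static alloc_time_t pick_alloc_time(layout_t layout, alloc_time_t requested,
                                        int is_parallel_file)
    {
        alloc_time_t t = requested;

        if (t == ALLOC_DEFAULT) {
            switch (layout) {
            case LAYOUT_COMPACT:    t = ALLOC_EARLY; break; /* data lives in the object header */
            case LAYOUT_CONTIGUOUS: t = ALLOC_LATE;  break; /* allocate when data is written   */
            case LAYOUT_CHUNKED:    t = ALLOC_INCR;  break; /* allocate chunk by chunk         */
            }
        }

        if (is_parallel_file)
            t = ALLOC_EARLY;    /* parallel I/O requires early allocation */

        return t;
    }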