From 6e6760216bd2dc64bba0976cdf1a1fed5a8fa114 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Thu, 27 May 2004 15:26:32 -0500 Subject: [svn-r8592] Purpose: Code optimization & bug fix Description: When dimension information is being stored in the storage layout message on disk, it is stored as 32-bit quantities, possibly truncating the dimension information, if a dimension is greater than 32-bits in size. Solution: Fix the storage layout message problem by revising file format to not store dimension information, since it is already available in the dataspace. Also revise the storage layout data structures to be more compartmentalized for the information for contiguous, chunked and compact storage. Platforms tested: FreeBSD 4.9 (sleipnir) w/parallel Solaris 2.7 (arabica) h5committest --- MANIFEST | 3 + doc/html/H5.format.html | 428 +++++++++++++++++++++++++++++++++++++++++++++--- src/H5A.c | 12 +- src/H5D.c | 302 ++++++++++++++++++---------------- src/H5Dcompact.c | 6 +- src/H5Dcontig.c | 40 ++--- src/H5Dio.c | 116 +++++++------ src/H5Distore.c | 226 ++++++++++++------------- src/H5Dpkg.h | 7 + src/H5Dprivate.h | 2 +- src/H5Dseq.c | 4 +- src/H5Fcompact.c | 6 +- src/H5Fcontig.c | 40 ++--- src/H5Fistore.c | 226 ++++++++++++------------- src/H5Fseq.c | 4 +- src/H5Oattr.c | 11 +- src/H5Olayout.c | 387 +++++++++++++++++++++++++++++++------------ src/H5Oprivate.h | 46 ++++-- src/H5Pdcpl.c | 29 ++-- src/H5S.c | 6 +- src/H5Sall.c | 4 +- src/H5Smpio.c | 6 +- src/H5Sprivate.h | 2 + src/H5V.c | 12 +- src/H5Vprivate.h | 8 +- src/H5Z.c | 16 +- src/Makefile.in | 4 +- test/dsets.c | 2 +- test/tmisc.c | 189 +++++++++++++++++++++ 29 files changed, 1476 insertions(+), 668 deletions(-) diff --git a/MANIFEST b/MANIFEST index de94fe3..3c50b13 100644 --- a/MANIFEST +++ b/MANIFEST @@ -850,6 +850,7 @@ ./src/H5Dprivate.h ./src/H5Dpublic.h ./src/H5Dpkg.h +./src/H5Dtest.c ./src/H5E.c ./src/H5Eprivate.h ./src/H5Epublic.h @@ -1050,6 +1051,7 @@ ./test/gen_old_array.c _DO_NOT_DISTRIBUTE_ ./test/gen_new_array.c _DO_NOT_DISTRIBUTE_ ./test/gen_new_fill.c _DO_NOT_DISTRIBUTE_ +./test/gen_old_layout.c _DO_NOT_DISTRIBUTE_ ./test/gen_old_mtime.c _DO_NOT_DISTRIBUTE_ ./test/gen_new_mtime.c _DO_NOT_DISTRIBUTE_ ./test/gen_new_super.c _DO_NOT_DISTRIBUTE_ @@ -1075,6 +1077,7 @@ ./test/th5s.h5 ./test/theap.c ./test/titerate.c +./test/tlayouto.h5 ./test/tmeta.c ./test/tmisc.c ./test/tmtimen.h5 diff --git a/doc/html/H5.format.html b/doc/html/H5.format.html index 76a9058..ae1ff23 100644 --- a/doc/html/H5.format.html +++ b/doc/html/H5.format.html @@ -104,6 +104,7 @@ TABLE.list TD { border:none; }
  • Name: NIL
  • Name: Simple Dataspace +
  • Name: Reserved - not assigned yet
  • Name: Datatype
  • Name: Data Storage - Fill Value (Old)
  • Name: Data Storage - Fill Value @@ -119,18 +120,20 @@ TABLE.list TD { border:none; }
    1. Disk Format Level 2a - Data Object Headers(Continued)
        -
      1. Name: Data Storage - Compact + +
      2. Name: Reserved - not assigned yet
      3. Name: Data Storage - External Data Files
      4. Name: Data Storage - Layout
      5. Name: Reserved - not assigned yet
      6. Name: Reserved - not assigned yet -
      7. Name: Data Storage - Filter Pipeline +
      8. Name: Data Storage - Filter Pipeline
      9. Name: Attribute -
      10. Name: Object Name -
      11. Name: Object Modification Date and Time -
      12. Name: Shared Object Message +
      13. Name: Object Comment +
      14. Name: Object Modification Date and Time (Old) +
      15. Name: Shared Object Message
      16. Name: Object Header Continuation
      17. Name: Group Message +
      18. Name: Object Modification Date and Time
    2. Disk Format: Level 2b - Shared Data Object Headers
    3. Disk Format: Level 2c - Data Object Data Storage
@@ -1620,10 +1623,13 @@ TABLE.list TD { border:none; }
 	first free block (or the
 	undefined address if there
 	is no free block).  The free block contains "Size of Lengths" bytes that
-	are the offset of the next free chunk (or the
-	undefined address if this is the
-	last free chunk) followed by "Size of Lengths" bytes that store
-	the size of this free chunk.
+	are the offset of the next free block (or the
+	value '1' if this is the
+	last free block) followed by "Size of Lengths" bytes that store
+	the size of this free block.  The size of the free block includes
+	the space used to store the offset of the next free block and
+	the size of the current block, making the minimum size of a free block
+	2 * "Size of Lengths".

      @@ -2719,6 +2725,17 @@ TABLE.list TD { border:none; } --> +
      +

      Name: Reserved - Not Assigned Yet

      + Header Message Type: 0x0002
      + Length: N/A
      + Status: N/A
      + Format of Data: N/A
      + +

      Purpose and Description: This message type was skipped during + the initial specification of the file format and may be used in a + future expansion to the format. +


      Name: Datatype

      @@ -3428,7 +3445,12 @@ TABLE.list TD { border:none; } - 0-23 + 0-7 + Length of ASCII tag in bytes. + + + + 8-23 Reserved (zero). @@ -3521,7 +3543,7 @@ TABLE.list TD { border:none; }
      @@ -3649,7 +3671,7 @@ TABLE.list TD { border:none; }
      - Properties Description for Datatype Version 0 + Properties Description for Datatype Version 1
      @@ -4338,7 +4360,8 @@ TABLE.list TD { border:none; } @@ -4453,7 +4476,9 @@ TABLE.list TD { border:none; } @@ -4461,6 +4486,7 @@ TABLE.list TD { border:none; }

      + + +
      +

      Name: Reserved - Not Assigned Yet

      + Header Message Type: 0x0006
      + Length: N/A
      + Status: N/A
      + Format of Data: N/A
      + +

      Purpose and Description: This message type was skipped during + the initial specification of the file format and may be used in a + future expansion to the format.


      Name: Data Storage - @@ -4668,7 +4706,7 @@ TABLE.list TD { border:none; }

      Purpose and Description: Data layout describes how the elements of a multi-dimensional array are arranged in the linear - address space of the file. Two types of data layout are + address space of the file. Three types of data layout are supported:

        @@ -4688,13 +4726,19 @@ TABLE.list TD { border:none; } the size of the entire array; the size of the entire array can be calculated by traversing the B-tree that stores the chunk addresses. + +
      1. The array can be stored in one contiguous block, as part of + this object header message (this is called "compact" storage below).
      + +

      Version 3 of this message re-structured the format into specific + properties that are required for each layout class.
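      To make the re-structured message easier to follow, here is a minimal C sketch of how the
      layout information is compartmentalized per storage class in this change. The field names
      mirror the u.contig / u.chunk / u.compact members used throughout the patch, but the types
      and names below are simplified for illustration and do not reproduce the actual H5O_layout_t
      declaration in H5Oprivate.h. Note that the dataset's dimension sizes are deliberately absent:
      starting with version 3 they come from the dataspace message instead of being duplicated
      (and truncated to 32 bits) in the layout message.

      /*
       * Illustrative sketch only -- a simplified, compartmentalized view of the
       * storage layout information.  Types and names are assumptions for this
       * example; they are not the real H5O_layout_t declaration.
       */
      #include <stddef.h>
      #include <stdint.h>

      #define LAYOUT_NDIMS 33                 /* rank limit plus one slot for the element size */

      typedef enum {
          LAYOUT_COMPACT,                     /* raw data stored inside the object header      */
          LAYOUT_CONTIGUOUS,                  /* raw data stored in one contiguous file block  */
          LAYOUT_CHUNKED                      /* raw data stored in B-tree indexed chunks      */
      } layout_class_t;

      typedef struct {
          layout_class_t type;                /* selects which member of 'u' is valid          */
          unsigned       version;             /* message version; 3 stores no dimension sizes  */
          union {
              struct {                        /* contiguous storage */
                  uint64_t addr;              /* address of the first byte (may be undefined)  */
                  uint64_t size;              /* total size of the raw data in bytes           */
              } contig;
              struct {                        /* chunked storage */
                  unsigned ndims;             /* chunk dimensionality (dataset rank + 1)       */
                  uint32_t dim[LAYOUT_NDIMS]; /* size of one chunk in each dimension           */
                  uint64_t addr;              /* address of the chunk B-tree                   */
                  uint64_t size;              /* size in bytes of one chunk                    */
              } chunk;
              struct {                        /* compact storage */
                  size_t size;                /* size of the raw data in bytes                 */
                  void  *buf;                 /* raw data buffer held in the object header     */
                  int    dirty;               /* has the buffer changed since the last flush?  */
              } compact;
          } u;
      } layout_info_t;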

      - Properties Description for Datatype Version 1 + Properties Description for Datatype Version 2
      2 The current version used by the library (version - 1.7.3 or later). In this version, the Size field is + 1.7.3 or later). In this version, the Size and + Fill Value fields are only present if the Fill Value Defined field is set to 1. Fill Value

      The fill value. The bytes of the fill value are interpreted - using the same datatype as for the dataset. + using the same datatype as for the dataset. This field is + not present if the Version field is >1 and the Fill Value + Defined field is set to 0.

      @@ -4730,6 +4774,18 @@ TABLE.list TD { border:none; } + + + + + + + + + + + +
      - Data Layout Message + Data Layout Message, Versions 1 and 2
      ...
      Compact Data Size (4-bytes)
      Compact Data
      ...
      @@ -4743,8 +4799,8 @@ TABLE.list TD { border:none; } Version - A version number for the layout message. This - documentation describes version one. + A version number for the layout message. This value can be + either 1 or 2. @@ -4758,8 +4814,10 @@ TABLE.list TD { border:none; } Layout Class The layout class specifies how the other fields of the layout message are to be interpreted. A value of one - indicates contiguous storage while a value of two - indicates chunked storage. Other values will be defined + indicates contiguous storage, a value of two + indicates chunked storage, + while a value of three + indicates compact storage. Other values will be defined in the future. @@ -4768,7 +4826,10 @@ TABLE.list TD { border:none; } For contiguous storage, this is the address of the first byte of storage. For chunked storage this is the address of the B-tree that is used to look up the addresses of the - chunks. + chunks. This field is not present for compact storage. + If the version for this message is set to 2, the address + may have the "undefined address" value, to indicate that + storage has not yet been allocated for this array. @@ -4777,6 +4838,244 @@ TABLE.list TD { border:none; } size of the array while for chunked storage they define the size of a single chunk. + + + Compact Data Size + This field is only present for compact data storage. + It contains the size of the raw data for the dataset array. + + + Compact Data + This field is only present for compact data storage. + It contains the raw data for the dataset array. + + + + +

      +

      + + + + + + + + + + + + + + + + + + + +
      + Data Layout Message, Version 3 +
      byte | byte | byte | byte
      Version | Layout Class
      Properties
      +
      + +

      +

      + + + + + + + + + + + + + + + + + + + + + +
      Field Name | Description
      Version: A version number for the layout message. This value can be either 1, 2 or 3.
      Layout Class: The layout class specifies how the other fields of the layout message are to be
      interpreted. A value of one indicates contiguous storage, a value of two indicates chunked
      storage, while a value of three indicates compact storage.
      Properties: This variable-sized field encodes information specific to each layout class and is
      described below. If there is no property information specified for a layout class, the size of
      this field is zero bytes.
      +
      + +

      Class-specific information for contiguous layout (Class 0): + +

      +

      + + + + + + + + + + + + + + + + + +
      + Property Descriptions +
      byte | byte | byte | byte

      Address


      Size

      +
      + +

      +

      + + + + + + + + + + + + + + +
      Field Name | Description
      Address: This is the address of the first byte of raw data storage. The address may have the
      "undefined address" value, to indicate that storage has not yet been allocated for this array.
      Size: This field contains the size allocated to store the raw data.
      +
      + +

      Class-specific information for chunked layout (Class 1): + +

      +

      + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + Property Descriptions +
      byte | byte | byte | byte
      Dimensionality

      Address

      Dimension 0 (4-bytes)
      Dimension 1 (4-bytes)
      ...
      +
      + +

      +

      + + + + + + + + + + + + + + + + + + + + +
      Field Name | Description
      Dimensionality: A chunk has a fixed dimensionality. This field specifies the number of
      dimension size fields later in the message.
      Address: This is the address of the B-tree that is used to look up the addresses of the
      chunks. The address may have the "undefined address" value, to indicate that storage has not
      yet been allocated for this array.
      Dimensions: The dimension sizes define the size of a single chunk.
      +
      + +
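      As an illustration of the chunked-class properties just described, the following hedged C
      sketch decodes them from a message buffer: a one-byte dimensionality, the chunk B-tree
      address, and one 4-byte size per dimension. The helper functions, the fixed 8-byte offset
      size, and the little-endian host assumption are simplifications for this example; the library
      reads the offset size from the file's superblock and uses its own decode macros.

      /*
       * Hedged sketch: decode the class-specific properties of a version 3
       * layout message for chunked storage.  Assumes an 8-byte file offset
       * size and a little-endian host; both are simplifications.
       */
      #include <stdint.h>
      #include <string.h>

      #define MAX_NDIMS 32

      struct chunk_props {
          unsigned ndims;                     /* number of dimension-size fields that follow    */
          uint64_t btree_addr;                /* address of the chunk B-tree (may be undefined) */
          uint32_t dim[MAX_NDIMS];            /* size of one chunk in each dimension            */
      };

      static uint32_t get_u32(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
      static uint64_t get_u64(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; }

      /* Returns the number of property bytes consumed, or 0 if the field is malformed. */
      static size_t decode_chunk_props(const uint8_t *p, struct chunk_props *out)
      {
          const uint8_t *start = p;

          out->ndims = *p++;                  /* Dimensionality: one byte                       */
          if (out->ndims == 0 || out->ndims > MAX_NDIMS)
              return 0;

          out->btree_addr = get_u64(p);       /* Address: "size of offsets" bytes (8 assumed)   */
          p += 8;

          for (unsigned u = 0; u < out->ndims; u++) {
              out->dim[u] = get_u32(p);       /* Dimension sizes: 4 bytes each                  */
              p += 4;
          }
          return (size_t)(p - start);
      }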

      Class-specific information for compact layout (Class 2): + +

      +

      + + + + + + + + + + + + + + + + + + + + + +
      + Property Descriptions +
      byte | byte | byte | byte
      Size
      Raw Data
      ...
      +
      + +

      +

      + + + + + + + + + + + + + +
      Field Name | Description
      Size: This field contains the size of the raw data for the dataset array.
      Raw Data: This field contains the raw data for the dataset array.
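      Because compact data lives entirely inside the object header, its size is bounded by the
      maximum header message size (64 KB) minus the other fields of the layout message; the
      accompanying H5D.c change enforces exactly this when a compact dataset is created. Below is a
      hedged sketch of that check; MAX_HDR_MSG_SIZE and layout_meta_size() are illustrative
      stand-ins for the library's H5O_MAX_SIZE and H5O_layout_meta_size().

      /*
       * Illustrative sketch of the compact-storage size check made at dataset
       * creation time.  The constant and helper below are stand-ins, not the
       * library's real H5O_MAX_SIZE / H5O_layout_meta_size().
       */
      #include <stdbool.h>
      #include <stddef.h>
      #include <stdint.h>

      #define MAX_HDR_MSG_SIZE (64 * 1024)    /* upper bound on a single header message         */

      static size_t layout_meta_size(void)
      {
          return 16;                          /* placeholder for the non-data message fields    */
      }

      /* Returns true if npoints elements of elmt_size bytes each can use compact layout. */
      static bool fits_compact(uint64_t npoints, size_t elmt_size)
      {
          uint64_t raw_size = npoints * (uint64_t)elmt_size;
          return raw_size <= (uint64_t)(MAX_HDR_MSG_SIZE - layout_meta_size());
      }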
      @@ -5124,14 +5423,14 @@ TABLE.list TD { border:none; }
      -

      Name: Object Name

      +

      Name: Object Comment

      Header Message Type: 0x000D
      Length: varies
      Status: Optional, may not be repeated. -

      Purpose and Description: The object name or comment is - designed to be a short description of an object. An object name +

      Purpose and Description: The object comment is + designed to be a short description of an object. An object comment is a sequence of non-zero (\0) ASCII characters with no other formatting included by the library. @@ -5150,7 +5449,7 @@ TABLE.list TD { border:none; } -
      Name

      +
      Comment

      @@ -5171,7 +5470,7 @@ TABLE.list TD { border:none; }


      -

      Name: Object Modification Date & Time

      +

      Name: Object Modification Date & Time (Old)

      Header Message Type: 0x000E
      Length: fixed
      @@ -5183,6 +5482,12 @@ TABLE.list TD { border:none; } updated when any object header message changes according to the system clock where the change was posted. +

      This modification time message is deprecated in favor of the "new" + modification time message (Message Type 0x0012) and is no longer written + to the file in versions of the HDF5 library after the 1.6.0 version. +

      + +

      @@ -5460,6 +5765,75 @@ where the group name heap is located. +
      +

      Name: Object Modification Date & Time

      + +

      Header Message Type: 0x0012 +

      +

      Length: Fixed +

      +

      Status: Optional, may not be repeated. +

      + +

      Description: The object modification date + and time is a timestamp which indicates + the last modification of an object. The time is + updated when any object header message changes according to the + system clock where the change was posted. +

      + +

      +

      +
      + + + + + + + + + + + + + + + + + +
      + Modification Time Message +
      byte | byte | byte | byte
      Version | Reserved
      Seconds After Epoch
      +
      + +

      +

      + + + + + + + + + + + + + + + + + + + +
      Field Name | Description
      Version: The version number for the message. This document describes version one of the new
      modification time message.
      Reserved: This field is reserved and should always be zero.
      Seconds After Epoch: The number of seconds since 0 hours, 0 minutes, 0 seconds,
      January 1, 1970, Coordinated Universal Time.
      +
      +
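      A hedged sketch of reading this message body follows; the function name is hypothetical and
      the little-endian decoding of the seconds field is an assumption made for illustration.

      /*
       * Hedged sketch: read a "new" modification time message body
       * (one version byte, three reserved bytes, four seconds-after-epoch bytes).
       */
      #include <stdint.h>
      #include <time.h>

      static time_t decode_mtime_message(const uint8_t buf[8])
      {
          if (buf[0] != 1)                    /* Version: this document describes version one   */
              return (time_t)-1;
          /* buf[1..3] are reserved and should be zero */
          uint32_t secs = (uint32_t)buf[4]    /* Seconds After Epoch, assumed little-endian     */
                        | ((uint32_t)buf[5] << 8)
                        | ((uint32_t)buf[6] << 16)
                        | ((uint32_t)buf[7] << 24);
          return (time_t)secs;
      }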

      Disk Format: Level 2b - Shared Data Object Headers

      In order to share header messages between several dataset objects, object header messages may be placed into the global heap. Since these @@ -5570,7 +5944,7 @@ value with all bits set, i.e. 0xffff...ff.

      HDF Help Desk
      -Describes HDF5 Release 1.6.2, February 2004 +Describes HDF5 Release 1.7, the unreleased development branch; working toward HDF5 Release 1.8.0
      Last modified: 5 July 2002 diff --git a/src/H5A.c b/src/H5A.c index aa786da..6d8ce44 100644 --- a/src/H5A.c +++ b/src/H5A.c @@ -272,7 +272,7 @@ H5A_create(const H5G_entry_t *ent, const char *name, const H5T_t *type, assert(attr->dt_size>0); attr->ds_size=H5O_raw_size(H5O_SDSPACE_ID,attr->ent.file,&(space->extent.u.simple)); assert(attr->ds_size>0); - H5_ASSIGN_OVERFLOW(attr->data_size,H5S_get_simple_extent_npoints(attr->ds)*H5T_get_size(attr->dt),hssize_t,size_t); + H5_ASSIGN_OVERFLOW(attr->data_size,H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds)*H5T_get_size(attr->dt),hssize_t,size_t); /* Hold the symbol table entry (and file) open */ if (H5O_open(&(attr->ent)) < 0) @@ -613,6 +613,7 @@ H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id) { uint8_t *tconv_buf = NULL; /* data type conv buffer */ uint8_t *bkg_buf = NULL; /* temp conversion buffer */ + hssize_t snelmts; /* elements in attribute */ hsize_t nelmts; /* elements in attribute */ H5T_path_t *tpath = NULL; /* conversion information*/ hid_t src_id = -1, dst_id = -1;/* temporary type atoms */ @@ -629,7 +630,9 @@ H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id) assert(buf); /* Create buffer for data to store on disk */ - nelmts=H5S_get_simple_extent_npoints (attr->ds); + if((snelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds))<0) + HGOTO_ERROR (H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid") + nelmts=(hsize_t)snelmts; /* Get the memory and file datatype sizes */ src_type_size = H5T_get_size(mem_type); @@ -761,6 +764,7 @@ H5A_read(H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id) { uint8_t *tconv_buf = NULL; /* data type conv buffer*/ uint8_t *bkg_buf = NULL; /* background buffer */ + hssize_t snelmts; /* elements in attribute */ hsize_t nelmts; /* elements in attribute*/ H5T_path_t *tpath = NULL; /* type conversion info */ hid_t src_id = -1, dst_id = -1;/* temporary type atoms*/ @@ -776,7 +780,9 @@ H5A_read(H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id) assert(buf); /* Create buffer for data to store on disk */ - nelmts=H5S_get_simple_extent_npoints (attr->ds); + if((snelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds))<0) + HGOTO_ERROR (H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid") + nelmts=(hsize_t)snelmts; /* Get the memory and file datatype sizes */ src_type_size = H5T_get_size(attr->dt); diff --git a/src/H5D.c b/src/H5D.c index 17e956c..d1de502 100644 --- a/src/H5D.c +++ b/src/H5D.c @@ -189,7 +189,7 @@ H5D_init_interface(void) H5P_genclass_t *crt_pclass; H5D_layout_t layout = H5D_CRT_LAYOUT_DEF; int chunk_ndims = H5D_CRT_CHUNK_DIM_DEF; - hsize_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF; + size_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF; H5O_fill_t fill = H5D_CRT_FILL_VALUE_DEF; H5D_alloc_time_t alloc_time = H5D_CRT_ALLOC_TIME_DEF; H5D_fill_time_t fill_time = H5D_CRT_FILL_TIME_DEF; @@ -1322,8 +1322,8 @@ H5D_get_space_status(H5D_t *dset, H5D_space_status_t *allocation, hid_t dxpl_id) assert(space); /* Get the total number of elements in dataset's dataspace */ - if((total_elem=H5S_get_simple_extent_npoints(space))<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "unable to get # of dataspace elements"); + if((total_elem=H5S_GET_SIMPLE_EXTENT_NPOINTS(space))<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "unable to get # of dataspace elements") /* Get the size of the dataset's datatype */ if((type_size=H5T_get_size(dset->type))==0) @@ -1665,7 +1665,7 @@ H5D_update_entry_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, 
H5P_genplist_t *p /* Add the dataset's raw data size to the size of the header, if the raw data will be stored as compact */ if (layout->type == H5D_COMPACT) - ohdr_size += layout->size; + ohdr_size += layout->u.compact.size; /* Create (open for write access) an object header */ if (H5O_create(file, dxpl_id, ohdr_size, ent) < 0) @@ -1910,10 +1910,8 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space H5D_t *new_dset = NULL; int i, ndims; unsigned u; - hsize_t max_dim[H5O_LAYOUT_NDIMS]={0}; H5F_t *file=NULL; int chunk_ndims = 0; - hsize_t chunk_size[H5O_LAYOUT_NDIMS]={0}; H5P_genplist_t *dc_plist=NULL; /* New Property list */ hbool_t has_vl_type=FALSE; /* Flag to indicate a VL-type for dataset */ H5D_t *ret_value; /* Return value */ @@ -2040,100 +2038,114 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space new_dset->alloc_time=H5D_ALLOC_TIME_EARLY; /* Set up layout information */ - new_dset->layout.ndims = H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space) + 1; - assert((unsigned)(new_dset->layout.ndims) <= NELMTS(new_dset->layout.dim)); - new_dset->layout.dim[new_dset->layout.ndims-1] = H5T_get_size(new_dset->type); - new_dset->layout.addr = HADDR_UNDEF; /* Initialize to no address */ + new_dset->layout.unused.ndims = H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space) + 1; + assert((unsigned)(new_dset->layout.unused.ndims) <= NELMTS(new_dset->layout.unused.dim)); + if (H5S_get_simple_extent_dims(new_dset->space, new_dset->layout.unused.dim, NULL)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize storage info") + new_dset->layout.unused.dim[new_dset->layout.unused.ndims-1] = H5T_get_size(new_dset->type); switch (new_dset->layout.type) { case H5D_CONTIGUOUS: - /* - * The maximum size of the dataset cannot exceed the storage size. - * Also, only the slowest varying dimension of a simple data space - * can be extendible. - */ - if ((ndims=H5S_get_simple_extent_dims(new_dset->space, new_dset->layout.dim, max_dim))<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize contiguous storage"); - for (i=1; inew_dset->layout.dim[i]) - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "only the first dimension can be extendible"); - } - if (new_dset->efl.nused>0) { - hsize_t max_points = H5S_get_npoints_max (new_dset->space); - hsize_t max_storage = H5O_efl_total_size (&new_dset->efl); - - if (H5S_UNLIMITED==max_points) { - if (H5O_EFL_UNLIMITED!=max_storage) - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "unlimited data space but finite storage"); - } else if (max_points * H5T_get_size (type) < max_points) { - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space * type size overflowed"); - } else if (max_points * H5T_get_size (type) > max_storage) { - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size"); + { + hssize_t tmp_size; /* Temporary holder for raw data size */ + hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ + + /* + * The maximum size of the dataset cannot exceed the storage size. + * Also, only the slowest varying dimension of a simple data space + * can be extendible (currently only for external data storage). 
+ */ + new_dset->layout.u.contig.addr = HADDR_UNDEF; /* Initialize to no address */ + + if ((ndims=H5S_get_simple_extent_dims(new_dset->space, NULL, max_dim))<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize contiguous storage") + for (i=1; inew_dset->layout.unused.dim[i]) + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "only the first dimension can be extendible") + if (new_dset->efl.nused>0) { + hsize_t max_points = H5S_get_npoints_max (new_dset->space); + hsize_t max_storage = H5O_efl_total_size (&new_dset->efl); + + if (H5S_UNLIMITED==max_points) { + if (H5O_EFL_UNLIMITED!=max_storage) + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "unlimited data space but finite storage") + } else if (max_points * H5T_get_size (type) < max_points) { + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space * type size overflowed") + } else if (max_points * H5T_get_size (type) > max_storage) { + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size") + } + } else if (ndims>0 && max_dim[0]>new_dset->layout.unused.dim[0]) { + HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset") } - } else if (ndims>0 && max_dim[0]>new_dset->layout.dim[0]) { - HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset"); - } - /* Compute the total size of a chunk */ - for (u=1, new_dset->layout.chunk_size=new_dset->layout.dim[0]; ulayout.ndims; u++) - new_dset->layout.chunk_size *= new_dset->layout.dim[u]; + /* Compute the total size of a chunk */ + tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(new_dset->space) * + H5T_get_size(new_dset->type); + H5_ASSIGN_OVERFLOW(new_dset->layout.u.contig.size,tmp_size,hssize_t,hsize_t); + } /* end case */ break; case H5D_CHUNKED: - /* - * Chunked storage allows any type of data space extension, so we - * don't even bother checking. 
- */ - if(chunk_ndims != H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space)) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space"); - if (new_dset->efl.nused>0) - HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout"); - - /* - * The chunk size of a dimension with a fixed size cannot exceed - * the maximum dimension size - */ - if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size"); - - if (H5S_get_simple_extent_dims(new_dset->space, NULL, max_dim)<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to query maximum dimensions"); - for (u=0; ulayout.ndims-1; u++) { - if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < chunk_size[u]) - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions"); - } + { + hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */ + + /* Set up layout information */ + if((ndims=H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space))<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "unable to get rank") + new_dset->layout.u.chunk.ndims = (unsigned)ndims + 1; + assert((unsigned)(new_dset->layout.u.chunk.ndims) <= NELMTS(new_dset->layout.u.chunk.dim)); - /* Set the dataset's chunk sizes from the property list's chunk sizes */ - for (u=0; ulayout.ndims-1; u++) - new_dset->layout.dim[u] = chunk_size[u]; + new_dset->layout.u.chunk.addr = HADDR_UNDEF; /* Initialize to no address */ + + /* + * Chunked storage allows any type of data space extension, so we + * don't even bother checking. + */ + if(chunk_ndims != ndims) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space") + if (new_dset->efl.nused>0) + HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout") - /* Compute the total size of a chunk */ - for (u=1, new_dset->layout.chunk_size=new_dset->layout.dim[0]; ulayout.ndims; u++) - new_dset->layout.chunk_size *= new_dset->layout.dim[u]; + /* + * The chunk size of a dimension with a fixed size cannot exceed + * the maximum dimension size + */ + if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, new_dset->layout.u.chunk.dim) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size") + new_dset->layout.u.chunk.dim[new_dset->layout.u.chunk.ndims-1] = H5T_get_size(new_dset->type); + + if (H5S_get_simple_extent_dims(new_dset->space, NULL, max_dim)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to query maximum dimensions") + for (u=0; ulayout.u.chunk.ndims-1; u++) + if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < new_dset->layout.u.chunk.dim[u]) + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions") + + /* Compute the total size of a chunk */ + for (u=1, new_dset->layout.u.chunk.size=new_dset->layout.u.chunk.dim[0]; ulayout.u.chunk.ndims; u++) + new_dset->layout.u.chunk.size *= new_dset->layout.u.chunk.dim[u]; + } /* end case */ break; case H5D_COMPACT: { hssize_t tmp_size; /* Temporary holder for raw data size */ - hsize_t comp_data_size; + hsize_t comp_data_size; /* * Compact dataset is stored in dataset object header message of * layout. 
*/ - tmp_size = H5S_get_simple_extent_npoints(space) * + tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(space) * H5T_get_size(new_dset->type); - H5_ASSIGN_OVERFLOW(new_dset->layout.size,tmp_size,hssize_t,size_t); + H5_ASSIGN_OVERFLOW(new_dset->layout.u.compact.size,tmp_size,hssize_t,size_t); + /* Verify data size is smaller than maximum header message size * (64KB) minus other layout message fields. */ comp_data_size=H5O_MAX_SIZE-H5O_layout_meta_size(file, &(new_dset->layout)); - if(new_dset->layout.size > comp_data_size) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size"); - if (H5S_get_simple_extent_dims(space, new_dset->layout.dim, NULL)<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize dimension size of compact dataset storage"); - } - + if(new_dset->layout.u.compact.size > comp_data_size) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size") + } /* end case */ break; default: @@ -2328,13 +2340,11 @@ static H5D_t * H5D_open_oid(H5G_entry_t *ent, hid_t dxpl_id) { H5D_t *dataset = NULL; /*new dataset struct */ - H5D_t *ret_value = NULL; /*return value */ H5O_fill_new_t fill = {NULL, 0, NULL, H5D_ALLOC_TIME_LATE, H5D_CRT_FILL_TIME_DEF, TRUE}; H5O_fill_t *fill_prop; /* Pointer to dataset's fill value area */ - H5O_pline_t pline; /* I/O pipeline information */ - H5D_layout_t layout; /* Dataset layout */ - int chunk_ndims; - H5P_genplist_t *plist; /* Property list */ + H5O_pline_t pline; /* I/O pipeline information */ + H5P_genplist_t *plist; /* Property list */ + H5D_t *ret_value = NULL; /*return value */ FUNC_ENTER_NOAPI(H5D_open_oid, NULL); @@ -2384,11 +2394,21 @@ H5D_open_oid(H5G_entry_t *ent, hid_t dxpl_id) */ if (NULL==H5O_read(&(dataset->ent), H5O_LAYOUT_ID, 0, &(dataset->layout), dxpl_id)) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to read data layout message"); + if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &dataset->layout.type) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout") switch (dataset->layout.type) { case H5D_CONTIGUOUS: - layout = H5D_CONTIGUOUS; - if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout"); + /* Compute the size of the contiguous storage for versions of the + * layout message less than version 3 because versions 1 & 2 would + * truncate the dimension sizes to 32-bits of information. - QAK 5/26/04 + */ + if(dataset->layout.version<3) { + hssize_t tmp_size; /* Temporary holder for raw data size */ + + tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(dataset->space) * + H5T_get_size(dataset->type); + H5_ASSIGN_OVERFLOW(dataset->layout.u.contig.size,tmp_size,hssize_t,hsize_t); + } /* end if */ break; case H5D_CHUNKED: @@ -2397,22 +2417,21 @@ H5D_open_oid(H5G_entry_t *ent, hid_t dxpl_id) * the chunk dimension because the chunk includes a dimension for the * individual bytes of the data type. 
*/ - layout = H5D_CHUNKED; - chunk_ndims = dataset->layout.ndims - 1; - - if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout"); - if(H5P_set(plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk dimensions"); - if(H5P_set(plist, H5D_CRT_CHUNK_SIZE_NAME, dataset->layout.dim) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk size"); + { + unsigned chunk_ndims; /* Dimensionality of chunk */ + + chunk_ndims = dataset->layout.u.chunk.ndims - 1; + + if(H5P_set(plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk dimensions") + if(H5P_set(plist, H5D_CRT_CHUNK_SIZE_NAME, dataset->layout.u.chunk.dim) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk size") + } break; case H5D_COMPACT: - layout = H5D_COMPACT; - if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout"); break; + default: HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "not implemented yet"); } /* end switch */ @@ -2479,20 +2498,23 @@ H5D_open_oid(H5G_entry_t *ent, hid_t dxpl_id) /* Get the external file list message, which might not exist. Space is * also undefined when space allocate time is H5D_ALLOC_TIME_LATE. */ - if( !H5F_addr_defined(dataset->layout.addr)) { + if((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr))) { HDmemset(&dataset->efl,0,sizeof(H5O_efl_t)); if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->efl, dxpl_id)) if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->efl) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set external file list"); } + /* * Make sure all storage is properly initialized. * This is important only for parallel I/O where the space must * be fully allocated before I/O can happen. */ if ((H5F_get_intent(dataset->ent.file) & H5F_ACC_RDWR) - && (dataset->layout.type!=H5D_COMPACT && dataset->layout.addr==HADDR_UNDEF) - && (IS_H5FD_MPIO(dataset->ent.file) || IS_H5FD_MPIPOSIX(dataset->ent.file) || IS_H5FD_FPHDF5(dataset->ent.file))) { + && ((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr))) + && IS_H5FD_MPI(dataset->ent.file)) { if (H5D_alloc_storage(dataset->ent.file, dxpl_id, dataset,H5D_ALLOC_OPEN, TRUE, FALSE)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize file storage"); } @@ -2564,10 +2586,10 @@ H5D_close(H5D_t *dataset) H5I_dec_ref(dataset->dcpl_id) < 0); /* Update header message of layout for compact dataset. 
*/ - if(dataset->layout.type==H5D_COMPACT && dataset->layout.dirty) { + if(dataset->layout.type==H5D_COMPACT && dataset->layout.u.compact.dirty) { if(H5O_modify(&(dataset->ent), H5O_LAYOUT_ID, 0, 0, 1, &(dataset->layout), H5AC_dxpl_id)<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout message"); - dataset->layout.dirty = FALSE; + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout message") + dataset->layout.u.compact.dirty = FALSE; } /* end if */ /* Remove the dataset from the list of opened objects in the file */ @@ -2587,7 +2609,7 @@ H5D_close(H5D_t *dataset) dataset->ent.file = NULL; /* Free the buffer for the raw data for compact datasets */ if(dataset->layout.type==H5D_COMPACT) - dataset->layout.buf=H5MM_xfree(dataset->layout.buf); + dataset->layout.u.compact.buf=H5MM_xfree(dataset->layout.u.compact.buf); H5FL_FREE(H5D_t,dataset); if (free_failed) @@ -2785,7 +2807,7 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo switch (layout->type) { case H5D_CONTIGUOUS: - if(layout->addr==HADDR_UNDEF) { + if(layout->u.contig.addr==HADDR_UNDEF) { /* Reserve space in the file for the entire array */ if (H5F_contig_create (f, dxpl_id, layout/*out*/)<0) HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage"); @@ -2799,7 +2821,7 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo break; case H5D_CHUNKED: - if(layout->addr==HADDR_UNDEF) { + if(layout->u.chunk.addr==HADDR_UNDEF) { /* Create the root of the B-tree that describes chunked storage */ if (H5F_istore_create (f, dxpl_id, layout/*out*/)<0) HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage"); @@ -2822,12 +2844,12 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo case H5D_COMPACT: /* Check if space is already allocated */ - if(layout->buf==NULL) { + if(layout->u.compact.buf==NULL) { /* Reserve space in layout header message for the entire array. */ - assert(layout->size>0); - if (NULL==(layout->buf=H5MM_malloc(layout->size))) - HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory for compact dataset"); - layout->dirty = TRUE; + assert(layout->u.compact.size>0); + if (NULL==(layout->u.compact.buf=H5MM_malloc(layout->u.compact.size))) + HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory for compact dataset") + layout->u.compact.dirty = TRUE; /* Indicate that we set the storage addr */ addr_set=1; @@ -2926,7 +2948,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) space=dset->space; /* Get the number of elements in the dataset's dataspace */ - snpoints = H5S_get_simple_extent_npoints(space); + snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space); assert(snpoints>=0); H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t); @@ -2937,9 +2959,9 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) /* If the fill value is defined, initialize the data buffer with it */ if(dset->fill.buf) /* Initialize the cached data buffer with the fill value */ - H5V_array_fill(dset->layout.buf, dset->fill.buf, dset->fill.size, npoints); + H5V_array_fill(dset->layout.u.compact.buf, dset->fill.buf, dset->fill.size, npoints); else /* If the fill value is default, zero set data buf. 
*/ - HDmemset(dset->layout.buf, 0, dset->layout.size); + HDmemset(dset->layout.u.compact.buf, 0, dset->layout.u.compact.size); } /* end if */ break; @@ -2969,13 +2991,19 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id) /* We only handle simple data spaces so far */ if ((ndims=H5S_get_simple_extent_dims(space, dim, NULL))<0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info"); - dim[ndims] = dset->layout.dim[ndims]; + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info") + dim[ndims] = dset->layout.u.chunk.dim[ndims]; if (H5F_istore_allocate(dset->ent.file, dxpl_id, &(dset->layout), dim, plist, full_overwrite)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset"); } /* end if */ break; + + default: + assert ("not implemented yet" && 0); +#ifdef NDEBUG + HGOTO_ERROR (H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout") +#endif /* NDEBUG */ } /* end switch */ done: @@ -3044,32 +3072,29 @@ done: static hsize_t H5D_get_storage_size(H5D_t *dset, hid_t dxpl_id) { - unsigned u; /* Index variable */ hsize_t ret_value; FUNC_ENTER_NOAPI(H5D_get_storage_size, 0); switch(dset->layout.type) { case H5D_CHUNKED: - if(dset->layout.addr == HADDR_UNDEF) + if(dset->layout.u.chunk.addr == HADDR_UNDEF) ret_value=0; else - ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.ndims, - dset->layout.addr); + ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.u.chunk.ndims, + dset->layout.u.chunk.addr); break; case H5D_CONTIGUOUS: /* Datasets which are not allocated yet are using no space on disk */ - if(dset->layout.addr == HADDR_UNDEF) + if(dset->layout.u.contig.addr == HADDR_UNDEF) ret_value=0; - else { - for (u=0, ret_value=1; ulayout.ndims; u++) - ret_value *= dset->layout.dim[u]; - } /* end else */ + else + ret_value=dset->layout.u.contig.size; break; case H5D_COMPACT: - ret_value = dset->layout.size; + ret_value = dset->layout.u.compact.size; break; default: @@ -3138,7 +3163,7 @@ done: static haddr_t H5D_get_offset(H5D_t *dset) { - haddr_t ret_value; + haddr_t ret_value=HADDR_UNDEF; haddr_t base_addr; H5F_t *f; @@ -3149,7 +3174,6 @@ H5D_get_offset(H5D_t *dset) switch(dset->layout.type) { case H5D_CHUNKED: case H5D_COMPACT: - ret_value = HADDR_UNDEF; break; case H5D_CONTIGUOUS: @@ -3161,13 +3185,16 @@ H5D_get_offset(H5D_t *dset) /* If there's user block in file, returns the absolute dataset offset * from the beginning of file. */ if(base_addr!=HADDR_UNDEF) - ret_value = dset->layout.addr + base_addr; + ret_value = dset->layout.u.contig.addr + base_addr; else - ret_value = dset->layout.addr; + ret_value = dset->layout.u.contig.addr; break; default: - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, HADDR_UNDEF, "unknown dataset layout type"); + assert ("not implemented yet" && 0); +#ifdef NDEBUG + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, HADDR_UNDEF, "unknown dataset layout type") +#endif /* NDEBUG */ } done: @@ -3516,7 +3543,7 @@ done: * Purpose: Modifies the dimensions of a dataset, based on H5Dextend. * Can change to a lower dimension. 
* - * Return: Success: 0, Failure: -1 + * Return: Success: SUCCEED, Failure: FAIL * * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu * Robb Matzke @@ -3559,7 +3586,7 @@ done: * Purpose: Based in H5D_extend, allows change to a lower dimension, * calls H5S_set_extent and H5F_istore_prune_by_extent instead * - * Return: Success: 0, Failure: -1 + * Return: Success: SUCCEED, Failure: FAIL * * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu * Robb Matzke @@ -3710,11 +3737,12 @@ H5D_flush(H5F_t *f, hid_t dxpl_id) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to get dataset ID list"); for(j=0; jlayout.type==H5D_COMPACT && dataset->layout.dirty) + HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to get dataset object") + if(dataset->layout.type==H5D_COMPACT && dataset->layout.u.compact.dirty) { if(H5O_modify(&(dataset->ent), H5O_LAYOUT_ID, 0, 0, 1, &(dataset->layout), dxpl_id)<0) - HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message"); - dataset->layout.dirty = FALSE; + HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message") + dataset->layout.u.compact.dirty = FALSE; + } /* end if */ } } /* end if */ @@ -3757,9 +3785,9 @@ H5Ddebug(hid_t dset_id, unsigned UNUSED flags) /* Print B-tree information */ if (H5D_CHUNKED==dset->layout.type) { - H5F_istore_dump_btree(dset->ent.file, H5AC_dxpl_id, stdout, dset->layout.ndims, dset->layout.addr); + (void)H5F_istore_dump_btree(dset->ent.file, H5AC_dxpl_id, stdout, dset->layout.u.chunk.ndims, dset->layout.u.chunk.addr); } else if (H5D_CONTIGUOUS==dset->layout.type) { - HDfprintf(stdout, " %-10s %a\n", "Address:", dset->layout.addr); + HDfprintf(stdout, " %-10s %a\n", "Address:", dset->layout.u.contig.addr); } done: diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 96e9955..4f5b75e 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -70,7 +70,7 @@ H5F_compact_readvv(H5F_t UNUSED *f, const H5O_layout_t *layout, FUNC_ENTER_NOAPI(H5F_compact_readvv, FAIL); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) + if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); done: @@ -112,10 +112,10 @@ H5F_compact_writevv(H5F_t UNUSED *f, H5O_layout_t *layout, FUNC_ENTER_NOAPI(H5F_compact_writevv, FAIL); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) + if((ret_value=H5V_memcpyvv(layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); - layout->dirty = TRUE; + layout->u.compact.dirty = TRUE; done: FUNC_LEAVE_NOAPI(ret_value); diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index 9b1ad40..35b9700 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -31,10 +31,10 @@ #include "H5private.h" /* Generic Functions */ #include "H5Dprivate.h" /* Dataset functions */ #include "H5Eprivate.h" /* Error handling */ -#include "H5Fpkg.h" -#include "H5FDprivate.h" /*file driver */ -#include "H5FLprivate.h" /*Free Lists */ -#include "H5MFprivate.h" /*file memory 
management */ +#include "H5Fpkg.h" /* Files */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MFprivate.h" /* File memory management */ #include "H5Oprivate.h" /* Object headers */ #include "H5Pprivate.h" /* Property lists */ #include "H5Sprivate.h" /* Dataspace functions */ @@ -46,8 +46,7 @@ #include "H5FDmpiposix.h" /* Private prototypes */ -static herr_t -H5F_contig_write(H5F_t *f, hsize_t max_data, haddr_t addr, +static herr_t H5F_contig_write(H5F_t *f, hsize_t max_data, haddr_t addr, const size_t size, hid_t dxpl_id, const void *buf); /* Interface initialization */ @@ -81,8 +80,6 @@ H5FL_BLK_DEFINE_STATIC(zero_fill); herr_t H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout) { - hsize_t size; /* Size of contiguous block of data */ - unsigned u; /* Local index variable */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_contig_create, FAIL); @@ -91,14 +88,8 @@ H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout) assert(f); assert(layout); - /* Compute size */ - size=layout->dim[0]; - for (u = 1; u < layout->ndims; u++) - size *= layout->dim[u]; - assert (size>0); - /* Allocate space for the contiguous data */ - if (HADDR_UNDEF==(layout->addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, size))) + if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size))) HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space"); done: @@ -151,8 +142,8 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, assert(f); assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(layout && H5D_CONTIGUOUS==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(H5F_addr_defined(layout->u.contig.addr)); + assert(layout->u.contig.size>0); assert(space); assert(elmt_size>0); @@ -199,7 +190,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, #endif /* H5_HAVE_PARALLEL */ /* Get the number of elements in the dataset's dataspace */ - snpoints = H5S_get_simple_extent_npoints(space); + snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space); assert(snpoints>=0); H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t); @@ -246,7 +237,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, } /* end else */ /* Start at the beginning of the dataset */ - addr = layout->addr; + addr = layout->u.contig.addr; /* Loop through writing the fill value to the dataset */ while (npoints>0) { @@ -322,8 +313,6 @@ done: herr_t H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) { - hsize_t size; /* Size of contiguous block of data */ - unsigned u; /* Local index variable */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_contig_delete, FAIL); @@ -332,17 +321,12 @@ H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) assert(f); assert(layout); - /* Compute size */ - size=layout->dim[0]; - for (u = 1; u < layout->ndims; u++) - size *= layout->dim[u]; - /* Check for overlap with the sieve buffer and reset it */ - if (H5F_sieve_overlap_clear(f, dxpl_id, layout->addr, size)<0) + if (H5F_sieve_overlap_clear(f, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to clear sieve buffer"); /* Free the file space for the chunk */ - if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->addr, size)<0) + if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, 
layout->u.contig.addr, layout->u.contig.size)<0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header"); done: diff --git a/src/H5Dio.c b/src/H5Dio.c index d39112d..d8a8f3c 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -43,7 +43,7 @@ /* Structure holding information about a chunk's selection for mapping */ typedef struct H5D_chunk_info_t { hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */ - hsize_t chunk_points; /* Number of elements selected in chunk */ + size_t chunk_points; /* Number of elements selected in chunk */ H5S_t *fspace; /* Dataspace describing chunk & selection in it */ hssize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk in file dataset's dataspace */ H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ @@ -64,6 +64,7 @@ typedef struct fm_map { H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */ unsigned m_ndims; /* Number of dimensions for memory dataspace */ hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */ + hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ H5O_layout_t *layout; /* Dataset layout information*/ H5S_sel_type msel_type; /* Selection type in memory */ @@ -690,8 +691,9 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, * fill time is NEVER, there is no way to tell whether part of data * has been overwritten. So just proceed in reading. */ - if(nelmts > 0 && dataset->efl.nused==0 && dataset->layout.type!=H5D_COMPACT - && dataset->layout.addr==HADDR_UNDEF) { + if(nelmts > 0 && dataset->efl.nused==0 && + ((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) { H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */ /* Retrieve dataset's fill-value properties */ @@ -908,14 +910,15 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, /* */ /* Allocate data space and initialize it if it hasn't been. 
*/ - if(nelmts > 0 && dataset->efl.nused==0 && dataset->layout.type!=H5D_COMPACT - && dataset->layout.addr==HADDR_UNDEF) { + if(nelmts > 0 && dataset->efl.nused==0 && + ((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) { hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */ hbool_t full_overwrite; /* Whether we are over-writing all the elements */ /* Get the number of elements in file dataset's dataspace */ - if((file_nelmts=H5S_get_simple_extent_npoints(file_space))<0) - HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset"); + if((file_nelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(file_space))<0) + HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset") /* Always allow fill values to be written if the dataset has a VL datatype */ if(H5T_detect_class(dataset->type, H5T_VLEN)) @@ -1073,7 +1076,9 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, H5_timer_begin(&timer); #endif /* Sanity check dataset, then read it */ - assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 || + assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr))) + || dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT); H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t); status = (sconv->read)(dataset->ent.file, &(dataset->layout), @@ -1169,7 +1174,9 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, H5_timer_begin(&timer); #endif /* Sanity check that space is allocated, then read data from it */ - assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 || + assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr))) + || dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT); n = H5S_select_fgath(dataset->ent.file, &(dataset->layout), &dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), file_space, @@ -1537,8 +1544,9 @@ UNUSED H5_timer_begin(&timer); #endif /* Sanity check dataset, then read it */ - assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 || - dataset->layout.type==H5D_COMPACT); + assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr))) + || dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT); /* Get first node in chunk tree */ chunk_node=H5TB_first(fm.fsel->root); @@ -1666,8 +1674,9 @@ UNUSED H5_timer_begin(&timer); #endif /* Sanity check that space is allocated, then read data from it */ - assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 || - dataset->layout.type==H5D_COMPACT); + assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr)) + || (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr))) + || dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT); n = H5S_select_fgath(dataset->ent.file, &(dataset->layout), &dataset->dcpl_cache, &store, chunk_info->fspace, &file_iter, smine_nelmts, dxpl_cache, dxpl_id, tconv_buf/*out*/); @@ -2262,16 +2271,16 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t 
*mem_type, const H5S_t *file_sp hsize_t dims[H5O_LAYOUT_NDIMS]; /* Temporary dimension information */ /* Set up "equivalent" n-dimensional dataspace with size '1' in each dimension */ - for(u=0; ulayout.ndims-1; u++) + for(u=0; ulayout.u.chunk.ndims-1; u++) dims[u]=1; - if((equiv_mspace = H5S_create_simple(dataset->layout.ndims-1,dims,NULL))==NULL) + if((equiv_mspace = H5S_create_simple(dataset->layout.u.chunk.ndims-1,dims,NULL))==NULL) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create equivalent dataspace for scalar space") /* Indicate that this space needs to be released */ equiv_mspace_init=1; /* Set the number of dimensions for the memory dataspace */ - fm->m_ndims=dataset->layout.ndims-1; + fm->m_ndims=dataset->layout.u.chunk.ndims-1; } /* end else */ else { equiv_mspace=(H5S_t *)mem_space; /* Casting away 'const' OK... */ @@ -2281,7 +2290,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp } /* end else */ /* Get dim number and dimensionality for each dataspace */ - fm->f_ndims=f_ndims=dataset->layout.ndims-1; + fm->f_ndims=f_ndims=dataset->layout.u.chunk.ndims-1; if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality"); @@ -2290,8 +2299,11 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp last_nchunks=0; nchunks=1; for(u=0; uchunk_dim[u]=fm->layout->u.chunk.dim[u]; + /* Round up to the next integer # of chunks, to accomodate partial chunks */ - fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.dim[u])-1) / dataset->layout.dim[u]; + fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.u.chunk.dim[u])-1) / dataset->layout.u.chunk.dim[u]; /* Track total number of chunks in dataset */ nchunks *= fm->chunks[u]; @@ -2418,9 +2430,9 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp /* Spaces aren't the same shape, iterate over the memory selection directly */ if(H5S_select_iterate(&bogus, f_tid, file_space, H5D_chunk_mem_cb, fm)<0) - HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections"); + HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections") - /* Clean memory chunks' hyperslab span "scratch" information */ + /* Clean up hyperslab stuff, if necessary */ if(fm->msel_type!=H5S_SEL_POINTS) { /* Clean memory chunks' hyperslab span "scratch" information */ curr_node=H5TB_first(fm->fsel->root); @@ -2591,7 +2603,6 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) hssize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ hssize_t start_coords[H5O_LAYOUT_NDIMS]; /* Starting coordinates of selection */ hssize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ - hsize_t count[H5O_LAYOUT_NDIMS]; /* Hyperslab count information */ hssize_t end[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ hsize_t chunk_index; /* Index of chunk */ int curr_dim; /* Current dimension to increment */ @@ -2613,21 +2624,21 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) /* Set initial chunk location & hyperslab size */ for(u=0; uf_ndims; u++) { - H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t); - start_coords[u]=(sel_start[u]/(hssize_t)fm->layout->dim[u])*(hssize_t)fm->layout->dim[u]; + H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t); + start_coords[u]=(sel_start[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u]; coords[u]=start_coords[u]; - count[u]=fm->layout->dim[u]; - 
end[u]=(coords[u]+count[u])-1; + end[u]=(coords[u]+fm->chunk_dim[u])-1; } /* end for */ /* Calculate the index of this chunk */ - if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index"); + if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") /* Iterate through each chunk in the dataset */ while(sel_points) { /* Check for intersection of temporary chunk and file selection */ - if(H5S_hyper_intersect_block(fm->file_space,coords,end)==TRUE) { + /* (Casting away const OK - QAK) */ + if(H5S_hyper_intersect_block((H5S_t *)fm->file_space,coords,end)==TRUE) { H5S_t *tmp_fchunk; /* Temporary file dataspace */ H5D_chunk_info_t *new_chunk_info; /* chunk information to insert into tree */ hssize_t schunk_points; /* Number of elements in chunk selection */ @@ -2645,15 +2656,15 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") /* "AND" temporary chunk and current chunk */ - if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,count,NULL)<0) { - H5S_close(tmp_fchunk); - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection"); + if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,fm->chunk_dim,NULL)<0) { + (void)H5S_close(tmp_fchunk); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection") } /* end if */ /* Resize chunk's dataspace dimensions to size of chunk */ - if(H5S_set_extent_real(tmp_fchunk,count)<0) { - H5S_close(tmp_fchunk); - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk dimensions"); + if(H5S_set_extent_real(tmp_fchunk,fm->chunk_dim)<0) { + (void)H5S_close(tmp_fchunk); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk dimensions") } /* end if */ /* Move selection back to have correct offset in chunk */ @@ -2695,7 +2706,7 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) /* Get number of elements selected in chunk */ if((schunk_points=H5S_GET_SELECT_NPOINTS(tmp_fchunk))<0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements") - H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points,schunk_points,hssize_t,hsize_t); + H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points,schunk_points,hssize_t,size_t); /* Decrement # of points left in file selection */ sel_points-=schunk_points; @@ -2713,9 +2724,9 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) curr_dim=(int)fm->f_ndims-1; /* Increment chunk location in fastest changing dimension */ - H5_CHECK_OVERFLOW(count[curr_dim],hsize_t,hssize_t); - coords[curr_dim]+=(hssize_t)count[curr_dim]; - end[curr_dim]+=(hssize_t)count[curr_dim]; + H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t); + coords[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim]; + end[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim]; /* Bring chunk location back into bounds, if necessary */ if(coords[curr_dim]>sel_end[curr_dim]) { @@ -2727,13 +2738,13 @@ H5D_create_chunk_file_map_hyper(fm_map *fm) curr_dim--; /* Increment chunk location in current dimension */ - coords[curr_dim]+=(hssize_t)count[curr_dim]; - end[curr_dim]=(coords[curr_dim]+(hssize_t)count[curr_dim])-1; + coords[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim]; + end[curr_dim]=(coords[curr_dim]+(hssize_t)fm->chunk_dim[curr_dim])-1; } while(coords[curr_dim]>sel_end[curr_dim]); /* Re-Calculate 
the index of this chunk */ - if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index"); + if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") } /* end if */ } /* end while */ @@ -2809,7 +2820,8 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm) } /* end if */ else { /* Just point at the memory dataspace & selection */ - chunk_info->mspace=fm->mem_space; + /* (Casting away const OK -QAK) */ + chunk_info->mspace=(H5S_t *)fm->mem_space; /* Indicate that the chunk's memory space is shared */ chunk_info->mspace_shared=1; @@ -2869,10 +2881,8 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection") /* Compensate for the chunk offset */ - for(u=0; uf_ndims; u++) { - H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t); + for(u=0; uf_ndims; u++) chunk_adjust[u]=adjust[u]-chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */ - } /* end for */ #ifdef QAK { int mpi_rank; @@ -2954,7 +2964,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_file_cb) /* Calculate the index of this chunk */ - if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0) + if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") /* Find correct chunk in file & memory TBBTs */ @@ -2985,7 +2995,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize new_chunk_info->index=chunk_index; /* Create a dataspace for the chunk */ - if((fspace = H5S_create_simple(fm->f_ndims,fm->layout->dim,NULL))==NULL) { + if((fspace = H5S_create_simple(fm->f_ndims,fm->chunk_dim,NULL))==NULL) { H5FL_FREE(H5D_chunk_info_t,new_chunk_info); HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create dataspace for chunk") } /* end if */ @@ -3009,8 +3019,8 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize /* Compute the chunk's coordinates */ for(u=0; uf_ndims; u++) { - H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t); - new_chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->dim[u])*(hssize_t)fm->layout->dim[u]; + H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t); + new_chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u]; } /* end for */ new_chunk_info->coords[fm->f_ndims]=0; @@ -3034,7 +3044,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize /* Get the coordinates of the element in the chunk */ for(u=0; uf_ndims; u++) - coords_in_chunk[u]=coords[u]%fm->layout->dim[u]; + coords_in_chunk[u]=coords[u]%fm->layout->u.chunk.dim[u]; /* Add point to file selection for chunk */ if(H5S_select_elements(chunk_info->fspace,H5S_SELECT_APPEND,1,(const hssize_t **)coords_in_chunk)<0) @@ -3077,8 +3087,8 @@ H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize_ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_mem_cb); /* Calculate the index of this chunk */ - if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0) - HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index"); + 
if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") /* Find correct chunk in file & memory TBBTs */ if(chunk_index==fm->last_index) { diff --git a/src/H5Distore.c b/src/H5Distore.c index 43045b1..2775548 100644 --- a/src/H5Distore.c +++ b/src/H5Distore.c @@ -252,11 +252,11 @@ H5F_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata) FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_sizeof_rkey); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); nbytes = 4 + /*storage size */ 4 + /*filter mask */ - udata->mesg.ndims*8; /*dimension indices */ + udata->mesg.u.chunk.ndims*8; /*dimension indices */ FUNC_LEAVE_NOAPI(nbytes); } @@ -379,7 +379,7 @@ H5F_istore_debug_key (FILE *stream, H5F_t UNUSED *f, hid_t UNUSED dxpl_id, int i "Filter mask:", key->filter_mask); HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:"); - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) HDfprintf (stream, "%s%Hd", u?", ":"", key->offset[u]); HDfputs ("}\n", stream); @@ -423,10 +423,10 @@ H5F_istore_cmp2(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); /* Compare the offsets but ignore the other fields */ - ret_value = H5V_vector_cmp_s(udata->mesg.ndims, lt_key->offset, rt_key->offset); + ret_value = H5V_vector_cmp_s(udata->mesg.u.chunk.ndims, lt_key->offset, rt_key->offset); done: FUNC_LEAVE_NOAPI(ret_value); @@ -476,12 +476,12 @@ H5F_istore_cmp3(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - if (H5V_vector_lt_s(udata->mesg.ndims, udata->key.offset, + if (H5V_vector_lt_s(udata->mesg.u.chunk.ndims, udata->key.offset, lt_key->offset)) { ret_value = -1; - } else if (H5V_vector_ge_s(udata->mesg.ndims, udata->key.offset, + } else if (H5V_vector_ge_s(udata->mesg.u.chunk.ndims, udata->key.offset, rt_key->offset)) { ret_value = 1; } @@ -529,7 +529,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims < H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims < H5O_LAYOUT_NDIMS); assert(addr_p); /* Allocate new storage */ @@ -545,7 +545,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, */ lt_key->nbytes = udata->key.nbytes; lt_key->filter_mask = udata->key.filter_mask; - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) lt_key->offset[u] = udata->key.offset[u]; /* @@ -555,12 +555,11 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, if (H5B_INS_LEFT != op) { rt_key->nbytes = 0; rt_key->filter_mask = 0; - for (u=0; umesg.ndims; u++) { - assert (udata->mesg.dim[u] < HSSIZET_MAX); - assert (udata->key.offset[u]+(hssize_t)(udata->mesg.dim[u]) > + for (u=0; umesg.u.chunk.ndims; u++) { + assert (udata->key.offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]) > udata->key.offset[u]); rt_key->offset[u] = udata->key.offset[u] + - 
(hssize_t)(udata->mesg.dim[u]); + (hssize_t)(udata->mesg.u.chunk.dim[u]); } } @@ -614,8 +613,8 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void assert(lt_key); /* Is this *really* the requested chunk? */ - for (u=0; umesg.ndims; u++) { - if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.dim[u])) + for (u=0; umesg.u.chunk.ndims; u++) { + if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u])) HGOTO_DONE(FAIL); } @@ -624,7 +623,7 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void udata->key.nbytes = lt_key->nbytes; udata->key.filter_mask = lt_key->filter_mask; assert (lt_key->nbytes>0); - for (u = 0; u < udata->mesg.ndims; u++) + for (u = 0; u < udata->mesg.u.chunk.ndims; u++) udata->key.offset[u] = lt_key->offset[u]; done: @@ -700,7 +699,7 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key, assert("HDF5 INTERNAL ERROR -- see rpm" && 0); HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error"); - } else if (H5V_vector_eq_s (udata->mesg.ndims, + } else if (H5V_vector_eq_s (udata->mesg.u.chunk.ndims, udata->key.offset, lt_key->offset) && lt_key->nbytes>0) { /* @@ -739,20 +738,20 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key, ret_value = H5B_INS_NOOP; } - } else if (H5V_hyper_disjointp(udata->mesg.ndims, - lt_key->offset, udata->mesg.dim, - udata->key.offset, udata->mesg.dim)) { - assert(H5V_hyper_disjointp(udata->mesg.ndims, - rt_key->offset, udata->mesg.dim, - udata->key.offset, udata->mesg.dim)); + } else if (H5V_hyper_disjointp(udata->mesg.u.chunk.ndims, + lt_key->offset, udata->mesg.u.chunk.dim, + udata->key.offset, udata->mesg.u.chunk.dim)) { + assert(H5V_hyper_disjointp(udata->mesg.u.chunk.ndims, + rt_key->offset, udata->mesg.u.chunk.dim, + udata->key.offset, udata->mesg.u.chunk.dim)); /* * Split this node, inserting the new new node to the right of the * current node. The MD_KEY is where the split occurs. */ md_key->nbytes = udata->key.nbytes; md_key->filter_mask = udata->key.filter_mask; - for (u=0; umesg.ndims; u++) { - assert(0 == udata->key.offset[u] % udata->mesg.dim[u]); + for (u=0; umesg.u.chunk.ndims; u++) { + assert(0 == udata->key.offset[u] % udata->mesg.u.chunk.dim[u]); md_key->offset[u] = udata->key.offset[u]; } @@ -851,7 +850,7 @@ H5F_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd } HDfprintf(bt_udata->stream, " 0x%08x %8Zu %10a [", lt_key->filter_mask, lt_key->nbytes, addr); - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) HDfprintf(bt_udata->stream, "%s%Hd", u?", ":"", lt_key->offset[u]); HDfputs("]\n", bt_udata->stream); @@ -940,7 +939,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, udata.key.filter_mask = 0; udata.addr = HADDR_UNDEF; udata.key.nbytes = ent->chunk_size; - for (u=0; ulayout.ndims; u++) + for (u=0; ulayout.u.chunk.ndims; u++) udata.key.offset[u] = ent->offset[u]; alloc = ent->alloc_size; @@ -978,7 +977,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, * Create the chunk it if it doesn't exist, or reallocate the chunk if * its size changed. Then write the data into the file. 
*/ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.addr, &udata)<0) + if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); @@ -1369,22 +1368,22 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Get the chunk's size */ - assert(layout->chunk_size>0); - H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t); + assert(layout->u.chunk.size>0); + H5_ASSIGN_OVERFLOW(chunk_size,layout->u.chunk.size,hsize_t,size_t); /* Search for the chunk in the cache */ if (rdcc->nslots>0) { - for (u=0, temp_idx=0; undims; u++) { + for (u=0, temp_idx=0; uu.chunk.ndims; u++) { temp_idx += offset[u]; - temp_idx *= layout->dim[u]; + temp_idx *= layout->u.chunk.dim[u]; } - temp_idx += (hsize_t)(layout->addr); + temp_idx += (hsize_t)(layout->u.chunk.addr); idx=H5F_HASH(f,temp_idx); ent = rdcc->slot[idx]; - if (ent && layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { - for (u=0, found=TRUE; ulayout.ndims; u++) { + if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + for (u=0, found=TRUE; ulayout.u.chunk.ndims; u++) { if (offset[u]!=ent->offset[u]) { found = FALSE; break; @@ -1512,7 +1511,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con ent->alloc_size = chunk_size; H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout); H5O_copy(H5O_PLINE_ID, pline, &ent->pline); - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) ent->offset[u] = offset[u]; ent->rd_count = chunk_size; ent->wr_count = chunk_size; @@ -1649,10 +1648,10 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, x.dirty = TRUE; H5O_copy (H5O_LAYOUT_ID, layout, &x.layout); H5O_copy (H5O_PLINE_ID, pline, &x.pline); - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) x.offset[u] = offset[u]; - assert(layout->chunk_size>0); - H5_ASSIGN_OVERFLOW(x.chunk_size,layout->chunk_size,hsize_t,size_t); + assert(layout->u.chunk.size>0); + H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t); x.alloc_size = x.chunk_size; x.chunk = chunk; @@ -1713,7 +1712,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp assert(f); assert(dxpl_cache); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); assert(dcpl_cache); assert(chunk_len_arr); assert(chunk_offset_arr); @@ -1722,19 +1721,19 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp assert(buf); #ifndef NDEBUG - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK HDfprintf(stderr,"%s: chunk_coords={",FUNC); -for(u=0; undims; u++) - HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n")); +for(u=0; uu.chunk.ndims; u++) + HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? 
", " : "}\n")); #endif /* QAK */ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); #ifdef QAK -HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size); +HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]); @@ -1747,9 +1746,9 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * for the chunk has been defined, then don't load the chunk into the * cache, just write the data to it directly. */ - if (layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && + if (layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && chunk_addr!=HADDR_UNDEF) { - if ((ret_value=H5F_contig_readvv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) + if ((ret_value=H5F_contig_readvv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file"); } /* end if */ else { @@ -1817,7 +1816,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, assert(f); assert(dxpl_cache); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); assert(dcpl_cache); assert(chunk_len_arr); assert(chunk_offset_arr); @@ -1826,19 +1825,19 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, assert(buf); #ifndef NDEBUG - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK HDfprintf(stderr,"%s: chunk_coords={",FUNC); -for(u=0; undims; u++) - HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n")); +for(u=0; uu.chunk.ndims; u++) + HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); #ifdef QAK -HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size); +HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]); @@ -1856,16 +1855,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * writing to other elements in the same chunk. Do a direct * write-through of only the elements requested. 
*/ - if ((layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && - chunk_addr!=HADDR_UNDEF) - || ((IS_H5FD_MPIO(f) ||IS_H5FD_MPIPOSIX(f) || IS_H5FD_FPHDF5(f)) && - (H5F_ACC_RDWR & f->shared->flags))) { + if ((layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && chunk_addr!=HADDR_UNDEF) + || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & f->shared->flags))) { #ifdef H5_HAVE_PARALLEL /* Additional sanity check when operating in parallel */ if (chunk_addr==HADDR_UNDEF || dcpl_cache->pline.nfilters>0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk"); #endif /* H5_HAVE_PARALLEL */ - if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) + if ((ret_value=H5F_contig_writevv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); } /* end if */ else { @@ -1878,7 +1875,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * Lock the chunk, copy from application to chunk, then unlock the * chunk. */ - if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->chunk_size) + if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->u.chunk.size) relax = TRUE; else relax = FALSE; @@ -1939,14 +1936,14 @@ H5F_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); #ifndef NDEBUG - for (u = 0; u < layout->ndims; u++) - assert(layout->dim[u] > 0); + for (u = 0; u < layout->u.chunk.ndims; u++) + assert(layout->u.chunk.dim[u] > 0); #endif - udata.mesg.ndims = layout->ndims; - if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->addr)/*out*/) < 0) + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->u.chunk.addr)/*out*/) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "can't create B-tree"); done: @@ -1982,7 +1979,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) FUNC_ENTER_NOAPI(H5F_istore_allocated, 0); HDmemset(&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree"); @@ -2024,20 +2021,20 @@ H5F_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, FUNC_ENTER_NOAPI_NOINIT(H5F_istore_get_addr); assert(f); - assert(layout && (layout->ndims > 0)); + assert(layout && (layout->u.chunk.ndims > 0)); assert(offset); /* Check for udata struct to return */ udata = (_udata!=NULL ? 
_udata : &tmp_udata); /* Initialize the information about the chunk we are looking for */ - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) udata->key.offset[u] = offset[u]; udata->mesg = *layout; udata->addr = HADDR_UNDEF; /* Go get the chunk information */ - if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->addr, udata)<0) { + if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) { H5E_clear(); HGOTO_ERROR(H5E_BTREE,H5E_NOTFOUND,HADDR_UNDEF,"Can't locate chunk info"); @@ -2195,8 +2192,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, assert(f); assert(space_dim); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(dc_plist); @@ -2262,9 +2259,9 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, * Setup indice to go through all chunks. (Future improvement * should allocate only chunks that have no file space assigned yet. */ - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) chunk_offset[u] = 0; - chunk_size = layout->chunk_size; + chunk_size = layout->u.chunk.size; /* Check the dataset's fill-value status */ if (H5P_is_fill_value_defined(&fill, &fill_status) < 0) @@ -2329,12 +2326,12 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, /* Look for chunk in cache */ for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) { /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { + if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { /* Assume a match */ chunk_exists = 1; - for(u = 0; u < layout->ndims && chunk_exists; u++) { + for(u = 0; u < layout->u.chunk.ndims && chunk_exists; u++) { if(ent->offset[u] != chunk_offset[u]) chunk_exists = 0; /* Reset if no match */ } /* end for */ @@ -2349,11 +2346,11 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, udata.addr = HADDR_UNDEF; H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t); udata.key.nbytes = (size_t)chunk_size; - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) udata.key.offset[u] = chunk_offset[u]; /* Allocate the chunk with all processes */ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0) + if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); /* Check if fill values should be written to blocks */ @@ -2382,8 +2379,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, } /* end if */ /* Increment indices */ - for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) { - chunk_offset[i] += layout->dim[i]; + for (i=layout->u.chunk.ndims-1, carry=1; i>=0 && carry; --i) { + chunk_offset[i] += layout->u.chunk.dim[i]; if (chunk_offset[i] >= (hssize_t)(space_dim[i])) chunk_offset[i] = 0; else @@ -2530,8 +2527,8 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= 
H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(space); /* Go get the rank & dimensions */ @@ -2547,10 +2544,10 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, next = ent->next; /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { + if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { found = 0; - for(u = 0; u < ent->layout.ndims - 1; u++) { + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { if((hsize_t)ent->offset[u] > curr_dims[u]) { found = 1; break; @@ -2561,7 +2558,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, if(found) { #if defined (H5F_ISTORE_DEBUG) HDfputs("cache:remove:[", stdout); - for(u = 0; u < ent->layout.ndims - 1; u++) { + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]); } HDfputs("]\n", stdout); @@ -2580,13 +2577,13 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, HDmemset(&udata, 0, sizeof udata); udata.stream = stdout; - udata.mesg.addr = layout->addr; - udata.mesg.ndims = layout->ndims; - for(u = 0; u < udata.mesg.ndims; u++) - udata.mesg.dim[u] = layout->dim[u]; + udata.mesg.u.chunk.addr = layout->u.chunk.addr; + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + for(u = 0; u < udata.mesg.u.chunk.ndims; u++) + udata.mesg.u.chunk.dim[u] = layout->u.chunk.dim[u]; udata.dims = curr_dims; - if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->addr, &udata) < 0) + if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->u.chunk.addr, &udata) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree"); done: @@ -2628,11 +2625,11 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a FUNC_ENTER_NOAPI_NOINIT(H5F_istore_prune_extent); /* Figure out what chunks are no longer in use for the specified extent and release them */ - for(u = 0; u < bt_udata->mesg.ndims - 1; u++) + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) { #if defined (H5F_ISTORE_DEBUG) HDfputs("b-tree:remove:[", bt_udata->stream); - for(u = 0; u < bt_udata->mesg.ndims - 1; u++) { + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) { HDfprintf(bt_udata->stream, "%s%Hd", u ? 
", " : "", lt_key->offset[u]); } @@ -2644,7 +2641,7 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a udata.mesg = bt_udata->mesg; /* Remove */ - if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.addr, &udata) < 0) + if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.u.chunk.addr, &udata) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_ITER_ERROR, "unable to remove entry"); break; } /* end if */ @@ -2741,6 +2738,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca hsize_t count[H5O_LAYOUT_NDIMS]; /*element count of hyperslab */ hsize_t size[H5O_LAYOUT_NDIMS]; /*current size of dimensions */ H5S_t *space_chunk = NULL; /*dataspace for a chunk */ + hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ int srank; /*current # of dimensions (signed) */ unsigned rank; /*current # of dimensions */ @@ -2757,8 +2755,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(space); /* Get necessary properties from property list */ @@ -2781,10 +2779,12 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca /* Copy current dimensions */ for(u = 0; u < rank; u++) size[u] = curr_dims[u]; - size[u] = layout->dim[u]; + size[u] = layout->u.chunk.dim[u]; /* Create a data space for a chunk & set the extent */ - if(NULL == (space_chunk = H5S_create_simple(rank,layout->dim,NULL))) + for(u = 0; u < rank; u++) + chunk_dims[u] = layout->u.chunk.dim[u]; + if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace"); /* @@ -2792,18 +2792,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * loop through the chunks copying each chunk from the application to the * chunk cache. */ - for(u = 0; u < layout->ndims; u++) { - idx_max[u] = (size[u] - 1) / layout->dim[u] + 1; + for(u = 0; u < layout->u.chunk.ndims; u++) { + idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1; idx_cur[u] = 0; } /* end for */ /* Loop over all chunks */ carry=0; while(carry==0) { - for(u = 0, naccessed = 1; u < layout->ndims; u++) { + for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) { /* The location and size of the chunk being accessed */ - chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->dim[u]); - sub_size[u] = MIN((idx_cur[u] + 1) * layout->dim[u], + chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]); + sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u]) - chunk_offset[u]; naccessed *= sub_size[u]; } /* end for */ @@ -2812,8 +2812,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * Figure out what chunks have to be initialized. 
These are the chunks where the dataspace * extent boundary is within the chunk */ - for(u = 0, found = 0; u < layout->ndims - 1; u++) { - end_chunk = chunk_offset[u] + layout->dim[u]; + for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) { + end_chunk = chunk_offset[u] + layout->u.chunk.dim[u]; if(end_chunk > size[u]) { found = 1; break; @@ -2830,15 +2830,15 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space"); for(u = 0; u < rank; u++) - count[u] = MIN((idx_cur[u] + 1) * layout->dim[u], size[u] - chunk_offset[u]); + count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]); #if defined (H5F_ISTORE_DEBUG) HDfputs("cache:initialize:offset:[", stdout); - for(u = 0; u < layout->ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); HDfputs("]", stdout); HDfputs(":count:[", stdout); - for(u = 0; u < layout->ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]); HDfputs("]\n", stdout); #endif @@ -2861,7 +2861,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca } /*found */ /* Increment indices */ - for(i = layout->ndims - 1, carry = 1; i >= 0 && carry; --i) { + for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) { if(++idx_cur[i] >= idx_max[i]) idx_cur[i] = 0; else @@ -2908,14 +2908,14 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") /* Check if the B-tree has been created in the file */ - if(H5F_addr_defined(layout->addr)) { + if(H5F_addr_defined(layout->u.chunk.addr)) { /* Iterate through the entries in the cache, checking for the chunks to be deleted */ for (ent=rdcc->head; ent; ent=next) { /* Get pointer to next node, in case this one is deleted */ next=ent->next; /* Is the chunk to be deleted this cache entry? 
*/ - if(layout->addr==ent->layout.addr) + if(layout->u.chunk.addr==ent->layout.u.chunk.addr) /* Remove entry without flushing */ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); @@ -2926,7 +2926,7 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) udata.mesg = *layout; /* Delete entire B-tree */ - if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0) + if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree"); } /* end if */ @@ -2962,7 +2962,7 @@ H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, had FUNC_ENTER_NOAPI(H5F_istore_dump_btree, FAIL); HDmemset(&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; udata.stream = stream; if(stream) HDfprintf(stream, " Address: %a\n",addr); @@ -3066,7 +3066,7 @@ H5F_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden FUNC_ENTER_NOAPI(H5F_istore_debug, FAIL); HDmemset (&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; H5B_debug (f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata); diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index b9047a6..8a4abae 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -86,4 +86,11 @@ extern H5D_dxpl_cache_t H5D_def_dxpl_cache; H5_DLL herr_t H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_alloc_t time_alloc, hbool_t update_time, hbool_t full_overwrite); + +/* Testing functions */ +#ifdef H5D_TESTING +H5_DLL herr_t H5D_layout_version_test(hid_t did, unsigned *version); +H5_DLL herr_t H5D_layout_contig_size_test(hid_t did, hsize_t *size); +#endif /* H5D_TESTING */ + #endif /*_H5Dpkg_H*/ diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 7f90465..688d3e3 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -47,7 +47,7 @@ #define H5D_CRT_CHUNK_DIM_DEF 1 /* Definitions for chunk size */ #define H5D_CRT_CHUNK_SIZE_NAME "chunk_size" -#define H5D_CRT_CHUNK_SIZE_SIZE sizeof(hsize_t[H5O_LAYOUT_NDIMS]) +#define H5D_CRT_CHUNK_SIZE_SIZE sizeof(size_t[H5O_LAYOUT_NDIMS]) #define H5D_CRT_CHUNK_SIZE_DEF {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,\ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1} /* Definitions for fill value. 
size=0 means fill value will be 0 as diff --git a/src/H5Dseq.c b/src/H5Dseq.c index a51e554..6fd583c 100644 --- a/src/H5Dseq.c +++ b/src/H5Dseq.c @@ -231,7 +231,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed"); } else { /* Pass along the vector of sequences to read */ - if((ret_value=H5F_contig_readvv(f, layout->chunk_size, layout->addr, + if((ret_value=H5F_contig_readvv(f, layout->u.contig.size, layout->u.contig.addr, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) @@ -340,7 +340,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed"); } else { /* Pass along the vector of sequences to write */ - if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, layout->addr, + if ((ret_value=H5F_contig_writevv(f, layout->u.contig.size, layout->u.contig.addr, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) diff --git a/src/H5Fcompact.c b/src/H5Fcompact.c index 96e9955..4f5b75e 100644 --- a/src/H5Fcompact.c +++ b/src/H5Fcompact.c @@ -70,7 +70,7 @@ H5F_compact_readvv(H5F_t UNUSED *f, const H5O_layout_t *layout, FUNC_ENTER_NOAPI(H5F_compact_readvv, FAIL); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) + if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); done: @@ -112,10 +112,10 @@ H5F_compact_writevv(H5F_t UNUSED *f, H5O_layout_t *layout, FUNC_ENTER_NOAPI(H5F_compact_writevv, FAIL); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value=H5V_memcpyvv(layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) + if((ret_value=H5V_memcpyvv(layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed"); - layout->dirty = TRUE; + layout->u.compact.dirty = TRUE; done: FUNC_LEAVE_NOAPI(ret_value); diff --git a/src/H5Fcontig.c b/src/H5Fcontig.c index 9b1ad40..35b9700 100644 --- a/src/H5Fcontig.c +++ b/src/H5Fcontig.c @@ -31,10 +31,10 @@ #include "H5private.h" /* Generic Functions */ #include "H5Dprivate.h" /* Dataset functions */ #include "H5Eprivate.h" /* Error handling */ -#include "H5Fpkg.h" -#include "H5FDprivate.h" /*file driver */ -#include "H5FLprivate.h" /*Free Lists */ -#include "H5MFprivate.h" /*file memory management */ +#include "H5Fpkg.h" /* Files */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MFprivate.h" /* File memory management */ #include "H5Oprivate.h" /* Object headers */ #include "H5Pprivate.h" /* Property lists */ #include "H5Sprivate.h" /* Dataspace functions */ @@ -46,8 +46,7 @@ #include "H5FDmpiposix.h" /* Private prototypes */ -static herr_t -H5F_contig_write(H5F_t *f, hsize_t max_data, haddr_t addr, +static herr_t H5F_contig_write(H5F_t *f, hsize_t max_data, 
haddr_t addr, const size_t size, hid_t dxpl_id, const void *buf); /* Interface initialization */ @@ -81,8 +80,6 @@ H5FL_BLK_DEFINE_STATIC(zero_fill); herr_t H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout) { - hsize_t size; /* Size of contiguous block of data */ - unsigned u; /* Local index variable */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_contig_create, FAIL); @@ -91,14 +88,8 @@ H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout) assert(f); assert(layout); - /* Compute size */ - size=layout->dim[0]; - for (u = 1; u < layout->ndims; u++) - size *= layout->dim[u]; - assert (size>0); - /* Allocate space for the contiguous data */ - if (HADDR_UNDEF==(layout->addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, size))) + if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size))) HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space"); done: @@ -151,8 +142,8 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, assert(f); assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(layout && H5D_CONTIGUOUS==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(H5F_addr_defined(layout->u.contig.addr)); + assert(layout->u.contig.size>0); assert(space); assert(elmt_size>0); @@ -199,7 +190,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, #endif /* H5_HAVE_PARALLEL */ /* Get the number of elements in the dataset's dataspace */ - snpoints = H5S_get_simple_extent_npoints(space); + snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space); assert(snpoints>=0); H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t); @@ -246,7 +237,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout, } /* end else */ /* Start at the beginning of the dataset */ - addr = layout->addr; + addr = layout->u.contig.addr; /* Loop through writing the fill value to the dataset */ while (npoints>0) { @@ -322,8 +313,6 @@ done: herr_t H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) { - hsize_t size; /* Size of contiguous block of data */ - unsigned u; /* Local index variable */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_contig_delete, FAIL); @@ -332,17 +321,12 @@ H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) assert(f); assert(layout); - /* Compute size */ - size=layout->dim[0]; - for (u = 1; u < layout->ndims; u++) - size *= layout->dim[u]; - /* Check for overlap with the sieve buffer and reset it */ - if (H5F_sieve_overlap_clear(f, dxpl_id, layout->addr, size)<0) + if (H5F_sieve_overlap_clear(f, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to clear sieve buffer"); /* Free the file space for the chunk */ - if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->addr, size)<0) + if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header"); done: diff --git a/src/H5Fistore.c b/src/H5Fistore.c index 43045b1..2775548 100644 --- a/src/H5Fistore.c +++ b/src/H5Fistore.c @@ -252,11 +252,11 @@ H5F_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata) FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_sizeof_rkey); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && 
udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); nbytes = 4 + /*storage size */ 4 + /*filter mask */ - udata->mesg.ndims*8; /*dimension indices */ + udata->mesg.u.chunk.ndims*8; /*dimension indices */ FUNC_LEAVE_NOAPI(nbytes); } @@ -379,7 +379,7 @@ H5F_istore_debug_key (FILE *stream, H5F_t UNUSED *f, hid_t UNUSED dxpl_id, int i "Filter mask:", key->filter_mask); HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:"); - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) HDfprintf (stream, "%s%Hd", u?", ":"", key->offset[u]); HDfputs ("}\n", stream); @@ -423,10 +423,10 @@ H5F_istore_cmp2(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); /* Compare the offsets but ignore the other fields */ - ret_value = H5V_vector_cmp_s(udata->mesg.ndims, lt_key->offset, rt_key->offset); + ret_value = H5V_vector_cmp_s(udata->mesg.u.chunk.ndims, lt_key->offset, rt_key->offset); done: FUNC_LEAVE_NOAPI(ret_value); @@ -476,12 +476,12 @@ H5F_istore_cmp3(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS); - if (H5V_vector_lt_s(udata->mesg.ndims, udata->key.offset, + if (H5V_vector_lt_s(udata->mesg.u.chunk.ndims, udata->key.offset, lt_key->offset)) { ret_value = -1; - } else if (H5V_vector_ge_s(udata->mesg.ndims, udata->key.offset, + } else if (H5V_vector_ge_s(udata->mesg.u.chunk.ndims, udata->key.offset, rt_key->offset)) { ret_value = 1; } @@ -529,7 +529,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, assert(lt_key); assert(rt_key); assert(udata); - assert(udata->mesg.ndims > 0 && udata->mesg.ndims < H5O_LAYOUT_NDIMS); + assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims < H5O_LAYOUT_NDIMS); assert(addr_p); /* Allocate new storage */ @@ -545,7 +545,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, */ lt_key->nbytes = udata->key.nbytes; lt_key->filter_mask = udata->key.filter_mask; - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) lt_key->offset[u] = udata->key.offset[u]; /* @@ -555,12 +555,11 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op, if (H5B_INS_LEFT != op) { rt_key->nbytes = 0; rt_key->filter_mask = 0; - for (u=0; umesg.ndims; u++) { - assert (udata->mesg.dim[u] < HSSIZET_MAX); - assert (udata->key.offset[u]+(hssize_t)(udata->mesg.dim[u]) > + for (u=0; umesg.u.chunk.ndims; u++) { + assert (udata->key.offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]) > udata->key.offset[u]); rt_key->offset[u] = udata->key.offset[u] + - (hssize_t)(udata->mesg.dim[u]); + (hssize_t)(udata->mesg.u.chunk.dim[u]); } } @@ -614,8 +613,8 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void assert(lt_key); /* Is this *really* the requested chunk? 
*/ - for (u=0; umesg.ndims; u++) { - if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.dim[u])) + for (u=0; umesg.u.chunk.ndims; u++) { + if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u])) HGOTO_DONE(FAIL); } @@ -624,7 +623,7 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void udata->key.nbytes = lt_key->nbytes; udata->key.filter_mask = lt_key->filter_mask; assert (lt_key->nbytes>0); - for (u = 0; u < udata->mesg.ndims; u++) + for (u = 0; u < udata->mesg.u.chunk.ndims; u++) udata->key.offset[u] = lt_key->offset[u]; done: @@ -700,7 +699,7 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key, assert("HDF5 INTERNAL ERROR -- see rpm" && 0); HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error"); - } else if (H5V_vector_eq_s (udata->mesg.ndims, + } else if (H5V_vector_eq_s (udata->mesg.u.chunk.ndims, udata->key.offset, lt_key->offset) && lt_key->nbytes>0) { /* @@ -739,20 +738,20 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key, ret_value = H5B_INS_NOOP; } - } else if (H5V_hyper_disjointp(udata->mesg.ndims, - lt_key->offset, udata->mesg.dim, - udata->key.offset, udata->mesg.dim)) { - assert(H5V_hyper_disjointp(udata->mesg.ndims, - rt_key->offset, udata->mesg.dim, - udata->key.offset, udata->mesg.dim)); + } else if (H5V_hyper_disjointp(udata->mesg.u.chunk.ndims, + lt_key->offset, udata->mesg.u.chunk.dim, + udata->key.offset, udata->mesg.u.chunk.dim)) { + assert(H5V_hyper_disjointp(udata->mesg.u.chunk.ndims, + rt_key->offset, udata->mesg.u.chunk.dim, + udata->key.offset, udata->mesg.u.chunk.dim)); /* * Split this node, inserting the new new node to the right of the * current node. The MD_KEY is where the split occurs. */ md_key->nbytes = udata->key.nbytes; md_key->filter_mask = udata->key.filter_mask; - for (u=0; umesg.ndims; u++) { - assert(0 == udata->key.offset[u] % udata->mesg.dim[u]); + for (u=0; umesg.u.chunk.ndims; u++) { + assert(0 == udata->key.offset[u] % udata->mesg.u.chunk.dim[u]); md_key->offset[u] = udata->key.offset[u]; } @@ -851,7 +850,7 @@ H5F_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd } HDfprintf(bt_udata->stream, " 0x%08x %8Zu %10a [", lt_key->filter_mask, lt_key->nbytes, addr); - for (u=0; umesg.ndims; u++) + for (u=0; umesg.u.chunk.ndims; u++) HDfprintf(bt_udata->stream, "%s%Hd", u?", ":"", lt_key->offset[u]); HDfputs("]\n", bt_udata->stream); @@ -940,7 +939,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, udata.key.filter_mask = 0; udata.addr = HADDR_UNDEF; udata.key.nbytes = ent->chunk_size; - for (u=0; ulayout.ndims; u++) + for (u=0; ulayout.u.chunk.ndims; u++) udata.key.offset[u] = ent->offset[u]; alloc = ent->alloc_size; @@ -978,7 +977,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, * Create the chunk it if it doesn't exist, or reallocate the chunk if * its size changed. Then write the data into the file. 
*/ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.addr, &udata)<0) + if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); @@ -1369,22 +1368,22 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); /* Get the chunk's size */ - assert(layout->chunk_size>0); - H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t); + assert(layout->u.chunk.size>0); + H5_ASSIGN_OVERFLOW(chunk_size,layout->u.chunk.size,hsize_t,size_t); /* Search for the chunk in the cache */ if (rdcc->nslots>0) { - for (u=0, temp_idx=0; undims; u++) { + for (u=0, temp_idx=0; uu.chunk.ndims; u++) { temp_idx += offset[u]; - temp_idx *= layout->dim[u]; + temp_idx *= layout->u.chunk.dim[u]; } - temp_idx += (hsize_t)(layout->addr); + temp_idx += (hsize_t)(layout->u.chunk.addr); idx=H5F_HASH(f,temp_idx); ent = rdcc->slot[idx]; - if (ent && layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { - for (u=0, found=TRUE; ulayout.ndims; u++) { + if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + for (u=0, found=TRUE; ulayout.u.chunk.ndims; u++) { if (offset[u]!=ent->offset[u]) { found = FALSE; break; @@ -1512,7 +1511,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con ent->alloc_size = chunk_size; H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout); H5O_copy(H5O_PLINE_ID, pline, &ent->pline); - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) ent->offset[u] = offset[u]; ent->rd_count = chunk_size; ent->wr_count = chunk_size; @@ -1649,10 +1648,10 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, x.dirty = TRUE; H5O_copy (H5O_LAYOUT_ID, layout, &x.layout); H5O_copy (H5O_PLINE_ID, pline, &x.pline); - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) x.offset[u] = offset[u]; - assert(layout->chunk_size>0); - H5_ASSIGN_OVERFLOW(x.chunk_size,layout->chunk_size,hsize_t,size_t); + assert(layout->u.chunk.size>0); + H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t); x.alloc_size = x.chunk_size; x.chunk = chunk; @@ -1713,7 +1712,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp assert(f); assert(dxpl_cache); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); assert(dcpl_cache); assert(chunk_len_arr); assert(chunk_offset_arr); @@ -1722,19 +1721,19 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp assert(buf); #ifndef NDEBUG - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK HDfprintf(stderr,"%s: chunk_coords={",FUNC); -for(u=0; undims; u++) - HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n")); +for(u=0; uu.chunk.ndims; u++) + HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? 
", " : "}\n")); #endif /* QAK */ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); #ifdef QAK -HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size); +HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]); @@ -1747,9 +1746,9 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * for the chunk has been defined, then don't load the chunk into the * cache, just write the data to it directly. */ - if (layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && + if (layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && chunk_addr!=HADDR_UNDEF) { - if ((ret_value=H5F_contig_readvv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) + if ((ret_value=H5F_contig_readvv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file"); } /* end if */ else { @@ -1817,7 +1816,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, assert(f); assert(dxpl_cache); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); assert(dcpl_cache); assert(chunk_len_arr); assert(chunk_offset_arr); @@ -1826,19 +1825,19 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, assert(buf); #ifndef NDEBUG - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ #ifdef QAK HDfprintf(stderr,"%s: chunk_coords={",FUNC); -for(u=0; undims; u++) - HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n")); +for(u=0; uu.chunk.ndims; u++) + HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); #ifdef QAK -HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size); +HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]); HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]); @@ -1856,16 +1855,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * writing to other elements in the same chunk. Do a direct * write-through of only the elements requested. 
*/ - if ((layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && - chunk_addr!=HADDR_UNDEF) - || ((IS_H5FD_MPIO(f) ||IS_H5FD_MPIPOSIX(f) || IS_H5FD_FPHDF5(f)) && - (H5F_ACC_RDWR & f->shared->flags))) { + if ((layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nfilters==0 && chunk_addr!=HADDR_UNDEF) + || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & f->shared->flags))) { #ifdef H5_HAVE_PARALLEL /* Additional sanity check when operating in parallel */ if (chunk_addr==HADDR_UNDEF || dcpl_cache->pline.nfilters>0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk"); #endif /* H5_HAVE_PARALLEL */ - if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) + if ((ret_value=H5F_contig_writevv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file"); } /* end if */ else { @@ -1878,7 +1875,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * Lock the chunk, copy from application to chunk, then unlock the * chunk. */ - if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->chunk_size) + if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->u.chunk.size) relax = TRUE; else relax = FALSE; @@ -1939,14 +1936,14 @@ H5F_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ ) /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); #ifndef NDEBUG - for (u = 0; u < layout->ndims; u++) - assert(layout->dim[u] > 0); + for (u = 0; u < layout->u.chunk.ndims; u++) + assert(layout->u.chunk.dim[u] > 0); #endif - udata.mesg.ndims = layout->ndims; - if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->addr)/*out*/) < 0) + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->u.chunk.addr)/*out*/) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "can't create B-tree"); done: @@ -1982,7 +1979,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) FUNC_ENTER_NOAPI(H5F_istore_allocated, 0); HDmemset(&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree"); @@ -2024,20 +2021,20 @@ H5F_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, FUNC_ENTER_NOAPI_NOINIT(H5F_istore_get_addr); assert(f); - assert(layout && (layout->ndims > 0)); + assert(layout && (layout->u.chunk.ndims > 0)); assert(offset); /* Check for udata struct to return */ udata = (_udata!=NULL ? 
_udata : &tmp_udata); /* Initialize the information about the chunk we are looking for */ - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) udata->key.offset[u] = offset[u]; udata->mesg = *layout; udata->addr = HADDR_UNDEF; /* Go get the chunk information */ - if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->addr, udata)<0) { + if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) { H5E_clear(); HGOTO_ERROR(H5E_BTREE,H5E_NOTFOUND,HADDR_UNDEF,"Can't locate chunk info"); @@ -2195,8 +2192,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, assert(f); assert(space_dim); assert(layout && H5D_CHUNKED==layout->type); - assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER)); assert(dc_plist); @@ -2262,9 +2259,9 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, * Setup indice to go through all chunks. (Future improvement * should allocate only chunks that have no file space assigned yet. */ - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) chunk_offset[u] = 0; - chunk_size = layout->chunk_size; + chunk_size = layout->u.chunk.size; /* Check the dataset's fill-value status */ if (H5P_is_fill_value_defined(&fill, &fill_status) < 0) @@ -2329,12 +2326,12 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, /* Look for chunk in cache */ for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) { /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { + if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { /* Assume a match */ chunk_exists = 1; - for(u = 0; u < layout->ndims && chunk_exists; u++) { + for(u = 0; u < layout->u.chunk.ndims && chunk_exists; u++) { if(ent->offset[u] != chunk_offset[u]) chunk_exists = 0; /* Reset if no match */ } /* end for */ @@ -2349,11 +2346,11 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, udata.addr = HADDR_UNDEF; H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t); udata.key.nbytes = (size_t)chunk_size; - for (u=0; undims; u++) + for (u=0; uu.chunk.ndims; u++) udata.key.offset[u] = chunk_offset[u]; /* Allocate the chunk with all processes */ - if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0) + if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk"); /* Check if fill values should be written to blocks */ @@ -2382,8 +2379,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, } /* end if */ /* Increment indices */ - for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) { - chunk_offset[i] += layout->dim[i]; + for (i=layout->u.chunk.ndims-1, carry=1; i>=0 && carry; --i) { + chunk_offset[i] += layout->u.chunk.dim[i]; if (chunk_offset[i] >= (hssize_t)(space_dim[i])) chunk_offset[i] = 0; else @@ -2530,8 +2527,8 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= 
H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(space); /* Go get the rank & dimensions */ @@ -2547,10 +2544,10 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, next = ent->next; /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->ndims==ent->layout.ndims && - H5F_addr_eq(layout->addr, ent->layout.addr)) { + if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && + H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { found = 0; - for(u = 0; u < ent->layout.ndims - 1; u++) { + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { if((hsize_t)ent->offset[u] > curr_dims[u]) { found = 1; break; @@ -2561,7 +2558,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, if(found) { #if defined (H5F_ISTORE_DEBUG) HDfputs("cache:remove:[", stdout); - for(u = 0; u < ent->layout.ndims - 1; u++) { + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]); } HDfputs("]\n", stdout); @@ -2580,13 +2577,13 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, HDmemset(&udata, 0, sizeof udata); udata.stream = stdout; - udata.mesg.addr = layout->addr; - udata.mesg.ndims = layout->ndims; - for(u = 0; u < udata.mesg.ndims; u++) - udata.mesg.dim[u] = layout->dim[u]; + udata.mesg.u.chunk.addr = layout->u.chunk.addr; + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + for(u = 0; u < udata.mesg.u.chunk.ndims; u++) + udata.mesg.u.chunk.dim[u] = layout->u.chunk.dim[u]; udata.dims = curr_dims; - if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->addr, &udata) < 0) + if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->u.chunk.addr, &udata) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree"); done: @@ -2628,11 +2625,11 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a FUNC_ENTER_NOAPI_NOINIT(H5F_istore_prune_extent); /* Figure out what chunks are no longer in use for the specified extent and release them */ - for(u = 0; u < bt_udata->mesg.ndims - 1; u++) + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) { #if defined (H5F_ISTORE_DEBUG) HDfputs("b-tree:remove:[", bt_udata->stream); - for(u = 0; u < bt_udata->mesg.ndims - 1; u++) { + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) { HDfprintf(bt_udata->stream, "%s%Hd", u ? 
", " : "", lt_key->offset[u]); } @@ -2644,7 +2641,7 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a udata.mesg = bt_udata->mesg; /* Remove */ - if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.addr, &udata) < 0) + if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.u.chunk.addr, &udata) < 0) HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_ITER_ERROR, "unable to remove entry"); break; } /* end if */ @@ -2741,6 +2738,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca hsize_t count[H5O_LAYOUT_NDIMS]; /*element count of hyperslab */ hsize_t size[H5O_LAYOUT_NDIMS]; /*current size of dimensions */ H5S_t *space_chunk = NULL; /*dataspace for a chunk */ + hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ int srank; /*current # of dimensions (signed) */ unsigned rank; /*current # of dimensions */ @@ -2757,8 +2755,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca /* Check args */ assert(f); assert(layout && H5D_CHUNKED == layout->type); - assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS); - assert(H5F_addr_defined(layout->addr)); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + assert(H5F_addr_defined(layout->u.chunk.addr)); assert(space); /* Get necessary properties from property list */ @@ -2781,10 +2779,12 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca /* Copy current dimensions */ for(u = 0; u < rank; u++) size[u] = curr_dims[u]; - size[u] = layout->dim[u]; + size[u] = layout->u.chunk.dim[u]; /* Create a data space for a chunk & set the extent */ - if(NULL == (space_chunk = H5S_create_simple(rank,layout->dim,NULL))) + for(u = 0; u < rank; u++) + chunk_dims[u] = layout->u.chunk.dim[u]; + if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace"); /* @@ -2792,18 +2792,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * loop through the chunks copying each chunk from the application to the * chunk cache. */ - for(u = 0; u < layout->ndims; u++) { - idx_max[u] = (size[u] - 1) / layout->dim[u] + 1; + for(u = 0; u < layout->u.chunk.ndims; u++) { + idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1; idx_cur[u] = 0; } /* end for */ /* Loop over all chunks */ carry=0; while(carry==0) { - for(u = 0, naccessed = 1; u < layout->ndims; u++) { + for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) { /* The location and size of the chunk being accessed */ - chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->dim[u]); - sub_size[u] = MIN((idx_cur[u] + 1) * layout->dim[u], + chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]); + sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u]) - chunk_offset[u]; naccessed *= sub_size[u]; } /* end for */ @@ -2812,8 +2812,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca * Figure out what chunks have to be initialized. 
These are the chunks where the dataspace * extent boundary is within the chunk */ - for(u = 0, found = 0; u < layout->ndims - 1; u++) { - end_chunk = chunk_offset[u] + layout->dim[u]; + for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) { + end_chunk = chunk_offset[u] + layout->u.chunk.dim[u]; if(end_chunk > size[u]) { found = 1; break; @@ -2830,15 +2830,15 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space"); for(u = 0; u < rank; u++) - count[u] = MIN((idx_cur[u] + 1) * layout->dim[u], size[u] - chunk_offset[u]); + count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]); #if defined (H5F_ISTORE_DEBUG) HDfputs("cache:initialize:offset:[", stdout); - for(u = 0; u < layout->ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); HDfputs("]", stdout); HDfputs(":count:[", stdout); - for(u = 0; u < layout->ndims - 1; u++) + for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]); HDfputs("]\n", stdout); #endif @@ -2861,7 +2861,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca } /*found */ /* Increment indices */ - for(i = layout->ndims - 1, carry = 1; i >= 0 && carry; --i) { + for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) { if(++idx_cur[i] >= idx_max[i]) idx_cur[i] = 0; else @@ -2908,14 +2908,14 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") /* Check if the B-tree has been created in the file */ - if(H5F_addr_defined(layout->addr)) { + if(H5F_addr_defined(layout->u.chunk.addr)) { /* Iterate through the entries in the cache, checking for the chunks to be deleted */ for (ent=rdcc->head; ent; ent=next) { /* Get pointer to next node, in case this one is deleted */ next=ent->next; /* Is the chunk to be deleted this cache entry? 
*/ - if(layout->addr==ent->layout.addr) + if(layout->u.chunk.addr==ent->layout.u.chunk.addr) /* Remove entry without flushing */ if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); @@ -2926,7 +2926,7 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) udata.mesg = *layout; /* Delete entire B-tree */ - if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0) + if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree"); } /* end if */ @@ -2962,7 +2962,7 @@ H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, had FUNC_ENTER_NOAPI(H5F_istore_dump_btree, FAIL); HDmemset(&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; udata.stream = stream; if(stream) HDfprintf(stream, " Address: %a\n",addr); @@ -3066,7 +3066,7 @@ H5F_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden FUNC_ENTER_NOAPI(H5F_istore_debug, FAIL); HDmemset (&udata, 0, sizeof udata); - udata.mesg.ndims = ndims; + udata.mesg.u.chunk.ndims = ndims; H5B_debug (f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata); diff --git a/src/H5Fseq.c b/src/H5Fseq.c index a51e554..6fd583c 100644 --- a/src/H5Fseq.c +++ b/src/H5Fseq.c @@ -231,7 +231,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed"); } else { /* Pass along the vector of sequences to read */ - if((ret_value=H5F_contig_readvv(f, layout->chunk_size, layout->addr, + if((ret_value=H5F_contig_readvv(f, layout->u.contig.size, layout->u.contig.addr, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) @@ -340,7 +340,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed"); } else { /* Pass along the vector of sequences to write */ - if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, layout->addr, + if ((ret_value=H5F_contig_writevv(f, layout->u.contig.size, layout->u.contig.addr, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0) diff --git a/src/H5Oattr.c b/src/H5Oattr.c index f5bd52b..2ae0278 100644 --- a/src/H5Oattr.c +++ b/src/H5Oattr.c @@ -16,6 +16,10 @@ #define H5O_PACKAGE /*suppress error about including H5Opkg */ #define H5S_PACKAGE /*suppress error about including H5Spkg */ +/* Pablo information */ +/* (Put before include files to avoid problems with inline functions) */ +#define PABLO_MASK H5O_attr_mask + #include "H5private.h" /* Generic Functions */ #include "H5Apkg.h" /* Attributes */ #include "H5Eprivate.h" /* Error handling */ @@ -25,8 +29,6 @@ #include "H5Opkg.h" /* Object headers */ #include "H5Spkg.h" /* Dataspaces */ -#define PABLO_MASK H5O_attr_mask - /* PRIVATE PROTOTYPES */ static herr_t H5O_attr_encode (H5F_t *f, uint8_t *p, const void *mesg); static void *H5O_attr_decode (H5F_t *f, hid_t dxpl_id, const uint8_t *p, H5O_shared_t *sh); @@ -201,13 +203,14 @@ H5O_attr_decode(H5F_t *f, hid_t dxpl_id, const uint8_t *p, H5O_shared_t UNUSED * /* Default to entire dataspace being selected */ if(H5S_select_all(attr->ds,0)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSET, NULL, "unable to set all selection"); + 
if(version < H5O_ATTR_VERSION_NEW) p += H5O_ALIGN(attr->ds_size); else p += attr->ds_size; /* Compute the size of the data */ - H5_ASSIGN_OVERFLOW(attr->data_size,H5S_get_simple_extent_npoints(attr->ds)*H5T_get_size(attr->dt),hsize_t,size_t); + H5_ASSIGN_OVERFLOW(attr->data_size,H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds)*H5T_get_size(attr->dt),hsize_t,size_t); /* Go get the data */ if (NULL==(attr->data = H5MM_malloc(attr->data_size))) @@ -262,7 +265,7 @@ H5O_attr_encode(H5F_t *f, uint8_t *p, const void *mesg) size_t name_len; /* Attribute name length */ unsigned version; /* Attribute version */ hbool_t type_shared; /* Flag to indicate that a shared datatype is used for this attribute */ - herr_t ret_value=SUCCEED; /* Return value */ + herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5O_attr_encode, FAIL); diff --git a/src/H5Olayout.c b/src/H5Olayout.c index 5dca442..15013ee 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -58,9 +58,11 @@ const H5O_class_t H5O_LAYOUT[1] = {{ }}; /* For forward and backward compatibility. Version is 1 when space is - * allocated; 2 when space is delayed for allocation. */ + * allocated; 2 when space is delayed for allocation; 3 + * is revised to just store information needed for each storage type. */ #define H5O_LAYOUT_VERSION_1 1 #define H5O_LAYOUT_VERSION_2 2 +#define H5O_LAYOUT_VERSION_3 3 /* Interface initialization */ #define PABLO_MASK H5O_layout_mask @@ -98,7 +100,6 @@ static void * H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, const uint8_t *p, H5O_shared_t UNUSED *sh) { H5O_layout_t *mesg = NULL; - int version; unsigned u; void *ret_value; /* Return value */ @@ -114,44 +115,108 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, const uint8_t *p, H5O_shared_t HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed"); /* Version. 
1 when space allocated; 2 when space allocation is delayed */ - version = *p++; - if (version!=H5O_LAYOUT_VERSION_1 && version!=H5O_LAYOUT_VERSION_2) + mesg->version = *p++; + if (mesg->version<H5O_LAYOUT_VERSION_1 || mesg->version>H5O_LAYOUT_VERSION_3) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for layout message"); - /* Dimensionality */ - mesg->ndims = *p++; - if (mesg->ndims>H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large"); - - /* Layout class */ - mesg->type = *p++; - assert(H5D_CONTIGUOUS == mesg->type || H5D_CHUNKED == mesg->type || H5D_COMPACT == mesg->type); - - /* Reserved bytes */ - p += 5; - - /* Address */ - if(mesg->type!=H5D_COMPACT) - H5F_addr_decode(f, &p, &(mesg->addr)); - - /* Read the size */ - for (u = 0; u < mesg->ndims; u++) - UINT32DECODE(p, mesg->dim[u]); - - if(mesg->type == H5D_COMPACT) { - UINT32DECODE(p, mesg->size); - if(mesg->size > 0) { - if(NULL==(mesg->buf=H5MM_malloc(mesg->size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value"); - HDmemcpy(mesg->buf, p, mesg->size); - p += mesg->size; + if(mesg->version < H5O_LAYOUT_VERSION_3) { + unsigned ndims; /* Num dimensions in chunk */ + + /* Dimensionality */ + ndims = *p++; + if (ndims>H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large"); + + /* Layout class */ + mesg->type = *p++; + assert(H5D_CONTIGUOUS == mesg->type || H5D_CHUNKED == mesg->type || H5D_COMPACT == mesg->type); + + /* Reserved bytes */ + p += 5; + + /* Address */ + if(mesg->type==H5D_CONTIGUOUS) + H5F_addr_decode(f, &p, &(mesg->u.contig.addr)); + else if(mesg->type==H5D_CHUNKED) + H5F_addr_decode(f, &p, &(mesg->u.chunk.addr)); + + /* Read the size */ + if(mesg->type!=H5D_CHUNKED) { + mesg->unused.ndims=ndims; + + for (u = 0; u < ndims; u++) + UINT32DECODE(p, mesg->unused.dim[u]); + + /* Don't compute size of contiguous storage here, due to possible + * truncation of the dimension sizes when they were stored in this + * version of the layout message. Compute the contiguous storage + * size in the dataset code, where we've got the dataspace + * information available also. 
- QAK 5/26/04 + */ + } /* end if */ + else { + mesg->u.chunk.ndims=ndims; + for (u = 0; u < ndims; u++) + UINT32DECODE(p, mesg->u.chunk.dim[u]); + + /* Compute chunk size */ + for (u=1, mesg->u.chunk.size=mesg->u.chunk.dim[0]; u<ndims; u++) + mesg->u.chunk.size *= mesg->u.chunk.dim[u]; + } /* end if */ + + if(mesg->type == H5D_COMPACT) { + UINT32DECODE(p, mesg->u.compact.size); + if(mesg->u.compact.size > 0) { + if(NULL==(mesg->u.compact.buf=H5MM_malloc(mesg->u.compact.size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for compact data buffer"); + HDmemcpy(mesg->u.compact.buf, p, mesg->u.compact.size); + p += mesg->u.compact.size; + } } - } - else if(mesg->type == H5D_CHUNKED || mesg->type == H5D_CONTIGUOUS) { - /* Compute chunk size */ - for (u=1, mesg->chunk_size=mesg->dim[0]; u<mesg->ndims; u++) - mesg->chunk_size *= mesg->dim[u]; } /* end if */ + else { + /* Layout class */ + mesg->type = *p++; + + /* Interpret the rest of the message according to the layout class */ + switch(mesg->type) { + case H5D_CONTIGUOUS: + H5F_addr_decode(f, &p, &(mesg->u.contig.addr)); + H5F_DECODE_LENGTH(f, p, mesg->u.contig.size); + break; + + case H5D_CHUNKED: + /* Dimensionality */ + mesg->u.chunk.ndims = *p++; + if (mesg->u.chunk.ndims>H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large"); + + /* B-tree address */ + H5F_addr_decode(f, &p, &(mesg->u.chunk.addr)); + + /* Chunk dimensions */ + for (u = 0; u < mesg->u.chunk.ndims; u++) + UINT32DECODE(p, mesg->u.chunk.dim[u]); + + /* Compute chunk size */ + for (u=1, mesg->u.chunk.size=mesg->u.chunk.dim[0]; u<mesg->u.chunk.ndims; u++) + mesg->u.chunk.size *= mesg->u.chunk.dim[u]; + break; + + case H5D_COMPACT: + UINT16DECODE(p, mesg->u.compact.size); + if(mesg->u.compact.size > 0) { + if(NULL==(mesg->u.compact.buf=H5MM_malloc(mesg->u.compact.size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for compact data buffer"); + HDmemcpy(mesg->u.compact.buf, p, mesg->u.compact.size); + p += mesg->u.compact.size; + } /* end if */ + break; + + default: + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "Invalid layout class"); + } /* end switch */ + } /* end else */ /* Set return value */ ret_value=mesg; @@ -197,45 +262,87 @@ H5O_layout_encode(H5F_t *f, uint8_t *p, const void *_mesg) /* check args */ assert(f); assert(mesg); - assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS); + assert(mesg->version>0); assert(p); - /* Version: 1 when space allocated; 2 when space allocation is delayed */ - if(mesg->type==H5D_CONTIGUOUS) { - if(mesg->addr==HADDR_UNDEF) - *p++ = H5O_LAYOUT_VERSION_2; - else - *p++ = H5O_LAYOUT_VERSION_1; - } else if(mesg->type==H5D_COMPACT) { - *p++ = H5O_LAYOUT_VERSION_2; - } else - *p++ = H5O_LAYOUT_VERSION_1; - - /* number of dimensions */ - *p++ = mesg->ndims; - - /* layout class */ - *p++ = mesg->type; - - /* reserved bytes should be zero */ - for (u=0; u<5; u++) - *p++ = 0; - - /* data or B-tree address */ - if(mesg->type!=H5D_COMPACT) - H5F_addr_encode(f, &p, mesg->addr); - - /* dimension size */ - for (u = 0; u < mesg->ndims; u++) - UINT32ENCODE(p, mesg->dim[u]); - - if(mesg->type==H5D_COMPACT) { - UINT32ENCODE(p, mesg->size); - if(mesg->size>0 && mesg->buf) { - HDmemcpy(p, mesg->buf, mesg->size); - p += mesg->size; + /* Version */ + *p++ = mesg->version; + + /* Check for which information to write */ + if(mesg->version<3) { + /* number of dimensions */ + assert(mesg->unused.ndims > 0 && mesg->unused.ndims <= H5O_LAYOUT_NDIMS); + *p++ = mesg->unused.ndims; + + /* layout class */ + 
*p++ = mesg->type; + + /* reserved bytes should be zero */ + for (u=0; u<5; u++) + *p++ = 0; + + /* data or B-tree address */ + if(mesg->type==H5D_CONTIGUOUS) + H5F_addr_encode(f, &p, mesg->u.contig.addr); + else if(mesg->type==H5D_CHUNKED) + H5F_addr_encode(f, &p, mesg->u.chunk.addr); + + /* dimension size */ + if(mesg->type!=H5D_CHUNKED) + for (u = 0; u < mesg->unused.ndims; u++) + UINT32ENCODE(p, mesg->unused.dim[u]) + else + for (u = 0; u < mesg->u.chunk.ndims; u++) + UINT32ENCODE(p, mesg->u.chunk.dim[u]); + + if(mesg->type==H5D_COMPACT) { + UINT32ENCODE(p, mesg->u.compact.size); + if(mesg->u.compact.size>0 && mesg->u.compact.buf) { + HDmemcpy(p, mesg->u.compact.buf, mesg->u.compact.size); + p += mesg->u.compact.size; + } } - } + } /* end if */ + else { + /* Layout class */ + *p++ = mesg->type; + + /* Write out layout class specific information */ + switch(mesg->type) { + case H5D_CONTIGUOUS: + H5F_addr_encode(f, &p, mesg->u.contig.addr); + H5F_ENCODE_LENGTH(f, p, mesg->u.contig.size); + break; + + case H5D_CHUNKED: + /* Number of dimensions */ + assert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + *p++ = mesg->u.chunk.ndims; + + /* B-tree address */ + H5F_addr_encode(f, &p, mesg->u.chunk.addr); + + /* Dimension sizes */ + for (u = 0; u < mesg->u.chunk.ndims; u++) + UINT32ENCODE(p, mesg->u.chunk.dim[u]); + break; + + case H5D_COMPACT: + /* Size of raw data */ + UINT16ENCODE(p, mesg->u.compact.size); + + /* Raw data */ + if(mesg->u.compact.size>0 && mesg->u.compact.buf) { + HDmemcpy(p, mesg->u.compact.buf, mesg->u.compact.size); + p += mesg->u.compact.size; + } /* end if */ + break; + + default: + HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "Invalid layout class"); + break; + } /* end switch */ + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value); @@ -279,11 +386,11 @@ H5O_layout_copy(const void *_mesg, void *_dest) /* Deep copy the buffer for compact datasets also */ if(mesg->type==H5D_COMPACT) { /* Allocate memory for the raw data */ - if (NULL==(dest->buf=H5MM_malloc(dest->size))) + if (NULL==(dest->u.compact.buf=H5MM_malloc(dest->u.compact.size))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate memory for compact dataset"); /* Copy over the raw data */ - HDmemcpy(dest->buf,mesg->buf,dest->size); + HDmemcpy(dest->u.compact.buf,mesg->u.compact.buf,dest->u.compact.size); } /* end if */ /* Set return value */ @@ -315,7 +422,8 @@ done: size_t H5O_layout_meta_size(H5F_t *f, const void *_mesg) { - const H5O_layout_t *mesg = (const H5O_layout_t *) _mesg; + /* Casting away const OK - QAK */ + H5O_layout_t *mesg = (H5O_layout_t *) _mesg; size_t ret_value; FUNC_ENTER_NOAPI(H5O_layout_meta_size, 0); @@ -323,18 +431,80 @@ H5O_layout_meta_size(H5F_t *f, const void *_mesg) /* check args */ assert(f); assert(mesg); - assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS); - ret_value = 1 + /* Version number */ - 1 + /* layout class type */ - 1 + /* dimensionality */ - 5 + /* reserved bytes */ - mesg->ndims * 4; /* size of each dimension */ + /* Check version information for new datasets */ + if(mesg->version==0) { + unsigned u; + + /* Check for dimension that would be truncated */ + assert(mesg->unused.ndims > 0 && mesg->unused.ndims <= H5O_LAYOUT_NDIMS); + for (u = 0; u < mesg->unused.ndims; u++) + if(mesg->unused.dim[u]!=(0xffffffff&mesg->unused.dim[u])) { + /* Make certain the message is encoded with the new version */ + mesg->version=3; + break; + } /* end if */ + + /* If the message doesn't _have_ to be encoded with the new version */ + 
if(mesg->version==0) { + /* Version: 1 when space allocated; 2 when space allocation is delayed */ + if(mesg->type==H5D_CONTIGUOUS) { + if(mesg->u.contig.addr==HADDR_UNDEF) + mesg->version = H5O_LAYOUT_VERSION_2; + else + mesg->version = H5O_LAYOUT_VERSION_1; + } else if(mesg->type==H5D_COMPACT) { + mesg->version = H5O_LAYOUT_VERSION_2; + } else + mesg->version = H5O_LAYOUT_VERSION_1; + } /* end if */ + } /* end if */ + assert(mesg->version>0); - if(mesg->type==H5D_COMPACT) - ret_value += 4; /* size field for compact dataset */ - else - ret_value += H5F_SIZEOF_ADDR(f); /* file address of data or B-tree for chunked dataset */ + if(mesg->version<H5O_LAYOUT_VERSION_3) { + ret_value = 1 + /* Version number */ + 1 + /* layout class type */ + 1 + /* dimensionality */ + 5 + /* reserved bytes */ + mesg->unused.ndims * 4; /* size of each dimension */ + + if(mesg->type==H5D_COMPACT) + ret_value += 4; /* size field for compact dataset */ + else + ret_value += H5F_SIZEOF_ADDR(f); /* file address of data or B-tree for chunked dataset */ + } /* end if */ + else { + ret_value = 1 + /* Version number */ + 1; /* layout class type */ + + switch(mesg->type) { + case H5D_CONTIGUOUS: + ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */ + ret_value += H5F_SIZEOF_SIZE(f); /* Length of data */ + break; + + case H5D_CHUNKED: + /* Number of dimensions (1 byte) */ + assert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + ret_value++; + + /* B-tree address */ + ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */ + + /* Dimension sizes */ + ret_value += mesg->u.chunk.ndims*4; + break; + + case H5D_COMPACT: + /* Size of raw data */ + ret_value+=2; + break; + + default: + HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, 0, "Invalid layout class"); + break; + } /* end switch */ + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value); @@ -370,11 +540,10 @@ H5O_layout_size(H5F_t *f, const void *_mesg) /* check args */ assert(f); assert(mesg); - assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS); ret_value = H5O_layout_meta_size(f, mesg); if(mesg->type==H5D_COMPACT) - ret_value += mesg->size;/* data for compact dataset */ + ret_value += mesg->u.compact.size;/* data for compact dataset */ done: FUNC_LEAVE_NOAPI(ret_value); } @@ -406,10 +575,11 @@ H5O_layout_reset (void *_mesg) if(mesg) { /* Free the compact storage buffer */ if(mesg->type==H5D_COMPACT) - mesg->buf=H5MM_xfree(mesg->buf); + mesg->u.compact.buf=H5MM_xfree(mesg->u.compact.buf); /* Reset the message */ mesg->type=H5D_CONTIGUOUS; + mesg->version=0; } /* end if */ done: @@ -443,7 +613,7 @@ H5O_layout_free (void *_mesg) /* Free the compact storage buffer */ if(mesg->type==H5D_COMPACT) - mesg->buf=H5MM_xfree(mesg->buf); + mesg->u.compact.buf=H5MM_xfree(mesg->u.compact.buf); H5FL_FREE(H5O_layout_t,mesg); @@ -536,21 +706,30 @@ H5O_layout_debug(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_mesg, FILE assert(indent >= 0); assert(fwidth >= 0); - HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, - H5D_CHUNKED == mesg->type ? "B-tree address:" : "Data address:", - mesg->addr); - - HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth, - "Number of dimensions:", - (unsigned long) (mesg->ndims)); - - /* Size */ - HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Size:"); - for (u = 0; u < mesg->ndims; u++) { - HDfprintf(stream, "%s%lu", u ? 
", " : "", - (unsigned long) (mesg->dim[u])); - } - HDfprintf(stream, "}\n"); + if(mesg->type==H5D_CHUNKED) { + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, + "B-tree address:", mesg->u.chunk.addr); + HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth, + "Number of dimensions:", + (unsigned long) (mesg->u.chunk.ndims)); + /* Size */ + HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Size:"); + for (u = 0; u < mesg->u.chunk.ndims; u++) { + HDfprintf(stream, "%s%lu", u ? ", " : "", + (unsigned long) (mesg->u.chunk.dim[u])); + } + HDfprintf(stream, "}\n"); + } /* end if */ + else if(mesg->type==H5D_CONTIGUOUS) { + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, + "Data address:", mesg->u.contig.addr); + HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth, + "Data Size:", mesg->u.contig.size); + } /* end if */ + else { + HDfprintf(stream, "%*s%-*s %Zu\n", indent, "", fwidth, + "Data Size:", mesg->u.compact.size); + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value); diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 6dd8a79..574c516 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -48,17 +48,20 @@ /* Flags which are part of a message */ #define H5O_FLAG_CONSTANT 0x01u #define H5O_FLAG_SHARED 0x02u -#define H5O_FLAG_BITS 0x03u +#define H5O_FLAG_BITS (H5O_FLAG_CONSTANT|H5O_FLAG_SHARED) /* Header message IDs */ #define H5O_NULL_ID 0x0000 /* Null Message. */ #define H5O_SDSPACE_ID 0x0001 /* Simple Dataspace Message. */ +/* Complex dataspace is/was planned for message 0x0002 */ #define H5O_DTYPE_ID 0x0003 /* Datatype Message. */ #define H5O_FILL_ID 0x0004 /* Fill Value Message. (Old) */ #define H5O_FILL_NEW_ID 0x0005 /* Fill Value Message. (New) */ +/* Compact data storage is/was planned for message 0x0006 */ #define H5O_EFL_ID 0x0007 /* External File List Message */ #define H5O_LAYOUT_ID 0x0008 /* Data Storage Layout Message. */ #define H5O_BOGUS_ID 0x0009 /* "Bogus" Message. */ +/* message 0x000a appears unused... */ #define H5O_PLINE_ID 0x000b /* Filter pipeline message. */ #define H5O_ATTR_ID 0x000c /* Attribute Message. */ #define H5O_NAME_ID 0x000d /* Object name message. */ @@ -68,7 +71,6 @@ #define H5O_STAB_ID 0x0011 /* Symbol table message. */ #define H5O_MTIME_NEW_ID 0x0012 /* Modification time message. (New) */ - /* * Fill Value Message. 
(Old) * (Data structure in memory) @@ -102,7 +104,7 @@ typedef struct H5O_fill_new_t { #define H5O_EFL_UNLIMITED H5F_UNLIMITED /*max possible file size */ typedef struct H5O_efl_entry_t { - size_t name_offset; /*offset of name within heap */ + size_t name_offset; /*offset of name within heap */ char *name; /*malloc'd name */ off_t offset; /*offset of data within file */ hsize_t size; /*size allocated within file */ @@ -121,15 +123,37 @@ typedef struct H5O_efl_t { */ #define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1) +typedef struct H5O_layout_contig_t { + haddr_t addr; /* File address of data */ + hsize_t size; /* Size of data in bytes */ +} H5O_layout_contig_t; + +typedef struct H5O_layout_chunk_t { + haddr_t addr; /* File address of B-tree */ + unsigned ndims; /* Num dimensions in chunk */ + size_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */ + size_t size; /* Size of chunk in bytes */ +} H5O_layout_chunk_t; + +typedef struct H5O_layout_compact_t { + hbool_t dirty; /* Dirty flag for compact dataset */ + size_t size; /* Size of buffer in bytes */ + void *buf; /* Buffer for compact dataset */ +} H5O_layout_compact_t; + typedef struct H5O_layout_t { - int type; /*type of layout, H5D_layout_t */ - haddr_t addr; /*file address of data or B-tree */ - unsigned ndims; /*num dimensions in stored data */ - hsize_t dim[H5O_LAYOUT_NDIMS]; /*size of data or chunk in bytes */ - hsize_t chunk_size; /*size of chunk in bytes */ - hbool_t dirty; /*dirty flag for compact dataset */ - size_t size; /*size of compact dataset in bytes */ - void *buf; /*buffer for compact dataset */ + H5D_layout_t type; /* Type of layout */ + unsigned version; /* Version of message */ + /* Structure for "unused" dimension information */ + struct { + unsigned ndims; /*num dimensions in stored data */ + hsize_t dim[H5O_LAYOUT_NDIMS]; /*size of data or chunk in bytes */ + } unused; + union { + H5O_layout_contig_t contig; /* Information for contiguous layout */ + H5O_layout_chunk_t chunk; /* Information for chunked layout */ + H5O_layout_compact_t compact; /* Information for compact layout */ + } u; } H5O_layout_t; /* Enable reading/writing "bogus" messages */ diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c index 1906fb9..cafd036 100644 --- a/src/H5Pdcpl.c +++ b/src/H5Pdcpl.c @@ -151,7 +151,7 @@ herr_t H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/]) { int i; - hsize_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */ + size_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */ H5D_layout_t layout; H5P_genplist_t *plist; /* Property list pointer */ herr_t ret_value=SUCCEED; /* return value */ @@ -172,12 +172,14 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/]) HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID"); /* Initialize chunk dims to 0s */ - HDmemset(real_dims,0,H5O_LAYOUT_NDIMS*sizeof(hsize_t)); + HDmemset(real_dims,0,sizeof(real_dims)); for (i=0; iselect.num_elem=(hsize_t)H5S_get_simple_extent_npoints(dst); + dst->select.num_elem=(hsize_t)H5S_GET_SIMPLE_EXTENT_NPOINTS(dst); done: FUNC_LEAVE_NOAPI(ret_value); @@ -723,7 +723,7 @@ H5S_select_all (H5S_t *space, unsigned rel_prev) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection"); /* Set number of elements in selection */ - space->select.num_elem=(hsize_t)H5S_get_simple_extent_npoints(space); + space->select.num_elem=(hsize_t)H5S_GET_SIMPLE_EXTENT_NPOINTS(space); /* Set selection type */ space->select.type=H5S_SEL_ALL; diff --git a/src/H5Smpio.c 
b/src/H5Smpio.c index bea5982..0158d14 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -107,7 +107,6 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size, hsize_t total_bytes; hssize_t snelmts; /*total number of elmts (signed) */ hsize_t nelmts; /*total number of elmts */ - unsigned u; herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_all_type); @@ -116,7 +115,7 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size, assert (space); /* Just treat the entire extent as a block of bytes */ - if((snelmts = H5S_get_simple_extent_npoints(space))<0) + if((snelmts = H5S_GET_SIMPLE_EXTENT_NPOINTS(space))<0) HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection") H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t); @@ -678,7 +677,8 @@ H5S_mpio_spaces_xfer(H5F_t *f, const H5O_layout_t *layout, size_t elmt_size, * the address to read from. This should be used as the diplacement for * a call to MPI_File_set_view() in the read or write call. */ - addr = f->shared->base_addr + layout->addr + mpi_file_offset; + assert(layout->type==H5D_CONTIGUOUS); + addr = f->shared->base_addr + layout->u.contig.addr + mpi_file_offset; #ifdef H5Smpi_DEBUG HDfprintf(stderr, "spaces_xfer: addr=%a\n", addr ); #endif diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h index 8b5a5b8..59174fc 100644 --- a/src/H5Sprivate.h +++ b/src/H5Sprivate.h @@ -176,6 +176,7 @@ typedef struct H5S_conv_t { #ifdef H5S_PACKAGE #define H5S_GET_SIMPLE_EXTENT_TYPE(S) ((S)->extent.type) #define H5S_GET_SIMPLE_EXTENT_NDIMS(S) ((S)->extent.u.simple.rank) +#define H5S_GET_SIMPLE_EXTENT_NPOINTS(S) ((S)->extent.nelem) #define H5S_GET_SELECT_NPOINTS(S) ((S)->select.num_elem) #define H5S_GET_SELECT_TYPE(S) ((S)->select.type) #define H5S_SELECT_GET_SEQ_LIST(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN) ((*(S)->select.get_seq_list)(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN)) @@ -197,6 +198,7 @@ typedef struct H5S_conv_t { #else /* H5S_PACKAGE */ #define H5S_GET_SIMPLE_EXTENT_TYPE(S) (H5S_get_simple_extent_type(S)) #define H5S_GET_SIMPLE_EXTENT_NDIMS(S) (H5S_get_simple_extent_ndims(S)) +#define H5S_GET_SIMPLE_EXTENT_NPOINTS(S) (H5S_get_simple_extent_npoints(S)) #define H5S_GET_SELECT_NPOINTS(S) (H5S_get_select_npoints(S)) #define H5S_GET_SELECT_TYPE(S) (H5S_get_select_type(S)) #define H5S_SELECT_GET_SEQ_LIST(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN) (H5S_select_get_seq_list(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN)) diff --git a/src/H5V.c b/src/H5V.c index dfadee9..4163a6f 100644 --- a/src/H5V.c +++ b/src/H5V.c @@ -413,8 +413,8 @@ done: */ htri_t H5V_hyper_disjointp(unsigned n, - const hssize_t *offset1, const hsize_t *size1, - const hssize_t *offset2, const hsize_t *size2) + const hssize_t *offset1, const size_t *size1, + const hssize_t *offset2, const size_t *size2) { unsigned u; htri_t ret_value=FALSE; /* Return value */ @@ -1192,7 +1192,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5V_chunk_index(unsigned ndims, const hssize_t *coord, const hsize_t *chunk, +H5V_chunk_index(unsigned ndims, const hssize_t *coord, const size_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx) { hssize_t scaled_coord[H5V_HYPER_NDIMS]; /* Scaled, coordinates, in terms of chunks */ @@ -1208,8 +1208,10 @@ H5V_chunk_index(unsigned ndims, const hssize_t *coord, const hsize_t *chunk, assert(chunk_idx); /* Compute the scaled coordinates for actual coordinates */ - for(u=0; u 0) { unsigned chunk_ndims; /* # of chunk dimensions */ - hsize_t 
chunk_size[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */ + size_t chunk_size[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */ + hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */ H5S_t *space; /* Dataspace describing chunk */ hid_t space_id; /* ID for dataspace describing chunk */ size_t u; /* Local index variable */ @@ -542,8 +544,10 @@ H5Z_prelude_callback(hid_t dcpl_id, hid_t type_id, H5Z_prelude_type_t prelude_ty HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve chunk size"); /* Create a data space for a chunk & set the extent */ - if(NULL == (space = H5S_create_simple(chunk_ndims,chunk_size,NULL))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace"); + for(u=0; u
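
The following is an illustrative sketch, not part of the patch and not HDF5 source: it mirrors how the revised, version-3 layout message (see the H5Olayout.c hunks above) sizes its per-class payload, using invented stand-in names (the SK_*/sk_* identifiers) and assuming 8-byte file addresses and lengths, whereas the real code asks the file via H5F_SIZEOF_ADDR()/H5F_SIZEOF_SIZE().

/*
 * Minimal standalone sketch of the version-3 layout message sizing rules:
 * 1 byte version + 1 byte class, then class-specific fields only
 * (contiguous: address + length; chunked: ndims + B-tree address +
 * 32-bit chunk dims; compact: 16-bit size + raw data).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_MAX_NDIMS   33   /* stand-in for H5O_LAYOUT_NDIMS */
#define SKETCH_SIZEOF_ADDR  8   /* assumed H5F_SIZEOF_ADDR(f) */
#define SKETCH_SIZEOF_SIZE  8   /* assumed H5F_SIZEOF_SIZE(f) */

typedef enum { SK_CONTIGUOUS, SK_CHUNKED, SK_COMPACT } sk_layout_class_t;

typedef struct {
    sk_layout_class_t type;             /* storage class, like H5O_layout_t.type */
    union {                             /* per-class info, like the new H5O_layout_t.u */
        struct { unsigned long long addr, size; } contig;
        struct { unsigned long long addr; unsigned ndims;
                 size_t dim[SKETCH_MAX_NDIMS]; } chunk;
        struct { size_t size; void *buf; } compact;
    } u;
} sk_layout_t;

/* Encoded size of a version-3 layout message, including compact raw data. */
static size_t
sk_layout_v3_size(const sk_layout_t *layout)
{
    size_t ret = 1 + 1;                 /* version number + layout class */

    switch (layout->type) {
        case SK_CONTIGUOUS:
            ret += SKETCH_SIZEOF_ADDR;          /* address of data */
            ret += SKETCH_SIZEOF_SIZE;          /* length of data */
            break;
        case SK_CHUNKED:
            assert(layout->u.chunk.ndims > 0 &&
                   layout->u.chunk.ndims <= SKETCH_MAX_NDIMS);
            ret += 1;                           /* number of dimensions */
            ret += SKETCH_SIZEOF_ADDR;          /* B-tree address */
            ret += layout->u.chunk.ndims * 4;   /* 32-bit chunk dimensions */
            break;
        case SK_COMPACT:
            ret += 2;                           /* 16-bit size of raw data */
            ret += layout->u.compact.size;      /* raw data itself */
            break;
    }
    return ret;
}

int
main(void)
{
    sk_layout_t chunked = { SK_CHUNKED };
    chunked.u.chunk.ndims = 3;                  /* e.g. rank 2 plus the element-size dim */
    printf("v3 chunked layout message: %zu bytes\n", sk_layout_v3_size(&chunked));
    return 0;
}

The point of the new format, per the commit description, is that dataset dimensions are no longer stored in the layout message at all (the dataspace already carries them), which is what removes the 32-bit truncation problem; only chunk dimensions remain as 32-bit fields in the chunked case.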