From 1bd45cb96f5b463c14ac56d8362b3992be6128c6 Mon Sep 17 00:00:00 2001 From: HDF Tester Date: Sun, 3 Aug 2014 05:01:09 -0500 Subject: [svn-r25506] Snapshot version 1.9 release 191 --- README.txt | 2 +- c++/src/Makefile.in | 2 +- config/lt_vers.am | 2 +- configure | 22 +++++++++++----------- configure.ac | 2 +- fortran/src/Makefile.in | 2 +- hl/c++/src/Makefile.in | 2 +- hl/fortran/src/Makefile.in | 2 +- hl/src/H5LTanalyze.c | 2 +- hl/src/H5LTparse.c | 13 ++++++------- hl/src/Makefile.in | 2 +- release_docs/RELEASE.txt | 2 +- src/H5public.h | 4 ++-- src/Makefile.in | 2 +- vms/src/h5pubconf.h | 6 +++--- 15 files changed, 33 insertions(+), 34 deletions(-) diff --git a/README.txt b/README.txt index 1d01626..ff9ae10 100644 --- a/README.txt +++ b/README.txt @@ -1,4 +1,4 @@ -HDF5 version 1.9.191 currently under development +HDF5 version 1.9.192 currently under development Please refer to the release_docs/INSTALL file for installation instructions. ------------------------------------------------------------------------------ diff --git a/c++/src/Makefile.in b/c++/src/Makefile.in index 12095d1..b24733d 100644 --- a/c++/src/Makefile.in +++ b/c++/src/Makefile.in @@ -681,7 +681,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 # This is our main target diff --git a/config/lt_vers.am b/config/lt_vers.am index dfe4920..f237db3 100644 --- a/config/lt_vers.am +++ b/config/lt_vers.am @@ -17,7 +17,7 @@ # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 ## If the API changes *at all*, increment LT_VERS_INTERFACE and diff --git a/configure b/configure index 3ff2a0c..c3b6374 100755 --- a/configure +++ b/configure @@ -1,7 +1,7 @@ #! /bin/sh # From configure.ac Id: configure.ac 22697 2012-08-19 14:35:47Z hdftest . # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for HDF5 1.9.191. +# Generated by GNU Autoconf 2.69 for HDF5 1.9.192. # # Report bugs to . # @@ -591,8 +591,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='HDF5' PACKAGE_TARNAME='hdf5' -PACKAGE_VERSION='1.9.191' -PACKAGE_STRING='HDF5 1.9.191' +PACKAGE_VERSION='1.9.192' +PACKAGE_STRING='HDF5 1.9.192' PACKAGE_BUGREPORT='help@hdfgroup.org' PACKAGE_URL='' @@ -1489,7 +1489,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures HDF5 1.9.191 to adapt to many kinds of systems. +\`configure' configures HDF5 1.9.192 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1559,7 +1559,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of HDF5 1.9.191:";; + short | recursive ) echo "Configuration of HDF5 1.9.192:";; esac cat <<\_ACEOF @@ -1752,7 +1752,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -HDF5 configure 1.9.191 +HDF5 configure 1.9.192 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. 
@@ -2846,7 +2846,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by HDF5 $as_me 1.9.191, which was +It was created by HDF5 $as_me 1.9.192, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -3717,7 +3717,7 @@ fi # Define the identity of the package. PACKAGE='hdf5' - VERSION='1.9.191' + VERSION='1.9.192' cat >>confdefs.h <<_ACEOF @@ -31732,7 +31732,7 @@ Usage: $0 [OPTIONS] Report bugs to ." lt_cl_version="\ -HDF5 config.lt 1.9.191 +HDF5 config.lt 1.9.192 configured by $0, generated by GNU Autoconf 2.69. Copyright (C) 2011 Free Software Foundation, Inc. @@ -33874,7 +33874,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by HDF5 $as_me 1.9.191, which was +This file was extended by HDF5 $as_me 1.9.192, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -33940,7 +33940,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -HDF5 config.status 1.9.191 +HDF5 config.status 1.9.192 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 9df19d8..9c9b73c 100644 --- a/configure.ac +++ b/configure.ac @@ -26,7 +26,7 @@ AC_PREREQ([2.69]) ## NOTE: Do not forget to change the version number here when we do a ## release!!! ## -AC_INIT([HDF5], [1.9.191], [help@hdfgroup.org]) +AC_INIT([HDF5], [1.9.192], [help@hdfgroup.org]) AC_CONFIG_SRCDIR([src/H5.c]) AC_CONFIG_HEADER([src/H5config.h]) diff --git a/fortran/src/Makefile.in b/fortran/src/Makefile.in index 8b370f3..315636c 100644 --- a/fortran/src/Makefile.in +++ b/fortran/src/Makefile.in @@ -732,7 +732,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 AM_FCLIBS = $(LIBHDF5) diff --git a/hl/c++/src/Makefile.in b/hl/c++/src/Makefile.in index 71e6250..c321eaf 100644 --- a/hl/c++/src/Makefile.in +++ b/hl/c++/src/Makefile.in @@ -673,7 +673,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 # This is our main target diff --git a/hl/fortran/src/Makefile.in b/hl/fortran/src/Makefile.in index 8bf92b9..c8b6f7d 100644 --- a/hl/fortran/src/Makefile.in +++ b/hl/fortran/src/Makefile.in @@ -688,7 +688,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. 
LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 # Our main target, the high-level fortran library diff --git a/hl/src/H5LTanalyze.c b/hl/src/H5LTanalyze.c index 5fe53ef..d9ffb36 100644 --- a/hl/src/H5LTanalyze.c +++ b/hl/src/H5LTanalyze.c @@ -869,7 +869,7 @@ int my_yyinput(char *, int); #undef YY_INPUT #define YY_INPUT(b, r, ms) (r=my_yyinput(b, ms)) #define token(x) (int)x -#define hid(x) (hid_t)x +#define hid(x) (hid_t)x #ifdef YY_BUF_SIZE #undef YY_BUF_SIZE diff --git a/hl/src/H5LTparse.c b/hl/src/H5LTparse.c index e9a3ec9..aa12185 100644 --- a/hl/src/H5LTparse.c +++ b/hl/src/H5LTparse.c @@ -626,12 +626,11 @@ static const char *const yytname[] = "NUMBER", "'{'", "'}'", "'['", "']'", "'\"'", "':'", "';'", "$accept", "start", "ddl_type", "atomic_type", "integer_type", "fp_type", "compound_type", "$@1", "memb_list", "memb_def", "$@2", "field_name", - "field_offset", "offset", "array_type", "$@3", "dim_list", "dim", - "$@4", "$@5", "dimsize", "vlen_type", "opaque_type", "$@6", "@7", - "$@8", "$@9", "opaque_size", "opaque_tag", "string_type", "$@10", - "$@11", "$@12", "$@13", "@14", "strsize", "strpad", "cset", "ctype", - "enum_type", "$@15", "enum_list", "enum_def", "$@16", "enum_symbol", - "enum_val", YY_NULL + "field_offset", "offset", "array_type", "$@3", "dim_list", "dim", "$@4", + "$@5", "dimsize", "vlen_type", "opaque_type", "$@6", "@7", "$@8", "$@9", + "opaque_size", "opaque_tag", "string_type", "$@10", "$@11", "$@12", + "$@13", "@14", "strsize", "strpad", "cset", "ctype", "enum_type", "$@15", + "enum_list", "enum_def", "$@16", "enum_symbol", "enum_val", YY_NULL }; #endif @@ -2188,7 +2187,7 @@ yyreduce: /* Line 1807 of yacc.c */ -#line 2192 "H5LTparse.c" +#line 2191 "H5LTparse.c" default: break; } /* User semantic actions sometimes alter yychar, and that requires diff --git a/hl/src/Makefile.in b/hl/src/Makefile.in index 123e149..2760fb1 100644 --- a/hl/src/Makefile.in +++ b/hl/src/Makefile.in @@ -669,7 +669,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 # This library is our main target. diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt index dc19950..6b47bab 100644 --- a/release_docs/RELEASE.txt +++ b/release_docs/RELEASE.txt @@ -1,4 +1,4 @@ -HDF5 version 1.9.191 currently under development +HDF5 version 1.9.192 currently under development ================================================================================ diff --git a/src/H5public.h b/src/H5public.h index 44f94d4..2400d59 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -94,10 +94,10 @@ extern "C" { /* Version numbers */ #define H5_VERS_MAJOR 1 /* For major interface/format changes */ #define H5_VERS_MINOR 9 /* For minor interface/format changes */ -#define H5_VERS_RELEASE 191 /* For tweaks, bug-fixes, or development */ +#define H5_VERS_RELEASE 192 /* For tweaks, bug-fixes, or development */ #define H5_VERS_SUBRELEASE "" /* For pre-releases like snap0 */ /* Empty string for real releases. 
*/ -#define H5_VERS_INFO "HDF5 library version: 1.9.191" /* Full version string */ +#define H5_VERS_INFO "HDF5 library version: 1.9.192" /* Full version string */ #define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \ H5_VERS_RELEASE) diff --git a/src/Makefile.in b/src/Makefile.in index d0f1202..5284e40 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -731,7 +731,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog # Add libtool shared library version numbers to the HDF5 library # See libtool versioning documentation online. LT_VERS_INTERFACE = 6 -LT_VERS_REVISION = 181 +LT_VERS_REVISION = 182 LT_VERS_AGE = 0 # Our main target, the HDF5 library diff --git a/vms/src/h5pubconf.h b/vms/src/h5pubconf.h index f78e381..44db827 100644 --- a/vms/src/h5pubconf.h +++ b/vms/src/h5pubconf.h @@ -501,7 +501,7 @@ #define H5_PACKAGE_NAME "HDF5" /* Define to the full name and version of this package. */ -#define H5_PACKAGE_STRING "HDF5 1.9.191" +#define H5_PACKAGE_STRING "HDF5 1.9.192" /* Define to the one symbol short name of this package. */ #define H5_PACKAGE_TARNAME "hdf5" @@ -510,7 +510,7 @@ #define H5_PACKAGE_URL "" /* Define to the version of this package. */ -#define H5_PACKAGE_VERSION "1.9.191" +#define H5_PACKAGE_VERSION "1.9.192" /* Width for printf() for type `long long' or `__int64', use `ll' */ #define H5_PRINTF_LL_WIDTH "ll" @@ -673,7 +673,7 @@ /* #undef H5_USING_MEMCHECKER */ /* Version number of package */ -#define H5_VERSION "1.9.191" +#define H5_VERSION "1.9.192" /* Define if vsnprintf() returns the correct value for formatted strings that don't fit into size allowed */ -- cgit v0.12 From 4defea00206886aa5dded50896c1ac5c5c1fcceb Mon Sep 17 00:00:00 2001 From: Dana Robinson Date: Mon, 4 Aug 2014 13:37:55 -0500 Subject: [svn-r25508] Added a check that ensures page_size is not set to zero in H5Pset_core_write_tracking(). This ensures that the behavior matches the description in the reference manual. Tested on a local linux VM. This is a very minor change. --- src/H5Pfapl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 83e0a0f..089bfb9 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -3025,6 +3025,10 @@ H5Pset_core_write_tracking(hid_t plist_id, hbool_t is_enabled, size_t page_size) FUNC_ENTER_API(FAIL) H5TRACE3("e", "ibz", plist_id, is_enabled, page_size); + /* The page size cannot be zero */ + if(page_size == 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "page_size cannot be zero") + /* Get the plist structure */ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS))) HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID") -- cgit v0.12 From db51b5504991175e87e58ffcaca2c34f672c522d Mon Sep 17 00:00:00 2001 From: Mohamad Chaarawi Date: Mon, 4 Aug 2014 14:58:03 -0500 Subject: [svn-r25509] HDFFV-8878: Make the fill value operation for chunked datasets in parallel collective/fast. 
tested with h5committest --- src/H5Dchunk.c | 245 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 205 insertions(+), 40 deletions(-) diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 20b3c1a..6914607 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -176,6 +176,13 @@ typedef struct H5D_chunk_file_iter_ud_t { #endif /* H5_HAVE_PARALLEL */ } H5D_chunk_file_iter_ud_t; +#ifdef H5_HAVE_PARALLEL +/* information to construct a collective I/O operation for filling chunks */ +typedef struct H5D_chunk_coll_info_t { + size_t num_io; /* Number of write operations */ + haddr_t *addr; /* array of the file addresses of the write operation */ +} H5D_chunk_coll_info_t; +#endif /* H5_HAVE_PARALLEL */ /********************/ /* Local Prototypes */ @@ -230,7 +237,10 @@ static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, size_t size); static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata); - +#ifdef H5_HAVE_PARALLEL +static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id, + H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf); +#endif /* H5_HAVE_PARALLEL */ /*********************/ /* Package Variables */ @@ -3247,6 +3257,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, hsize_t max_unalloc[H5O_LAYOUT_NDIMS]; /* Last chunk in each dimension that is unallocated */ hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ size_t orig_chunk_size; /* Original size of chunk in bytes */ + size_t chunk_size; /* Actual size of chunk in bytes, possibly filtered */ unsigned filter_mask = 0; /* Filter mask for chunks that have them */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ @@ -3256,11 +3267,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ #ifdef H5_HAVE_PARALLEL - MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ - int mpi_rank = (-1); /* This process's rank */ - int mpi_code; /* MPI return code */ hbool_t blocks_written = FALSE; /* Flag to indicate that chunk was actually written */ hbool_t using_mpi = FALSE; /* Flag to indicate that the file is being accessed with an MPI-capable file driver */ + H5D_chunk_coll_info_t chunk_info; /* chunk address information for doing I/O */ #endif /* H5_HAVE_PARALLEL */ hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ int space_ndims; /* Dataset's space rank */ @@ -3269,7 +3278,6 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, int op_dim; /* Current operationg dimension */ H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ - hid_t data_dxpl_id; /* DXPL ID to use for raw data I/O operations */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dxpl_id, dset->oloc.addr, FAIL) @@ -3299,30 +3307,17 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, #ifdef H5_HAVE_PARALLEL /* Retrieve MPI parameters */ if(H5F_HAS_FEATURE(dset->oloc.file, H5FD_FEAT_HAS_MPI)) { - /* Get the MPI communicator */ - 
if(MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(dset->oloc.file))) - HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator") - - /* Get the MPI rank */ - if((mpi_rank = H5F_mpi_get_rank(dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank") - /* Set the MPI-capable file driver flag */ using_mpi = TRUE; - /* Use the internal "independent" DXPL */ - data_dxpl_id = H5AC_ind_dxpl_id; + /* init chunk info stuff for collective I/O */ + chunk_info.num_io = 0; + chunk_info.addr = NULL; } /* end if */ - else { -#endif /* H5_HAVE_PARALLEL */ - /* Use the DXPL we were given */ - data_dxpl_id = dxpl_id; -#ifdef H5_HAVE_PARALLEL - } /* end else */ #endif /* H5_HAVE_PARALLEL */ /* Fill the DXPL cache values for later use */ - if(H5D__get_dxpl_cache(data_dxpl_id, &dxpl_cache) < 0) + if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") /* Get original chunk size */ @@ -3352,7 +3347,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, if(H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_alloc, (void *)pline, (H5MM_free_t)H5D__chunk_xfree, (void *)pline, &dset->shared->dcpl_cache.fill, dset->shared->type, - dset->shared->type_id, (size_t)0, orig_chunk_size, data_dxpl_id) < 0) + dset->shared->type_id, (size_t)0, orig_chunk_size, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") fb_info_init = TRUE; @@ -3426,7 +3421,8 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, } /* end else */ while(!carry) { - size_t chunk_size = orig_chunk_size; /* Size of chunk in bytes, possibly filtered */ + /* Reset size of chunk in bytes, in case filtered size changes */ + chunk_size = orig_chunk_size; #ifndef NDEBUG /* None of the chunks should be allocated */ @@ -3462,6 +3458,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, if(fb_info_init && fb_info.has_vlen_fill_type) { /* Sanity check */ HDassert(should_fill); +#ifdef H5_HAVE_PARALLEL + HDassert(!using_mpi); /* Can't write VL datatypes in parallel currently */ +#endif /* Check to make sure the buffer is large enough. It is * possible (though ill-advised) for the filter to shrink the @@ -3474,7 +3473,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, } /* end if */ /* Fill the buffer with VL datatype fill values */ - if(H5D__fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0) + if(H5D__fill_refill_vl(&fb_info, fb_info.elmts_per_buf, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") /* Check if there are filters which need to be applied to the chunk */ @@ -3519,18 +3518,26 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, #ifdef H5_HAVE_PARALLEL /* Check if this file is accessed with an MPI-capable file driver */ if(using_mpi) { - /* Write the chunks out from only one process */ - /* !! Use the internal "independent" DXPL!! 
-QAK */ - if(H5_PAR_META_WRITE == mpi_rank) - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") + /* collect all chunk addresses to be written to + write collectively at the end */ + /* allocate/resize address array if no more space left */ + if(0 == chunk_info.num_io % 1024) { + if(NULL == (chunk_info.addr = (haddr_t *)HDrealloc + (chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses"); + } /* end if */ + + /* Store the chunk's address for later */ + chunk_info.addr[chunk_info.num_io] = udata.addr; + chunk_info.num_io++; - /* Indicate that blocks are being written */ + /* Indicate that blocks will be written */ blocks_written = TRUE; } /* end if */ else { #endif /* H5_HAVE_PARALLEL */ - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0) + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, + dxpl_id, fb_info.fill_buf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") #ifdef H5_HAVE_PARALLEL } /* end else */ @@ -3563,15 +3570,10 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite, } /* end for(op_dim=0...) */ #ifdef H5_HAVE_PARALLEL - /* Only need to block at the barrier if we actually initialized a chunk */ - /* using an MPI-capable file driver */ + /* do final collective I/O */ if(using_mpi && blocks_written) { - /* Wait at barrier to avoid race conditions where some processes are - * still writing out chunks and other processes race ahead to read - * them in, getting bogus data. - */ - if(MPI_SUCCESS != (mpi_code = MPI_Barrier(mpi_comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code) + if(H5D__chunk_collective_fill(dset, dxpl_id, &chunk_info, chunk_size, fb_info.fill_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") } /* end if */ #endif /* H5_HAVE_PARALLEL */ @@ -3583,9 +3585,172 @@ done: if(fb_info_init && H5D__fill_term(&fb_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") +#ifdef H5_HAVE_PARALLEL + if(using_mpi) { + if(chunk_info.addr) + HDfree(chunk_info.addr); + } /* end if */ +#endif + FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) } /* end H5D__chunk_allocate() */ +#ifdef H5_HAVE_PARALLEL + +/*------------------------------------------------------------------------- + * Function: H5D__chunk_collective_fill + * + * Purpose: Use MPIO collective write to fill the chunks (if number of + * chunks to fill is greater than the number of MPI procs; + * otherwise use independent I/O). + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Mohamad Chaarawi + * July 30, 2014 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id, + H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf) +{ + MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */ + int mpi_rank = (-1); /* This process's rank */ + int mpi_size = (-1); /* MPI Comm size */ + int mpi_code; /* MPI return code */ + size_t num_blocks; /* Number of blocks between processes. 
*/ + size_t leftover_blocks; /* Number of leftover blocks to handle */ + int blocks, leftover, block_len; /* converted to int for MPI */ + MPI_Aint *chunk_disp_array = NULL; + int *block_lens = NULL; + MPI_Datatype mem_type, file_type; + hid_t data_dxpl_id = -1; /* DXPL ID to use for raw data I/O operations */ + int i; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Get the MPI communicator */ + if(MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(dset->oloc.file))) + HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator") + + /* Get the MPI rank */ + if((mpi_rank = H5F_mpi_get_rank(dset->oloc.file)) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI rank") + + /* Get the MPI size */ + if((mpi_size = H5F_mpi_get_size(dset->oloc.file)) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI size") + + /* Get a copy of the DXPL, to modify */ + if((data_dxpl_id = H5P_copy_plist((H5P_genplist_t *)H5I_object(dxpl_id), TRUE)) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy property list") + + /* Distribute evenly the number of blocks between processes. */ + num_blocks = chunk_info->num_io / mpi_size; /* value should be the same on all procs */ + + /* after evenly distributing the blocks between processes, are + there any leftover blocks for each individual process + (round-robin) */ + leftover_blocks = chunk_info->num_io % mpi_size; + + /* Cast values to types needed by MPI */ + H5_ASSIGN_OVERFLOW(blocks, num_blocks, size_t, int); + H5_ASSIGN_OVERFLOW(leftover, leftover_blocks, size_t, int); + H5_ASSIGN_OVERFLOW(block_len, chunk_size, size_t, int); + + /* Allocate buffers */ + /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */ + if(NULL == (block_lens = (int *)H5MM_malloc((blocks + 1) * sizeof(int)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer") + if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((blocks + 1) * sizeof(MPI_Aint)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer") + + for(i = 0 ; i < blocks ; i++) { + /* store the chunk address as an MPI_Aint */ + chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + mpi_rank*blocks]); + + /* MSC - should not need this if MPI_type_create_hindexed_block is working */ + block_lens[i] = block_len; + + /* make sure that the addresses in the datatype are + monotonically non decreasing */ + if(i) + HDassert(chunk_disp_array[i] > chunk_disp_array[i - 1]); + } /* end if */ + + /* calculate if there are any leftover blocks after evenly + distributing. If there are, then round robin the distribution + to processes 0 -> leftover. 
*/ + if(leftover && leftover > mpi_rank) { + chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[blocks*mpi_size + mpi_rank]; + block_lens[blocks] = block_len; + blocks++; + } + + /* MSC - should use this if MPI_type_create_hindexed block is working */ + //mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, + //MPI_BYTE, &file_type); + mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, + MPI_BYTE, &file_type); + if(mpi_code != MPI_SUCCESS) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) + + mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type); + if(mpi_code != MPI_SUCCESS) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) + + /* set MPI-IO VFD properties */ + { + H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE; + H5P_genplist_t *plist; /* Property list pointer */ + + if(NULL == (plist = H5P_object_verify(data_dxpl_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dataset transfer list") + + /* Set buffer MPI type */ + if(H5P_set(plist, H5FD_MPI_XFER_MEM_MPI_TYPE_NAME, &mem_type) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property") + + /* Set File MPI type */ + if(H5P_set(plist, H5FD_MPI_XFER_FILE_MPI_TYPE_NAME, &file_type) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property") + + /* set transfer mode */ + if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode") + } + + /* low level write */ + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, (haddr_t)0, (blocks) ? (size_t)1 : (size_t)0, + data_dxpl_id, fill_buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") + + /* Barrier so processes don't race ahead */ + if(MPI_SUCCESS != (mpi_code = MPI_Barrier(mpi_comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code) + +done: + if(data_dxpl_id > 0 && H5I_dec_ref(data_dxpl_id) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't free property list") + + /* free things */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type))) + HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type))) + HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) + H5MM_xfree(chunk_disp_array); + H5MM_xfree(block_lens); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__chunk_collective_fill() */ +#endif /* H5_HAVE_PARALLEL */ + /*------------------------------------------------------------------------- * Function: H5D__chunk_prune_fill -- cgit v0.12 From a72cb6e43e74c4aab0ce64017a306b6594288566 Mon Sep 17 00:00:00 2001 From: Scot Breitenfeld Date: Tue, 5 Aug 2014 09:06:35 -0500 Subject: [svn-r25510] Added the parameters to the code comment section: H5T_VLEN_F (9) H5T_ARRAY_F (10) No source changes. 
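
A minimal illustrative C sketch (not part of this changeset) for the two class values being documented, H5T_VLEN_F (9) and H5T_ARRAY_F (10): it uses the underlying C API that the Fortran wrapper calls into, H5Tget_class() together with H5Tvlen_create() and H5Tarray_create2(), to show the corresponding H5T_VLEN and H5T_ARRAY classes being reported. The base datatype and array size are illustrative only.

    #include "hdf5.h"
    #include <stdio.h>

    int main(void)
    {
        hsize_t dims[1] = {4};

        /* A variable-length type and a 1-D array type over native int */
        hid_t vlen_t  = H5Tvlen_create(H5T_NATIVE_INT);
        hid_t array_t = H5Tarray_create2(H5T_NATIVE_INT, 1, dims);

        /* H5Tget_class() returns the class enum; the Fortran constants
         * H5T_VLEN_F (9) and H5T_ARRAY_F (10) map to these classes. */
        printf("vlen  class == H5T_VLEN:  %d\n", H5Tget_class(vlen_t)  == H5T_VLEN);
        printf("array class == H5T_ARRAY: %d\n", H5Tget_class(array_t) == H5T_ARRAY);

        H5Tclose(array_t);
        H5Tclose(vlen_t);
        return 0;
    }
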
--- fortran/src/H5Tf.c | 35 +++++++++++++------------ fortran/src/H5Tff.f90 | 71 ++++++++++++++++++++++----------------------------- 2 files changed, 49 insertions(+), 57 deletions(-) diff --git a/fortran/src/H5Tf.c b/fortran/src/H5Tf.c index df581ee..32cdb72 100644 --- a/fortran/src/H5Tf.c +++ b/fortran/src/H5Tf.c @@ -225,31 +225,32 @@ nh5tequal_c ( hid_t_f *type1_id , hid_t_f *type2_id, int_f *c_flag) return ret_value; } - /****if* H5Tf/h5tget_class_c * NAME - * h5tget_class_c + * h5tget_class_c * PURPOSE - * Call H5Tget_class to determine the datatype class + * Call H5Tget_class to determine the datatype class * INPUTS - * type_id - identifier of the dataspace + * type_id - identifier of the dataspace * OUTPUTS - * classtype - class type; possible values are: - * H5T_NO_CLASS_F (-1) - * H5T_INTEGER_F (0) - * H5T_FLOAT_F (1) - * H5T_TIME_F (2) - * H5T_STRING_F (3) - * H5T_BITFIELD_F (4) - * H5T_OPAQUE_F (5) - * H5T_COMPOUNDF (6) - * H5T_REFERENCE_F (7) - * H5T_ENUMF (8) + * classtype - class type; possible values are: + * H5T_NO_CLASS_F (-1) + * H5T_INTEGER_F (0) + * H5T_FLOAT_F (1) + * H5T_TIME_F (2) + * H5T_STRING_F (3) + * H5T_BITFIELD_F (4) + * H5T_OPAQUE_F (5) + * H5T_COMPOUNDF (6) + * H5T_REFERENCE_F (7) + * H5T_ENUM_F (8) + * H5T_VLEN_F (9) + * H5T_ARRAY_F (10) * RETURNS - * 0 on success, -1 on failure + * 0 on success, -1 on failure * AUTHOR * Elena Pourmal - * Saturday, August 14, 1999 + * Saturday, August 14, 1999 * HISTORY * * SOURCE diff --git a/fortran/src/H5Tff.f90 b/fortran/src/H5Tff.f90 index 89bd972..0e1dbb0 100644 --- a/fortran/src/H5Tff.f90 +++ b/fortran/src/H5Tff.f90 @@ -335,20 +335,22 @@ CONTAINS ! Returns the datatype class identifier. ! ! INPUTS -! type_id - datatype identifier +! type_id - Datatype identifier ! OUTPUTS -! class - class, possible values are: -! H5T_NO_CLASS_F (-1) -! H5T_INTEGER_F (0) -! H5T_FLOAT_F (1) -! H5T_TIME_F (2) -! H5T_STRING_F (3) -! H5T_BITFIELD_F (4) -! H5T_OPAQUE_F (5) -! H5T_COMPOUND_F (6) -! H5T_REFERENCE_F (7) -! H5T_ENUM_F (8) -! hdferr - Returns 0 if successful and -1 if fails +! class - Class, possible values are: +! H5T_NO_CLASS_F (-1) +! H5T_INTEGER_F (0) +! H5T_FLOAT_F (1) +! H5T_TIME_F (2) +! H5T_STRING_F (3) +! H5T_BITFIELD_F (4) +! H5T_OPAQUE_F (5) +! H5T_COMPOUND_F (6) +! H5T_REFERENCE_F (7) +! H5T_ENUM_F (8) +! H5T_VLEN_F (9) +! H5T_ARRAY_F (10) +! hdferr - Returns 0 if successful and -1 if fails ! ! AUTHOR ! Elena Pourmal @@ -361,35 +363,24 @@ CONTAINS ! ! SOURCE SUBROUTINE h5tget_class_f(type_id, class, hdferr) - IMPLICIT NONE - INTEGER(HID_T), INTENT(IN) :: type_id ! Datatype identifier - INTEGER, INTENT(OUT) :: class - ! Datatype class, possible values are: - ! H5T_NO_CLASS_F (-1) - ! H5T_INTEGER_F (0) - ! H5T_FLOAT_F (1) - ! H5T_TIME_F (2) - ! H5T_STRING_F (3) - ! H5T_BITFIELD_F (4) - ! H5T_OPAQUE_F (5) - ! H5T_COMPOUND_F (6) - ! H5T_REFERENCE_F (7) - ! H5T_ENUM_F (8) - INTEGER, INTENT(OUT) :: hdferr ! 
Error code + IMPLICIT NONE + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER, INTENT(OUT) :: class + INTEGER, INTENT(OUT) :: hdferr !***** - INTERFACE - INTEGER FUNCTION h5tget_class_c(type_id, class) - USE H5GLOBAL - !DEC$IF DEFINED(HDF5F90_WINDOWS) - !DEC$ATTRIBUTES C,reference,decorate,alias:'H5TGET_CLASS_C'::h5tget_class_c - !DEC$ENDIF - INTEGER(HID_T), INTENT(IN) :: type_id - INTEGER, INTENT(OUT) :: class - END FUNCTION h5tget_class_c - END INTERFACE + INTERFACE + INTEGER FUNCTION h5tget_class_c(type_id, class) + USE H5GLOBAL + !DEC$IF DEFINED(HDF5F90_WINDOWS) + !DEC$ATTRIBUTES C,reference,decorate,alias:'H5TGET_CLASS_C'::h5tget_class_c + !DEC$ENDIF + INTEGER(HID_T), INTENT(IN) :: type_id + INTEGER, INTENT(OUT) :: class + END FUNCTION h5tget_class_c + END INTERFACE - hdferr = h5tget_class_c(type_id, class) - END SUBROUTINE h5tget_class_f + hdferr = h5tget_class_c(type_id, class) + END SUBROUTINE h5tget_class_f ! !****s* H5T/h5tget_size_f ! -- cgit v0.12 From 7433fb9dc11ded216e3428d504c7531e387570a0 Mon Sep 17 00:00:00 2001 From: Mohamad Chaarawi Date: Tue, 5 Aug 2014 10:36:38 -0500 Subject: [svn-r25514] Replace MPI deprecated routines with undeprecated ones. --- src/H5Dmpio.c | 8 ++++---- src/H5Smpio.c | 27 ++++++++++++++------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 3e380a4..cbb3c4a 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -994,15 +994,15 @@ if(H5DEBUG(D)) } /* end for */ /* Create final MPI derived datatype for the file */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_file_counts, chunk_disp_array, chunk_ftype, &chunk_final_ftype))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_file_counts, chunk_disp_array, chunk_ftype, &chunk_final_ftype))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_ftype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) chunk_final_ftype_is_derived = TRUE; /* Create final MPI derived datatype for memory */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)num_chunk, chunk_mpi_mem_counts, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_mem_counts, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_mtype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) chunk_final_mtype_is_derived = TRUE; diff --git a/src/H5Smpio.c b/src/H5Smpio.c index 8735ffa..b2d4abb 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -503,6 +503,7 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, int block_length[3]; MPI_Datatype inner_type, outer_type, old_types[3]; MPI_Aint extent_len, displacement[3]; + MPI_Aint lb; /* Needed as an argument for MPI_Type_get_extent */ unsigned u; /* Local index variable */ int i; /* Local index variable */ int mpi_code; /* MPI return code */ @@ -671,8 +672,8 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, /* calculate start and extent values of this dimension */ displacement[1] = d[i].start * offset[i] * elmt_size; displacement[2] = (MPI_Aint)elmt_size * max_xtent[i]; - if(MPI_SUCCESS != (mpi_code = MPI_Type_extent(outer_type, &extent_len))) - 
HMPI_GOTO_ERROR(FAIL, "MPI_Type_extent failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent failed", mpi_code) /************************************************* * Restructure this datatype ("outer_type") @@ -697,11 +698,11 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, FUNC, i, (long)displacement[0], (long)displacement[1], (long)displacement[2]); #endif - mpi_code = MPI_Type_struct(3, /* count */ - block_length, /* blocklengths */ - displacement, /* displacements */ - old_types, /* old types */ - &inner_type); /* new type */ + mpi_code = MPI_Type_create_struct(3, /* count */ + block_length, /* blocklengths */ + displacement, /* displacements */ + old_types, /* old types */ + &inner_type); /* new type */ MPI_Type_free(&outer_type); if(mpi_code != MPI_SUCCESS) @@ -884,8 +885,8 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, outercount++; } /* end while */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) span_type_valid = TRUE; } /* end if */ else { @@ -935,9 +936,9 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, /* Build the MPI datatype for this node */ stride = (*down) * elmt_size; H5_CHECK_OVERFLOW(tspan->nelem, hsize_t, int) - if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector((int)tspan->nelem, 1, stride, down_type, &inner_type[outercount]))) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)tspan->nelem, 1, stride, down_type, &inner_type[outercount]))) { MPI_Type_free(&down_type); - HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) } /* end if */ /* Release MPI datatype for next dimension down */ @@ -950,8 +951,8 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, /* building the whole vector datatype */ H5_CHECK_OVERFLOW(outercount, size_t, int) - if(MPI_SUCCESS != (mpi_code = MPI_Type_struct((int)outercount, blocklen, disp, inner_type, span_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type, span_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) span_type_valid = TRUE; /* Release inner node types */ -- cgit v0.12 From 333f1d47835f3f0ac30f0f2e6774c974b75a6c9c Mon Sep 17 00:00:00 2001 From: Mohamad Chaarawi Date: Wed, 6 Aug 2014 12:10:54 -0500 Subject: [svn-r25516] remove use of MPI_LB and MPI_UB deprecated markers and use resize instead. 
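
As an illustration of the replacement pattern the diff below applies in H5Smpio.c, here is a minimal, self-contained sketch: place a block at a byte offset with MPI_Type_create_hindexed(), then widen its extent with MPI_Type_create_resized(), instead of marking the bounds with the deprecated MPI_LB/MPI_UB entries of an MPI_Type_struct. The helper name and the sizes used in main() are hypothetical, not taken from the HDF5 source.

    #include <mpi.h>

    /* Describe one block of block_len bytes that begins start_disp bytes into
     * a region whose full extent is new_extent bytes.  The old code expressed
     * the bounds with MPI_LB/MPI_UB markers inside an MPI_Type_struct; the
     * replacement resizes the type explicitly. */
    static int make_placed_block(MPI_Aint start_disp, int block_len,
                                 MPI_Aint new_extent, MPI_Datatype *out_type)
    {
        MPI_Datatype interm_type;
        int mpi_code;

        /* One block of block_len bytes, displaced by start_disp bytes */
        mpi_code = MPI_Type_create_hindexed(1, &block_len, &start_disp,
                                            MPI_BYTE, &interm_type);
        if (mpi_code != MPI_SUCCESS)
            return mpi_code;

        /* Stretch the extent so consecutive uses of the type advance by
         * new_extent bytes, with the lower bound left at zero */
        mpi_code = MPI_Type_create_resized(interm_type, (MPI_Aint)0, new_extent,
                                           out_type);
        MPI_Type_free(&interm_type);
        if (mpi_code != MPI_SUCCESS)
            return mpi_code;

        return MPI_Type_commit(out_type);
    }

    int main(int argc, char **argv)
    {
        MPI_Datatype placed;

        MPI_Init(&argc, &argv);

        /* e.g. a 64-byte block starting 128 bytes in, within a 512-byte extent */
        if (make_placed_block((MPI_Aint)128, 64, (MPI_Aint)512, &placed) == MPI_SUCCESS)
            MPI_Type_free(&placed);

        MPI_Finalize();
        return 0;
    }
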
--- src/H5Smpio.c | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/src/H5Smpio.c b/src/H5Smpio.c index b2d4abb..8a2bd27 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -500,10 +500,9 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, hsize_t max_xtent[H5S_MAX_RANK]; H5S_hyper_dim_t *diminfo; /* [rank] */ unsigned rank; - int block_length[3]; - MPI_Datatype inner_type, outer_type, old_types[3]; - MPI_Aint extent_len, displacement[3]; - MPI_Aint lb; /* Needed as an argument for MPI_Type_get_extent */ + MPI_Datatype inner_type, outer_type; + MPI_Aint extent_len, start_disp, new_extent; + MPI_Aint lb; /* Needed as an argument for MPI_Type_get_extent */ unsigned u; /* Local index variable */ int i; /* Local index variable */ int mpi_code; /* MPI return code */ @@ -670,8 +669,8 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, * Then build the dimension type as (start, vector type, xtent). ****************************************/ /* calculate start and extent values of this dimension */ - displacement[1] = d[i].start * offset[i] * elmt_size; - displacement[2] = (MPI_Aint)elmt_size * max_xtent[i]; + start_disp = d[i].start * offset[i] * elmt_size; + new_extent = (MPI_Aint)elmt_size * max_xtent[i]; if(MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent failed", mpi_code) @@ -680,32 +679,20 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, * so that it still starts at 0, but its extent * is the full extent in this dimension. *************************************************/ - if(displacement[1] > 0 || (int)extent_len < displacement[2]) { + if(start_disp > 0 || extent_len < new_extent) { + MPI_Datatype interm_type; + int block_len = 1; - block_length[0] = 1; - block_length[1] = 1; - block_length[2] = 1; - - displacement[0] = 0; - - old_types[0] = MPI_LB; - old_types[1] = outer_type; - old_types[2] = MPI_UB; -#ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), "%s: i=%d Extending struct type\n" - "***displacements: %ld, %ld, %ld\n", - FUNC, i, (long)displacement[0], (long)displacement[1], (long)displacement[2]); -#endif - - mpi_code = MPI_Type_create_struct(3, /* count */ - block_length, /* blocklengths */ - displacement, /* displacements */ - old_types, /* old types */ - &inner_type); /* new type */ + HDassert(0 == lb); + mpi_code = MPI_Type_create_hindexed(1, &block_len, &start_disp, outer_type, &interm_type); MPI_Type_free(&outer_type); - if(mpi_code != MPI_SUCCESS) + if(mpi_code != MPI_SUCCESS) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) + + mpi_code = MPI_Type_create_resized(interm_type, lb, new_extent, &inner_type); + MPI_Type_free(&interm_type); + if(mpi_code != MPI_SUCCESS) HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code) } /* end if */ else -- cgit v0.12 From a794cc9103b1b8b78b9f5ed8ac72ea8dfc19835a Mon Sep 17 00:00:00 2001 From: Allen Byrne Date: Wed, 6 Aug 2014 16:49:19 -0500 Subject: [svn-r25518] HDFFV-8880: Remove t_posix_compliant test references --- release_docs/INSTALL_parallel | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/release_docs/INSTALL_parallel b/release_docs/INSTALL_parallel index 6f60165..e5be615 100644 --- a/release_docs/INSTALL_parallel +++ b/release_docs/INSTALL_parallel @@ -23,8 +23,7 @@ with your system support staff of information how to compile an MPI program, how to run an MPI application, and how to access the parallel file 
system. There are sample MPI-IO C and Fortran programs in the appendix section of "Sample programs". You can use them to run simple tests of your MPI compilers -and the parallel file system. Also, the t_posix_compliant test in testpar -verifies if the file system is POSIX compliant. +and the parallel file system. 1.2. Further Help @@ -322,11 +321,6 @@ non-zero code. Failure to support file size greater than 2GB is not a fatal error for HDF5 because HDF5 can use other file-drivers such as families of files to bypass the file size limit. -The t_posix_compliant tests if the file system is POSIX compliant when POSIX -and MPI IO APIs are used. This is for information only and it always exits -with 0 even when non-compliance errors have occurred. This is to prevent -the test from aborting the remaining parallel HDF5 tests unnecessarily. - The t_cache does many small sized I/O requests and may not run well in a slow file system such as NFS disk. If it takes a long time to run it, try set the environment variable $HDF5_PARAPREFIX to a file system more suitable -- cgit v0.12
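
A closing illustration for the argument check added in svn-r25508 earlier in this series. This is a minimal sketch, and the 4096-byte page size is only an example value: with that patch applied, H5Pset_core_write_tracking() rejects a zero page_size instead of accepting it, matching the reference manual.

    #include "hdf5.h"
    #include <stdio.h>

    int main(void)
    {
        hid_t  fapl = H5Pcreate(H5P_FILE_ACCESS);
        herr_t status;

        /* Keep the expected failure from printing the automatic error stack */
        H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

        /* Track dirty pages in 4 KiB units when the core (memory) VFD
         * writes its file image back to disk */
        status = H5Pset_core_write_tracking(fapl, 1, (size_t)4096);
        printf("page_size = 4096: %s\n", status < 0 ? "rejected" : "accepted");

        /* With svn-r25508 applied, a zero page size is rejected */
        status = H5Pset_core_write_tracking(fapl, 1, (size_t)0);
        printf("page_size = 0:    %s\n", status < 0 ? "rejected" : "accepted");

        H5Pclose(fapl);
        return 0;
    }
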