path: root/testpar/t_dset.c
author     Larry Knox <lrknox@hdfgroup.org>    2021-02-08 16:56:16 (GMT)
committer  GitHub <noreply@github.com>         2021-02-08 16:56:16 (GMT)
commit     22c67ff6570f16050f0a5654cf1c84df917bb598 (patch)
tree       38fb63aa498b250c2543fe7e22a511706de96a42 /testpar/t_dset.c
parent     551f15f8ae02ca9c995619b216121081eb07633e (diff)
download   hdf5-22c67ff6570f16050f0a5654cf1c84df917bb598.zip
hdf5-22c67ff6570f16050f0a5654cf1c84df917bb598.tar.gz
hdf5-22c67ff6570f16050f0a5654cf1c84df917bb598.tar.bz2
1.8/master (#317) (tag: hdf5-1_8_22)
* Revert incorrect bool type * Correct grep text for filter test * Check sizeof type to use correct ref_type * h5dump reg ref chnages has no error file * Fix comment * Remove unneeded files * Verify the decoded version for "all" and "none" selection. * Remove double entry * Add missing include * Snapshot version 1.8 release 22 (snap4) Update version to 1.8-snap5 * Fixes for test failures with --disable-tests --disable-tools options. Don't test ph5diff if tests are disabled. * Correct conditions for parallel builds with and without tests. * Snapshot version 1.8 release 22 (snap5) Update version to 1.8.22-snap6 * TRILAB-192 merging warnings changes from develop * Autotools reconfigure update * Restore page ejects. * Restore page eject chars * Fix orphaned token * Update LT files * Correct extra flags and merge TRILAB-24 * Update release note for CMake warnings * H5repack bug fixes from develop * TRILAB-192 - merge changes from develop Single source, config files, for warnings for both autotools and CMake. Update CMake libraries, tools, tests to use correct flags. * Add missing m4 file * Remove 128bit config check fo 1.8 * TRILAB-192 update LTparse files * A fix in the cleaning up code for datatype when datatype initialization via H5D__init_type() fails. The code to fix the problem is the same as what is done in H5D__open_oid(). * Add missing " and update function calls incorrectly named load_gcc_arguments. Commit changes to gnu-cxxflags to remove unmatched " and to gnu-fflags to not add C warnings flags to H5_FCFLAGS. * TRILAB-192 - cleanup edits and match CMake CXX to autotools * Fix shadowed type * TRILAB-192 correct fortran commands * TRILAB-192 version of std=f2008 special check * Fix shadow variable * Add prefix_relto_examplesdir function to set relative path to h5c++. * TRILAB-192 - parser files cannot handle this warning * Parallel Fortran tests now use the MPI module instead of including mpif.h. * Don't add general warnings flags for unsupported old versions of gcc and g++ (older than gcc/g++ 4.2). Correct gnu-cxxflags to determine warnings flags to be added based on C++ compiler version instead of C compiler version. * Snapshot version 1.8 release 22 (snap6) Update version to 1.8.22-snap7 * TRILAB-244 separate CXX warnings and errors from C * Fix NoFilter build * Move ADD_H5_FLAGS macro from *CompilerFlags.cmake files to config/cmake_ext_mod/HDFMacros.cmake for common use. * Restrict errors to gcc 4.8 and above. * OESS-65 replace szip with aec szip * Correct CMake version * Correct -Werror=unused-variable * Add -Werror= flags to lists displayed in libhdf5.settings. * Fix -Werror=unused-variable * More fixes for unused variable * Resolve warning errors from -Werror= flags: -Werror=bad-function-cast -Werror=bad-function-cast -Werror=implicit-function-declaration -Werror=nested-externs * Remove verbose assignments in smoke_check 3 and 5 functions. * Update to use optimization flags, C++ error flags, and correct libhdf5.settings.in to remove unworkable changes. * Update config/gnu-cxxflags. * Add missing headers * Fix ifort warning flag. Restore AC_DEFUN([PAC_PROG_FC_DEFAULT_REALisDBLE] in m4/aclocal_fc.m4. Add --enable-symbols, --enable-profiling, and --enable-optimization options to autotools configure; all configure flags for --enable-production and --disable-production should match HDF5 1.10 and 1.12 production and debug default flags. 
* Add github actions file * Correct path * OESS-65 Fix packaging * Update reference name * Correct filename * disable pr action and enable fail fast * Snapshot 1.8.22-snap7 Update version to 1.8.22-snap8. * Correct mingw path * Correct mingw paths * Revise gnu-*flags and cmake/HDF*CompilerFlags.cmake files to add warning flags for GCC compilers version 4.8 and above. Removed files from gnu-warnings that only apply to versions < 4.8. Consolidated warnings from versions < 4.8 that apply to versions >= 4.8 into the 4.8 warnings files. * Update MANIFEST for removal of older warnings files. * Yanked all MPI-1 calls Added batch scripts in bin/batch. * Snapshot 1.8.22-snap8. Update version to 1.8.22-snap9. * Squashed commit of the following: commit 45b0cbc71929930a874e1c09e0770aa64d0ae697 Author: Larry Knox <lrknox@hdfgroup.org> Date: Thu Apr 23 13:14:56 2020 -0500 Add C++ warnings treated as error for autotools builds. commit 6debcddcc766601799d5bd661bd60946f951a07e Author: Larry Knox <lrknox@hdfgroup.org> Date: Fri Jun 19 16:50:03 2020 -0500 Remove tests for develop branch from testh5cc.sh.in. Add @H5_ECXXFLAGS@ to AM_CXXFLAGS. commit fed497e7f207313b2a133d3b68c942b7d7104b90 Author: Larry Knox <lrknox@hdfgroup.org> Date: Fri Jun 12 15:06:39 2020 -0500 Merge pull request #2646 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit '998c6470d78e14228695419c3b514c35ecf3d69e': Remove unnecessary version conditions for Clang compilers. commit 92e52ce5c211bd1d3991a3b8bb67287ac7b652aa Author: Larry Knox <lrknox@hdfgroup.org> Date: Wed Jun 10 18:53:45 2020 -0500 Merge pull request #2639 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit 'a33667faf226f5d8c9633bf537893e8fce1bf1f6': Add c++ to --enable-sanitize-checks option. commit 2e802534fb997b81fa98fdd1c7d97d4310898e0d Author: Larry Knox <lrknox@hdfgroup.org> Date: Wed Jun 10 15:18:36 2020 -0500 Merge pull request #2633 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit '20eab963899841c9a003baebead8e3bc07d9e127': Remove duplicate entries in MANIFEST. Make changes to CMake CompilerFlags.cmake files so extra flags are loaded for non-GNU compilers. Update new clang files to not pick up clang as vendor for pgCC. Add new files to MANIFEST Temporary demotion of 2 -Werror warning flags that fail on macos 10.12 Remove Production flag unknown to Apple clang. commit 96ef60a58a23847a7da89a891f2415055ec2ab60 Author: Larry Knox <lrknox@hdfgroup.org> Date: Mon Jun 8 16:24:49 2020 -0500 Merge pull request #2631 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit 'b942a4d8a3e27317cac50ce37ff5302d477664d8': Clean up code to get clang version in config/linux-gnulibc1 commit 8a7c687da568e8b50b38fa53da1ca63759be6ec4 Author: Larry Knox <lrknox@hdfgroup.org> Date: Mon Jun 8 11:42:37 2020 -0500 Merge pull request #2623 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit 'c7626f79fdee4eee13925e240ef41e61f3367eab': Add flags from config/clang-warnings/*general files to H5 C and CXX flags for all versions of Clang and Clang++ compilers. Switched from cut to awk in testcheck_version.sh.in to avoid dependence on tab vs. " " in version definitions in H5public.h. Add files for adding warning flags for clang compilers in autotools configure. 
commit db3ef7ff3c1ed79167cecef831501411cff8291f Merge: 5a0f8d7 f9f1310 Author: Larry Knox <lrknox@hdfgroup.org> Date: Fri Jun 19 14:46:22 2020 -0500 Merge branch 'hdf5_1_12' of https://bitbucket.hdfgroup.org/scm/~lrknox/hdf5_lrk into hdf5_1_12 commit 5a0f8d7940ae57b445f545a0abd7e303ce6924ee Author: Larry Knox <lrknox@hdfgroup.org> Date: Wed Jun 10 20:15:41 2020 -0500 Merge pull request #2636 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:HDFFV-11000-update-testh5cc.sh.in-for-version to develop * commit '5c0bd670d6e9919c463fbc05de99e2ba55b0761e': Add tests for all version to H5_NO_DEPRECATED_SYMBOLS section and to section for current version, with and without default API version flags. HDFFV-11000: update-testh5cc.sh.in to test sample versioned functions in HDF5 1.10, 1.12 and develop. * Update configure and Makefile.ins after bin/reconfigure. * Fix MANIFEST * Fix HDFFV-10591 Description: h52gif produced a segfault when a buffer overflow occurred because the data size was corrupted and became very large. This commit added a check on the data size against the buffer size to prevent the segfault. It also added error reporting to h52gif to display an error message instead of silently exiting when the failure occurred. Platforms tested: Linux/64 (jelly) * Snapshot 1.8.22-snap9. Update version to 1.8.22-snap10. * Merge pull request #2686 in HDFFV/hdf5 from ~LRKNOX/hdf5_lrk:develop to develop * commit '800f93f7c994f88dfa716746153ded4b1e690e3a': Remove path to szip header file from AM_CPPFLAGS when configure check of libsz fails. Fix for HDFFV-10830. * Regenerate configure. * Merge from 1.10-1.12-dev - LT parse needs update * Incorporate pull request #2693 from develop for the fix to HDFFV-11080: (1) Patch up the file pointer when reading attribute of variable length datatype (2) Test to verify the fix when doing multiple threads (3) Update MANIFEST (4) Add new test to Cmake * Switch bison and flex update of LT files from bin/genltanalyse to bin/genparser, and from automatically updating the LT files whenever reconfigure is run to running only on man<F12>ual command. * Update hl/sr/H5LT files. Add bin/genparser to MANIFEST; remove bin/genltanalyze. * Fix to the merge of PR #2708 for HDFFV-11080 to the 1.8 branch. Need to checkin test/Makefile.in for the new file added. * Fix HDFFV-11120 and HDFFV-11121 (CVE-2018-13870 and CVE-2018-13869) Description: When a buffer overflow occurred because a name length was corrupted and became very large, h5dump produced a segfault on one file and a memcpy parameter overlap on another file. This commit added checks that detect a read pass the end of the buffer to prevent these error conditions. Platforms tested: Linux/64 (jelly) * Fixed typo * Check for header szlib.h only when libsz passes AC_CHECK_LIB, so that H5_HAVE_SALIB_H is not defined when szip is not enabled, to prevent compile failures for incompatible szlib. * HDFFV-11127 - force RTLD_LOCAL in dlopen * spelling * Remove extra parens * Remove unnecessary assignment freom configure.ac. * Add hypen to subrelease regex * Add special case when H5_VERS_SUBRELEASE is 0 * Revert H5_VERS_SUBRELEASE check * The version string has a second use for packaging * MinGW uses "MinGW Makefiles" * Snapshot 1.8.22-snap10. Update version to 1.8.22-snap11. * Merge clang-format changes from develop * remove stray file * Source formatted * Add options to github actions. 
* Add back file, remove py file * HDFFV-11096 Correct h5dump usage text * Update formating * remove autogen for 1.8 branch * Fix comment whitespace * Change '==' to 'eq' in test script * Merge from develop github updates bin scripts parser updates * Snapshot 1.8.22-snap11. Update version to 1.8.22-snap12. * Merge from 1.10 Comments, whitespace Simple init and if block brackets. Minimal code changes limited to return value and spelling * Update autotools files * Update FD source * More merges from 1.10 comments and inits * Comment fixes * Fix macro argument * Remove autogen file and cache dir * Revert autotools files to previous version * Correct boolean value and missing define * URL and options corrections * revert autoconf files * Fix compile issues with vfd drivers. * Fix HDFS test * Remove extra #endif * Move init above execution * Multiple changes to bring branch up-to-date (#87) Correct TARGET variable and CMake config file location. Add option to allow filter plugins to be built inline. Update CMake tools macros. * hdf5 1 8 release text (#113) * Multiple changes to bring branch up-to-date Correct TARGET variable and CMake config file location. Add option to allow filter plugins to be built inline. Update CMake tools macros. * release text updates - mostly whitespace * Fixed HDFFV-10480 and HDFFV-11159 (#145) * Fixed HDFFV-10480 and HDFFV-11159 Description Checked against buffer size to prevent segfault, in case of data corruption. + HDFFV-11159 CVE-2018-14033 Buffer over-read in H5O_layout_decode + HDFFV-10480 CVE-2018-11206 Buffer over-read in H5O_fill_new[/old]_decode and A user's patch was applied to this previously, but it is redone for a more correct fix, that is the check now accounted for the previous advance of the buffer pointer. Platforms tested: Linux/64 (jelly) * Fixed typo * Update HDF5 1.8 copyright headers with updated copyright URL (#148) * Update URL for COPYING file in copyright headers. * Make fortran copyright headers uniformly spaced. * Update url for Copyright file in c++/src/footer.html. * OESS-98 Update plugin build option (#171) * 1.8 has extra fortran CMake option (#176) * 1.8 has extra fortran CMake option * Update license URL * OESS-98 fix tools test for plugins (#180) * Hdf5 1 8 (#169) * Fixed HDFFV-10480 and HDFFV-11159 Description Checked against buffer size to prevent segfault, in case of data corruption. + HDFFV-11159 CVE-2018-14033 Buffer over-read in H5O_layout_decode + HDFFV-10480 CVE-2018-11206 Buffer over-read in H5O_fill_new[/old]_decode and A user's patch was applied to this previously, but it is redone for a more correct fix, that is the check now accounted for the previous advance of the buffer pointer. Platforms tested: Linux/64 (jelly) * Fixed typo * Fixed HDFFV-11150 Description Replaced an HDassert with a check for null pointer in H5O_dec_rc() to catch null pointer in corrupted data situation. Reversed the fix in svn-r24463 in which a check for null pointer prior to calling H5O_dec_rc() Platforms tested: Linux/64 (jelly) * HDF5 1 8 fix fortran build on macs (#186) * Correct fortran and shared libs option * Fix for no shared fortran build * OESS-98 fix hdf5 link target (#191) * Partial changes to RELEASE.txt for release. (#185) * Partial changes to RELEASE.txt for release. * Update supported and tested platforms. * Update version to 1.8.22-12. * close #195. (#196) * Update HDF5PluginMacros.cmake * Update HDF5PluginMacros.cmake * Update directory for SZ filter in HDF5PluginMacros.cmake. 
Updates for release: Switch configure default to production mode. Set HDF5_GENERATE_HEADERS to OFF. * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Restores maintainer mode in the autotools (#200) (#203) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> * Hdf5 1 8 22 (#212) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Brings ttsafe_attr_vlen.c changes from develop (#214) Fixes exposed pthread problem on Windows. * Update SO numbers for Hdf5 1 8 22 (#215) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Hdf5 1 8 22 (#224) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> * Update CMake/HDF5Examples version in bin/release * Update CMake/HDF5Examples version number in bin/release (#225) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. 
Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> * Fixed typo in an error message. (#228) * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * Bring 3 small changes from Hdf5 1.8 to 1.8.22 (#241) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Stat st blocks fix 1822 (#251) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * Fixes Autotools detection of the st_blocks field in stat (#246) * Fixes Autotools detection of the st_blocks field in stat The Autotools and CMake will now both correctly determine if the stat struct has the st_blocks field and set H5_HAVE_STAT_ST_BLOCKS appropriately. * Fixes a typo in configure.ac * Restore lines in RELEASE.txt. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Stat st blocks fix 1822 (#256) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. 
Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * Fixes Autotools detection of the st_blocks field in stat (#246) * Fixes Autotools detection of the st_blocks field in stat The Autotools and CMake will now both correctly determine if the stat struct has the st_blocks field and set H5_HAVE_STAT_ST_BLOCKS appropriately. * Fixes a typo in configure.ac * Restore lines in RELEASE.txt. * Updated configure with reconfigure. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * RELEASE.txt cleanup. * Hdf5 1 8 22 (#261) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. * )Update version. * Hdf5 1 8 22 (#266) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. * )Update version. 
Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Reverts lock/unlock callback signature to 1.8.21 version (#254) * Reverts lock/unlock callback signature to 1.8.21 version This callback is unused in 1.8. The ros3 and hdfs VFDs are the only VFDs that have the lock callback implemented and that is just as no-op stubs. These stubs were removed so the callbacks are now NULL pointers, like the other VFDs in 1.8. * Trivial whitespace fix * Update version to 1.8.22-14. * Hdf5 1 8 22 - Reverts lock/unlock callback signature to 1.8.21 version (#267) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. * )Update version. * Reverts lock/unlock callback signature to 1.8.21 version (#254) * Reverts lock/unlock callback signature to 1.8.21 version This callback is unused in 1.8. The ros3 and hdfs VFDs are the only VFDs that have the lock callback implemented and that is just as no-op stubs. These stubs were removed so the callbacks are now NULL pointers, like the other VFDs in 1.8. * Trivial whitespace fix * Update version to 1.8.22-14. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Update version in H5public.h * Hdf5 1 8 22 (#269) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. * )Update version. * Reverts lock/unlock callback signature to 1.8.21 version (#254) * Reverts lock/unlock callback signature to 1.8.21 version This callback is unused in 1.8. The ros3 and hdfs VFDs are the only VFDs that have the lock callback implemented and that is just as no-op stubs. 
These stubs were removed so the callbacks are now NULL pointers, like the other VFDs in 1.8. * Trivial whitespace fix * Update version to 1.8.22-14. * Update version in H5public.h Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Set version 1.8.22 for release. * dd RELEASE.txt entry for HDFFV-10741. * Hdf5 1 8 22 (#279) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. * )Update version. * Reverts lock/unlock callback signature to 1.8.21 version (#254) * Reverts lock/unlock callback signature to 1.8.21 version This callback is unused in 1.8. The ros3 and hdfs VFDs are the only VFDs that have the lock callback implemented and that is just as no-op stubs. These stubs were removed so the callbacks are now NULL pointers, like the other VFDs in 1.8. * Trivial whitespace fix * Update version to 1.8.22-14. * Update version in H5public.h * Set version 1.8.22 for release. * dd RELEASE.txt entry for HDFFV-10741. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> * Improve performance of multiple calls to H5Sget_select_elem_pointlist (#270) (#277) * Cache the pointer to the next point to process after the last call to H5S__get_select_elem_pointlist. This allows the normal process of iterating over the points in batches to be much more efficient, as the library does not need to traverse the entirety of the preceding points every time the funciton is re-entered. * Update RELEASE.txt for point selection iteration performance fix. * Hdf5 1 8 22 (#281) * Restores maintainer mode in the autotools (#200) Maintainer mode should be enabled in development branches. Also adds helpful commenting. Add bin/switch_maint_mode Disable maintainer mode for release. Fix incomplete merge for stub functions in H5Fdhdfs.c * Update configure for Restores maintainer mode in the autotools (#200). * Update MANIFEST for switch_maint_mode script. * Update so numbers for 1.8.22 release. * Add so numbers changes in Makefile.ins for 1.8.22 release. * Update pkgconfig settings with version - #218 (#223) * Add notice of final HDFF5 1.8 release. Add solaris 64bit alignment issue to "Known Problems". * Update 1.8 final release notice. * Update CMake/HDF5Examples version in bin/release * Fixed typo in an error message. (#227) * Remove duplicate setting (#239) * RELEASE.txt cleanup. * Add macOS Big Sur to tested machines, also missing entries for macOS 10.13 and 10.14. 
* )Update version. * Reverts lock/unlock callback signature to 1.8.21 version (#254) * Reverts lock/unlock callback signature to 1.8.21 version This callback is unused in 1.8. The ros3 and hdfs VFDs are the only VFDs that have the lock callback implemented and that is just as no-op stubs. These stubs were removed so the callbacks are now NULL pointers, like the other VFDs in 1.8. * Trivial whitespace fix * Update version to 1.8.22-14. * Update version in H5public.h * Set version 1.8.22 for release. * dd RELEASE.txt entry for HDFFV-10741. * Improve performance of multiple calls to H5Sget_select_elem_pointlist (#270) (#277) * Cache the pointer to the next point to process after the last call to H5S__get_select_elem_pointlist. This allows the normal process of iterating over the points in batches to be much more efficient, as the library does not need to traverse the entirety of the preceding points every time the funciton is re-entered. * Update RELEASE.txt for point selection iteration performance fix. Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> Co-authored-by: Neil Fortner <nfortne2@hdfgroup.org> * Hdf5 1 8 22 (#284) * Fixed typo in an error message. * Updated for HDFFV-11150, HDFFV-10480, and HDFFV-11159 * Update "Support for New Platforms and Compilers" section in RELEASE.txt; add check_version workaround for binary compatibility to "Known Problems". * Add SUSE Linux to tested platforms. * Update numbers in config/lt_vers.am and run bin/reconfigure for so numbers. * Update version in 3 files missed by merge. Co-authored-by: Allen Byrne <byrn@hdfgroup.org> Co-authored-by: Vailin Choi <vchoi@hdfgroup.org> Co-authored-by: vchoi <vchoi@jelly.ad.hdfgroup.org> Co-authored-by: hdftest <hdftest@hdfgroup.org> Co-authored-by: Jordan Henderson <jhenderson@hdfgroup.org> Co-authored-by: Dana Robinson <derobins@hdfgroup.org> Co-authored-by: Binh-Minh Ribler <bmribler@hdfgroup.org> Co-authored-by: Allen Byrne <50328838+byrnHDF@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> Co-authored-by: H. Joe Lee <hyoklee@hdfgroup.org> Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: Neil Fortner <nfortne2@hdfgroup.org>
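The point-selection performance item above (the H5Sget_select_elem_pointlist change from #270/#277) targets callers that walk a large element selection in fixed-size batches. A rough sketch of that access pattern, assuming a dataspace space_id that already carries a rank-2 point selection (the function and buffer names here are illustrative, not taken from this commit):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Illustrative only: read a rank-2 element selection back in batches.
     * Before the caching fix, each call re-walked every point preceding
     * "start"; with it, sequential calls resume where the previous one
     * stopped. */
    static void
    dump_points(hid_t space_id)
    {
        hssize_t npoints = H5Sget_select_elem_npoints(space_id);
        hsize_t  batch   = 1024; /* points retrieved per call */
        hsize_t *buf     = malloc((size_t)batch * 2 * sizeof(hsize_t));

        for (hsize_t start = 0; start < (hsize_t)npoints; start += batch) {
            hsize_t n = ((hsize_t)npoints - start < batch) ? (hsize_t)npoints - start : batch;
            H5Sget_select_elem_pointlist(space_id, start, n, buf);
            /* ... consume n coordinate pairs in buf ... */
        }
        free(buf);
    }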
Diffstat (limited to 'testpar/t_dset.c')
-rw-r--r--    testpar/t_dset.c    2732
1 file changed, 1350 insertions, 1382 deletions
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 29d6dcf..b841cd3 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -6,7 +6,7 @@
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -36,130 +36,135 @@
* Setup the dimensions of the hyperslab.
* Two modes--by rows or by columns.
* Assume dimension rank is 2.
- * BYROW divide into slabs of rows
- * BYCOL divide into blocks of columns
- * ZROW same as BYROW except process 0 gets 0 rows
- * ZCOL same as BYCOL except process 0 gets 0 columns
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
*/
static void
-slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
- hsize_t stride[], hsize_t block[], int mode)
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
{
- switch (mode){
- case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = dim0/mpi_size;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = mpi_rank*block[0];
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set BYROW\n");
- break;
- case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = dim0;
- block[1] = dim1/mpi_size;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank*block[1];
-if(VERBOSE_MED) printf("slab_set BYCOL\n");
- break;
- case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (mpi_rank ? dim0/mpi_size : 0);
- block[1] = dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (mpi_rank? mpi_rank*block[0] : 0);
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set ZROW\n");
- break;
- case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = dim0;
- block[1] = (mpi_rank ? dim1/mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (mpi_rank? mpi_rank*block[1] : 0);
-if(VERBOSE_MED) printf("slab_set ZCOL\n");
- break;
- default:
- /* Unknown mode. Set it to cover the whole dataset. */
- printf("unknown slab_set mode (%d)\n", mode);
- block[0] = dim0;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set wholeset\n");
- break;
+ switch (mode) {
+ case BYROW:
+ /* Each process takes a slabs of rows. */
+ block[0] = dim0 / mpi_size;
+ block[1] = dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = dim0;
+ block[1] = dim1 / mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+ /* Similar to BYROW except process 0 gets 0 row */
+ block[0] = (mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = dim0;
+ block[1] = (mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = dim0;
+ block[1] = dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
}
-if(VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
}
}
/*
* Setup the coordinates for point selection.
*/
-void point_set(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- size_t num_points,
- hsize_t coords[],
- int order)
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
{
- hsize_t i,j, k = 0, m ,n, s1 ,s2;
+ hsize_t i, j, k = 0, m, n, s1, s2;
HDcompile_assert(RANK == 2);
- if(OUT_OF_ORDER == order)
+ if (OUT_OF_ORDER == order)
k = (num_points * RANK) - 1;
- else if(IN_ORDER == order)
+ else if (IN_ORDER == order)
k = 0;
s1 = start[0];
s2 = start[1];
- for(i = 0 ; i < count[0]; i++)
- for(j = 0 ; j < count[1]; j++)
- for(m = 0 ; m < block[0]; m++)
- for(n = 0 ; n < block[1]; n++)
- if(OUT_OF_ORDER == order) {
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
coords[k--] = s2 + (stride[1] * j) + n;
coords[k--] = s1 + (stride[0] * i) + m;
}
- else if(IN_ORDER == order) {
+ else if (IN_ORDER == order) {
coords[k++] = s1 + stride[0] * i + m;
coords[k++] = s2 + stride[1] * j + n;
}
- if(VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
- for(i = 0; i < num_points ; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ for (i = 0; i < num_points; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
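For orientation (a sketch, not part of the diff): the start/stride/count/block values produced by slab_set() are normally passed straight to H5Sselect_hyperslab(), and the flat coordinate list produced by point_set() to H5Sselect_elements(), much as the test routines further down do; fspace, coords, and num_points below are assumed to be set up by the caller:

    /* Sketch of how the helpers above are consumed; RANK, BYROW, and
     * IN_ORDER come from the test header, fspace is the dataset's file
     * dataspace, and coords holds num_points * RANK slots. */
    hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];

    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);

    /* or, for a point selection: */
    point_set(start, count, stride, block, num_points, coords, IN_ORDER);
    H5Sselect_elements(fspace, H5S_SELECT_SET, num_points, coords);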
@@ -170,92 +175,90 @@ void point_set(hsize_t start[],
* Assume dimension rank is 2 and data is stored contiguous.
*/
static void
-dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* put some trivial data in the data_array */
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
}
}
-
/*
* Print the content of the dataset.
*/
static void
-dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
{
DATATYPE *dataptr = dataset;
- hsize_t i, j;
+ hsize_t i, j;
/* print the column heading */
- printf("%-8s", "Cols:");
- for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
- for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
- }
- printf("\n");
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
-
/*
* Print the content of the dataset.
*/
int
-dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original)
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original)
{
hsize_t i, j;
- int vrfyerrs;
+ int vrfyerrs;
/* print it if VERBOSE_MED */
- if(VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
- *(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ if (*dataset != *original) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+ (unsigned long)(j + start[1]), *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
+ }
}
- if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if(vrfyerrs)
- printf("%d errors found in dataset_vrfy\n", vrfyerrs);
- return(vrfyerrs);
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
}
-
/*
* Part 1.a--Independent read/write for fixed dimension datasets.
*/
@@ -271,36 +274,36 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
void
dataset_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* ----------------------------------------
@@ -318,7 +321,6 @@ dataset_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* ---------------------------------------------
* Define the dimensions of the overall datasets
* and the slabs local to the MPI process.
@@ -326,21 +328,17 @@ dataset_writeInd(void)
/* setup dimensionality object */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
- dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
- dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
-
/*
* To test the independent orders of writes between processes, all
* even number processes write to dataset1 first, then dataset2.
@@ -355,43 +353,40 @@ dataset_writeInd(void)
MESG("data_array initialized");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to write with zero rows for process 0 */
- if(VERBOSE_MED)
- printf("writeInd by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeInd by some with zero row");
-if((mpi_rank/2)*2 != mpi_rank){
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
- VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
-}
+ if ((mpi_rank / 2) * 2 != mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+ }
#ifdef BARRIER_CHECKS
-MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
/* release dataspace ID */
@@ -410,44 +405,45 @@ MPI_Barrier(MPI_COMM_WORLD);
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
/* Example of using the parallel HDF5 library to read a dataset */
void
dataset_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* setup file access template */
@@ -455,7 +451,7 @@ dataset_readInd(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -470,40 +466,39 @@ dataset_readInd(void)
dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
VRFY((dataset2 >= 0), "");
-
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* close dataset collectively */
ret = H5Dclose(dataset1);
@@ -518,11 +513,12 @@ dataset_readInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
-
/*
* Part 1.b--Collective read/write for fixed dimension datasets.
*/
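A brief aside before the collective tests (a minimal sketch, not taken from the hunks shown here): the collective variants differ from the independent ones chiefly in the dataset transfer property list handed to H5Dwrite/H5Dread; the dataset and dataspace IDs below are assumed to exist as in the independent case:

    /* Minimal sketch of a collective write; assumed IDs: dataset,
     * mem_dataspace, file_dataspace, data_array1. */
    hid_t  xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    herr_t ret        = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

    ret = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
                   xfer_plist, data_array1); /* every rank participates */
    H5Pclose(xfer_plist);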
@@ -539,49 +535,49 @@ dataset_readInd(void)
void
dataset_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
- hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
- int i;
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+ int i;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Collective write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -599,7 +595,6 @@ dataset_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------
* Define the dimensions of the overall datasets
* and create the dataset
@@ -607,17 +602,16 @@ dataset_writeAll(void)
/* setup 2-D dimensionality object */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
/* create a dataset collectively */
dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
datatype = H5Tcopy(H5T_NATIVE_INT);
- ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
VRFY((ret >= 0), "H5Tset_order succeeded");
dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -656,54 +650,51 @@ dataset_writeAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll by Row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* setup dimensions again to writeAll with zero rows for process 0 */
- if(VERBOSE_MED)
- printf("writeAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero row");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
/* release all temporary handles. */
@@ -719,59 +710,56 @@ dataset_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to writeAll with zero columns for process 0 */
- if(VERBOSE_MED)
- printf("writeAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero col");
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
/* release all temporary handles. */
@@ -781,16 +769,15 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset3);
+ file_dataspace = H5Dget_space(dataset3);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
@@ -798,42 +785,39 @@ dataset_writeAll(void)
} /* end else */
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with none");
- ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* release all temporary handles. */
@@ -847,11 +831,11 @@ dataset_writeAll(void)
/* Additionally, these are in a scalar dataspace */
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset4);
+ file_dataspace = H5Dget_space(dataset4);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(file_dataspace);
@@ -861,9 +845,9 @@ dataset_writeAll(void)
/* create a memory dataspace independently */
mem_dataspace = H5Screate(H5S_SCALAR);
VRFY((mem_dataspace >= 0), "");
- if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(mem_dataspace);
@@ -873,31 +857,29 @@ dataset_writeAll(void)
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with scalar dataspace");
- ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* release all temporary handles. */
@@ -905,55 +887,54 @@ dataset_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ if (data_array1)
+        HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- block[0] = 1;
- block[1] = dim1;
+ block[0] = 1;
+ block[1] = dim1;
stride[0] = 1;
stride[1] = dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = dim0/mpi_size * mpi_rank;
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = dim0 / mpi_size * mpi_rank;
+ start[1] = 0;
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* Dataset5: point selection in File - Hyperslab selection in Memory*/
/* create a file dataspace independently */
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset5);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- start[0] = 0;
- start[1] = 0;
- mem_dataspace = H5Dget_space (dataset5);
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
/* release all temporary handles. */
@@ -963,35 +944,34 @@ dataset_writeAll(void)
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = dim0 / mpi_size * mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
/* release all temporary handles. */
@@ -1001,34 +981,33 @@ dataset_writeAll(void)
/* Dataset7: point selection in File - All selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = dim0 / mpi_size * mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset7);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- current_dims = num_points;
- mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
ret = H5Sselect_all(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* write data collectively */
- ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
/* release all temporary handles. */
@@ -1058,8 +1037,10 @@ dataset_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
}
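
Datasets 5 through 7 above pair point selections in the file (H5Sselect_elements) with hyperslab, point, and "all" selections on the memory side. The hedged sketch below condenses the file-side point selection used there; the dataset handle, target row, and element count are assumptions, coordinates are passed as a flat row-major array of (row, col) pairs, and error checks are omitted.

#include <stdlib.h>
#include "hdf5.h"

/* Sketch: each rank collectively writes 'npoints' individually addressed
 * elements of a 2-D integer dataset via a point selection. */
static void
write_points_collectively(hid_t dset, hsize_t row, size_t npoints, const int *buf)
{
    hsize_t *coords   = (hsize_t *)malloc(npoints * 2 * sizeof(hsize_t));
    hsize_t  mdims[1] = {(hsize_t)npoints};
    hid_t    fspace, mspace, dxpl;
    size_t   i;

    /* one (row, col) pair per selected element, flattened row-major */
    for (i = 0; i < npoints; i++) {
        coords[2 * i]     = row;
        coords[2 * i + 1] = (hsize_t)i;
    }

    fspace = H5Dget_space(dset);
    H5Sselect_elements(fspace, H5S_SELECT_SET, npoints, coords);

    /* memory side: a flat 1-D buffer with an implicit "all" selection */
    mspace = H5Screate_simple(1, mdims, NULL);

    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    H5Pclose(dxpl);
    H5Sclose(mspace);
    H5Sclose(fspace);
    free(coords);
}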
/*
@@ -1074,48 +1055,48 @@ dataset_writeAll(void)
void
dataset_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- size_t num_points; /* for point selection */
- hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
- int i,j,k;
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+ int i, j, k;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Collective read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* set up the coords array selection */
num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1126,14 +1107,13 @@ dataset_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------
* Open the datasets in it
* ------------------------- */
@@ -1161,62 +1141,61 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero columns for process 0 */
- if(VERBOSE_MED)
- printf("readAll by some with zero col\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero col");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
/* Could have used them for dataset2 but it is cleaner */
@@ -1229,219 +1208,221 @@ dataset_readAll(void)
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* setup dimensions again to readAll with zero rows for process 0 */
- if(VERBOSE_MED)
- printf("readAll by some with zero row\n");
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
- if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero row");
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- if(data_origin1) free(data_origin1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+    if (data_array1)
+        HDfree(data_array1);
+    if (data_origin1)
+        HDfree(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
- block[0] = 1;
- block[1] = dim1;
+ block[0] = 1;
+ block[1] = dim1;
stride[0] = 1;
stride[1] = dim1;
- count[0] = 1;
- count[1] = 1;
- start[0] = dim0/mpi_size * mpi_rank;
- start[1] = 0;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = dim0 / mpi_size * mpi_rank;
+ start[1] = 0;
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* Dataset5: point selection in memory - Hyperslab selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset5);
+ file_dataspace = H5Dget_space(dataset5);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset5);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset5);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ if (data_array1)
+        HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = dim0 / mpi_size * mpi_rank;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, IN_ORDER);
- file_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
start[0] = 0;
start[1] = 0;
- point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
- mem_dataspace = H5Dget_space (dataset6);
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset6 succeeded");
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
- if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ if (data_array1)
+        HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset7);
+ file_dataspace = H5Dget_space(dataset7);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_all(file_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
num_points = dim0 * dim1;
- k=0;
- for (i=0 ; i<dim0; i++) {
- for (j=0 ; j<dim1; j++) {
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ for (j = 0; j < dim1; j++) {
coords[k++] = i;
coords[k++] = j;
}
}
- mem_dataspace = H5Dget_space (dataset7);
+ mem_dataspace = H5Dget_space(dataset7);
VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
/* read data collectively */
- ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset7 succeeded");
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = dim0 / mpi_size * mpi_rank;
start[1] = 0;
- ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
- if(ret) nerrors++;
+ ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+ data_origin1);
+ if (ret)
+ nerrors++;
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -1466,12 +1447,14 @@ dataset_readAll(void)
H5Fclose(fid);
/* release data buffers */
- if(coords) HDfree(coords);
- if(data_array1) HDfree(data_array1);
- if(data_origin1) HDfree(data_origin1);
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
}
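
The dataset_fill() and dataset_vrfy() helpers called throughout these hunks are defined earlier in t_dset.c. For readers jumping in here, a simplified stand-in with the same shape is sketched below: fill a local slab with values derived from the global coordinates, then compare the read-back buffer element by element and return the mismatch count. The value formula is illustrative only; the real helpers use a different formula and VRFY-based reporting.

#include <stdio.h>
#include "hdf5.h"

/* Simplified stand-ins for the fill/verify helpers used by these tests. */
static void
fill_slab(const hsize_t start[2], const hsize_t block[2], int *buf)
{
    hsize_t i, j;

    for (i = 0; i < block[0]; i++)
        for (j = 0; j < block[1]; j++)
            /* any deterministic function of the global coordinates works */
            *buf++ = (int)((start[0] + i) * 100 + (start[1] + j));
}

static int
verify_slab(const hsize_t start[2], const hsize_t block[2], const int *got, const int *expected)
{
    hsize_t i, j;
    int     nerr = 0;

    for (i = 0; i < block[0]; i++)
        for (j = 0; j < block[1]; j++) {
            size_t idx = (size_t)(i * block[1] + j);
            if (got[idx] != expected[idx]) {
                printf("mismatch at [%lu][%lu]: got %d, expected %d\n",
                       (unsigned long)(start[0] + i), (unsigned long)(start[1] + j),
                       got[idx], expected[idx]);
                nerr++;
            }
        }
    return nerr;
}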
-
/*
* Part 2--Independent read/write for extendible datasets.
*/
@@ -1487,45 +1470,44 @@ dataset_readAll(void)
void
extend_writeInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = chunkdim0;
chunk_dims[1] = chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1535,22 +1517,22 @@ extend_writeInd(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
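
The re-indented block above shrinks the metadata cache element count on the file access property list so that chunked raw-data I/O provokes cache collisions. As a reference for the four H5Pget_cache/H5Pset_cache parameters, here is the same idea as an isolated, hedged sketch; the fapl handle is an assumed input, and note that in 1.8-era and later libraries the metadata cache is primarily tuned through H5Pset_mdc_config, so the legacy mdc_nelmts knob may have limited effect.

#include "hdf5.h"

/* Sketch: shrink the legacy metadata cache element count on a file access
 * property list while leaving the raw-data chunk cache settings untouched. */
static herr_t
shrink_mdc(hid_t fapl)
{
    int    mdc_nelmts;  /* metadata cache: element count (legacy knob)  */
    size_t rdcc_nslots; /* raw-data chunk cache: hash-table slots       */
    size_t rdcc_nbytes; /* raw-data chunk cache: total size in bytes    */
    double rdcc_w0;     /* chunk-cache preemption policy, 0.0 .. 1.0    */

    if (H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nslots, &rdcc_nbytes, &rdcc_w0) < 0)
        return -1;
    mdc_nelmts = 4; /* deliberately tiny, to provoke cache collisions */
    return H5Pset_cache(fapl, mdc_nelmts, rdcc_nslots, rdcc_nbytes, rdcc_w0);
}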
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -1560,14 +1542,13 @@ extend_writeInd(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -1576,7 +1557,7 @@ extend_writeInd(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (RANK, dims, max_dims);
+ sid = H5Screate_simple(RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -1591,8 +1572,6 @@ extend_writeInd(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -1602,37 +1581,35 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = dim0;
dims[1] = dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -1642,13 +1619,13 @@ extend_writeInd(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
@@ -1657,14 +1634,13 @@ extend_writeInd(void)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
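
The write-beyond-extent check above depends on temporarily silencing HDF5's automatic error reporting so that the expected failure does not flood the test output. That idiom, lifted out of the test into a small hedged fragment: the dset, mspace, fspace, and buf handles are placeholders assumed to exist in the caller, and the failing H5Dwrite stands in for any call whose failure is anticipated.

/* Sketch: run an operation that is expected to fail without letting the
 * library print its error stack, then restore the previous handler. */
H5E_auto2_t old_func;
void *      old_client_data;
herr_t      status;

H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); /* remember handler  */
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);                  /* silence reporting */

status = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);
/* expected: status < 0, because the selection lies beyond the current extent */

H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);   /* restore handler   */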
@@ -1674,18 +1650,17 @@ extend_writeInd(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = dim0;
dims[1] = dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1694,7 +1669,6 @@ extend_writeInd(void)
ret = H5Sclose(mem_dataspace);
VRFY((ret >= 0), "H5Sclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -1705,7 +1679,8 @@ extend_writeInd(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
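
extend_writeInd() works on datasets created with zero current size and H5S_UNLIMITED maximum dimensions, grown later with H5Dset_extent() before the actual write. A minimal hedged sketch of that life cycle for a 1-D dataset follows; the dataset name, chunk size, and final extent are illustrative, the file handle is an assumed input, and error checks are omitted.

#include "hdf5.h"

/* Sketch: create an empty, chunked, extendible 1-D dataset, extend it,
 * and write into the now-available region. */
static void
create_and_extend(hid_t fid)
{
    hsize_t cur     = 0;              /* start with no elements          */
    hsize_t max     = H5S_UNLIMITED;  /* but allow unlimited growth      */
    hsize_t chunk   = 1024;           /* chunking is required to extend  */
    hsize_t newsize = 10;
    hsize_t start   = 0;
    hsize_t count   = 10;
    int     buf[10] = {0};
    hid_t   dcpl, sid, dset, fspace, mspace;

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, &chunk);

    sid  = H5Screate_simple(1, &cur, &max);
    dset = H5Dcreate2(fid, "growable", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    H5Sclose(sid);
    H5Pclose(dcpl);

    /* grow the dataset, then re-fetch the file dataspace to see the new extent */
    H5Dset_extent(dset, &newsize);
    fspace = H5Dget_space(dset);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, &start, NULL, &count, NULL);

    mspace = H5Screate_simple(1, &count, NULL);
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

    H5Sclose(mspace);
    H5Sclose(fspace);
    H5Dclose(dset);
}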
/*
@@ -1718,30 +1693,30 @@ void
extend_writeInd2(void)
{
const char *filename;
- hid_t fid; /* HDF5 file ID */
- hid_t fapl; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size=10; /* Original dataset dim size */
- hsize_t new_size=20; /* Extended dataset dim size */
- hsize_t one=1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
- int written[10], /* Data to write */
- retrieved[10]; /* Data read in */
- int mpi_size, mpi_rank; /* MPI settings */
- int i; /* Local index variable */
- herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size = 10; /* Original dataset dim size */
+ hsize_t new_size = 20; /* Extended dataset dim size */
+ hsize_t one = 1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent write test #2 on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* -------------------
* START AN HDF5 FILE
@@ -1758,7 +1733,6 @@ extend_writeInd2(void)
ret = H5Pclose(fapl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
@@ -1770,7 +1744,7 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* setup dimensionality object */
- fs = H5Screate_simple (1, &orig_size, &max_size);
+ fs = H5Screate_simple(1, &orig_size, &max_size);
VRFY((fs >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -1781,7 +1755,6 @@ extend_writeInd2(void)
ret = H5Pclose(dcpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* -------------------------
* Test writing to dataset
* -------------------------*/
@@ -1790,14 +1763,14 @@ extend_writeInd2(void)
VRFY((ms >= 0), "H5Screate_simple succeeded");
/* put some trivial data in the data_array */
- for(i = 0; i < (int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
written[i] = i;
MESG("data array initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset zero: ");
- for(i = 0; i < (int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ if (VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
}
ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -1807,17 +1780,17 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset zero: ");
- for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ if (VERBOSE_MED) {
+ MESG("read at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
}
/* -------------------------
@@ -1833,14 +1806,14 @@ extend_writeInd2(void)
/* -------------------------
* Write to the second half of the dataset
* -------------------------*/
- for (i=0; i<(int)orig_size; i++)
+ for (i = 0; i < (int)orig_size; i++)
written[i] = orig_size + i;
MESG("data array re-initialized");
- if(VERBOSE_MED) {
- MESG("writing at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ if (VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
}
ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
@@ -1852,20 +1825,19 @@ extend_writeInd2(void)
* -------------------------*/
ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
VRFY((ret >= 0), "H5Dread succeeded");
- for (i=0; i<(int)orig_size; i++)
- if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
- i,written[i], i,retrieved[i]);
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
nerrors++;
}
- if(VERBOSE_MED){
- MESG("read at offset 10: ");
- for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ if (VERBOSE_MED) {
+ MESG("read at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
}
-
/* Close dataset collectively */
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose succeeded");
@@ -1879,41 +1851,41 @@ extend_writeInd2(void)
void
extend_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1924,7 +1896,7 @@ extend_readInd(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -1944,7 +1916,7 @@ extend_readInd(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
@@ -1956,72 +1928,70 @@ extend_readInd(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2032,14 +2002,16 @@ extend_readInd(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
/*
@@ -2057,46 +2029,45 @@ extend_readInd(void)
void
extend_writeAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = chunkdim0;
chunk_dims[1] = chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -2106,22 +2077,22 @@ extend_writeAll(void)
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset. This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
-
- ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
- VRFY((ret >= 0), "H5Pget_cache succeeded");
- mdc_nelmts=4;
- ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
- VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
/* create the file collectively */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -2131,14 +2102,13 @@ extend_writeAll(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "");
-
/* --------------------------------------------------------------
* Define the dimensions of the overall datasets and create them.
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2147,7 +2117,7 @@ extend_writeAll(void)
/* setup dimensionality object */
/* start out with no rows, extend it later. */
dims[0] = dims[1] = 0;
- sid = H5Screate_simple (RANK, dims, max_dims);
+ sid = H5Screate_simple(RANK, dims, max_dims);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2162,8 +2132,6 @@ extend_writeAll(void)
H5Sclose(sid);
H5Pclose(dataset_pl);
-
-
/* -------------------------
* Test writing to dataset1
* -------------------------*/
@@ -2173,41 +2141,39 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
dims[0] = dim0;
dims[1] = dim1;
- ret = H5Dset_extent(dataset1, dims);
+ ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2215,7 +2181,6 @@ extend_writeAll(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
-
/* -------------------------
* Test writing to dataset2
* -------------------------*/
@@ -2225,40 +2190,38 @@ extend_writeAll(void)
/* put some trivial data in the data_array */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try write to dataset2 beyond its current dim sizes. Should fail. */
    /* Temporarily turn off auto error reporting */
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently. Should fail. */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -2268,18 +2231,17 @@ extend_writeAll(void)
/* Extend dataset2 and try again. Should succeed. */
dims[0] = dim0;
dims[1] = dim1;
- ret = H5Dset_extent(dataset2, dims);
+ ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* write data independently */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2290,7 +2252,6 @@ extend_writeAll(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2301,49 +2262,50 @@ extend_writeAll(void)
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
+ if (data_array1)
+ HDfree(data_array1);
}
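The pattern exercised above is worth stating on its own: a write whose selection lies beyond the current dimensions of an extendible dataset fails until H5Dset_extent() has grown the dataset, and the file dataspace must be re-acquired afterwards. A minimal sketch for reference only (not part of this change; dset, mspace, dxpl, the hyperslab parameters, and buf stand in for the identifiers used in the test above):

    hsize_t new_dims[RANK] = {dim0, dim1}; /* target extent */
    hid_t   fspace;
    herr_t  status;

    /* Grow the dataset first; selecting past the old extent would make H5Dwrite fail */
    status = H5Dset_extent(dset, new_dims);
    VRFY((status >= 0), "H5Dset_extent succeeded");

    /* Re-acquire the file dataspace so it reflects the new extent, then select and write */
    fspace = H5Dget_space(dset);
    status = H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((status >= 0), "H5Sselect_hyperslab succeeded");
    status = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);
    VRFY((status >= 0), "H5Dwrite succeeded");
    H5Sclose(fspace);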
/* Example of using the parallel HDF5 library to read an extendible dataset */
void
extend_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_array1 = NULL; /* data buffer */
+ DATATYPE * data_array2 = NULL; /* data buffer */
+ DATATYPE * data_origin1 = NULL; /* expected data buffer */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc(dim0 * dim1 * sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2354,7 +2316,7 @@ extend_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
VRFY((fid >= 0), "");
/* Release file-access template */
@@ -2374,7 +2336,7 @@ extend_readAll(void)
H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
@@ -2386,95 +2348,91 @@ extend_readAll(void)
H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
H5Sclose(file_dataspace);
-
/* Read dataset1 using BYROW pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset1 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
H5Pclose(xfer_plist);
-
/* Read dataset2 using BYCOL pattern */
/* set up dimensions of the slab this process accesses */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "");
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* read data collectively */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
VRFY((ret == 0), "dataset2 read verified correct");
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
H5Sclose(mem_dataspace);
H5Sclose(file_dataspace);
@@ -2486,14 +2444,16 @@ extend_readAll(void)
ret = H5Dclose(dataset2);
VRFY((ret >= 0), "");
-
/* close the file collectively */
H5Fclose(fid);
/* release data buffers */
- if(data_array1) HDfree(data_array1);
- if(data_array2) HDfree(data_array2);
- if(data_origin1) HDfree(data_origin1);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
}
/*
@@ -2504,44 +2464,44 @@ extend_readAll(void)
void
compress_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
- int rank=1; /* Dataspace rank */
- hsize_t dim=dim0; /* Dataspace dimensions */
- unsigned u; /* Local index variable */
- DATATYPE *data_read = NULL; /* data buffer */
- DATATYPE *data_orig = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank = 1; /* Dataspace rank */
+ hsize_t dim = dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ DATATYPE * data_read = NULL; /* data buffer */
+ DATATYPE * data_orig = NULL; /* expected data buffer */
const char *filename;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
- int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Collective chunked dataset read test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
/* Retrieve MPI parameters */
- MPI_Comm_size(comm,&mpi_size);
- MPI_Comm_rank(comm,&mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
/* Allocate data buffer */
- data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded");
- data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
VRFY((data_read != NULL), "data_array1 HDmalloc succeeded");
/* Initialize data buffers */
- for(u=0; u<dim;u++)
- data_orig[u]=u;
+ for (u = 0; u < dim; u++)
+ data_orig[u] = u;
/* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
+ if (mpi_rank == 0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
/* Create the file */
fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -2556,7 +2516,7 @@ compress_readAll(void)
/* Use eight chunks */
chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
VRFY((ret >= 0), "H5Pset_chunk succeeded");
ret = H5Pset_deflate(dcpl, 9);
@@ -2567,7 +2527,8 @@ compress_readAll(void)
VRFY((dataspace > 0), "H5Screate_simple succeeded");
/* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ dataset =
+ H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), "H5Dcreate2 succeeded");
/* Write compressed data */
@@ -2597,48 +2558,45 @@ compress_readAll(void)
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid > 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* Open dataset with compressed chunks */
dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
VRFY((dataset > 0), "H5Dopen2 succeeded");
/* Try reading & writing data */
- if(dataset>0) {
+ if (dataset > 0) {
/* Create dataset transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist > 0), "H5Pcreate succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
}
-
/* Try reading the data */
ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((ret >= 0), "H5Dread succeeded");
/* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ for (u = 0; u < dim; u++)
+ if (data_orig[u] != data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+ (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
nerrors++;
}
/* Writing to the compressed, chunked dataset in parallel should fail */
- H5E_BEGIN_TRY {
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- } H5E_END_TRY;
+ H5E_BEGIN_TRY { ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); }
+ H5E_END_TRY;
VRFY((ret < 0), "H5Dwrite failed");
ret = H5Pclose(xfer_plist);
@@ -2647,12 +2605,15 @@ compress_readAll(void)
VRFY((ret >= 0), "H5Dclose succeeded");
} /* end if */
+ /* Close file */
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
/* release data buffers */
- if(data_read) HDfree(data_read);
- if(data_orig) HDfree(data_orig);
+ if (data_read)
+ HDfree(data_read);
+ if (data_orig)
+ HDfree(data_orig);
}
#endif /* H5_HAVE_FILTER_DEFLATE */
@@ -2671,39 +2632,39 @@ compress_readAll(void)
void
none_selection_chunk(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE * data_origin = NULL; /* data buffer */
+ DATATYPE * data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t mstart[RANK]; /* for data buffer in memory */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
filename = GetTestParameters();
- if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
chunk_dims[0] = chunkdim0;
@@ -2729,8 +2690,8 @@ none_selection_chunk(void)
* ------------------------------------------------------------- */
/* set up dataset storage chunk sizes and creation property list */
- if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2739,7 +2700,7 @@ none_selection_chunk(void)
/* setup dimensionality object */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create an extendible dataset collectively */
@@ -2762,65 +2723,64 @@ none_selection_chunk(void)
/* allocate memory for data buffer. Only allocate enough buffer for
* each processor's data. */
- if(mpi_rank) {
- data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ if (mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
- data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* put some trivial data in the data_array */
mstart[0] = mstart[1] = 0;
dataset_fill(mstart, block, data_origin);
MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
}
}
/* create a memory dataspace independently */
- mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(mem_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* create a file dataspace independently */
- file_dataspace = H5Dget_space (dataset1);
+ file_dataspace = H5Dget_space(dataset1);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* Process 0 has no selection */
- if(!mpi_rank) {
+ if (!mpi_rank) {
ret = H5Sselect_none(file_dataspace);
VRFY((ret >= 0), "H5Sselect_none succeeded");
}
/* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* -------------------------
@@ -2830,19 +2790,18 @@ none_selection_chunk(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write data collectively */
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
- if(mpi_rank) {
+ if (mpi_rank) {
ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
- if(ret) nerrors++;
+ if (ret)
+ nerrors++;
}
/* release resource */
@@ -2853,7 +2812,6 @@ none_selection_chunk(void)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
-
/* close dataset collectively */
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2864,24 +2822,25 @@ none_selection_chunk(void)
H5Fclose(fid);
/* release data buffers */
- if(data_origin) HDfree(data_origin);
- if(data_array) HDfree(data_array);
+ if (data_origin)
+ HDfree(data_origin);
+ if (data_array)
+ HDfree(data_array);
}
-
/* Function: test_actual_io_mode
*
- * Purpose: tests one specific case of collective I/O and checks that the
+ * Purpose: tests one specific case of collective I/O and checks that the
* actual_chunk_opt_mode property and the actual_io_mode
* properties in the DXPL have the correct values.
*
* Input: selection_mode: changes the way processes select data from the space, as well
* as some dxpl flags to get collective I/O to break in different ways.
- *
+ *
* The relevant I/O function and expected response for each mode:
* TEST_ACTUAL_IO_MULTI_CHUNK_IND:
* H5D_mpi_chunk_collective_io, each process reports independent I/O
- *
+ *
* TEST_ACTUAL_IO_MULTI_CHUNK_COL:
* H5D_mpi_chunk_collective_io, each process reports collective I/O
*
@@ -2893,7 +2852,7 @@ none_selection_chunk(void)
* collective, the rest report independent I/O
*
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
- * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND.
+ * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND.
 * Go directly to multi-chunk-io without the num threshold calculation.
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
 * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL.
@@ -2918,78 +2877,76 @@ none_selection_chunk(void)
*
* Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
 * are not needed as they are covered by DIRECT_CHUNK_MIX and
- * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
+ * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
 * pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold.
*
* Modification:
- * - Refctore to remove multi-chunk-without-opimization test and update for
- * testing direct to multi-chunk-io
+ * - Refactor to remove the multi-chunk-without-optimization test and update for
+ * testing direct to multi-chunk-io
* Programmer: Jonathan Kim
* Date: 2012-10-10
*
- *
+ *
* Programmer: Jacob Gruber
* Date: 2011-04-06
*/
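For orientation, the properties verified by this test can be queried from any dataset transfer property list after an I/O call; a minimal sketch, for illustration only and not part of this change (dset, mspace, fspace, and buf are placeholder handles assumed to be set up as in the test below):

    H5D_mpio_actual_io_mode_t        io_mode;
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;
    hid_t                            dxpl = H5Pcreate(H5P_DATASET_XFER);

    /* Request collective transfer and perform the write */
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    /* After the transfer the DXPL records what the library actually did */
    H5Pget_mpio_actual_io_mode(dxpl, &io_mode);         /* e.g. H5D_MPIO_CHUNK_COLLECTIVE */
    H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode); /* e.g. H5D_MPIO_MULTI_CHUNK */
    H5Pclose(dxpl);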
-static void
-test_actual_io_mode(int selection_mode) {
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
- const char * filename;
- const char * test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
- hsize_t dims[RANK];
- hsize_t chunk_dims[RANK];
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
- char message[256];
- herr_t ret;
-
+static void
+test_actual_io_mode(int selection_mode)
+{
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ const char * filename;
+ const char * test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hsize_t chunk_dims[RANK];
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ char message[256];
+ herr_t ret;
+
/* Set up some flags to make some future if statements slightly more readable */
- direct_multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
-
+ direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
- multi_chunk_io = (
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
- selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
- selection_mode == TEST_ACTUAL_IO_RESET );
-
- is_chunked = (
- selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
- selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
-
+ multi_chunk_io =
+ (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
+
+ is_chunked =
+ (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+
is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
/* Set up MPI parameters */
@@ -2997,7 +2954,7 @@ test_actual_io_mode(int selection_mode) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3014,10 +2971,10 @@ test_actual_io_mode(int selection_mode) {
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((fid >= 0), "H5Fcreate succeeded");
- /* Create the basic Space */
+ /* Create the basic Space */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* Create the dataset creation plist */
@@ -3025,27 +2982,26 @@ test_actual_io_mode(int selection_mode) {
VRFY((dcpl >= 0), "dataset creation plist created successfully");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0] / mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
/* Create the dataset */
- dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
/* Create the file dataspace */
file_space = H5Dget_space(dataset);
VRFY((file_space >= 0), "H5Dget_space succeeded");
- /* Choose a selection method based on the type of I/O we want to occur,
+ /* Choose a selection method based on the type of I/O we want to occur,
 * and also set up some selection-dependent test info. */
- switch(selection_mode) {
-
+ switch (selection_mode) {
+
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
@@ -3054,10 +3010,10 @@ test_actual_io_mode(int selection_mode) {
* independent.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Multi Chunk - Independent";
+
+ test_name = "Multi Chunk - Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
/* Collective I/O with optimization */
@@ -3068,15 +3024,15 @@ test_actual_io_mode(int selection_mode) {
* selections to each chunk, the operation is purely collective.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
- test_name = "Multi Chunk - Collective";
+
+ test_name = "Multi Chunk - Collective";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1)
+ if (mpi_size > 1)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
-
+
/* Mixed I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
/* A chunk will be assigned collective I/O only if it is selected by each
@@ -3087,32 +3043,33 @@ test_actual_io_mode(int selection_mode) {
* assigned independent I/O. Each process will access one chunk collectively
* and at least one chunk independently, reporting mixed I/O.
*/
-
- if(mpi_rank == 0) {
- /* Select the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- } else {
+
+ if (mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
- count[0] = 2;
- count[1] = 1;
+ block[0] = dim0 / mpi_size;
+ block[1] = dim1 / mpi_size;
+ count[0] = 2;
+ count[1] = 1;
stride[0] = mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = mpi_rank * block[1];
}
-
- test_name = "Multi Chunk - Mixed";
+
+ test_name = "Multi Chunk - Mixed";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
break;
/* RESET tests that the properties are properly reset to defaults each time I/O is
 * performed. To achieve this, we have RESET perform collective I/O (which would change
* the values from the defaults) followed by independent I/O (which should report the
* default values). RESET doesn't need to have a unique selection, so we reuse
- * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
* on all builds. The independent section of RESET can be found at the end of this function.
*/
case TEST_ACTUAL_IO_RESET:
@@ -3121,55 +3078,56 @@ test_actual_io_mode(int selection_mode) {
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
/* A chunk will be assigned collective I/O only if it is selected by each
* process. To get mixed I/O with disagreement, assign process n to the
- * first chunk and the nth chunk. The first chunk, selected by all, is
+ * first chunk and the nth chunk. The first chunk, selected by all, is
 * assigned collective I/O, while each other process gets independent I/O.
 * Since the root process will only access the first chunk, it will report
 * collective I/O. The subsequent processes will access the first chunk
 * collectively, and their other chunk independently, reporting mixed I/O.
*/
- if(mpi_rank == 0) {
- /* Select the first chunk in the first column */
- slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / mpi_size;
- } else {
+ if (mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / mpi_size;
+ }
+ else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
- count[0] = 2;
- count[1] = 1;
+ block[0] = dim0 / mpi_size;
+ block[1] = dim1 / mpi_size;
+ count[0] = 2;
+ count[1] = 1;
stride[0] = mpi_rank * block[0];
stride[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[0] = 0;
+ start[1] = mpi_rank * block[1];
}
-
+
/* If the testname was not already set by the RESET case */
if (selection_mode == TEST_ACTUAL_IO_RESET)
test_name = "RESET";
else
test_name = "Multi Chunk - Mixed (Disagreement)";
-
+
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
- if(mpi_size > 1) {
- if(mpi_rank == 0)
+ if (mpi_size > 1) {
+ if (mpi_rank == 0)
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
else
actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
}
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
-
- break;
+
+ break;
/* Linked Chunk I/O */
- case TEST_ACTUAL_IO_LINK_CHUNK:
+ case TEST_ACTUAL_IO_LINK_CHUNK:
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Link Chunk";
+
+ test_name = "Link Chunk";
actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
- actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
break;
/* Contiguous Dataset */
@@ -3177,36 +3135,36 @@ test_actual_io_mode(int selection_mode) {
/* A non overlapping, regular selection in a contiguous dataset leads to
* collective I/O */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Contiguous";
+
+ test_name = "Contiguous";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
break;
case TEST_ACTUAL_IO_NO_COLLECTIVE:
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
- test_name = "Independent";
+
+ test_name = "Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
- actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
break;
default:
- test_name = "Undefined Selection Mode";
+ test_name = "Undefined Selection Mode";
actual_chunk_opt_mode_expected = -1;
- actual_io_mode_expected = -1;
+ actual_io_mode_expected = -1;
break;
}
ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
+
/* Create a memory dataspace mirroring the dataset and select the same hyperslab
- * as in the file space.
+ * as in the file space.
*/
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
-
+
ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -3215,38 +3173,38 @@ test_actual_io_mode(int selection_mode) {
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
/* Set collective I/O properties in the dxpl. */
- if(is_collective) {
+ if (is_collective) {
/* Request collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Set the threshold number of processes per chunk to twice mpi_size.
- * This will prevent the threshold from ever being met, thus forcing
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
* multi chunk io instead of link chunk io.
 * This is via default.
*/
- if(multi_chunk_io) {
+ if (multi_chunk_io) {
/* force multi-chunk-io by threshold */
- ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2);
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
            /* set this to manipulate the testing scenario for allocating processes
* to chunks */
- ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99);
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
}
/* Set directly go to multi-chunk-io without threshold calc. */
- if(direct_multi_chunk_io) {
+ if (direct_multi_chunk_io) {
/* set for multi chunk io by property*/
ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3259,43 +3217,46 @@ test_actual_io_mode(int selection_mode) {
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
    /* Retrieve actual I/O values */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode suceeded" );
+    VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+    VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
    /* Retrieve actual I/O values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded" );
+    VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
+    VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
/* Check write vs read */
VRFY((actual_io_mode_read == actual_io_mode_write),
- "reading and writing are the same for actual_io_mode");
+ "reading and writing are the same for actual_io_mode");
VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
- "reading and writing are the same for actual_chunk_opt_mode");
+ "reading and writing are the same for actual_chunk_opt_mode");
/* Test values */
- if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) {
- sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ if (actual_chunk_opt_mode_expected != (unsigned)-1 && actual_io_mode_expected != (unsigned)-1) {
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n", test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
- } else {
- HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
- actual_chunk_opt_mode_write, actual_io_mode_write);
+ }
+ else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
+ actual_io_mode_write);
}
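A hedged sketch of how the two query properties checked above are read back after a collective transfer; `dxpl` stands for the transfer property list that was passed to H5Dwrite()/H5Dread():

    H5D_mpio_actual_io_mode_t        io_mode;
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;

    H5Pget_mpio_actual_io_mode(dxpl, &io_mode);         /* e.g. H5D_MPIO_CHUNK_COLLECTIVE */
    H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode); /* e.g. H5D_MPIO_MULTI_CHUNK */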
    /* To test that the property is successfully reset to the default, we perform some
@@ -3315,30 +3276,30 @@ test_actual_io_mode(int selection_mode) {
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset write (independent)");
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset write (independent)");
-
+ "actual_io_mode has correct value for reset write (independent)");
+
/* Read */
ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY( (ret >= 0), "retriving actual io mode succeeded" );
+ VRFY((ret >= 0), "retriving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+ VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
- "actual_chunk_opt_mode has correct value for reset read (independent)");
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
- "actual_io_mode has correct value for reset read (independent)");
- }
+ "actual_io_mode has correct value for reset read (independent)");
+ }
}
/* Release some resources */
@@ -3355,24 +3316,24 @@ test_actual_io_mode(int selection_mode) {
return;
}
-
/* Function: actual_io_mode_tests
*
- * Purpose: Tests all possible cases of the actual_io_mode property.
+ * Purpose: Tests all possible cases of the actual_io_mode property.
*
* Programmer: Jacob Gruber
* Date: 2011-04-06
*/
void
-actual_io_mode_tests(void) {
+actual_io_mode_tests(void)
+{
int mpi_size = -1;
int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
-
- /*
+
+ /*
* Test multi-chunk-io via proc_num threshold
*/
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
@@ -3383,10 +3344,10 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
else
HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
-
+
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- /*
+ /*
* Test multi-chunk-io via setting direct property
*/
test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
@@ -3394,31 +3355,31 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
-/*
+/*
* Function: test_no_collective_cause_mode
*
- * Purpose:
- * tests cases for broken collective I/O and checks that the
+ * Purpose:
+ * tests cases for broken collective I/O and checks that the
* H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
*
- * Input:
+ * Input:
 *     selection_mode: various modes to cause broken collective I/O
* Note: Originally, each TEST case is supposed to be used alone.
* After some discussion, this is updated to take multiple TEST cases
- * with '|'. However there is no error check for any of combined
+ * with '|'. However there is no error check for any of combined
 *           test cases, so the tester is responsible for understanding and feeding
 *           a proper combination of TESTs if needed.
*
- *
+ *
* TEST_COLLECTIVE:
* Test for regular collective I/O without cause of breaking.
* Just to test normal behavior.
- *
+ *
* TEST_SET_INDEPENDENT:
* Test for Independent I/O as the cause of breaking collective I/O.
*
@@ -3430,7 +3391,7 @@ actual_io_mode_tests(void) {
*
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
- *
+ *
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
* Test for Compact layout as the cause of breaking collective I/O.
*
@@ -3439,47 +3400,48 @@ actual_io_mode_tests(void) {
*
* TEST_FILTERS:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead.
+ *       Note: TEST_FILTERS mode will not work until H5Dcreate and H5Dwrite support MPI-IO together with
+ *       filters. Use the test_no_collective_cause_mode_filter() function instead.
+ *
*
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
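As a rough usage sketch (not part of the test itself), the cause flags described above are read back from the transfer property list after the I/O call; the `dxpl` handle and the HDprintf reporting are illustrative:

    uint32_t local_cause = 0, global_cause = 0;
    herr_t   status;

    status = H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
    if (global_cause == H5D_MPIO_COLLECTIVE)
        HDprintf("collective I/O was performed\n");
    else if (global_cause & H5D_MPIO_DATATYPE_CONVERSION)
        HDprintf("collective I/O was broken by a datatype conversion\n");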
#define DSET_NOCOLCAUSE "nocolcause"
-#define NELM 2
-#define FILE_EXTERNAL "nocolcause_extern.data"
-static void
-test_no_collective_cause_mode(int selection_mode)
+#define NELM 2
+#define FILE_EXTERNAL "nocolcause_extern.data"
+static void
+test_no_collective_cause_mode(int selection_mode)
{
- uint32_t no_collective_cause_local_write = 0;
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_write = 0;
- uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- hsize_t coord[NELM][RANK];
-
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- hbool_t is_independent=0;
- int mpi_size = -1;
- int mpi_rank = -1;
+ hsize_t coord[NELM][RANK];
+
+ const char *filename;
+ const char *test_name;
+ hbool_t is_chunked = 1;
+ hbool_t is_independent = 0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
int length;
- int * buffer;
+ int * buffer;
int i;
MPI_Comm mpi_comm;
MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t dcpl = -1;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t dcpl = -1;
hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t dxpl_read = -1;
hsize_t dims[RANK];
- hid_t mem_space = -1;
+ hid_t mem_space = -1;
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
@@ -3487,7 +3449,7 @@ test_no_collective_cause_mode(int selection_mode)
H5Z_filter_t filter_info;
#endif /* LATER */
/* set to global value as default */
- int l_facc_type = facc_type;
+ int l_facc_type = facc_type;
char message[256];
/* Set up MPI parameters */
@@ -3506,27 +3468,29 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((dcpl >= 0), "dataset creation plist created successfully");
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
- ret = H5Pset_layout (dcpl, H5D_COMPACT);
- VRFY((ret >= 0),"set COMPACT layout succeeded");
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((ret >= 0), "set COMPACT layout succeeded");
is_chunked = 0;
}
if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
- ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED);
- VRFY((ret >= 0),"set EXTERNAL file layout succeeded");
+ ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
+ VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
is_chunked = 0;
}
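Either layout above is enough to trigger the not-contiguous-or-chunked cause in this branch; a minimal sketch of the two settings (used one at a time, as in the test), with the `dcpl` handle illustrative:

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

    /* compact layout: raw data is stored in the object header */
    H5Pset_layout(dcpl, H5D_COMPACT);

    /* ...or external storage: raw data lives in a separate file */
    /* H5Pset_external(dcpl, "nocolcause_extern.data", (off_t)0, H5F_UNLIMITED); */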
#ifdef LATER /* fletcher32 */
if (selection_mode & TEST_FILTERS) {
ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+ VRFY((ret >= 0), "Fletcher32 filter is available.\n");
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+ ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
+ VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
+ (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
+ "Fletcher32 filter encoding and decoding available.\n");
ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
+ VRFY((ret >= 0), "set filter (flecher32) succeeded");
}
#endif /* LATER */
@@ -3546,10 +3510,9 @@ test_no_collective_cause_mode(int selection_mode)
dims[0] = dim0;
dims[1] = dim1;
}
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
}
-
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3564,30 +3527,27 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0] / mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
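A condensed sketch of the chunked-dataset setup used here; `fid`, `dim0`, `dim1`, `mpi_size`, `sid`, `data_type`, and `dataset` are assumed to be set up as in the test, and the other local names are illustrative:

    hsize_t dset_dims[2]   = {dim0, dim1};
    hsize_t chunk_dims2[2] = {dim0 / mpi_size, dim1}; /* roughly one chunk row per rank */
    hid_t   dcpl2          = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl2, 2, chunk_dims2);
    sid     = H5Screate_simple(2, dset_dims, NULL);
    dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl2, H5P_DEFAULT);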
-
/* Create the dataset */
- dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
- /*
- * Set expected causes and some tweaks based on the type of test
+ /*
+ * Set expected causes and some tweaks based on the type of test
*/
if (selection_mode & TEST_DATATYPE_CONVERSION) {
test_name = "Broken Collective I/O - Datatype Conversion";
no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
/* set different sign to trigger type conversion */
- data_type = H5T_NATIVE_UINT;
+ data_type = H5T_NATIVE_UINT;
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
@@ -3618,14 +3578,14 @@ test_no_collective_cause_mode(int selection_mode)
#endif /* LATER */
if (selection_mode & TEST_COLLECTIVE) {
- test_name = "Broken Collective I/O - Not Broken";
- no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
}
if (selection_mode & TEST_SET_INDEPENDENT) {
- test_name = "Broken Collective I/O - Independent";
- no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
/* switch to independent io */
is_independent = 1;
@@ -3635,7 +3595,7 @@ test_no_collective_cause_mode(int selection_mode)
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
file_space = H5S_ALL;
- mem_space = H5S_ALL;
+ mem_space = H5S_ALL;
}
else {
/* Get the file dataspace */
@@ -3643,7 +3603,7 @@ test_no_collective_cause_mode(int selection_mode)
VRFY((file_space >= 0), "H5Dget_space succeeded");
/* Create the memory dataspace */
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
}
@@ -3652,15 +3612,15 @@ test_no_collective_cause_mode(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if(is_independent) {
+
+ if (is_independent) {
/* Set Independent I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3669,32 +3629,31 @@ test_no_collective_cause_mode(int selection_mode)
/* Set Collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform (dxpl_write, "x+1");
+ ret = H5Pset_data_transform(dxpl_write, "x+1");
VRFY((ret >= 0), "H5Pset_data_transform succeeded");
}
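For context, a minimal sketch of the data-transform case: in this branch a transform expression on the DXPL is itself reported as a cause of broken collective I/O (H5D_MPIO_DATA_TRANSFORMS). The `dxpl` handle is illustrative:

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Pset_data_transform(dxpl, "x+1"); /* every element is written as x + 1 */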
/*---------------------
* Test Write access
- *---------------------*/
+ *---------------------*/
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
-
+ ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
+ &no_collective_cause_global_write);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/*---------------------
* Test Read access
- *---------------------*/
+ *---------------------*/
/* Make a copy of the dxpl to test the read operation */
dxpl_read = H5Pcopy(dxpl_write);
@@ -3703,25 +3662,27 @@ test_no_collective_cause_mode(int selection_mode)
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
+ ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/* Check write vs read */
VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
- "reading and writing are the same for local cause of Broken Collective I/O");
+ "reading and writing are the same for local cause of Broken Collective I/O");
VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
- "reading and writing are the same for global cause of Broken Collective I/O");
-
+ "reading and writing are the same for global cause of Broken Collective I/O");
+
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3752,55 +3713,54 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-
-/*
+/*
* Function: test_no_collective_cause_mode_filter
*
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
+ * Purpose:
+ *      Test specifically for using a filter as a cause of broken collective I/O and
* checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
* have the correct values.
*
- * NOTE:
- * This is a temporary function.
+ * NOTE:
+ * This is a temporary function.
* test_no_collective_cause_mode(TEST_FILTERS) will replace this when
 *      H5Dcreate and H5Dwrite support MPI-IO together with filters.
*
- * Input:
+ * Input:
* TEST_FILTERS_READ:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- *
+ *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
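A hedged sketch of the filter setup this test will rely on once mpio + filter I/O is supported: confirm the Fletcher32 filter is available and configured for both encode and decode, then attach it to a chunked dataset creation property list. The `dcpl` handle is illustrative:

    unsigned int filter_info = 0;

    if (H5Zfilter_avail(H5Z_FILTER_FLETCHER32) > 0) {
        H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
        if ((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) &&
            (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED))
            H5Pset_fletcher32(dcpl); /* checksum filter; requires a chunked layout */
    }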
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
+static void
+test_no_collective_cause_mode_filter(int selection_mode)
{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- int mpi_size = -1;
- int mpi_rank = -1;
+ const char *filename;
+ const char *test_name = "I/O";
+ hbool_t is_chunked = 1;
+ int mpi_size = -1;
+ int mpi_rank = -1;
int length;
- int * buffer;
+ int * buffer;
int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
+ hid_t fapl_read = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl = -1;
hsize_t dims[RANK];
- hid_t mem_space = -1;
+ hid_t mem_space = -1;
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
@@ -3814,7 +3774,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3824,28 +3784,29 @@ test_no_collective_cause_mode_filter(int selection_mode)
dcpl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl >= 0), "dataset creation plist created successfully");
- if (selection_mode == TEST_FILTERS_READ ) {
+ if (selection_mode == TEST_FILTERS_READ) {
#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+ ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
+ VRFY((ret >= 0), "Fletcher32 filter is available.\n");
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+ ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, (unsigned int *)&filter_info);
+ VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
+ (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
+ "Fletcher32 filter encoding and decoding available.\n");
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
+ ret = H5Pset_fletcher32(dcpl);
+ VRFY((ret >= 0), "set filter (flecher32) succeeded");
#endif /* LATER */
}
- else {
+ else {
VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
}
- /* Create the basic Space */
+ /* Create the basic Space */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3858,24 +3819,22 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY((fid >= 0), "H5Fcreate succeeded");
/* If we are not testing contiguous datasets */
- if(is_chunked) {
+ if (is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0] / mpi_size;
chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
}
-
/* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
#ifdef LATER /* fletcher32 */
/* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
+ test_name = "Broken Collective I/O - Filter is required";
+ no_collective_cause_local_expected = H5D_MPIO_FILTERS;
no_collective_cause_global_expected = H5D_MPIO_FILTERS;
#endif /* LATER */
@@ -3884,7 +3843,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY((file_space >= 0), "H5Dget_space succeeded");
/* Create the memory dataspace */
- mem_space = H5Screate_simple (RANK, dims, NULL);
+ mem_space = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
/* Get the number of elements in the selection */
@@ -3892,34 +3851,33 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
+
+ if (selection_mode == TEST_FILTERS_READ) {
+        /* To test read in collective I/O mode, write in independent mode
* because write fails with mpio + filter */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
- else {
+ else {
/* To test write in collective I/O mode. */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
-
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
/* Make a copy of the dxpl to test the read operation */
dxpl = H5Pcopy(dxpl);
VRFY((dxpl >= 0), "H5Pcopy succeeded");
@@ -3931,7 +3889,6 @@ test_no_collective_cause_mode_filter(int selection_mode)
if (fid)
H5Fclose(fid);
-
/*---------------------
* Test Read access
*---------------------*/
@@ -3940,8 +3897,8 @@ test_no_collective_cause_mode_filter(int selection_mode)
fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
- fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_read);
+ dataset = H5Dopen2(fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
/* Set collective I/O properties in the dxpl. */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
@@ -3950,19 +3907,21 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
+ ret = H5Pget_mpio_no_collective_cause(dxpl, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retriving no collective cause succeeded");
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset(message, 0, sizeof(message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3988,38 +3947,39 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Function: no_collective_cause_tests
*
- * Purpose: Tests cases for broken collective IO.
+ * Purpose: Tests cases for broken collective IO.
*
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-void
-no_collective_cause_tests(void)
+void
+no_collective_cause_tests(void)
{
- /*
- * Test individual cause
+ /*
+ * Test individual cause
*/
- test_no_collective_cause_mode (TEST_COLLECTIVE);
- test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+ test_no_collective_cause_mode(TEST_COLLECTIVE);
+ test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
/* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
-#endif /* LATER */
+ test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
+#endif /* LATER */
- /*
- * Test combined causes
+ /*
+ * Test combined causes
*/
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
- test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
- test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
+ TEST_DATA_TRANSFORMS);
return;
}
@@ -4038,41 +3998,42 @@ no_collective_cause_tests(void)
void
dataset_atomicity(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
- int buf_size;
- hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
- hsize_t start[RANK];
- hsize_t stride[RANK];
- hsize_t count[RANK];
- hsize_t block[RANK];
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int * write_buf = NULL; /* data buffer */
+ int * read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
const char *filename;
- herr_t ret; /* Generic return value */
- int mpi_size, mpi_rank;
- int i, j, k;
- hbool_t atomicity = FALSE;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
-
- dim0 = 64; dim1 = 32;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64;
+ dim1 = 32;
filename = GetTestParameters();
if (facc_type != FACC_MPIO) {
- printf("Atomicity tests will not work without the MPIO VFD\n");
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
return;
}
- if(VERBOSE_MED)
- printf("atomic writes to file %s\n", filename);
+ if (VERBOSE_MED)
+ HDprintf("atomic writes to file %s\n", filename);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
@@ -4097,29 +4058,25 @@ dataset_atomicity(void)
/* setup dimensionality object */
dims[0] = dim0;
dims[1] = dim1;
- sid = H5Screate_simple (RANK, dims, NULL);
+ sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create datasets */
- dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
- dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
/* initialize datasets to 0s */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
-
+
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose succeeded");
ret = H5Dclose(dataset2);
@@ -4129,35 +4086,39 @@ dataset_atomicity(void)
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* make sure setting atomicity fails on a serial file ID */
- /* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeed");
+ /* file locking allows only one file open (serial) for writing */
+ if (MAINPROCESS) {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeed");
+ }
/* should fail */
- ret = H5Fset_mpi_atomicity (fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ if (MAINPROCESS) {
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* setup file access template */
acc_tpl = create_faccess_plist(comm, info, facc_type);
VRFY((acc_tpl >= 0), "");
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
VRFY((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity (fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
@@ -4165,22 +4126,22 @@ dataset_atomicity(void)
VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
if (0 == mpi_rank) {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
}
else {
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
}
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes contiguously to the entire dataset */
if (0 == mpi_rank) {
@@ -4193,27 +4154,30 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
}
- if(VERBOSE_MED) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
- for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ if (VERBOSE_MED) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
}
/* The processes that read the dataset must either read all values
as 0 (read happened before process 0 wrote to dataset 1), or 5
(read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare = read_buf[0];
- VRFY((compare == 0 || compare == 5),
+ VRFY((compare == 0 || compare == 5),
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
- for (i=1; i<buf_size; i++) {
+ for (i = 1; i < buf_size; i++) {
if (read_buf[i] != compare) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
- nerrors ++;
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+ read_buf[i], compare);
+ nerrors++;
}
}
}
@@ -4222,8 +4186,10 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5D close succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
/* open dataset2 (non-contiguous case) */
dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
@@ -4236,102 +4202,104 @@ dataset_atomicity(void)
read_buf = (int *)HDcalloc(buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
write_buf[i] = 5;
}
- for (i=0 ; i<buf_size ; i++) {
+ for (i = 0; i < buf_size; i++) {
read_buf[i] = 8;
}
atomicity = FALSE;
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
VRFY((atomicity == TRUE), "atomcity set failed");
-
- block[0] = dim0/mpi_size - 1;
- block[1] = dim1/mpi_size - 1;
+ block[0] = dim0 / mpi_size - 1;
+ block[1] = dim1 / mpi_size - 1;
stride[0] = block[0] + 1;
stride[1] = block[1] + 1;
- count[0] = mpi_size;
- count[1] = mpi_size;
- start[0] = 0;
- start[1] = 0;
+ count[0] = mpi_size;
+ count[1] = mpi_size;
+ start[0] = 0;
+ start[1] = 0;
/* create a file dataspace */
- file_dataspace = H5Dget_space (dataset2);
+ file_dataspace = H5Dget_space(dataset2);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
VRFY((mem_dataspace >= 0), "");
ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* Process 0 writes to the dataset */
if (0 == mpi_rank) {
- ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, write_buf);
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
/* All processes wait for the write to finish. This works because
atomicity is set to true */
- MPI_Barrier (comm);
+ MPI_Barrier(comm);
/* The other processes read the entire dataset */
if (0 != mpi_rank) {
- ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, read_buf);
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
}
- if(VERBOSE_MED) {
+ if (VERBOSE_MED) {
if (mpi_rank == 1) {
- i=0;j=0;k=0;
- for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
- for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
}
- printf ("\n");
+ HDprintf("\n");
}
}
    /* The processes that read the dataset must read all values
       as 5 (the read can only happen after process 0 wrote to dataset 2) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare;
- i=0;j=0;k=0;
+ i = 0;
+ j = 0;
+ k = 0;
compare = 5;
- for (i=0 ; i<dim0 ; i++) {
- if (i >= mpi_rank*(block[0]+1)) {
+ for (i = 0; i < dim0; i++) {
+ if (i >= mpi_rank * (block[0] + 1)) {
break;
}
- if ((i+1)%(block[0]+1)==0) {
+ if ((i + 1) % (block[0] + 1) == 0) {
k += dim1;
continue;
}
- for (j=0 ; j<dim1 ; j++) {
- if (j >= mpi_rank*(block[1]+1)) {
- k += dim1 - mpi_rank*(block[1]+1);
+ for (j = 0; j < dim1; j++) {
+ if (j >= mpi_rank * (block[1] + 1)) {
+ k += dim1 - mpi_rank * (block[1] + 1);
break;
}
- if ((j+1)%(block[1]+1)==0) {
+ if ((j + 1) % (block[1] + 1) == 0) {
k++;
continue;
}
else if (compare != read_buf[k]) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
+ k, read_buf[k], compare);
nerrors++;
}
- k ++;
+ k++;
}
}
}
@@ -4344,12 +4312,13 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Sclose succeeded");
/* release data buffers */
- if(write_buf) HDfree(write_buf);
- if(read_buf) HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
-
}
/* Function: dense_attr_test
@@ -4359,24 +4328,24 @@ dataset_atomicity(void)
* Programmer: Quincey Koziol
* Date: April, 2013
*/
-void
-test_dense_attr(void)
+void
+test_dense_attr(void)
{
- int mpi_size, mpi_rank;
- hid_t fpid, fid;
- hid_t gid, gpid;
- hid_t atFileSpace, atid;
- hsize_t atDims[1] = {10000};
- herr_t status;
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
const char *filename;
/* get filename */
filename = (const char *)GetTestParameters();
- HDassert( filename != NULL );
+ HDassert(filename != NULL);
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
fpid = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fpid > 0), "H5Pcreate succeeded");
@@ -4398,7 +4367,7 @@ test_dense_attr(void)
status = H5Pclose(gpid);
VRFY((status >= 0), "H5Pclose succeeded");
- atFileSpace = H5Screate_simple(1, atDims, NULL);
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
VRFY((atid > 0), "H5Acreate succeeded");
@@ -4415,4 +4384,3 @@ test_dense_attr(void)
return;
}
-