From 3dbbd8aea1b8389ad7df0c982c9a78a9a2d4dee8 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Wed, 24 Mar 2021 15:57:32 -0500 Subject: Align with "parallel fence" changes (#479) * Small code updates and simplifications * Committing clang-format changes * Fix 'make installcheck' for parallel builds Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> --- examples/testh5cc.sh.in | 6 +++ src/H5AC.c | 15 ++---- src/H5ACmpio.c | 12 ++--- src/H5C.c | 31 +++-------- src/H5Cmpio.c | 67 +++++++++-------------- src/H5FDmpio.c | 83 +++++++++++++++------------- src/H5Fint.c | 10 ++-- src/H5Fmpi.c | 29 ---------- src/H5Fpkg.h | 1 - src/H5Fprivate.h | 1 - testpar/t_bigio.c | 95 +++++--------------------------- testpar/t_cache.c | 50 +++++------------ testpar/t_mdset.c | 141 ++++++++++-------------------------------------- 13 files changed, 148 insertions(+), 393 deletions(-) diff --git a/examples/testh5cc.sh.in b/examples/testh5cc.sh.in index 0c122c7..ba80f2d 100644 --- a/examples/testh5cc.sh.in +++ b/examples/testh5cc.sh.in @@ -91,6 +91,8 @@ temp_FILES="a.out $applib" cat > $appmain < $prog1 < +void sub1(void) { printf("in sub1\n"); @@ -123,6 +127,8 @@ EOF # generate prog2 cat > $prog2 < +void sub2(void) { printf("in sub2\n"); diff --git a/src/H5AC.c b/src/H5AC.c index ffdca78..2eadd21 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -350,19 +350,14 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co if (NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL))) HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list") - if (aux_ptr != NULL) - if (aux_ptr->mpi_rank == 0) - f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, - (H5AC_NTYPES - 1), H5AC_class_s, H5AC__check_if_write_permitted, - TRUE, H5AC__log_flushed_entry, (void *)aux_ptr); - else - f->shared->cache = - H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1), - H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, (void *)aux_ptr); + if (aux_ptr->mpi_rank == 0) + f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, + (H5AC_NTYPES - 1), H5AC_class_s, H5AC__check_if_write_permitted, + TRUE, H5AC__log_flushed_entry, (void *)aux_ptr); else f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1), - H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, NULL); + H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, (void *)aux_ptr); } /* end if */ else { #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 140fd96..7d2ba25 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -1271,7 +1271,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f) if (aux_ptr->write_done) (aux_ptr->write_done)(); - /* to prevent "messages from the past" we must synchronize all + /* To prevent "messages from the past" we must synchronize all * processes again before we go on. 
*/ if (MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm))) @@ -1514,7 +1514,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f) HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't receive clean list") if (num_entries > 0) - /* mark the indicated entries as clean */ + /* Mark the indicated entries as clean */ if (H5C_mark_entries_as_clean(f, num_entries, haddr_buf_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.") @@ -1900,28 +1900,22 @@ H5AC__rsp__p0_only__flush(H5F_t *f) /* Check for error on the write operation */ if (result < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.") /* this code exists primarily for the test bed -- it allows us to * enforce POSIX semantics on the server that pretends to be a * file system in our parallel tests. */ - if (aux_ptr->write_done) { - + if (aux_ptr->write_done) (aux_ptr->write_done)(); - } } /* end if */ /* Propagate cleaned entries to other ranks. */ if (H5AC__propagate_flushed_and_still_clean_entries_list(f) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.") done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5AC__rsp__p0_only__flush() */ /*------------------------------------------------------------------------- diff --git a/src/H5C.c b/src/H5C.c index 551a2b2..1b885e3 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -21,14 +21,8 @@ * things which exist on disk, and which may be * unambiguously referenced by their disk addresses. * - * The code in this module was initially written in - * support of a complete re-write of the metadata cache - * in H5AC.c However, other uses for the cache code - * suggested themselves, and thus this file was created - * in an attempt to support re-use. - * - * For a detailed overview of the cache, please see the - * header comment for H5C_t in H5Cpkg.h. + * For a detailed overview of the cache, please see the + * header comment for H5C_t in H5Cpkg.h. * *------------------------------------------------------------------------- */ @@ -39,9 +33,7 @@ * * Code Changes: * - * - Remove extra functionality in H5C__flush_single_entry()? - * - * - Change protect/unprotect to lock/unlock. + * - Change protect/unprotect to lock/unlock. * * - Flush entries in increasing address order in * H5C__make_space_in_cache(). @@ -53,18 +45,9 @@ * I/O overhead. Can't do this just yet as some entries are not * contiguous. Do this in parallel only or in serial as well? * - * - Create MPI type for dirty objects when flushing in parallel. - * - * - Now that TBBT routines aren't used, fix nodes in memory to - * point directly to the skip list node from the LRU list, eliminating - * skip list lookups when evicting objects from the cache. - * - * Tests: - * - * - Trim execution time. (This is no longer a major issue with the - * shift from the TBBT to a hash table for indexing.) - * - * - Add random tests. + * - Fix nodes in memory to point directly to the skip list node from + * the LRU list, eliminating skip list lookups when evicting objects + * from the cache. 
* **************************************************************************/ @@ -6650,9 +6633,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) } if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file") - #ifdef H5_HAVE_PARALLEL } #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index 7a5c630..66c6601 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -154,19 +154,12 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned * Programmer: John Mainzer * 3/17/10 * - * Changes: Updated sanity checks to allow for the possibility that - * the slist is disabled. - * JRM -- 8/3/20 - * *------------------------------------------------------------------------- */ herr_t H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size) { - int i; - int m; - unsigned n; unsigned first_entry_to_flush; unsigned last_entry_to_flush; unsigned total_entries_to_clear = 0; @@ -176,15 +169,13 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha unsigned entries_to_clear[H5C_RING_NTYPES]; haddr_t addr; H5C_cache_entry_t *entry_ptr = NULL; - #if H5C_DO_SANITY_CHECKS haddr_t last_addr; #endif /* H5C_DO_SANITY_CHECKS */ - #if H5C_APPLY_CANDIDATE_LIST__DEBUG char tbl_buf[1024]; #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ - + unsigned m, n; unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -226,9 +217,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha } /* end if */ n = num_candidates / (unsigned)mpi_size; - if (num_candidates % (unsigned)mpi_size > INT_MAX) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "m overflow") - m = (int)(num_candidates % (unsigned)mpi_size); + m = num_candidates % (unsigned)mpi_size; if (NULL == (candidate_assignment_table = (unsigned *)H5MM_malloc(sizeof(unsigned) * (size_t)(mpi_size + 1)))) @@ -239,31 +228,31 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha candidate_assignment_table[mpi_size] = num_candidates; if (m == 0) { /* mpi_size is an even divisor of num_candidates */ - for (i = 1; i < mpi_size; i++) - candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n; + for (u = 1; u < (unsigned)mpi_size; u++) + candidate_assignment_table[u] = candidate_assignment_table[u - 1] + n; } /* end if */ else { - for (i = 1; i <= m; i++) - candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n + 1; + for (u = 1; u <= m; u++) + candidate_assignment_table[u] = candidate_assignment_table[u - 1] + n + 1; if (num_candidates < (unsigned)mpi_size) { - for (i = m + 1; i < mpi_size; i++) - candidate_assignment_table[i] = num_candidates; + for (u = m + 1; u < (unsigned)mpi_size; u++) + candidate_assignment_table[u] = num_candidates; } /* end if */ else { - for (i = m + 1; i < mpi_size; i++) - candidate_assignment_table[i] = candidate_assignment_table[i - 1] + n; + for (u = m + 1; u < (unsigned)mpi_size; u++) + candidate_assignment_table[u] = candidate_assignment_table[u - 1] + n; } /* end else */ } /* end else */ HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates); #if H5C_DO_SANITY_CHECKS /* Verify that the candidate assignment table has the expected form */ - for (i = 1; i < mpi_size - 1; i++) { + for (u = 1; u < (unsigned)(mpi_size - 1); u++) { unsigned 
a, b; - a = candidate_assignment_table[i] - candidate_assignment_table[i - 1]; - b = candidate_assignment_table[i + 1] - candidate_assignment_table[i]; + a = candidate_assignment_table[u] - candidate_assignment_table[u - 1]; + b = candidate_assignment_table[u + 1] - candidate_assignment_table[u]; HDassert(n + 1 >= a); HDassert(a >= b); @@ -275,11 +264,11 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha last_entry_to_flush = candidate_assignment_table[mpi_rank + 1] - 1; #if H5C_APPLY_CANDIDATE_LIST__DEBUG - for (i = 0; i < 1024; i++) - tbl_buf[i] = '\0'; + for (u = 0; u < 1024; u++) + tbl_buf[u] = '\0'; HDsprintf(&(tbl_buf[0]), "candidate assignment table = "); - for (i = 0; i <= mpi_size; i++) - HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " %u", candidate_assignment_table[i]); + for (u = 0; u <= (unsigned)mpi_size; u++) + HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " %u", candidate_assignment_table[u]); HDsprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n"); HDfprintf(stdout, "%s", tbl_buf); @@ -354,9 +343,9 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha #if H5C_DO_SANITY_CHECKS m = 0; n = 0; - for (i = 0; i < H5C_RING_NTYPES; i++) { - m += (int)entries_to_flush[i]; - n += entries_to_clear[i]; + for (u = 0; u < H5C_RING_NTYPES; u++) { + m += entries_to_flush[u]; + n += entries_to_clear[u]; } /* end if */ HDassert((unsigned)m == total_entries_to_flush); @@ -957,10 +946,8 @@ H5C__collective_write(H5F_t *f) int * length_array = NULL; MPI_Aint * buf_array = NULL; MPI_Aint * offset_array = NULL; - MPI_Datatype btype; - hbool_t btype_created = FALSE; - MPI_Datatype ftype; - hbool_t ftype_created = FALSE; + MPI_Datatype btype = MPI_BYTE; + MPI_Datatype ftype = MPI_BYTE; int mpi_code; char unused = 0; /* Unused, except for non-NULL pointer value */ size_t buf_count; @@ -1032,7 +1019,6 @@ H5C__collective_write(H5F_t *f) if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) - btype_created = TRUE; if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) @@ -1040,7 +1026,6 @@ H5C__collective_write(H5F_t *f) if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) - ftype_created = TRUE; if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) @@ -1048,10 +1033,6 @@ H5C__collective_write(H5F_t *f) buf_count = 1; } /* end if */ else { - /* Pass trivial buf type, file type to the file driver */ - btype = MPI_BYTE; - ftype = MPI_BYTE; - /* Set non-NULL pointer for I/O operation */ base_buf = &unused; @@ -1074,9 +1055,9 @@ done: offset_array = (MPI_Aint *)H5MM_xfree(offset_array); /* Free MPI Types */ - if (btype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&btype))) + if (MPI_BYTE != btype && MPI_SUCCESS != (mpi_code = MPI_Type_free(&btype))) HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) - if (ftype_created && MPI_SUCCESS != (mpi_code = MPI_Type_free(&ftype))) + if (MPI_BYTE != ftype && MPI_SUCCESS != (mpi_code = MPI_Type_free(&ftype))) HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) /* Reset transfer mode in API context, if changed */ diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index f0a6842..f79b9cd 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -750,18 +750,18 @@ 
done: static H5FD_t * H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR_UNUSED maxaddr) { - H5FD_mpio_t * file = NULL; - MPI_File fh; - hbool_t file_opened = FALSE; /* Flag to indicate that the file was successfully opened */ - int mpi_amode; - int mpi_rank; /* MPI rank of this process */ - int mpi_size; /* Total number of MPI processes */ - int mpi_code; /* MPI return code */ - MPI_Offset size; - H5P_genplist_t *plist; /* Property list pointer */ - MPI_Comm comm = MPI_COMM_NULL; - MPI_Info info = MPI_INFO_NULL; - H5FD_t * ret_value = NULL; /* Return value */ + H5FD_mpio_t * file = NULL; /* VFD File struct for new file */ + H5P_genplist_t *plist; /* Property list pointer */ + MPI_Comm comm = MPI_COMM_NULL; /* MPI Communicator, from plist */ + MPI_Info info = MPI_INFO_NULL; /* MPI Info, from plist */ + MPI_File fh; /* MPI file handle */ + hbool_t file_opened = FALSE; /* Flag to indicate that the file was successfully opened */ + int mpi_amode; /* MPI file access flags */ + int mpi_rank = INT_MAX; /* MPI rank of this process */ + int mpi_size; /* Total number of MPI processes */ + int mpi_code; /* MPI return code */ + MPI_Offset file_size; /* File size (of existing files) */ + H5FD_t * ret_value = NULL; /* Return value */ FUNC_ENTER_STATIC @@ -781,6 +781,12 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR if (H5P_get(plist, H5F_ACS_MPI_PARAMS_INFO_NAME, &info) < 0) HGOTO_ERROR(H5E_VFL, H5E_CANTGET, NULL, "can't get MPI info object") + /* Get the MPI rank of this process and the total number of processes */ + if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank))) + HMPI_GOTO_ERROR(NULL, "MPI_Comm_rank failed", mpi_code) + if (MPI_SUCCESS != (mpi_code = MPI_Comm_size(comm, &mpi_size))) + HMPI_GOTO_ERROR(NULL, "MPI_Comm_size failed", mpi_code) + /* Convert HDF5 flags to MPI-IO flags */ /* Some combinations are illegal; let MPI-IO figure it out */ mpi_amode = (flags & H5F_ACC_RDWR) ? MPI_MODE_RDWR : MPI_MODE_RDONLY; @@ -810,12 +816,6 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR HMPI_GOTO_ERROR(NULL, "MPI_File_open failed", mpi_code) file_opened = TRUE; - /* Get the MPI rank of this process and the total number of processes */ - if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank))) - HMPI_GOTO_ERROR(NULL, "MPI_Comm_rank failed", mpi_code) - if (MPI_SUCCESS != (mpi_code = MPI_Comm_size(comm, &mpi_size))) - HMPI_GOTO_ERROR(NULL, "MPI_Comm_size failed", mpi_code) - /* Build the return value and initialize it */ if (NULL == (file = (H5FD_mpio_t *)H5MM_calloc(sizeof(H5FD_mpio_t)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") @@ -826,17 +826,16 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR file->mpi_size = mpi_size; /* Only processor p0 will get the filesize and broadcast it. 
*/ - if (mpi_rank == 0) { - if (MPI_SUCCESS != (mpi_code = MPI_File_get_size(fh, &size))) + if (mpi_rank == 0) + if (MPI_SUCCESS != (mpi_code = MPI_File_get_size(fh, &file_size))) HMPI_GOTO_ERROR(NULL, "MPI_File_get_size failed", mpi_code) - } /* end if */ /* Broadcast file size */ - if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&size, (int)sizeof(MPI_Offset), MPI_BYTE, 0, comm))) + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&file_size, (int)sizeof(MPI_Offset), MPI_BYTE, 0, comm))) HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code) /* Determine if the file should be truncated */ - if (size && (flags & H5F_ACC_TRUNC)) { + if (file_size && (flags & H5F_ACC_TRUNC)) { if (MPI_SUCCESS != (mpi_code = MPI_File_set_size(fh, (MPI_Offset)0))) HMPI_GOTO_ERROR(NULL, "MPI_File_set_size failed", mpi_code) @@ -845,11 +844,11 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR HMPI_GOTO_ERROR(NULL, "MPI_Barrier failed", mpi_code) /* File is zero size now */ - size = 0; + file_size = 0; } /* end if */ /* Set the size of the file (from library's perspective) */ - file->eof = H5FD_mpi_MPIOff_to_haddr(size); + file->eof = H5FD_mpi_MPIOff_to_haddr(file_size); file->local_eof = file->eof; /* Set return value */ @@ -906,7 +905,7 @@ H5FD__mpio_close(H5FD_t *_file) HDassert(H5FD_MPIO == file->pub.driver_id); /* MPI_File_close sets argument to MPI_FILE_NULL */ - if (MPI_SUCCESS != (mpi_code = MPI_File_close(&(file->f) /*in,out*/))) + if (MPI_SUCCESS != (mpi_code = MPI_File_close(&(file->f)))) HMPI_GOTO_ERROR(FAIL, "MPI_File_close failed", mpi_code) /* Clean up other stuff */ @@ -1228,8 +1227,10 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU if (MPI_SUCCESS != (mpi_code = MPI_Bcast(buf, size_i, buf_type, 0, file->comm))) HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code) } /* end if */ - else if (MPI_SUCCESS != - (mpi_code = MPI_File_read_at_all(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) + else + /* Perform collective read operation */ + if (MPI_SUCCESS != + (mpi_code = MPI_File_read_at_all(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code) } /* end if */ else { @@ -1238,6 +1239,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU HDfprintf(stdout, "%s: doing MPI independent IO\n", FUNC); #endif + /* Perform independent read operation */ if (MPI_SUCCESS != (mpi_code = MPI_File_read_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code) @@ -1250,7 +1252,9 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU H5FD_mpi_native_g, file->info))) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) } /* end if */ - else if (MPI_SUCCESS != (mpi_code = MPI_File_read_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) + else + /* Perform independent read operation */ + if (MPI_SUCCESS != (mpi_code = MPI_File_read_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code) /* Only retrieve bytes read if this rank _actually_ participated in I/O */ @@ -1446,6 +1450,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h if (H5FD_mpio_Debug[(int)'w']) HDfprintf(stdout, "%s: doing MPI collective IO\n", FUNC); #endif + /* Perform collective write operation */ if (MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) 
HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code) @@ -1458,6 +1463,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h if (H5FD_mpio_Debug[(int)'w']) HDfprintf(stdout, "%s: doing MPI independent IO\n", FUNC); #endif + /* Perform independent write operation */ if (MPI_SUCCESS != (mpi_code = MPI_File_write_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code) @@ -1468,8 +1474,9 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h H5FD_mpi_native_g, file->info))) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) } /* end if */ - else if (MPI_SUCCESS != - (mpi_code = MPI_File_write_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) + else + /* Perform independent write operation */ + if (MPI_SUCCESS != (mpi_code = MPI_File_write_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat))) HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code) /* How many bytes were actually written? */ @@ -1507,9 +1514,9 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h file->local_eof = addr + (haddr_t)bytes_written; done: - if (derived_type) { + if (derived_type) MPI_Type_free(&buf_type); - } + #ifdef H5FDmpio_DEBUG if (H5FD_mpio_Debug[(int)'t']) HDfprintf(stdout, "%s: Leaving, proc %d: ret_value = %d\n", FUNC, file->mpi_rank, ret_value); @@ -1569,13 +1576,14 @@ done: * * This is a little sticky in the mpio case, as it is not * easy for us to track the current EOF by extracting it from - * write calls. + * write calls, since other ranks could have written to the + * file beyond the local EOF. * - * Instead, we first check to see if the eoa has changed since + * Instead, we first check to see if the EOA has changed since * the last call to this function. If it has, we call * MPI_File_get_size() to determine the current EOF, and * only call MPI_File_set_size() if this value disagrees - * with the current eoa. + * with the current EOA. * * Return: SUCCEED/FAIL * @@ -1637,7 +1645,7 @@ H5FD__mpio_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t H5_ATTR if (H5FD_mpi_haddr_to_MPIOff(file->eoa, &needed_eof) < 0) HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "cannot convert from haddr_t to MPI_Offset") - /* eoa != eof. Set eof to eoa */ + /* EOA != EOF. 
Set EOF to EOA */ if (size != needed_eof) { /* Extend the file's size */ if (MPI_SUCCESS != (mpi_code = MPI_File_set_size(file->f, needed_eof))) @@ -1749,5 +1757,4 @@ H5FD__mpio_communicator(const H5FD_t *_file) FUNC_LEAVE_NOAPI(file->comm) } /* end H5FD__mpio_communicator() */ - #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Fint.c b/src/H5Fint.c index 6da9473..ed6b9c1 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -81,6 +81,7 @@ static herr_t H5F__build_name(const char *prefix, const char *file_name, char ** static char * H5F__getenv_prefix_name(char **env_prefix /*in,out*/); static H5F_t *H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t *lf); static herr_t H5F__check_if_using_file_locks(H5P_genplist_t *fapl, hbool_t *use_file_locking); +static herr_t H5F__dest(H5F_t *f, hbool_t flush); static herr_t H5F__build_actual_name(const H5F_t *f, const H5P_genplist_t *fapl, const char *name, char ** /*out*/ actual_name); static herr_t H5F__flush_phase1(H5F_t *f); @@ -1379,12 +1380,12 @@ done: * Return: SUCCEED/FAIL *------------------------------------------------------------------------- */ -herr_t +static herr_t H5F__dest(H5F_t *f, hbool_t flush) { herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE + FUNC_ENTER_STATIC /* Sanity check */ HDassert(f); @@ -2512,11 +2513,6 @@ H5F_try_close(H5F_t *f, hbool_t *was_closed /*out*/) if (H5F__efc_try_close(f) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't attempt to close EFC") - /* Delay flush until the shared file struct is closed, in H5F__dest. If the - * application called H5Fclose, it would have been flushed in that function - * (unless it will have been flushed in H5F__dest anyways). - */ - /* Destroy the H5F_t struct and decrement the reference count for the * shared H5F_shared_t struct. If the reference count for the H5F_shared_t * struct reaches zero then destroy it also. diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c index 3336d4f..4b5283e 100644 --- a/src/H5Fmpi.c +++ b/src/H5Fmpi.c @@ -68,35 +68,6 @@ /*******************/ #ifdef H5_HAVE_PARALLEL - -/*------------------------------------------------------------------------- - * Function: H5F_get_mpi_handle - * - * Purpose: Retrieves MPI File handle. 
- * - * Return: Success: The size (positive) - * Failure: Negative - * - *------------------------------------------------------------------------- - */ -herr_t -H5F_get_mpi_handle(const H5F_t *f, MPI_File **f_handle) -{ - herr_t ret_value = SUCCEED; - hid_t fapl_id = H5I_INVALID_HID; - - FUNC_ENTER_NOAPI(FAIL) - - HDassert(f && f->shared); - - /* Dispatch to driver */ - if ((ret_value = H5FD_get_vfd_handle(f->shared->lf, fapl_id, (void **)f_handle)) < 0) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get mpi file handle") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5F_get_mpi_handle() */ - /*------------------------------------------------------------------------- * Function: H5F_mpi_get_rank * diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index 371260d..087c9c9 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -405,7 +405,6 @@ H5_DLLVAR htri_t use_locks_env_g; /* General routines */ H5_DLL herr_t H5F__post_open(H5F_t *f); H5_DLL H5F_t * H5F__reopen(H5F_t *f); -H5_DLL herr_t H5F__dest(H5F_t *f, hbool_t flush); H5_DLL herr_t H5F__flush(H5F_t *f); H5_DLL htri_t H5F__is_hdf5(const char *name, hid_t fapl_id); H5_DLL ssize_t H5F__get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len); diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 0c75cf0..5c5937b 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -955,7 +955,6 @@ H5_DLL herr_t H5F_eoa_dirty(H5F_t *f); /* Parallel I/O (i.e. MPI) related routines */ #ifdef H5_HAVE_PARALLEL -H5_DLL herr_t H5F_get_mpi_handle(const H5F_t *f, MPI_File **f_handle); H5_DLL int H5F_mpi_get_rank(const H5F_t *f); H5_DLL MPI_Comm H5F_mpi_get_comm(const H5F_t *f); H5_DLL int H5F_shared_mpi_get_size(const H5F_shared_t *f_sh); diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 7884ecb..ed99fc4 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1104,7 +1104,7 @@ static void single_rank_independent_io(void) { if (mpi_rank_g == 0) - HDprintf("single_rank_independent_io\n"); + HDprintf("\nSingle Rank Independent I/O\n"); if (MAIN_PROCESS) { hsize_t dims[] = {LARGE_DIM}; @@ -1223,8 +1223,6 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1250,7 +1248,7 @@ coll_chunk1(void) { const char *filename = FILENAME[0]; if (mpi_rank_g == 0) - HDprintf("coll_chunk1\n"); + HDprintf("\nCollective chunk I/O Test #1\n"); coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1303,7 +1301,7 @@ coll_chunk2(void) { const char *filename = FILENAME[0]; if (mpi_rank_g == 0) - HDprintf("coll_chunk2\n"); + HDprintf("\nCollective chunk I/O Test #2\n"); coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1329,8 +1327,6 @@ coll_chunk2(void) * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1357,7 +1353,7 @@ coll_chunk3(void) { const char *filename = FILENAME[0]; if (mpi_rank_g == 0) - HDprintf("coll_chunk3\n"); + HDprintf("\nCollective chunk I/O Test #3\n"); coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1385,17 +1381,9 @@ coll_chunk3(void) * * Failure: -1 * - * 
Modifications: - * Remove invalid temporary property checkings for API_LINK_HARD and - * API_LINK_TRUE cases. - * Programmer: Jonathan Kim - * Date: 2012-10-10 - * * Programmer: Unknown * July 12th, 2004 * - * Modifications: - * *------------------------------------------------------------------------- */ @@ -1845,55 +1833,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap HDfree(data_origin1); } -/***************************************************************************** - * - * Function: do_express_test() - * - * Purpose: Do an MPI_Allreduce to obtain the maximum value returned - * by GetTestExpress() across all processes. Return this - * value. - * - * Envirmoment variables can be different across different - * processes. This function ensures that all processes agree - * on whether to do an express test. - * - * Return: Success: Maximum of the values returned by - * GetTestExpress() across all processes. - * - * Failure: -1 - * - * Programmer: JRM -- 4/25/06 - * - *****************************************************************************/ -static int -do_express_test(int world_mpi_rank) -{ - int express_test; - int max_express_test; - int result; - - express_test = GetTestExpress(); - - result = - MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - - if (result != MPI_SUCCESS) { - nerrors++; - max_express_test = -1; - if (VERBOSE_MED && (world_mpi_rank == 0)) { - HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC); - } - } - - return (max_express_test); - -} /* do_express_test() */ - int main(int argc, char **argv) { - int ExpressMode = 0; - hsize_t newsize = 1048576; + hsize_t newsize = 1048576; /* Set the bigio processing limit to be 'newsize' bytes */ hsize_t oldsize = H5_mpi_set_bigio_count(newsize); @@ -1902,9 +1845,8 @@ main(int argc, char **argv) * that we try to ensure that our bigio handling is actually * envoked and tested. */ - if (newsize != oldsize) { + if (newsize != oldsize) bigcount = newsize * 2; - } MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g); @@ -1915,34 +1857,25 @@ main(int argc, char **argv) * hang in the atexit post processing in which it may try to make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0) { + if (H5dont_atexit() < 0) HDprintf("Failed to turn off atexit processing. Continue.\n"); - }; /* set alarm. */ ALARM_ON; - ExpressMode = do_express_test(mpi_rank_g); - dataset_big_write(); MPI_Barrier(MPI_COMM_WORLD); dataset_big_read(); MPI_Barrier(MPI_COMM_WORLD); - if (ExpressMode > 0) { - if (mpi_rank_g == 0) - HDprintf("***Express test mode on. Several tests are skipped\n"); - } - else { - coll_chunk1(); - MPI_Barrier(MPI_COMM_WORLD); - coll_chunk2(); - MPI_Barrier(MPI_COMM_WORLD); - coll_chunk3(); - MPI_Barrier(MPI_COMM_WORLD); - single_rank_independent_io(); - } + coll_chunk1(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk2(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk3(); + MPI_Barrier(MPI_COMM_WORLD); + single_rank_independent_io(); /* turn off alarm */ ALARM_OFF; diff --git a/testpar/t_cache.c b/testpar/t_cache.c index 4846a75..4cf1139 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -6874,28 +6874,20 @@ main(int argc, char **argv) * hang in the atexit post processing in which it may try to make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0) { + if (H5dont_atexit() < 0) HDprintf("%d:Failed to turn off atexit processing. 
Continue.\n", mpi_rank); - }; + H5open(); express_test = do_express_test(); -#if 0 /* JRM */ - express_test = 0; -#endif /* JRM */ - if (express_test) { - + if (express_test) virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES; - } - else { - + else virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES; - } #ifdef H5_HAVE_MPE - if (MAINPROCESS) { + if (MAINPROCESS) HDprintf(" Tests compiled for MPE.\n"); - } virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES; #endif /* H5_HAVE_MPE */ @@ -6908,11 +6900,8 @@ main(int argc, char **argv) } if (mpi_size < 3) { - - if (MAINPROCESS) { - + if (MAINPROCESS) HDprintf(" Need at least 3 processes. Exiting.\n"); - } goto finish; } @@ -6930,27 +6919,22 @@ main(int argc, char **argv) /* setup file access property list with the world communicator */ if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) { nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, FUNC); - } } if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) { - nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, FUNC); - } } /* fix the file names */ for (u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u) { if (h5_fixname(FILENAME[u], fapl, filenames[u], sizeof(filenames[u])) == NULL) { - nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, FUNC); - } break; } } @@ -6958,9 +6942,8 @@ main(int argc, char **argv) /* close the fapl before we set it up again */ if (H5Pclose(fapl) < 0) { nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, FUNC); - } } /* now create the fapl again, excluding the server process. */ @@ -6969,32 +6952,25 @@ main(int argc, char **argv) /* setup file access property list */ if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) { nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, FUNC); - } } if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) { - nerrors++; - if (verbose) { + if (verbose) HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, FUNC); - } } } setup_rand(); max_nerrors = get_max_nerrors(); - if (max_nerrors != 0) { /* errors in setup -- no point in continuing */ - - if (world_mpi_rank == 0) { - + if (world_mpi_rank == 0) HDfprintf(stdout, "Errors in test initialization. Exiting.\n"); - } goto finish; } diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index 2eca297..3041e77 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -129,12 +129,6 @@ zero_dim_dset(void) /* * Example of using PHDF5 to create ndatasets datasets. Each process write * a slab of array to the file. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ void multiple_dset_write(void) @@ -218,12 +212,6 @@ multiple_dset_write(void) } /* Example of using PHDF5 to create, write, and read compact dataset. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ void compact_dataset(void) @@ -353,14 +341,6 @@ compact_dataset(void) /* * Example of using PHDF5 to create, write, and read dataset and attribute * of Null dataspace. 
- * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. - * - * JRM - 8/24/04 */ void null_dataset(void) @@ -465,14 +445,6 @@ null_dataset(void) * Actual data is _not_ written to these datasets. Dataspaces are exact * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over * the boundary of interest. - * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. - * - * JRM - 8/11/04 */ void big_dataset(void) @@ -594,16 +566,6 @@ big_dataset(void) /* Example of using PHDF5 to read a partial written dataset. The dataset does * not have actual data written to the entire raw data area and relies on the * default fill value of zeros to work correctly. - * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. - * - * Also added code to free dynamically allocated buffers. - * - * JRM - 8/11/04 */ void dataset_fillvalue(void) @@ -710,15 +672,16 @@ dataset_fillvalue(void) for (i = 0; i < (int)dset_dims[0]; i++) for (j = 0; j < (int)dset_dims[1]; j++) for (k = 0; k < (int)dset_dims[2]; k++) - for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) + for (l = 0; l < (int)dset_dims[3]; l++, trdata++) if (*trdata != 0) if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, - j, k, l, *trdata); + HDprintf( + "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", + mpi_rank, i, j, k, l, *trdata); if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) - HDprintf("[more errors ...]\n"); + HDprintf("Rank %d: [more errors ...]\n", mpi_rank); if (err_num) { - HDprintf("%d errors found in check_value\n", err_num); + HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num); nerrors++; } } @@ -856,12 +819,6 @@ collective_group_write_independent_group_read(void) /* Write multiple groups with a chunked dataset in each group collectively. * These groups and datasets are for testing independent read later. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/16/04 */ void collective_group_write(void) @@ -896,6 +853,7 @@ collective_group_write(void) plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + VRFY((fid >= 0), "H5Fcreate"); H5Pclose(plist); /* decide the hyperslab according to process number. 
*/ @@ -909,13 +867,13 @@ collective_group_write(void) ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); VRFY((memspace >= 0), "memspace"); VRFY((filespace >= 0), "filespace"); - VRFY((ret1 >= 0), "mgroup memspace selection"); - VRFY((ret2 >= 0), "mgroup filespace selection"); + VRFY((ret1 == 0), "mgroup memspace selection"); + VRFY((ret2 == 0), "mgroup filespace selection"); dcpl = H5Pcreate(H5P_DATASET_CREATE); ret1 = H5Pset_chunk(dcpl, 2, chunk_size); VRFY((dcpl >= 0), "dataset creation property"); - VRFY((ret1 >= 0), "set chunk for dataset creation property"); + VRFY((ret1 == 0), "set chunk for dataset creation property"); /* creates ngroups groups under the root group, writes chunked * datasets in parallel. */ @@ -932,10 +890,14 @@ collective_group_write(void) for (j = 0; j < size; j++) outme[(i * size) + j] = (i + j) * 1000 + mpi_rank; - H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + VRFY((ret1 == 0), "H5Dwrite"); - H5Dclose(did); - H5Gclose(gid); + ret1 = H5Dclose(did); + VRFY((ret1 == 0), "H5Dclose"); + + ret1 = H5Gclose(gid); + VRFY((ret1 == 0), "H5Gclose"); #ifdef BARRIER_CHECKS if (!((m + 1) % 10)) { @@ -948,7 +910,9 @@ collective_group_write(void) H5Pclose(dcpl); H5Sclose(filespace); H5Sclose(memspace); - H5Fclose(fid); + + ret1 = H5Fclose(fid); + VRFY((ret1 == 0), "H5Fclose"); HDfree(outme); } @@ -964,6 +928,7 @@ independent_group_read(void) const H5Ptest_param_t *pt; char * filename; int ngroups; + herr_t ret; pt = GetTestParameters(); filename = pt->name; @@ -975,6 +940,7 @@ independent_group_read(void) H5Pset_all_coll_metadata_ops(plist, FALSE); fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((fid > 0), "H5Fopen"); H5Pclose(plist); /* open groups and read datasets. Odd number processes read even number @@ -989,20 +955,11 @@ independent_group_read(void) group_dataset_read(fid, mpi_rank, m); } - H5Fclose(fid); + ret = H5Fclose(fid); + VRFY((ret == 0), "H5Fclose"); } /* Open and read datasets and compare data - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * Also added code to verify the results of dynamic memory - * allocations, and to free dynamically allocated memeory - * when we are done with it. - * - * JRM - 8/16/04 */ static void group_dataset_read(hid_t fid, int mpi_rank, int m) @@ -1035,16 +992,17 @@ group_dataset_read(hid_t fid, int mpi_rank, int m) /* this is the original value */ for (i = 0; i < size; i++) - for (j = 0; j < size; j++) { + for (j = 0; j < size; j++) outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank; - } /* compare the original value(outdata) to the value in file(indata).*/ ret = check_value(indata, outdata, size); VRFY((ret == 0), "check the data"); - H5Dclose(did); - H5Gclose(gid); + ret = H5Dclose(did); + VRFY((ret == 0), "H5Dclose"); + ret = H5Gclose(gid); + VRFY((ret == 0), "H5Gclose"); HDfree(indata); HDfree(outdata); @@ -1076,11 +1034,6 @@ group_dataset_read(hid_t fid, int mpi_rank, int m) * + means the group has attribute(s). * ' means the datasets in the groups have attribute(s). * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. 
- * - * JRM - 8/16/04 */ void multiple_group_write(void) @@ -1164,12 +1117,6 @@ multiple_group_write(void) /* * In a group, creates NDATASETS datasets. Each process writes a hyperslab * of a data array to the file. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/16/04 */ static void write_dataset(hid_t memspace, hid_t filespace, hid_t gid) @@ -1243,12 +1190,6 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) /* * This function is to verify the data from multiple group testing. It opens * every dataset in every group and check their correctness. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ void multiple_group_read(void) @@ -1323,12 +1264,6 @@ multiple_group_read(void) /* * This function opens all the datasets in a certain, checks the data using * dataset_vrfy function. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ static int read_dataset(hid_t memspace, hid_t filespace, hid_t gid) @@ -1475,12 +1410,6 @@ read_attribute(hid_t obj_id, int this_type, int num) /* This functions compares the original data with the read-in data for its * hyperslab part only by process ID. - * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. - * - * JRM - 8/16/04 */ static int check_value(DATATYPE *indata, DATATYPE *outdata, int size) @@ -1513,12 +1442,6 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size) } /* Decide the portion of data chunk in dataset by process ID. - * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. - * - * JRM - 8/11/04 */ static void @@ -1560,8 +1483,6 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t * This function reproduces this situation. At present the test hangs * on failure. * JRM - 9/13/04 - * - * Changes: None. */ #define N 4 @@ -1805,10 +1726,6 @@ io_mode_confusion(void) * cache clients will have to construct on disk images on demand. * * JRM -- 10/13/10 - * - * Changes: - * Break it into two parts, a writer to write the file and a reader - * the correctness of the writer. AKC -- 2010/10/27 */ #define NUM_DATA_SETS 4 -- cgit v0.12
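
An illustrative sketch (editor's addition, not part of the committed change) of the arithmetic behind the candidate assignment table built in the H5C_apply_candidate_list() hunk above. With n = num_candidates / mpi_size and m = num_candidates % mpi_size, the first m ranks are assigned n + 1 candidates and the remaining ranks n, which is why the loop indices could become unsigned and the INT_MAX overflow check could be dropped. The helper name and the bare malloc/assert calls are hypothetical simplifications, not HDF5 API.

#include <assert.h>
#include <stdlib.h>

/* Hypothetical stand-alone version of the table construction: rank r is
 * assigned the candidates in the half-open range [table[r], table[r + 1]). */
static unsigned *
build_assignment_table(unsigned num_candidates, unsigned mpi_size)
{
    unsigned  n = num_candidates / mpi_size; /* base share per rank */
    unsigned  m = num_candidates % mpi_size; /* ranks that get one extra */
    unsigned *table;
    unsigned  u;

    if (NULL == (table = malloc(sizeof(unsigned) * (mpi_size + 1))))
        return NULL;

    table[0] = 0;
    for (u = 1; u <= mpi_size; u++)
        table[u] = table[u - 1] + n + (u <= m ? 1 : 0);

    /* The table must cover every candidate, matching the patch's sanity check */
    assert(table[mpi_size] == num_candidates);

    return table;
}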
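
The H5C__collective_write() hunk above replaces the btype_created/ftype_created flags with an MPI_BYTE sentinel: the datatype variables start as MPI_BYTE and are freed only if a committed derived type took their place. A minimal sketch of that pattern follows; the helper name and parameters are assumptions, not HDF5 API.

#include <mpi.h>

/* Build an hindexed type only when this rank has entries to write; otherwise
 * leave the MPI_BYTE sentinel in place so the caller knows not to free it. */
static int
build_io_type(int count, int *lengths, MPI_Aint *displs, MPI_Datatype *type /*out*/)
{
    *type = MPI_BYTE; /* sentinel: no derived type created yet */

    if (count > 0) {
        if (MPI_SUCCESS != MPI_Type_create_hindexed(count, lengths, displs, MPI_BYTE, type))
            return -1;
        if (MPI_SUCCESS != MPI_Type_commit(type))
            return -1;
    }

    return 0;
}

/* Caller cleanup mirrors the patch:
 *     if (MPI_BYTE != type)
 *         MPI_Type_free(&type);
 */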
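
The H5FD__mpio_open() changes keep the pattern in which only rank 0 calls MPI_File_get_size() and broadcasts the result, so every process agrees on the EOF without each rank querying the file. A self-contained sketch of that pattern (function name and error handling are simplified assumptions):

#include <mpi.h>

/* Open a file collectively and agree on its size: rank 0 queries the size,
 * then broadcasts it to the other ranks (mirrors the VFD open logic above). */
static int
open_and_share_size(MPI_Comm comm, const char *name, MPI_File *fh, MPI_Offset *size)
{
    int rank;

    if (MPI_SUCCESS != MPI_Comm_rank(comm, &rank))
        return -1;

    /* Collective open on all ranks in the communicator */
    if (MPI_SUCCESS != MPI_File_open(comm, name, MPI_MODE_RDONLY, MPI_INFO_NULL, fh))
        return -1;

    /* Only rank 0 asks the file system for the size... */
    *size = 0;
    if (0 == rank && MPI_SUCCESS != MPI_File_get_size(*fh, size))
        return -1;

    /* ...and every rank receives the same value */
    if (MPI_SUCCESS != MPI_Bcast(size, (int)sizeof(MPI_Offset), MPI_BYTE, 0, comm))
        return -1;

    return 0;
}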