100 files changed, 15534 insertions, 2472 deletions
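For context on the hl/test/test_file_image.c change in this patch: when an image is opened with H5LT_FILE_IMAGE_OPEN_RW (and is copied rather than shared), the core VFD's buffer carries non-zero status_flags in the superblock (used for file locking), while the buffer returned by H5Fget_file_image() has them cleared, so a raw byte comparison would fail. The patch therefore zeroes the 4-byte status_flags field at offset 20 (superblock versions 0 and 1) in a copy of the VFD buffer before comparing. Below is a minimal standalone sketch of that comparison, using plain libc calls instead of the library's HD* wrappers; the helper name is hypothetical and not part of the patch.

#include <stdlib.h>
#include <string.h>

/* Offsets taken from the patch's comment: valid for superblock versions 0 and 1 only */
#define SUPER_STATUS_FLAGS_OFF_V0_V1  20
#define SUPER_STATUS_FLAGS_SIZE_V0_V1 4

/* Compare a VFD image against a user image, ignoring the superblock's
 * status_flags field (hypothetical helper for illustration). */
static int
compare_ignoring_status_flags(const unsigned char *vfd_buf,
    const unsigned char *user_buf, size_t buf_size)
{
    unsigned char *tmp;
    int cmp;

    if(NULL == (tmp = (unsigned char *)malloc(buf_size)))
        return -1;

    /* Work on a copy of the VFD buffer so the original image is untouched */
    memcpy(tmp, vfd_buf, buf_size);

    /* Zero the file-locking status flags, which the core VFD may have set */
    memset(tmp + SUPER_STATUS_FLAGS_OFF_V0_V1, 0, SUPER_STATUS_FLAGS_SIZE_V0_V1);

    /* With status_flags masked out, the two images should now match */
    cmp = memcmp(tmp, user_buf, buf_size);

    free(tmp);
    return cmp;
}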
@@ -478,6 +478,7 @@ ./src/H5ACpkg.h ./src/H5ACprivate.h ./src/H5ACpublic.h +./src/H5ACproxy_entry.c ./src/H5B.c ./src/H5Bcache.c ./src/H5Bdbg.c @@ -490,6 +491,8 @@ ./src/H5B2dbg.c ./src/H5B2hdr.c ./src/H5B2int.c +./src/H5B2internal.c +./src/H5B2leaf.c ./src/H5B2module.h ./src/H5B2pkg.h ./src/H5B2private.h @@ -584,6 +587,7 @@ ./src/H5FAdblkpage.c ./src/H5FAdblock.c ./src/H5FAhdr.c +./src/H5FAint.c ./src/H5FAmodule.h ./src/H5FApkg.h ./src/H5FAprivate.h @@ -615,6 +619,7 @@ ./src/H5FDspace.c ./src/H5FDstdio.c ./src/H5FDstdio.h +./src/H5FDtest.c ./src/H5FDwindows.c ./src/H5FDwindows.h ./src/H5FL.c @@ -625,6 +630,7 @@ ./src/H5FS.c ./src/H5FScache.c ./src/H5FSdbg.c +./src/H5FSint.c ./src/H5FSmodule.h ./src/H5FSpkg.h ./src/H5FSprivate.h @@ -990,6 +996,7 @@ # ====end distribute this for now. See HDFFV-8236==== ./test/specmetaread.h5 ./test/stab.c +./test/swmr.c ./test/tarray.c ./test/tarrold.h5 ./test/tattr.c diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp index dba0980..47b9a85 100644 --- a/c++/test/tfile.cpp +++ b/c++/test/tfile.cpp @@ -727,7 +727,7 @@ static void test_libver_bounds() /* Run the tests */ test_libver_bounds_real(H5F_LIBVER_EARLIEST, H5O_VERSION_1, H5F_LIBVER_LATEST, H5O_VERSION_2); - test_libver_bounds_real(H5F_LIBVER_LATEST, H5O_VERSION_2, H5F_LIBVER_EARLIEST, H5O_VERSION_1); + test_libver_bounds_real(H5F_LIBVER_LATEST, H5O_VERSION_2, H5F_LIBVER_EARLIEST, H5O_VERSION_2); PASSED(); } /* end test_libver_bounds() */ diff --git a/hl/test/test_file_image.c b/hl/test/test_file_image.c index 9b18539..6ff5bf4 100644 --- a/hl/test/test_file_image.c +++ b/hl/test/test_file_image.c @@ -20,6 +20,12 @@ #define RANK 2 +/* For superblock version 0, 1: the offset to "file consistency flags" is 20 with size of 4 bytes */ +/* The file consistency flags is the "status_flags" field in H5F_super_t */ +/* Note: the offset and size will be different when using superblock version 2 for the test file */ +#define SUPER_STATUS_FLAGS_OFF_V0_V1 20 +#define SUPER_STATUS_FLAGS_SIZE_V0_V1 4 + /* Test of file image operations. The following code provides a means to thoroughly test the file image @@ -214,10 +220,32 @@ test_file_image(size_t open_images, size_t nflags, unsigned *flags) else VERIFY(*core_buf_ptr_ptr != buf_ptr[i], "vfd buffer and user buffer should be different"); - /* test whether the contents of the user buffer and driver buffer */ - /* are equal. */ - if (HDmemcmp(*core_buf_ptr_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0) - FAIL_PUTS_ERROR("comparison of vfd and user buffer failed"); + /* + * When the vfd and user buffers are different and H5LT_FILE_IMAGE_OPEN_RW is enabled, + * status_flags in the superblock needs to be cleared in the vfd buffer for + * the comparison to proceed as expected. The user buffer as returned from H5Fget_file_image() + * has already cleared status_flags. The superblock's status_flags is used for the + * implementation of file locking. 
+ */ + if(input_flags[i] & H5LT_FILE_IMAGE_OPEN_RW && !(input_flags[i] & H5LT_FILE_IMAGE_DONT_COPY)) { + + void *tmp_ptr = HDmalloc((size_t)buf_size[i]); + /* Copy vfd buffer to a temporary buffer */ + HDmemcpy(tmp_ptr, (void *)*core_buf_ptr_ptr, (size_t)buf_size[i]); + /* Clear status_flags in the superblock for the vfd buffer: file locking is using status_flags */ + HDmemset((uint8_t *)tmp_ptr + SUPER_STATUS_FLAGS_OFF_V0_V1, (int)0, (size_t)SUPER_STATUS_FLAGS_SIZE_V0_V1); + /* Does the comparision */ + if(HDmemcmp(tmp_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0) + FAIL_PUTS_ERROR("comparison of TMP vfd and user buffer failed"); + /* Free the temporary buffer */ + if(tmp_ptr) HDfree(tmp_ptr); + } else { + + /* test whether the contents of the user buffer and driver buffer */ + /* are equal. */ + if (HDmemcmp(*core_buf_ptr_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0) + FAIL_PUTS_ERROR("comparison of vfd and user buffer failed"); + } } /* end else */ } /* end for */ diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a4c6367..55de5ea 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -47,6 +47,7 @@ set (H5AC_SOURCES ${HDF5_SRC_DIR}/H5ACdbg.c ${HDF5_SRC_DIR}/H5AClog.c ${HDF5_SRC_DIR}/H5ACmpio.c + ${HDF5_SRC_DIR}/H5ACproxy_entry.c ) set (H5AC_HDRS @@ -73,6 +74,8 @@ set (H5B2_SOURCES ${HDF5_SRC_DIR}/H5B2dbg.c ${HDF5_SRC_DIR}/H5B2hdr.c ${HDF5_SRC_DIR}/H5B2int.c + ${HDF5_SRC_DIR}/H5B2internal.c + ${HDF5_SRC_DIR}/H5B2leaf.c ${HDF5_SRC_DIR}/H5B2stat.c ${HDF5_SRC_DIR}/H5B2test.c ) @@ -209,6 +212,7 @@ set (H5FA_SOURCES ${HDF5_SRC_DIR}/H5FAdblkpage.c ${HDF5_SRC_DIR}/H5FAdblock.c ${HDF5_SRC_DIR}/H5FAhdr.c + ${HDF5_SRC_DIR}/H5FAint.c ${HDF5_SRC_DIR}/H5FAstat.c ${HDF5_SRC_DIR}/H5FAtest.c ) @@ -231,6 +235,7 @@ set (H5FD_SOURCES ${HDF5_SRC_DIR}/H5FDsec2.c ${HDF5_SRC_DIR}/H5FDspace.c ${HDF5_SRC_DIR}/H5FDstdio.c + ${HDF5_SRC_DIR}/H5FDtest.c ${HDF5_SRC_DIR}/H5FDwindows.c ) @@ -271,6 +276,7 @@ set (H5FS_SOURCES ${HDF5_SRC_DIR}/H5FS.c ${HDF5_SRC_DIR}/H5FScache.c ${HDF5_SRC_DIR}/H5FSdbg.c + ${HDF5_SRC_DIR}/H5FSint.c ${HDF5_SRC_DIR}/H5FSsection.c ${HDF5_SRC_DIR}/H5FSstat.c ${HDF5_SRC_DIR}/H5FStest.c @@ -135,6 +135,7 @@ static const char *H5AC_entry_type_names[H5AC_NTYPES] = "fixed array data block pages", "superblock", "driver info", + "proxy entry", "test entry" /* for testing only -- not used for actual files */ }; @@ -604,6 +605,52 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_evict + * + * Purpose: Evict all entries except the pinned entries + * in the cache. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Dec 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_evict(H5F_t *f, hid_t dxpl_id) +{ + hbool_t log_enabled; /* TRUE if logging was set up */ + hbool_t curr_logging; /* TRUE if currently logging */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(f); + HDassert(f->shared); + HDassert(f->shared->cache); + + /* Check if log messages are being emitted */ + if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status") + + /* Evict all entries in the cache except the pinned superblock entry */ + if(H5C_evict(f, dxpl_id) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't evict cache") + +done: + + /* If currently logging, generate a message */ + if(curr_logging) + if(H5AC__write_evict_cache_log_msg(f->shared->cache, ret_value) < 0) + HDONE_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to emit log message") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC_evict() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_expunge_entry * * Purpose: Expunge the target entry from the cache without writing it @@ -1004,6 +1051,84 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC_mark_entry_clean + * + * Purpose: Mark a pinned entry as dirty. The target + * entry MUST be pinned. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 7/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_mark_entry_clean(void *thing) +{ +#if H5AC__TRACE_FILE_ENABLED + char trace[128] = ""; + FILE * trace_file_ptr = NULL; +#endif /* H5AC__TRACE_FILE_ENABLED */ + hbool_t log_enabled; /* TRUE if logging was set up */ + hbool_t curr_logging; /* TRUE if currently logging */ + H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */ + H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(thing); + +#if H5AC__TRACE_FILE_ENABLED + /* For the mark pinned or protected entry clean call, only the addr + * is really necessary in the trace file. Write the result to catch + * occult errors. 
+ */ + if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing))) + sprintf(trace, "%s 0x%lx", FUNC, + (unsigned long)(((H5C_cache_entry_t *)thing)->addr)); +#endif /* H5AC__TRACE_FILE_ENABLED */ + + entry_ptr = (H5AC_info_t *)thing; + cache_ptr = entry_ptr->cache_ptr; + + /* Check if log messages are being emitted */ + if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status") + +#ifdef H5_HAVE_PARALLEL +{ + H5AC_aux_t *aux_ptr; + + aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); + if((!entry_ptr->is_dirty) && (!entry_ptr->is_protected) && + (entry_ptr->is_pinned) && (NULL != aux_ptr)) + if(H5AC__log_cleaned_entry(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't log cleaned entry") +} +#endif /* H5_HAVE_PARALLEL */ + + if(H5C_mark_entry_clean(thing) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't mark pinned or protected entry clean") + +done: +#if H5AC__TRACE_FILE_ENABLED + if(trace_file_ptr) + HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value); +#endif /* H5AC__TRACE_FILE_ENABLED */ + + /* If currently logging, generate a message */ + if(curr_logging) + if(H5AC__write_mark_clean_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0) + HDONE_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to emit log message") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC_mark_entry_clean() */ + + +/*------------------------------------------------------------------------- * Function: H5AC_move_entry * * Purpose: Use this function to notify the cache that an object's @@ -2619,3 +2744,67 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5AC_reset_ring() */ + +/*------------------------------------------------------------------------- + * Function: H5AC_remove_entry() + * + * Purpose: Remove an entry from the cache. Must be not protected, pinned, + * dirty, involved in flush dependencies, etc. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_remove_entry(void *_entry) +{ + H5AC_info_t *entry = (H5AC_info_t *)_entry; /* Entry to remove */ + H5C_t *cache = NULL; /* Pointer to the entry's associated metadata cache */ +#if H5AC__TRACE_FILE_ENABLED + char trace[128] = ""; + FILE * trace_file_ptr = NULL; +#endif /* H5AC__TRACE_FILE_ENABLED */ + hbool_t log_enabled; /* TRUE if logging was set up */ + hbool_t curr_logging; /* TRUE if currently logging */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + cache = entry->cache_ptr; + HDassert(cache); + +#if H5AC__TRACE_FILE_ENABLED + /* For the remove entry call, only the addr is really necessary + * in the trace file. Also write the result to catch occult errors. 
+ */ + if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(entry))) + sprintf(trace, "%s 0x%lx", FUNC, (unsigned long)(entry->addr)); +#endif /* H5AC__TRACE_FILE_ENABLED */ + + /* Check if log messages are being emitted */ + if(H5C_get_logging_status(cache, &log_enabled, &curr_logging) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to get logging status") + + /* Remove the entry from the cache*/ + if(H5C_remove_entry(entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry") + +done: +#if H5AC__TRACE_FILE_ENABLED + if(trace_file_ptr) + HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value); +#endif /* H5AC__TRACE_FILE_ENABLED */ + + /* If currently logging, generate a message */ + if(curr_logging) + if(H5AC__write_remove_entry_log_msg(cache, entry, ret_value) < 0) + HDONE_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to emit log message") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC_remove_entry() */ + diff --git a/src/H5AClog.c b/src/H5AClog.c index 980b105..1cdaa00 100644 --- a/src/H5AClog.c +++ b/src/H5AClog.c @@ -206,6 +206,51 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC__write_evict_cache_log_msg + * + * Purpose: Write a log message for eviction of cache entries. + * + * Return: Success: SUCCEED + * Failure: FAIL + * + * Programmer: Dana Robinson + * Sunday, March 16, 2014 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC__write_evict_cache_log_msg(const H5AC_t *cache, + herr_t fxn_ret_value) +{ + char msg[MSG_SIZE]; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(cache); + + /* Create the log message string */ + HDsnprintf(msg, MSG_SIZE, +"\ +{\ +\"timestamp\":%lld,\ +\"action\":\"evict\",\ +\"returned\":%d\ +},\n\ +" + , (long long)HDtime(NULL), (int)fxn_ret_value); + + /* Write the log message to the file */ + if(H5C_write_log_message(cache, msg) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC__write_evict_cache_log_msg() */ + + +/*------------------------------------------------------------------------- * Function: H5AC__write_expunge_entry_log_msg * * Purpose: Write a log message for expunge of cache entries. @@ -404,6 +449,53 @@ done: /*------------------------------------------------------------------------- + * Function: H5AC__write_mark_clean_entry_log_msg + * + * Purpose: Write a log message for marking cache entries as clean. 
+ * + * Return: Success: SUCCEED + * Failure: FAIL + * + * Programmer: Quincey Koziol + * Saturday, July 23, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC__write_mark_clean_entry_log_msg(const H5AC_t *cache, const H5AC_info_t *entry, + herr_t fxn_ret_value) +{ + char msg[MSG_SIZE]; /* Log message buffer */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(cache); + HDassert(entry); + + /* Create the log message string */ + HDsnprintf(msg, MSG_SIZE, +"\ +{\ +\"timestamp\":%lld,\ +\"action\":\"clean\",\ +\"address\":0x%lx,\ +\"returned\":%d\ +},\n\ +" + , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value); + + /* Write the log message to the file */ + if(H5C_write_log_message(cache, msg) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC__write_mark_clean_entry_log_msg() */ + + +/*------------------------------------------------------------------------- * Function: H5AC__write_move_entry_log_msg * * Purpose: Write a log message for moving a cache entry. @@ -870,3 +962,51 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5AC__write_set_cache_config_log_msg() */ + +/*------------------------------------------------------------------------- + * Function: H5AC__write_remove_entry_log_msg + * + * Purpose: Write a log message for removing a cache entry. + * + * Return: Success: SUCCEED + * Failure: FAIL + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC__write_remove_entry_log_msg(const H5AC_t *cache, const H5AC_info_t *entry, + herr_t fxn_ret_value) +{ + char msg[MSG_SIZE]; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(cache); + HDassert(entry); + + /* Create the log message string */ + HDsnprintf(msg, MSG_SIZE, +"\ +{\ +\"timestamp\":%lld,\ +\"action\":\"remove\",\ +\"address\":0x%lx,\ +\"returned\":%d\ +},\n\ +" + , (long long)HDtime(NULL), (unsigned long)entry->addr, + (int)fxn_ret_value); + + /* Write the log message to the file */ + if(H5C_write_log_message(cache, msg) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC__write_remove_entry_log_msg() */ + diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 89a8f9d..570783a 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -805,6 +805,63 @@ done: /*------------------------------------------------------------------------- * + * Function: H5AC__log_cleaned_entry() + * + * Purpose: Treat this operation as a 'clear' and remove the entry + * from both the cleaned and dirtied lists if it is present. + * Reduces the dirty_bytes count by the size of the entry. + * + * Return: Non-negative on success/Negative on failure. 
+ * + * Programmer: Quincey Koziol + * 7/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC__log_cleaned_entry(const H5AC_info_t *entry_ptr) +{ + H5AC_t * cache_ptr; + H5AC_aux_t * aux_ptr; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(entry_ptr); + HDassert(entry_ptr->is_dirty == FALSE); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr != NULL); + aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr); + HDassert(aux_ptr != NULL); + HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); + + if(aux_ptr->mpi_rank == 0) { + H5AC_slist_entry_t *slist_entry_ptr; + haddr_t addr = entry_ptr->addr; + + /* Sanity checks */ + HDassert(aux_ptr->d_slist_ptr != NULL); + HDassert(aux_ptr->c_slist_ptr != NULL); + + /* Remove it from both the cleaned list and the dirtied list. */ + if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->c_slist_ptr, (void *)(&addr)))) + slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr); + if(NULL != (slist_entry_ptr = (H5AC_slist_entry_t *)H5SL_remove(aux_ptr->d_slist_ptr, (void *)(&addr)))) + slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr); + + } /* end if */ + + /* Decrement the dirty byte count */ + aux_ptr->dirty_bytes -= entry_ptr->size; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC__log_cleaned_entry() */ + + +/*------------------------------------------------------------------------- + * * Function: H5AC__log_flushed_entry() * * Purpose: Update the clean entry slist for the flush of an entry -- diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index a298f85..3900ff1 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -412,6 +412,7 @@ typedef struct H5AC_aux_t /* Parallel I/O routines */ H5_DLL herr_t H5AC__log_deleted_entry(const H5AC_info_t *entry_ptr); H5_DLL herr_t H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr); +H5_DLL herr_t H5AC__log_cleaned_entry(const H5AC_info_t *entry_ptr); H5_DLL herr_t H5AC__log_flushed_entry(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty, unsigned flags); H5_DLL herr_t H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr); @@ -432,6 +433,8 @@ H5_DLL herr_t H5AC__open_trace_file(H5AC_t *cache_ptr, const char *trace_file_na /* Cache logging routines */ H5_DLL herr_t H5AC__write_create_cache_log_msg(H5AC_t *cache); H5_DLL herr_t H5AC__write_destroy_cache_log_msg(H5AC_t *cache); +H5_DLL herr_t H5AC__write_evict_cache_log_msg(const H5AC_t *cache, + herr_t fxn_ret_value); H5_DLL herr_t H5AC__write_expunge_entry_log_msg(const H5AC_t *cache, haddr_t address, int type_id, @@ -447,6 +450,8 @@ H5_DLL herr_t H5AC__write_insert_entry_log_msg(const H5AC_t *cache, H5_DLL herr_t H5AC__write_mark_dirty_entry_log_msg(const H5AC_t *cache, const H5AC_info_t *entry, herr_t fxn_ret_value); +H5_DLL herr_t H5AC__write_mark_clean_entry_log_msg(const H5AC_t *cache, + const H5AC_info_t *entry, herr_t fxn_ret_value); H5_DLL herr_t H5AC__write_move_entry_log_msg(const H5AC_t *cache, haddr_t old_addr, haddr_t new_addr, @@ -482,6 +487,9 @@ H5_DLL herr_t H5AC__write_unprotect_entry_log_msg(const H5AC_t *cache, H5_DLL herr_t H5AC__write_set_cache_config_log_msg(const H5AC_t *cache, const H5AC_cache_config_t *config, herr_t fxn_ret_value); +H5_DLL herr_t H5AC__write_remove_entry_log_msg(const H5AC_t *cache, + const H5AC_info_t *entry, + herr_t fxn_ret_value); #endif /* _H5ACpkg_H */ diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 2251af4..76013a3 100644 --- a/src/H5ACprivate.h +++ 
b/src/H5ACprivate.h @@ -86,7 +86,8 @@ typedef enum { H5AC_FARRAY_DBLK_PAGE_ID, /* (24) fixed array data block page */ H5AC_SUPERBLOCK_ID, /* (25) file superblock */ H5AC_DRVRINFO_ID, /* (26) driver info block (supplements superblock)*/ - H5AC_TEST_ID, /* (27) test entry -- not used for actual files */ + H5AC_PROXY_ENTRY_ID, /* (27) cache entry proxy */ + H5AC_TEST_ID, /* (28) test entry -- not used for actual files */ H5AC_NTYPES /* Number of types, must be last */ } H5AC_type_t; @@ -199,6 +200,25 @@ typedef H5C_cache_entry_t H5AC_info_t; /* Typedef for metadata cache (defined in H5Cpkg.h) */ typedef H5C_t H5AC_t; +/* Metadata cache proxy entry type */ +typedef struct H5AC_proxy_entry_t { + H5AC_info_t cache_info; /* Information for H5AC cache functions */ + /* (MUST be first field in structure) */ + + /* General fields */ + haddr_t addr; /* Address of the entry in the file */ + /* (Should be in 'temporary' address space) */ + + /* Parent fields */ + H5SL_t *parents; /* Skip list to track parent addresses */ + + /* Child fields */ + size_t nchildren; /* Number of children */ + size_t ndirty_children; /* Number of dirty children */ + /* (Note that this currently duplicates some cache functionality) */ +} H5AC_proxy_entry_t; + + #define H5AC_RING_NAME "H5AC_ring_type" /* Dataset transfer property lists for metadata calls */ @@ -353,11 +373,14 @@ H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing, unsigned flags); H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id); H5_DLL herr_t H5AC_mark_entry_dirty(void *thing); +H5_DLL herr_t H5AC_mark_entry_clean(void *thing); H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_addr, hid_t dxpl_id); H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id); +H5_DLL herr_t H5AC_evict(H5F_t *f, hid_t dxpl_id); H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, unsigned flags); +H5_DLL herr_t H5AC_remove_entry(void *entry); H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr, H5AC_cache_config_t *config_ptr); H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, @@ -381,6 +404,15 @@ H5_DLL herr_t H5AC_set_ring(hid_t dxpl_id, H5AC_ring_t ring, H5P_genplist_t **dx H5_DLL herr_t H5AC_reset_ring(H5P_genplist_t *dxpl, H5AC_ring_t orig_ring); H5_DLL herr_t H5AC_expunge_tag_type_metadata(H5F_t *f, hid_t dxpl_id, haddr_t tag, int type_id, unsigned flags); +/* Virtual entry routines */ +H5_DLL H5AC_proxy_entry_t *H5AC_proxy_entry_create(void); +H5_DLL herr_t H5AC_proxy_entry_add_parent(H5AC_proxy_entry_t *pentry, void *parent); +H5_DLL herr_t H5AC_proxy_entry_remove_parent(H5AC_proxy_entry_t *pentry, void *parent); +H5_DLL herr_t H5AC_proxy_entry_add_child(H5AC_proxy_entry_t *pentry, H5F_t *f, + hid_t dxpl_id, void *child); +H5_DLL herr_t H5AC_proxy_entry_remove_child(H5AC_proxy_entry_t *pentry, void *child); +H5_DLL herr_t H5AC_proxy_entry_dest(H5AC_proxy_entry_t *pentry); + #ifdef H5_HAVE_PARALLEL H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr); #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5ACproxy_entry.c b/src/H5ACproxy_entry.c new file mode 100644 index 0000000..ab5edb9 --- /dev/null +++ b/src/H5ACproxy_entry.c @@ -0,0 +1,796 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. 
* + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5ACproxy_entry.c + * + * Purpose: Functions and a cache client for a "proxy" cache entry. + * A proxy cache entry is used as a placeholder for entire + * data structures to attach flush dependencies, etc. + * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ +#include "H5ACmodule.h" /* This source code file is part of the H5AC module */ + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5ACpkg.h" /* Metadata cache */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5MFprivate.h" /* File memory management */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ + +/* Metadata cache (H5AC) callbacks */ +static herr_t H5AC__proxy_entry_image_len(const void *thing, size_t *image_len); +static herr_t H5AC__proxy_entry_serialize(const H5F_t *f, void *image_ptr, + size_t len, void *thing); +static herr_t H5AC__proxy_entry_notify(H5AC_notify_action_t action, void *thing); +static herr_t H5AC__proxy_entry_free_icr(void *thing); + +/*********************/ +/* Package Variables */ +/*********************/ + +/* H5AC proxy entries inherit cache-like properties from H5AC */ +const H5AC_class_t H5AC_PROXY_ENTRY[1] = {{ + H5AC_PROXY_ENTRY_ID, /* Metadata client ID */ + "Proxy entry", /* Metadata client name (for debugging) */ + H5FD_MEM_SUPER, /* File space memory type for client */ + 0, /* Client class behavior flags */ + NULL, /* 'get_initial_load_size' callback */ + NULL, /* 'get_final_load_size' callback */ + NULL, /* 'verify_chksum' callback */ + NULL, /* 'deserialize' callback */ + H5AC__proxy_entry_image_len, /* 'image_len' callback */ + NULL, /* 'pre_serialize' callback */ + H5AC__proxy_entry_serialize, /* 'serialize' callback */ + H5AC__proxy_entry_notify, /* 'notify' callback */ + H5AC__proxy_entry_free_icr, /* 'free_icr' callback */ + NULL, /* 'fsf_size' callback */ +}}; + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + +/* Declare a free list to manage H5AC_proxy_entry_t objects */ +H5FL_DEFINE_STATIC(H5AC_proxy_entry_t); + + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_create + * + * Purpose: Create a new proxy entry + * + * Return: Success: Pointer to the new proxy entry object. 
+ * Failure: NULL + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +H5AC_proxy_entry_t * +H5AC_proxy_entry_create(void) +{ + H5AC_proxy_entry_t *pentry = NULL; /* Pointer to new proxy entry */ + H5AC_proxy_entry_t *ret_value = NULL; /* Return value */ + + FUNC_ENTER_NOAPI(NULL) + + /* Allocate new proxy entry */ + if(NULL == (pentry = H5FL_CALLOC(H5AC_proxy_entry_t))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "can't allocate proxy entry") + + /* Set non-zero fields */ + pentry->addr = HADDR_UNDEF; + + /* Set return value */ + ret_value = pentry; + +done: + /* Release resources on error */ + if(!ret_value) + if(pentry) + pentry = H5FL_FREE(H5AC_proxy_entry_t, pentry); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_create() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_add_parent + * + * Purpose: Add a parent to a proxy entry + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_proxy_entry_add_parent(H5AC_proxy_entry_t *pentry, void *_parent) +{ + H5AC_info_t *parent = (H5AC_info_t *)_parent; /* Parent entry's cache info */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(parent); + HDassert(pentry); + + /* Add parent to the list of parents */ + if(NULL == pentry->parents) + if(NULL == (pentry->parents = H5SL_create(H5SL_TYPE_HADDR, NULL))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "unable to create skip list for parents of proxy entry") + + /* Insert parent address into skip list */ + if(H5SL_insert(pentry->parents, parent, &parent->addr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert parent into proxy's skip list") + + /* Add flush dependency on parent */ + if(pentry->nchildren > 0) { + /* Sanity check */ + HDassert(H5F_addr_defined(pentry->addr)); + + if(H5AC_create_flush_dependency(parent, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "unable to set flush dependency on proxy entry") + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_add_parent() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_remove_parent + * + * Purpose: Removes a parent from a proxy entry + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_proxy_entry_remove_parent(H5AC_proxy_entry_t *pentry, void *_parent) +{ + H5AC_info_t *parent = (H5AC_info_t *)_parent; /* Pointer to the parent entry */ + H5AC_info_t *rem_parent; /* Pointer to the removed parent entry */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(pentry); + HDassert(pentry->parents); + HDassert(parent); + + /* Remove parent from skip list */ + if(NULL == (rem_parent = (H5AC_info_t *)H5SL_remove(pentry->parents, &parent->addr))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "unable to remove proxy entry parent from skip list") + if(!H5F_addr_eq(rem_parent->addr, parent->addr)) + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "removed proxy entry parent not the same as real parent") + + /* Shut down 
the skip list, if this is the last parent */ + if(0 == H5SL_count(pentry->parents)) { + /* Sanity check */ + HDassert(0 == pentry->nchildren); + + if(H5SL_close(pentry->parents) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CLOSEERROR, FAIL, "can't close proxy parent skip list") + pentry->parents = NULL; + } /* end if */ + + /* Remove flush dependency between the proxy entry and a parent */ + if(pentry->nchildren > 0) + if(H5AC_destroy_flush_dependency(parent, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "unable to remove flush dependency on proxy entry") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_remove_parent() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_add_child_cb + * + * Purpose: Callback routine for adding an entry as a flush dependency for + * a proxy entry. + * + * Return: Success: Non-negative on success + * Failure: Negative + * + * Programmer: Quincey Koziol + * Thursday, September 22, 2016 + * + *------------------------------------------------------------------------- + */ +static int +H5AC__proxy_entry_add_child_cb(void *_item, void H5_ATTR_UNUSED *_key, void *_udata) +{ + H5AC_info_t *parent = (H5AC_info_t *)_item; /* Pointer to the parent entry */ + H5AC_proxy_entry_t *pentry = (H5AC_proxy_entry_t *)_udata; /* Pointer to the proxy entry */ + int ret_value = H5_ITER_CONT; /* Callback return value */ + + FUNC_ENTER_STATIC + + /* Add flush dependency on parent for proxy entry */ + if(H5AC_create_flush_dependency(parent, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, H5_ITER_ERROR, "unable to set flush dependency for virtual entry") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC__proxy_entry_add_child_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_add_child + * + * Purpose: Add a child a proxy entry + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_proxy_entry_add_child(H5AC_proxy_entry_t *pentry, H5F_t *f, hid_t dxpl_id, + void *child) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(pentry); + HDassert(child); + + /* Check for first child */ + if(0 == pentry->nchildren) { + /* Get an address, if the proxy doesn't already have one */ + if(!H5F_addr_defined(pentry->addr)) + if(HADDR_UNDEF == (pentry->addr = H5MF_alloc_tmp(f, 1))) + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "temporary file space allocation failed for proxy entry") + + /* Insert the proxy entry into the cache */ + if(H5AC_insert_entry(f, dxpl_id, H5AC_PROXY_ENTRY, pentry->addr, pentry, H5AC__PIN_ENTRY_FLAG) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to cache proxy entry") + + /* Proxies start out clean (insertions are automatically marked dirty) */ + if(H5AC_mark_entry_clean(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark proxy entry clean") + + /* If there are currently parents, iterate over the list of parents, creating flush dependency on them */ + if(pentry->parents) + if(H5SL_iterate(pentry->parents, H5AC__proxy_entry_add_child_cb, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "can't visit parents") + } /* end if */ + + /* Add flush dependency on proxy entry */ + if(H5AC_create_flush_dependency(pentry, child) < 0) + HGOTO_ERROR(H5E_CACHE, 
H5E_CANTDEPEND, FAIL, "unable to set flush dependency on proxy entry") + + /* Increment count of children */ + pentry->nchildren++; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_add_child() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_remove_child_cb + * + * Purpose: Callback routine for removing an entry as a flush dependency for + * proxy entry. + * + * Return: Success: Non-negative on success + * Failure: Negative + * + * Programmer: Quincey Koziol + * Thursday, September 22, 2016 + * + *------------------------------------------------------------------------- + */ +static int +H5AC__proxy_entry_remove_child_cb(void *_item, void H5_ATTR_UNUSED *_key, void *_udata) +{ + H5AC_info_t *parent = (H5AC_info_t *)_item; /* Pointer to the parent entry */ + H5AC_proxy_entry_t *pentry = (H5AC_proxy_entry_t *)_udata; /* Pointer to the proxy entry */ + int ret_value = H5_ITER_CONT; /* Callback return value */ + + FUNC_ENTER_STATIC + + /* Remove flush dependency on parent for proxy entry */ + if(H5AC_destroy_flush_dependency(parent, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, H5_ITER_ERROR, "unable to remove flush dependency for proxy entry") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC__proxy_entry_remove_child_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_remove_child + * + * Purpose: Remove a child a proxy entry + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_proxy_entry_remove_child(H5AC_proxy_entry_t *pentry, void *child) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(pentry); + HDassert(child); + + /* Remove flush dependency on proxy entry */ + if(H5AC_destroy_flush_dependency(pentry, child) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "unable to remove flush dependency on proxy entry") + + /* Decrement count of children */ + pentry->nchildren--; + + /* Check for last child */ + if(0 == pentry->nchildren) { + /* Check for flush dependencies on proxy's parents */ + if(pentry->parents) + /* Iterate over the list of parents, removing flush dependency on them */ + if(H5SL_iterate(pentry->parents, H5AC__proxy_entry_remove_child_cb, pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "can't visit parents") + + /* Unpin proxy */ + if(H5AC_unpin_entry(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin proxy entry") + + /* Remove proxy entry from cache */ + if(H5AC_remove_entry(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "unable to remove proxy entry") + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_remove_child() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_proxy_entry_dest + * + * Purpose: Destroys a proxy entry in memory. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_proxy_entry_dest(H5AC_proxy_entry_t *pentry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(pentry); + HDassert(NULL == pentry->parents); + HDassert(0 == pentry->nchildren); + HDassert(0 == pentry->ndirty_children); + + /* Free the proxy entry object */ + pentry = H5FL_FREE(H5AC_proxy_entry_t, pentry); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_proxy_entry_dest() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_image_len + * + * Purpose: Compute the size of the data structure on disk. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5AC__proxy_entry_image_len(const void H5_ATTR_UNUSED *thing, size_t *image_len) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check arguments */ + HDassert(image_len); + + /* Set the image length size to 1 byte */ + *image_len = 1; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5AC__proxy_entry_image_len() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_serialize + * + * Purpose: Serializes a data structure for writing to disk. + * + * Note: Should never be invoked. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5AC__proxy_entry_serialize(const H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *image, + size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *thing) +{ + FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */ + + /* Should never be invoked */ + HDassert(0 && "Invalid callback?!?"); + + HERROR(H5E_CACHE, H5E_CANTSERIALIZE, "called unreachable fcn."); + + FUNC_LEAVE_NOAPI(FAIL) +} /* end H5AC__proxy_entry_serialize() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5AC__proxy_entry_notify(H5AC_notify_action_t action, void *_thing) +{ + H5AC_proxy_entry_t *pentry = (H5AC_proxy_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity check */ + HDassert(pentry); + + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + break; + + case H5AC_NOTIFY_ACTION_AFTER_LOAD: +#ifdef NDEBUG + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid notify action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Invalid action?!?"); +#endif /* NDEBUG */ + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: +#ifdef NDEBUG + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid notify action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Invalid action?!?"); +#endif /* NDEBUG */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Sanity checks */ + HDassert(0 == pentry->ndirty_children); + + /* No 
action */ + break; + + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + /* Sanity checks */ + HDassert(pentry->ndirty_children > 0); + + /* No action */ + break; + + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + /* Sanity checks */ + HDassert(0 == pentry->ndirty_children); + + /* No action */ + break; + + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + /* Increment # of dirty children */ + pentry->ndirty_children++; + + /* Check for first dirty child */ + if(1 == pentry->ndirty_children) + if(H5AC_mark_entry_dirty(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDIRTY, FAIL, "can't mark proxy entry dirty") + break; + + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* Sanity check */ + HDassert(pentry->ndirty_children > 0); + + /* Decrement # of dirty children */ + pentry->ndirty_children--; + + /* Check for last dirty child */ + if(0 == pentry->ndirty_children) + if(H5AC_mark_entry_clean(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark proxy entry clean") + break; + + default: +#ifdef NDEBUG + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown notify action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC__proxy_entry_notify() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC__proxy_entry_free_icr + * + * Purpose: Destroy/release an "in core representation" of a data + * structure + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5AC__proxy_entry_free_icr(void *_thing) +{ + H5AC_proxy_entry_t *pentry = (H5AC_proxy_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Destroy the proxy entry */ + if(H5AC_proxy_entry_dest(pentry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to destroy proxy entry") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5AC__proxy_entry_free_icr() */ + +#ifdef OLD_CODE + +/*------------------------------------------------------------------------- + * Function: H5AC_virt_entry_dirty_parent + * + * Purpose: Indicate that a virtual entry's parent became dirty + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * July 23, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_virt_entry_dirty_parent(H5AC_virt_entry_t *ventry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(ventry); + HDassert(ventry->track_parents); + HDassert(ventry->nparents > 0); + + /* If this is the first dirty parent or child, mark the virtual entry dirty */ + if(ventry->in_cache && 0 == ventry->ndirty_parents && 0 == ventry->ndirty_children) + if(H5AC_mark_entry_dirty(ventry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDIRTY, FAIL, "can't mark virtual entry dirty") + + /* Increment the number of dirty parents */ + ventry->ndirty_parents++; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_virt_entry_dirty_parent() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_virt_entry_clean_parent + * + * Purpose: Indicate that a virtual entry's parent became clean + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * July 23, 2016 + * + 
*------------------------------------------------------------------------- + */ +herr_t +H5AC_virt_entry_clean_parent(H5AC_virt_entry_t *ventry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(ventry); + HDassert(ventry->track_parents); +// HDassert(ventry->nparents > 0); + HDassert(ventry->ndirty_parents > 0); + + /* Decrement the number of dirty parents */ + ventry->ndirty_parents--; + + /* If this is the last dirty parent or child, mark the virtual entry clean */ + if(ventry->in_cache && 0 == ventry->ndirty_parents && 0 == ventry->ndirty_children) + if(H5AC_mark_entry_clean(ventry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark virtual entry clean") + + /* Destroy the skip list, if no more parents */ + if(0 == ventry->nparents && 0 == ventry->ndirty_parents) { +// /* Sanity check */ +// HDassert(0 == ventry->ndirty_parents); + + if(H5SL_close(ventry->parents) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CLOSEERROR, FAIL, "can't close parent list") + ventry->parents = NULL; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_virt_entry_clean_parent() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_virt_entry_dirty_child + * + * Purpose: Indicate that a virtual entry's child became dirty + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * July 24, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_virt_entry_dirty_child(H5AC_virt_entry_t *ventry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(ventry); + HDassert(ventry->nchildren > 0); + + /* If this is the first dirty parent or child, mark the virtual entry dirty */ + if(ventry->in_cache && 0 == ventry->ndirty_parents && 0 == ventry->ndirty_children) + if(H5AC_mark_entry_dirty(ventry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTDIRTY, FAIL, "can't mark virtual entry dirty") + + /* Increment the number of dirty children */ + ventry->ndirty_children++; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_virt_entry_dirty_child() */ + + +/*------------------------------------------------------------------------- + * Function: H5AC_virt_entry_clean_child + * + * Purpose: Indicate that a virtual entry's child became clean + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * July 24, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5AC_virt_entry_clean_child(H5AC_virt_entry_t *ventry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(ventry); + HDassert(ventry->nchildren > 0); + HDassert(ventry->ndirty_children > 0); + + /* Decrement the number of dirty children */ + ventry->ndirty_children--; + + /* If this is the last dirty parent or child, mark the virtual entry clean */ + if(ventry->in_cache && 0 == ventry->ndirty_parents && 0 == ventry->ndirty_children) + if(H5AC_mark_entry_clean(ventry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark virtual entry clean") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5AC_virt_entry_clean_child() */ + +#endif /* OLD_CODE */ + + @@ -290,7 +290,7 @@ H5B2_insert(H5B2_t *bt2, hid_t dxpl_id, void *udata) hdr = bt2->hdr; /* Insert the record */ - if(H5B2__insert_hdr(hdr, dxpl_id, udata) < 0) + if(H5B2__insert(hdr, dxpl_id, udata) < 0) 
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree") done: @@ -336,17 +336,17 @@ H5B2_update(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, void *op_ /* Check if the root node is allocated yet */ if(!H5F_addr_defined(hdr->root.addr)) { /* Create root node as leaf node in B-tree */ - if(H5B2__create_leaf(hdr, dxpl_id, &(hdr->root)) < 0) + if(H5B2__create_leaf(hdr, dxpl_id, hdr, &(hdr->root)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create root node") } /* end if */ /* Attempt to insert record into B-tree */ if(hdr->depth > 0) { - if(H5B2__update_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, &status, H5B2_POS_ROOT, udata, op, op_data) < 0) + if(H5B2__update_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, &status, H5B2_POS_ROOT, hdr, udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in B-tree internal node") } /* end if */ else { - if(H5B2__update_leaf(hdr, dxpl_id, &hdr->root, &status, H5B2_POS_ROOT, udata, op, op_data) < 0) + if(H5B2__update_leaf(hdr, dxpl_id, &hdr->root, &status, H5B2_POS_ROOT, hdr, udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in B-tree leaf node") } /* end else */ @@ -354,9 +354,19 @@ H5B2_update(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, void *op_ HDassert(H5B2_UPDATE_UNKNOWN != status); /* Use insert algorithm if nodes to leaf full */ - if(H5B2_UPDATE_INSERT_CHILD_FULL == status) - if(H5B2__insert_hdr(hdr, dxpl_id, udata) < 0) + if(H5B2_UPDATE_INSERT_CHILD_FULL == status) { + if(H5B2__insert(hdr, dxpl_id, udata) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree") + } /* end if */ + else if(H5B2_UPDATE_SHADOW_DONE == status || H5B2_UPDATE_INSERT_DONE == status) { + /* Mark B-tree header as dirty */ + if(H5B2__hdr_dirty(hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTMARKDIRTY, FAIL, "unable to mark B-tree header dirty") + } /* end else-if */ + else { + /* Sanity check */ + HDassert(H5B2_UPDATE_MODIFY_DONE == status); + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -432,7 +442,7 @@ H5B2_iterate(H5B2_t *bt2, hid_t dxpl_id, H5B2_operator_t op, void *op_data) /* Iterate through records */ if(hdr->root.node_nrec > 0) { /* Iterate through nodes */ - if((ret_value = H5B2__iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, op, op_data)) < 0) + if((ret_value = H5B2__iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, op, op_data)) < 0) HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed"); } /* end if */ @@ -469,6 +479,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, { H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */ + void *parent = NULL; /* Parent of current node */ uint16_t depth; /* Current depth of the tree */ int cmp; /* Comparison value of records */ unsigned idx; /* Location of record which matches key */ @@ -522,6 +533,10 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, /* Current depth of the tree */ depth = hdr->depth; + /* Set initial parent, if doing swmr writes */ + if(hdr->swmr_write) + parent = hdr; + /* Walk down B-tree to find record or leaf node where record is located */ cmp = -1; curr_pos = H5B2_POS_ROOT; @@ -530,12 +545,18 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */ /* Lock B-tree current node */ - if(NULL == (internal 
= H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, &curr_node_ptr, depth, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) { + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) { /* Unlock current node before failing */ H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET); HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") @@ -566,9 +587,13 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, } /* end if */ /* Unlock current node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + /* Keep track of parent if necessary */ + if(hdr->swmr_write) + parent = internal; + /* Set pointer to next node to load */ curr_node_ptr = next_node_ptr; } /* end if */ @@ -598,16 +623,22 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, &curr_node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, - udata, &idx, &cmp) < 0) { - /* unlock current node before failing */ + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) { + /* Unlock current node before failing */ H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, leaf, H5AC__NO_FLAGS_SET); HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - } + } /* end if */ if(cmp != 0) { /* Unlock leaf node */ @@ -655,6 +686,12 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op, } /* end block */ done: + if(parent) { + HDassert(ret_value < 0); + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_find() */ @@ -683,6 +720,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, { H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */ + void *parent = NULL; /* Parent of current node */ 
uint16_t depth; /* Current depth of the tree */ herr_t ret_value = SUCCEED; /* Return value */ @@ -712,6 +750,10 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, /* Current depth of the tree */ depth = hdr->depth; + /* Set initial parent, if doing swmr writes */ + if(hdr->swmr_write) + parent = hdr; + /* Check for reverse indexing and map requested index to appropriate forward index */ if(order == H5_ITER_DEC) idx = curr_node_ptr.all_nrec - (idx + 1); @@ -723,9 +765,16 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, unsigned u; /* Local index variable */ /* Lock B-tree current node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, &curr_node_ptr, depth, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Search for record with correct index */ for(u = 0; u < internal->nrec; u++) { /* Check if record is in child node */ @@ -734,9 +783,13 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, next_node_ptr = internal->node_ptrs[u]; /* Unlock current node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + /* Keep track of parent if necessary */ + if(hdr->swmr_write) + parent = internal; + /* Set pointer to next node to load */ curr_node_ptr = next_node_ptr; @@ -776,9 +829,13 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, next_node_ptr = internal->node_ptrs[u]; /* Unlock current node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? 
H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + /* Keep track of parent if necessary */ + if(hdr->swmr_write) + parent = internal; + /* Set pointer to next node to load */ curr_node_ptr = next_node_ptr; } /* end if */ @@ -795,9 +852,16 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, &curr_node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Sanity check index */ HDassert(idx < leaf->nrec); @@ -816,6 +880,12 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx, } /* end block */ done: + if(parent) { + HDassert(ret_value < 0); + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_index() */ @@ -859,8 +929,9 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op, if(hdr->depth > 0) { hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */ - if(H5B2__remove_internal(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth, - &(hdr->cache_info), NULL, H5B2_POS_ROOT, &hdr->root, udata, op, op_data) < 0) + if(H5B2__remove_internal(hdr, dxpl_id, &depth_decreased, NULL, NULL, + hdr->depth, &(hdr->cache_info), NULL, H5B2_POS_ROOT, &hdr->root, + udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") /* Check for decreasing the depth of the B-tree */ @@ -878,7 +949,7 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op, } /* end for */ } /* end if */ else { - if(H5B2__remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata, op, op_data) < 0) + if(H5B2__remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") } /* end else */ @@ -941,8 +1012,9 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, if(hdr->depth > 0) { hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */ - if(H5B2__remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth, - &(hdr->cache_info), NULL, &hdr->root, H5B2_POS_ROOT, idx, op, op_data) < 0) + if(H5B2__remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL, NULL, + hdr->depth, &(hdr->cache_info), NULL, &hdr->root, H5B2_POS_ROOT, + idx, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") /* Check for decreasing the depth of the B-tree */ @@ -960,7 +1032,7 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, } /* end for */ } /* end if */ else { - if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, (unsigned)idx, op, op_data) < 0) + if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, (unsigned)idx, op, 
op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") } /* end else */ @@ -1055,11 +1127,11 @@ H5B2_neighbor(H5B2_t *bt2, hid_t dxpl_id, H5B2_compare_t range, void *udata, /* Attempt to find neighbor record in B-tree */ if(hdr->depth > 0) { - if(H5B2__neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, udata, op, op_data) < 0) + if(H5B2__neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, hdr, udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node") } /* end if */ else { - if(H5B2__neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, udata, op, op_data) < 0) + if(H5B2__neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, hdr, udata, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node") } /* end else */ @@ -1094,6 +1166,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, { H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */ + void *parent = NULL; /* Parent of current node */ H5B2_nodepos_t curr_pos; /* Position of current node */ uint16_t depth; /* Current depth of the tree */ int cmp; /* Comparison value of records */ @@ -1122,6 +1195,10 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, /* Current depth of the tree */ depth = hdr->depth; + /* Set initial parent, if doing swmr writes */ + if(hdr->swmr_write) + parent = hdr; + /* Walk down B-tree to find record or leaf node where record is located */ cmp = -1; curr_pos = H5B2_POS_ROOT; @@ -1131,16 +1208,22 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */ /* Lock B-tree current node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__NO_FLAGS_SET))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, &curr_node_ptr, depth, FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) { - /* Unlock current node */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) { + /* Unlock current node before failing */ H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET); HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - } + } /* end if */ if(cmp > 0) idx++; @@ -1168,9 +1251,13 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, } /* end if */ /* Unlock current node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? 
H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + /* Keep track of parent if necessary */ + if(hdr->swmr_write) + parent = internal; + /* Set pointer to next node to load */ curr_node_ptr = next_node_ptr; } /* end if */ @@ -1209,15 +1296,22 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */ /* Lock B-tree leaf node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__NO_FLAGS_SET))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, &curr_node_ptr, FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, - udata, &idx, &cmp) < 0) { + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) { + /* Unlock current node before failing */ H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr.addr, leaf, H5AC__NO_FLAGS_SET); HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - } + } /* end if */ if(cmp != 0) { /* Unlock leaf node */ @@ -1278,6 +1372,12 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op, } /* end block */ done: + if(parent) { + HDassert(ret_value < 0); + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_modify() */ @@ -1451,6 +1551,59 @@ done: /*------------------------------------------------------------------------- + * Function: H5B2_depend + * + * Purpose: Make a child flush dependency between the v2 B-tree's + * header and another piece of metadata in the file. + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2_depend(H5B2_t *bt2, hid_t dxpl_id, H5AC_proxy_entry_t *parent) +{ + /* Local variables */ + H5B2_hdr_t *hdr = bt2->hdr; /* Header for B-tree */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(SUCCEED) + + /* + * Check arguments. + */ + HDassert(bt2); + HDassert(hdr); + HDassert(parent); + HDassert(hdr->parent == NULL || hdr->parent == parent); + + /* + * Check to see if the flush dependency between the parent + * and the v2 B-tree header has already been setup. If it hasn't, then + * set it up. 
+ */ + if(NULL == hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); + + /* Set the shared v2 B-tree header's file context for this operation */ + hdr->f = bt2->f; + + /* Add the v2 B-tree as a child of the parent (proxy) */ + if(H5AC_proxy_entry_add_child(parent, hdr->f, dxpl_id, hdr->top_proxy) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, FAIL, "unable to add v2 B-tree as child of proxy") + hdr->parent = parent; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2_depend() */ + + +/*------------------------------------------------------------------------- * Function: H5B2_patch_file * * Purpose: Patch the top-level file pointer contained in bt2 diff --git a/src/H5B2cache.c b/src/H5B2cache.c index 51ad32b..6954e6c 100644 --- a/src/H5B2cache.c +++ b/src/H5B2cache.c @@ -72,6 +72,7 @@ static void *H5B2__cache_hdr_deserialize(const void *image, size_t len, static herr_t H5B2__cache_hdr_image_len(const void *thing, size_t *image_len); static herr_t H5B2__cache_hdr_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5B2__cache_hdr_notify(H5AC_notify_action_t action, void *thing); static herr_t H5B2__cache_hdr_free_icr(void *thing); static herr_t H5B2__cache_int_get_initial_load_size(void *udata, size_t *image_len); @@ -81,6 +82,7 @@ static void *H5B2__cache_int_deserialize(const void *image, size_t len, static herr_t H5B2__cache_int_image_len(const void *thing, size_t *image_len); static herr_t H5B2__cache_int_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5B2__cache_int_notify(H5AC_notify_action_t action, void *thing); static herr_t H5B2__cache_int_free_icr(void *thing); static herr_t H5B2__cache_leaf_get_initial_load_size(void *udata, size_t *image_len); @@ -90,6 +92,7 @@ static void *H5B2__cache_leaf_deserialize(const void *image, size_t len, static herr_t H5B2__cache_leaf_image_len(const void *thing, size_t *image_len); static herr_t H5B2__cache_leaf_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5B2__cache_leaf_notify(H5AC_notify_action_t action, void *thing); static herr_t H5B2__cache_leaf_free_icr(void *thing); /*********************/ @@ -109,7 +112,7 @@ const H5AC_class_t H5AC_BT2_HDR[1] = {{ H5B2__cache_hdr_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5B2__cache_hdr_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5B2__cache_hdr_notify, /* 'notify' callback */ H5B2__cache_hdr_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -127,7 +130,7 @@ const H5AC_class_t H5AC_BT2_INT[1] = {{ H5B2__cache_int_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5B2__cache_int_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5B2__cache_int_notify, /* 'notify' callback */ H5B2__cache_int_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -145,7 +148,7 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{ H5B2__cache_leaf_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5B2__cache_leaf_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5B2__cache_leaf_notify, /* 'notify' callback */ H5B2__cache_leaf_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -433,6 +436,91 @@ H5B2__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED le /*------------------------------------------------------------------------- + * Function: H5B2__cache_hdr_notify + * + * 
Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * nfortne2@hdfgroup.org + * Apr 24 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5B2__cache_hdr_notify(H5AC_notify_action_t action, void *_thing) +{ + H5B2_hdr_t *hdr = (H5B2_hdr_t *)_thing; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + + /* + * Check arguments. + */ + HDassert(hdr); + + /* Check if the file was opened with SWMR-write access */ + if(hdr->swmr_write) { + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + /* Increment the shadow epoch, forcing new modifications to + * internal and leaf nodes to create new shadow copies */ + hdr->shadow_epoch++; + break; + + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* If hdr->parent != NULL, hdr->parent is used to destroy + * the flush dependency before the header is evicted. + */ + if(hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); + + /* Destroy flush dependency on object header proxy */ + if(H5AC_proxy_entry_remove_child((H5AC_proxy_entry_t *)hdr->parent, (void *)hdr->top_proxy) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency between v2 B-tree and proxy") + hdr->parent = NULL; + } /* end if */ + + /* Detach from 'top' proxy for extensible array */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_remove_child(hdr->top_proxy, hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency between header and v2 B-tree 'top' proxy") + /* Don't reset hdr->top_proxy here, it's destroyed when the header is freed -QAK */ + } /* end if */ + break; + + default: +#ifdef NDEBUG + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + else + HDassert(NULL == hdr->parent); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__cache_hdr_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5B2__cache_hdr_free_icr * * Purpose: Destroy/release an "in core representation" of a data @@ -573,9 +661,8 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, HDassert(udata); /* Allocate new internal node and reset cache info */ - if(NULL == (internal = H5FL_MALLOC(H5B2_internal_t))) + if(NULL == (internal = H5FL_CALLOC(H5B2_internal_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") - HDmemset(&internal->cache_info, 0, sizeof(H5AC_info_t)); /* Increment ref. 
count on B-tree header */ if(H5B2__hdr_incr(udata->hdr) < 0) @@ -583,6 +670,8 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, /* Share B-tree information */ internal->hdr = udata->hdr; + internal->parent = udata->parent; + internal->shadow_epoch = udata->hdr->shadow_epoch; /* Magic number */ if(HDmemcmp(image, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC)) @@ -775,6 +864,80 @@ done: /*------------------------------------------------------------------------- + * Function: H5B2__cache_int_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * nfortne2@hdfgroup.org + * Apr 25 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5B2__cache_int_notify(H5AC_notify_action_t action, void *_thing) +{ + H5B2_internal_t *internal = (H5B2_internal_t *)_thing; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + + /* + * Check arguments. + */ + HDassert(internal); + HDassert(internal->hdr); + + /* Check if the file was opened with SWMR-write access */ + if(internal->hdr->swmr_write) { + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + /* Create flush dependency on parent */ + if(H5B2__create_flush_depend((H5AC_info_t *)internal->parent, (H5AC_info_t *)internal) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Destroy flush dependency on parent */ + if(H5B2__destroy_flush_depend((H5AC_info_t *)internal->parent, (H5AC_info_t *)internal) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + + /* Detach from 'top' proxy for v2 B-tree */ + if(internal->top_proxy) { + if(H5AC_proxy_entry_remove_child(internal->top_proxy, internal) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency between internal node and v2 B-tree 'top' proxy") + internal->top_proxy = NULL; + } /* end if */ + break; + + default: +#ifdef NDEBUG + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + else + HDassert(NULL == internal->top_proxy); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__cache_int_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5B2__cache_int_free_icr * * Purpose: Destroy/release an "in core representation" of a data @@ -915,9 +1078,8 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, HDassert(udata); /* Allocate new leaf node and reset cache info */ - if(NULL == (leaf = H5FL_MALLOC(H5B2_leaf_t))) + if(NULL == (leaf = H5FL_CALLOC(H5B2_leaf_t))) HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "memory allocation failed") - HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t)); /* Increment ref. 
count on B-tree header */ if(H5B2__hdr_incr(udata->hdr) < 0) @@ -925,6 +1087,8 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len, /* Share B-tree header information */ leaf->hdr = udata->hdr; + leaf->parent = udata->parent; + leaf->shadow_epoch = udata->hdr->shadow_epoch; /* Magic number */ if(HDmemcmp(image, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC)) @@ -1027,7 +1191,7 @@ H5B2__cache_leaf_image_len(const void *_thing, size_t *image_len) *------------------------------------------------------------------------- */ static herr_t -H5B2__cache_leaf_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len, +H5B2__cache_leaf_serialize(const H5F_t H5_ATTR_UNUSED *f, void *_image, size_t H5_ATTR_UNUSED len, void *_thing) { H5B2_leaf_t *leaf = (H5B2_leaf_t *)_thing; /* Pointer to the B-tree leaf node */ @@ -1086,6 +1250,80 @@ done: /*------------------------------------------------------------------------- + * Function: H5B2__cache_leaf_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * nfortne2@hdfgroup.org + * Apr 25 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5B2__cache_leaf_notify(H5AC_notify_action_t action, void *_thing) +{ + H5B2_leaf_t *leaf = (H5B2_leaf_t *)_thing; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI_NOINIT + + /* + * Check arguments. + */ + HDassert(leaf); + HDassert(leaf->hdr); + + /* Check if the file was opened with SWMR-write access */ + if(leaf->hdr->swmr_write) { + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + /* Create flush dependency on parent */ + if(H5B2__create_flush_depend((H5AC_info_t *)leaf->parent, (H5AC_info_t *)leaf) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Destroy flush dependency on parent */ + if(H5B2__destroy_flush_depend((H5AC_info_t *)leaf->parent, (H5AC_info_t *)leaf) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + + /* Detach from 'top' proxy for v2 B-tree */ + if(leaf->top_proxy) { + if(H5AC_proxy_entry_remove_child(leaf->top_proxy, leaf) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency between leaf node and v2 B-tree 'top' proxy") + leaf->top_proxy = NULL; + } /* end if */ + break; + + default: +#ifdef NDEBUG + HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + else + HDassert(NULL == leaf->top_proxy); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__cache_leaf_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5B2__cache_leaf_free_icr * * Purpose: Destroy/release an "in core representation" of a data diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c index ddff9e9..19ca89a 100644 --- a/src/H5B2dbg.c +++ b/src/H5B2dbg.c @@ -188,6 +188,7 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, { H5B2_hdr_t *hdr = NULL; /* B-tree header */ H5B2_internal_t 
*internal = NULL; /* B-tree internal node */ + H5B2_node_ptr_t node_ptr; /* Fake node pointer for protect */ unsigned u; /* Local index variable */ char temp_str[128]; /* Temporary string, for formatting */ herr_t ret_value=SUCCEED; /* Return value */ @@ -217,9 +218,10 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, /* * Load the B-tree internal node */ - H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t); H5_CHECK_OVERFLOW(depth, unsigned, uint16_t); - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC__READ_ONLY_FLAG))) + node_ptr.addr = addr; + H5_CHECKED_ASSIGN(node_ptr.node_nrec, unsigned, nrec, uint16_t) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, NULL, &node_ptr, (uint16_t)depth, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node") /* Print opening message */ @@ -298,6 +300,7 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent { H5B2_hdr_t *hdr = NULL; /* B-tree header */ H5B2_leaf_t *leaf = NULL; /* B-tree leaf node */ + H5B2_node_ptr_t node_ptr; /* Fake node pointer for protect */ unsigned u; /* Local index variable */ char temp_str[128]; /* Temporary string, for formatting */ herr_t ret_value = SUCCEED; /* Return value */ @@ -328,7 +331,9 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent * Load the B-tree leaf node */ H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t); - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC__READ_ONLY_FLAG))) + node_ptr.addr = addr; + H5_CHECKED_ASSIGN(node_ptr.node_nrec, unsigned, nrec, uint16_t) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, NULL, &node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Print opening message */ diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c index 49ffb5b..251a33d 100644 --- a/src/H5B2hdr.c +++ b/src/H5B2hdr.c @@ -133,10 +133,6 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, HDassert(cparam->split_percent > 0 && cparam->split_percent <= 100); HDassert(cparam->merge_percent < (cparam->split_percent / 2)); - /* Initialize basic information */ - hdr->rc = 0; - hdr->pending_delete = FALSE; - /* Assign dynamic information */ hdr->depth = depth; @@ -207,6 +203,13 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata, } /* end for */ } /* end if */ + /* Determine if we are doing SWMR writes. Only enable for data chunks for now. 
*/ + hdr->swmr_write = (H5F_INTENT(hdr->f) & H5F_ACC_SWMR_WRITE) > 0 + && (hdr->cls->id == H5B2_CDSET_ID || hdr->cls->id == H5B2_CDSET_FILT_ID); + + /* Reset the shadow epoch */ + hdr->shadow_epoch = 0; + /* Create the callback context, if the callback exists */ if(hdr->cls->crt_context) if(NULL == (hdr->cb_ctx = (*hdr->cls->crt_context)(ctx_udata))) @@ -285,6 +288,7 @@ H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udata) { H5B2_hdr_t *hdr = NULL; /* The new v2 B-tree header information */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ haddr_t ret_value = HADDR_UNDEF; /* Return value */ FUNC_ENTER_PACKAGE @@ -307,17 +311,40 @@ H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->hdr_size))) HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, HADDR_UNDEF, "file allocation failed for B-tree header") + /* Create 'top' proxy for extensible array entries */ + if(hdr->swmr_write) + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCREATE, HADDR_UNDEF, "can't create v2 B-tree proxy") + /* Cache the new B-tree node */ if(H5AC_insert_entry(f, dxpl_id, H5AC_BT2_HDR, hdr->addr, hdr, H5AC__NO_FLAGS_SET) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, HADDR_UNDEF, "can't add B-tree header to cache") + inserted = TRUE; + + /* Add header as child of 'top' proxy */ + if(hdr->top_proxy) + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, HADDR_UNDEF, "unable to add v2 B-tree header as child of array proxy") /* Set address of v2 B-tree header to return */ ret_value = hdr->addr; done: - if(!H5F_addr_defined(ret_value) && hdr) - if(H5B2__hdr_free(hdr) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, HADDR_UNDEF, "unable to release v2 B-tree header") + if(!H5F_addr_defined(ret_value)) + if(hdr) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(hdr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, HADDR_UNDEF, "unable to remove v2 B-tree header from cache") + + /* Release header's disk space */ + if(H5F_addr_defined(hdr->addr) && H5MF_xfree(f, H5FD_MEM_BTREE, dxpl_id, hdr->addr, (hsize_t)hdr->hdr_size) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, HADDR_UNDEF, "unable to free v2 B-tree header") + + /* Destroy header */ + if(H5B2__hdr_free(hdr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTRELEASE, HADDR_UNDEF, "unable to release v2 B-tree header") + } /* end if */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__hdr_create() */ @@ -503,6 +530,7 @@ H5B2__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t hdr_addr, void *ctx_udata, unsigned flags) { H5B2_hdr_cache_ud_t udata; /* User data for cache callbacks */ + H5B2_hdr_t *hdr = NULL; /* v2 B-tree header */ H5B2_hdr_t *ret_value = NULL; /* Return value */ FUNC_ENTER_PACKAGE @@ -520,11 +548,32 @@ H5B2__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t hdr_addr, void *ctx_udata, udata.ctx_udata = ctx_udata; /* Protect the header */ - if(NULL == (ret_value = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &udata, flags))) + if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &udata, flags))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load v2 B-tree header, address = %llu", (unsigned long long)hdr_addr) - ret_value->f = f; /* (Must be set again here, in case the header was already in the cache -QAK) */ + hdr->f = f; /* (Must be set again here, in case the header was 
already in the cache -QAK) */ + + /* Create top proxy, if it doesn't exist */ + if(hdr->swmr_write && NULL == hdr->top_proxy) { + /* Create 'top' proxy for v2 B-tree entries */ + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCREATE, NULL, "can't create v2 B-tree proxy") + + /* Add header as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, NULL, "unable to add v2 B-tree header as child of proxy") + } /* end if */ + + /* Set return value */ + ret_value = hdr; done: + /* Clean up on error */ + if(!ret_value) { + /* Release the header, if it was protected */ + if(hdr && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_HDR, hdr_addr, hdr, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to unprotect v2 B-tree header, address = %llu", (unsigned long long)hdr_addr) + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* end H5B2__hdr_protect() */ @@ -623,6 +672,13 @@ H5B2__hdr_free(H5B2_hdr_t *hdr) if(hdr->max_native_rec) hdr->max_native_rec = H5MM_xfree(hdr->max_native_rec); + /* Destroy the 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_dest(hdr->top_proxy) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTRELEASE, FAIL, "unable to destroy v2 B-tree 'top' proxy") + hdr->top_proxy = NULL; + } /* end if */ + /* Free B-tree header info */ hdr = H5FL_FREE(H5B2_hdr_t, hdr); @@ -671,7 +727,7 @@ H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id) /* Delete all nodes in B-tree */ if(H5F_addr_defined(hdr->root.addr)) - if(H5B2__delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr->remove_op, hdr->remove_op_data) < 0) + if(H5B2__delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, hdr->remove_op, hdr->remove_op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete B-tree nodes") /* Indicate that the heap header should be deleted & file space freed */ diff --git a/src/H5B2int.c b/src/H5B2int.c index a8fa59f..47100fd 100644 --- a/src/H5B2int.c +++ b/src/H5B2int.c @@ -37,16 +37,13 @@ #include "H5private.h" /* Generic Functions */ #include "H5B2pkg.h" /* v2 B-trees */ #include "H5Eprivate.h" /* Error handling */ -#include "H5MFprivate.h" /* File memory management */ -#include "H5MMprivate.h" /* Memory management */ #include "H5VMprivate.h" /* Vectors and arrays */ + /****************/ /* Local Macros */ /****************/ -/* Uncomment this macro to enable extra sanity checking */ -/* #define H5B2_DEBUG */ /******************/ /* Local Typedefs */ @@ -61,44 +58,15 @@ /********************/ /* Local Prototypes */ /********************/ +static herr_t H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, hid_t dxpl_id, + unsigned depth, const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, + unsigned end_idx, void *old_parent, void *new_parent); -/* Helper functions */ -static herr_t H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, - H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); -static herr_t H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_internal_t *internal, unsigned idx); -static herr_t H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); -static herr_t H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, - H5B2_internal_t *internal, 
unsigned *internal_flags_ptr, unsigned idx); -static herr_t H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, - H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); -static herr_t H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx, - void *swap_loc); -static herr_t H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *node_ptr, uint16_t depth); -#ifdef H5B2_DEBUG -/* Don't label these with H5_ATTR_PURE or you'll get even more warnings... */ -static herr_t H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf); -static herr_t H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t *leaf2); -static herr_t H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal); -static herr_t H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2); -#endif /* H5B2_DEBUG */ /*********************/ /* Package Variables */ /*********************/ -/* Declare a free list to manage the H5B2_internal_t struct */ -H5FL_DEFINE(H5B2_internal_t); - -/* Declare a free list to manage the H5B2_leaf_t struct */ -H5FL_DEFINE(H5B2_leaf_t); - /*****************************/ /* Library Private Variables */ @@ -137,7 +105,7 @@ H5FL_SEQ_EXTERN(H5B2_node_info_t); */ herr_t H5B2__locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off, - const uint8_t *native, const void *udata, unsigned *idx, int *cmp) + const uint8_t *native, const void *udata, unsigned *idx, int *cmp) { unsigned lo = 0, hi; /* Low & high index values */ unsigned my_idx = 0; /* Final index value */ @@ -179,7 +147,7 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +herr_t H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx) @@ -195,7 +163,7 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_PACKAGE /* Check arguments. 
*/ HDassert(hdr); @@ -214,19 +182,20 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Create new internal node */ internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0; - if(H5B2__create_internal(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), (uint16_t)(depth - 1)) < 0) + if(H5B2__create_internal(hdr, dxpl_id, internal, &(internal->node_ptrs[idx + 1]), (uint16_t)(depth - 1)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node") /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_INT; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Protect both leaves */ - if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + /* (Shadow left node if doing SWMR writes) */ + if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], (uint16_t)(depth - 1), FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_int; @@ -243,19 +212,20 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Create new leaf node */ internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0; - if(H5B2__create_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1])) < 0) + if(H5B2__create_leaf(hdr, dxpl_id, internal, &(internal->node_ptrs[idx + 1])) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new leaf node") /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_LEAF; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Protect both leaves */ - if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) + /* (Shadow the left node if doing SWMR writes) */ + if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_leaf; @@ -295,7 +265,7 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Determine total number of records in new child nodes */ if(depth > 1) { - unsigned u; /* Local index variable */ + unsigned u; /* Local index variable */ hsize_t new_left_all_nrec; /* New total number of records in left 
child */ hsize_t new_right_all_nrec; /* New total number of records in right child */ @@ -329,6 +299,12 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, if(parent_cache_info_flags_ptr) *parent_cache_info_flags_ptr |= H5AC__DIRTIED_FLAG; + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, right_node_ptrs, + 0, (unsigned)(*right_nrec + 1), left_child, right_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + #ifdef H5B2_DEBUG H5B2__assert_internal((hsize_t)0, hdr, internal); if(depth > 1) { @@ -407,11 +383,11 @@ H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id) /* Create new internal node to use as root */ hdr->root.node_nrec = 0; - if(H5B2__create_internal(hdr, dxpl_id, &(hdr->root), hdr->depth) < 0) + if(H5B2__create_internal(hdr, dxpl_id, hdr, &(hdr->root), hdr->depth) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node") /* Protect new root node */ - if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC__NO_FLAGS_SET))) + if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr, &hdr->root, hdr->depth, FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set first node pointer in root node to old root node pointer info */ @@ -444,7 +420,7 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +herr_t H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_internal_t *internal, unsigned idx) { @@ -458,7 +434,7 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_PACKAGE /* Check arguments. 
*/ HDassert(hdr); @@ -471,14 +447,15 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_INT; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + /* (Shadow both nodes if doing SWMR writes) */ + if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_internal; @@ -496,14 +473,15 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_LEAF; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) + /* (Shadow both nodes if doing SWMR writes) */ + if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_leaf; @@ -549,7 +527,7 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Handle node pointers, if we have an internal node */ if(depth > 1) { hsize_t moved_nrec = move_nrec; /* Total number of records moved, for internal redistrib */ - unsigned u; /* Local index variable */ + unsigned u; /* Local index variable */ /* Count the number of records being moved */ for(u = 0; u < move_nrec; u++) @@ -564,6 +542,12 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[move_nrec]), sizeof(H5B2_node_ptr_t) * (new_right_nrec + (unsigned)1)); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, left_node_ptrs, + (unsigned)(*left_nrec + 1), (unsigned)(*left_nrec + move_nrec + 1), right_child, left_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update 
child nodes to new parent") + /* Update number of records in child nodes */ *left_nrec = (uint16_t)(*left_nrec + move_nrec); *right_nrec = new_right_nrec; @@ -599,7 +583,7 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Handle node pointers, if we have an internal node */ if(depth > 1) { hsize_t moved_nrec = move_nrec; /* Total number of records moved, for internal redistrib */ - unsigned u; /* Local index variable */ + unsigned u; /* Local index variable */ /* Slide node pointers in right node up */ HDmemmove(&(right_node_ptrs[move_nrec]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); @@ -614,6 +598,12 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t) } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, right_node_ptrs, + 0, (unsigned)move_nrec, left_child, right_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update number of records in child nodes */ *left_nrec = new_left_nrec; *right_nrec = (uint16_t)(*right_nrec + move_nrec); @@ -674,7 +664,7 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +herr_t H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx) { @@ -695,7 +685,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_PACKAGE /* Check arguments. 
*/ HDassert(hdr); @@ -710,17 +700,18 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_INT; - left_addr = internal->node_ptrs[idx - 1].addr; - middle_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + /* (Shadow all nodes if doing SWMR writes) */ + if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx - 1], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx - 1].addr; + if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + middle_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_internal; @@ -743,17 +734,18 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_LEAF; - left_addr = internal->node_ptrs[idx - 1].addr; - middle_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET))) + /* (Shadow all nodes if doing SWMR writes) */ + if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx - 1], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx - 1].addr; + if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET))) + middle_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for child nodes */ left_child = left_leaf; @@ -802,7 
+794,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Move node pointers also if this is an internal node */ if(depth > 1) { - hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ + hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ unsigned move_nptrs; /* Number of node pointers to move */ unsigned u; /* Local index variable */ @@ -820,6 +812,12 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[move_nptrs]), sizeof(H5B2_node_ptr_t) * ((*middle_nrec - move_nptrs) + 1)); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, left_node_ptrs, + (unsigned)(*left_nrec + 1), (unsigned)(*left_nrec + moved_middle_nrec + 1), middle_child, left_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec - moved_middle_nrec); @@ -847,7 +845,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Move node pointers also if this is an internal node */ if(depth > 1) { - hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ + hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ unsigned u; /* Local index variable */ /* Slide the node pointers in right node up */ @@ -863,6 +861,12 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, middle_moved_nrec -= (hssize_t)(moved_nrec + right_nrec_move); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, right_node_ptrs, + 0, (unsigned)right_nrec_move, middle_child, right_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec - right_nrec_move); @@ -890,7 +894,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Move node pointers also if this is an internal node */ if(depth > 1) { - hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ + hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ unsigned u; /* Local index variable */ /* Slide the node pointers in middle node up */ @@ -906,6 +910,12 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, middle_moved_nrec += (hssize_t)(moved_nrec + left_nrec_move); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, middle_node_ptrs, + 0, (unsigned)left_nrec_move, left_child, middle_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update the current number of records in middle node */ curr_middle_nrec = (uint16_t)(curr_middle_nrec + left_nrec_move); @@ -932,7 +942,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Move node pointers also if this is an internal node */ if(depth > 1) { - hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */ + hsize_t moved_nrec; /* Total number of records moved, for internal 
redistrib */ unsigned u; /* Local index variable */ /* Move right node pointers into middle node */ @@ -948,6 +958,12 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[right_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)(new_right_nrec + 1)); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, middle_node_ptrs, + (unsigned)(curr_middle_nrec + 1), (unsigned)(curr_middle_nrec + right_nrec_move + 1), right_child, middle_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Mark nodes as dirty */ middle_child_flags |= H5AC__DIRTIED_FLAG; right_child_flags |= H5AC__DIRTIED_FLAG; @@ -979,46 +995,6 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Mark parent as dirty */ *internal_flags_ptr |= H5AC__DIRTIED_FLAG; -#ifdef QAK -{ - unsigned u; - - HDfprintf(stderr, "%s: Internal records:\n", FUNC); - for(u = 0; u < internal->nrec; u++) { - HDfprintf(stderr, "%s: u = %u\n", FUNC, u); - (hdr->cls->debug)(stderr, hdr->f, dxpl_id, 3, 4, H5B2_INT_NREC(internal, hdr, u), NULL); - } /* end for */ - - HDfprintf(stderr, "%s: Left Child records:\n", FUNC); - for(u = 0; u < *left_nrec; u++) { - HDfprintf(stderr, "%s: u = %u\n", FUNC, u); - (hdr->cls->debug)(stderr, hdr->f, dxpl_id, 3, 4, H5B2_NAT_NREC(left_native, hdr, u), NULL); - } /* end for */ - - HDfprintf(stderr, "%s: Middle Child records:\n", FUNC); - for(u = 0; u < *middle_nrec; u++) { - HDfprintf(stderr, "%s: u = %u\n", FUNC, u); - (hdr->cls->debug)(stderr, hdr->f, dxpl_id, 3, 4, H5B2_NAT_NREC(middle_native, hdr, u), NULL); - } /* end for */ - - HDfprintf(stderr, "%s: Right Child records:\n", FUNC); - for(u = 0; u < *right_nrec; u++) { - HDfprintf(stderr, "%s: u = %u\n", FUNC, u); - (hdr->cls->debug)(stderr, hdr->f, dxpl_id, 3, 4, H5B2_NAT_NREC(right_native, hdr, u), NULL); - } /* end for */ - - for(u = 0; u < internal->nrec + 1; u++) - HDfprintf(stderr, "%s: internal->node_ptrs[%u] = (%Hu/%u/%a)\n", FUNC, u, internal->node_ptrs[u].all_nrec, internal->node_ptrs[u].node_nrec, internal->node_ptrs[u].addr); - if(depth > 1) { - for(u = 0; u < *left_nrec + 1; u++) - HDfprintf(stderr, "%s: left_node_ptr[%u] = (%Hu/%u/%a)\n", FUNC, u, left_node_ptrs[u].all_nrec, left_node_ptrs[u].node_nrec, left_node_ptrs[u].addr); - for(u = 0; u < *middle_nrec + 1; u++) - HDfprintf(stderr, "%s: middle_node_ptr[%u] = (%Hu/%u/%a)\n", FUNC, u, middle_node_ptrs[u].all_nrec, middle_node_ptrs[u].node_nrec, middle_node_ptrs[u].addr); - for(u = 0; u < *right_nrec + 1; u++) - HDfprintf(stderr, "%s: right_node_ptr[%u] = (%Hu/%u/%a)\n", FUNC, u, right_node_ptrs[u].all_nrec, right_node_ptrs[u].node_nrec, right_node_ptrs[u].addr); - } /* end if */ -} -#endif /* QAK */ #ifdef H5B2_DEBUG H5B2__assert_internal((hsize_t)0, hdr, internal); if(depth > 1) { @@ -1062,7 +1038,7 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +herr_t H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx) @@ -1076,7 +1052,7 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; 
/* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_PACKAGE /* Check arguments. */ HDassert(hdr); @@ -1091,14 +1067,15 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_INT; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + /* (Shadow the left node if doing SWMR writes) */ + if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], (uint16_t)(depth - 1), FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for accessing child node information */ left_child = left_internal; @@ -1116,14 +1093,15 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_LEAF; - left_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock left & right B-tree child nodes */ - if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) + /* (Shadow the left node if doing SWMR writes) */ + if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for accessing child node information */ left_child = left_leaf; @@ -1146,12 +1124,20 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, if(depth > 1) HDmemcpy(&(left_node_ptrs[*left_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, left_node_ptrs, + (unsigned)(*left_nrec + 1), (unsigned)(*left_nrec + *right_nrec + 2), right_child, left_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update # of records in left node */ *left_nrec = (uint16_t)(*left_nrec + *right_nrec + 1); /* Mark nodes as dirty */ left_child_flags |= H5AC__DIRTIED_FLAG; - right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; + right_child_flags |= 
H5AC__DELETED_FLAG; + if(!(hdr->swmr_write)) + right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; } /* end block */ /* Update # of records in child nodes */ @@ -1215,7 +1201,7 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +herr_t H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx) @@ -1236,7 +1222,7 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_PACKAGE /* Check arguments. */ HDassert(hdr); @@ -1252,17 +1238,18 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_INT; - left_addr = internal->node_ptrs[idx - 1].addr; - middle_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + /* (Shadow left and middle nodes if doing SWMR writes) */ + if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx - 1], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx - 1].addr; + if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) + middle_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], (uint16_t)(depth - 1), FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for accessing child node information */ left_child = left_internal; @@ -1285,17 +1272,18 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Setup information for unlocking child nodes */ child_class = H5AC_BT2_LEAF; - left_addr = internal->node_ptrs[idx - 1].addr; - middle_addr = internal->node_ptrs[idx].addr; - right_addr = internal->node_ptrs[idx + 1].addr; /* Lock B-tree child nodes */ - if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET))) + /* (Shadow left and middle nodes if doing SWMR writes) */ + if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx - 1], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, 
internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) + left_addr = internal->node_ptrs[idx - 1].addr; + if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], hdr->swmr_write, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET))) + middle_addr = internal->node_ptrs[idx].addr; + if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx + 1], FALSE, H5AC__NO_FLAGS_SET))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + right_addr = internal->node_ptrs[idx + 1].addr; /* More setup for accessing child node information */ left_child = left_leaf; @@ -1344,6 +1332,12 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[middle_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)((unsigned)(*middle_nrec + 1) - middle_nrec_move)); } /* end if */ + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, left_node_ptrs, + (unsigned)(*left_nrec + 1), (unsigned)(*left_nrec + middle_nrec_move + 1), middle_child, left_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update # of records in left & middle nodes */ *left_nrec = (uint16_t)(*left_nrec + middle_nrec_move); *middle_nrec = (uint16_t)(*middle_nrec - middle_nrec_move); @@ -1366,12 +1360,20 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Copy node pointers from right node into middle node */ HDmemcpy(&(middle_node_ptrs[*middle_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1)); + /* Update flush dependencies for grandchildren, if using SWMR */ + if(hdr->swmr_write && depth > 1) + if(H5B2__update_child_flush_depends(hdr, dxpl_id, depth, middle_node_ptrs, + (unsigned)(*middle_nrec + 1), (unsigned)(*middle_nrec + *right_nrec + 2), right_child, middle_child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child nodes to new parent") + /* Update # of records in middle node */ *middle_nrec = (uint16_t)(*middle_nrec + (*right_nrec + 1)); /* Mark nodes as dirty */ middle_child_flags |= H5AC__DIRTIED_FLAG; - right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; + right_child_flags |= H5AC__DELETED_FLAG; + if(!(hdr->swmr_write)) + right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; } /* end block */ /* Update # of records in child nodes */ @@ -1429,98 +1431,7 @@ done: /*------------------------------------------------------------------------- - * Function: H5B2__swap_leaf - * - * Purpose: Swap a record in a node with a record in a leaf node - * - * Return: Success: Non-negative - * - * Failure: Negative - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 4 2005 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_internal_t *internal, unsigned *internal_flags_ptr, - unsigned idx, void *swap_loc) -{ - const H5AC_class_t *child_class; /* Pointer to child node's class info */ - haddr_t child_addr; /* Address of child node */ - void *child = NULL; /* Pointer to child node */ - 
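Both merge paths now mark the emptied right sibling with only H5AC__DELETED_FLAG when hdr->swmr_write is set, and add H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG only in the non-SWMR case, presumably so the node's file space is not reclaimed while a concurrent reader may still be walking the old version of the tree. A small sketch of that flag selection, using illustrative flag values rather than the real H5AC constants:

/* Illustrative flag values; the real code uses the H5AC__*_FLAG constants. */
#include <stdbool.h>

enum {
    CF_DIRTIED         = 1u << 0,
    CF_DELETED         = 1u << 1,
    CF_FREE_FILE_SPACE = 1u << 2
};

/* Unprotect flags for the right sibling after its records have been merged
 * into the left/middle node: the cache entry is always deleted, but the
 * node is only dirtied and its file space released when SWMR writes are
 * not active. */
static unsigned merged_right_sibling_flags(bool swmr_write)
{
    unsigned flags = CF_DELETED;

    if (!swmr_write)
        flags |= CF_DIRTIED | CF_FREE_FILE_SPACE;
    return flags;
}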
uint8_t *child_native; /* Pointer to child's native records */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_STATIC - - /* Check arguments. */ - HDassert(hdr); - HDassert(internal); - HDassert(internal_flags_ptr); - HDassert(idx <= internal->nrec); - - /* Check for the kind of B-tree node to swap */ - if(depth > 1) { - H5B2_internal_t *child_internal; /* Pointer to internal node */ - - /* Setup information for unlocking child node */ - child_class = H5AC_BT2_INT; - child_addr = internal->node_ptrs[idx].addr; - - /* Lock B-tree child nodes */ - if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - - /* More setup for accessing child node information */ - child = child_internal; - child_native = child_internal->int_native; - } /* end if */ - else { - H5B2_leaf_t *child_leaf; /* Pointer to leaf node */ - - /* Setup information for unlocking child nodes */ - child_class = H5AC_BT2_LEAF; - child_addr = internal->node_ptrs[idx].addr; - - /* Lock B-tree child node */ - if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* More setup for accessing child node information */ - child = child_leaf; - child_native = child_leaf->leaf_native; - } /* end else */ - - /* Swap records (use disk page as temporary buffer) */ - HDmemcpy(hdr->page, H5B2_NAT_NREC(child_native, hdr, 0), hdr->cls->nrec_size); - HDmemcpy(H5B2_NAT_NREC(child_native, hdr, 0), swap_loc, hdr->cls->nrec_size); - HDmemcpy(swap_loc, hdr->page, hdr->cls->nrec_size); - - /* Mark parent as dirty */ - *internal_flags_ptr |= H5AC__DIRTIED_FLAG; - -#ifdef H5B2_DEBUG - H5B2__assert_internal((hsize_t)0, hdr, internal); - if(depth > 1) - H5B2__assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)child); - else - H5B2__assert_leaf(hdr, (H5B2_leaf_t *)child); -#endif /* H5B2_DEBUG */ - -done: - /* Unlock child node */ - if(child && H5AC_unprotect(hdr->f, dxpl_id, child_class, child_addr, child, H5AC__DIRTIED_FLAG) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5B2__swap_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__insert_hdr + * Function: H5B2__insert * * Purpose: Adds a new record to the B-tree. * @@ -1533,7 +1444,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5B2__insert_hdr(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata) +H5B2__insert(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata) { herr_t ret_value = SUCCEED; /* Return value */ @@ -1546,7 +1457,7 @@ H5B2__insert_hdr(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata) /* Check if the root node is allocated yet */ if(!H5F_addr_defined(hdr->root.addr)) { /* Create root node as leaf node in B-tree */ - if(H5B2__create_leaf(hdr, dxpl_id, &(hdr->root)) < 0) + if(H5B2__create_leaf(hdr, dxpl_id, hdr, &(hdr->root)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create root node") } /* end if */ /* Check if we need to split the root node (equiv. 
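The H5B2__swap_leaf() routine removed here exchanges a record in an internal node with the first record of its child, using the header's disk page as a temporary buffer. A minimal sketch of that three-copy swap, with plain pointers and a caller-supplied scratch buffer standing in for the H5B2_NAT_NREC() accessors and hdr->page:

/* Sketch of the record swap; 'scratch' plays the role of hdr->page. */
#include <string.h>

static void swap_records(void *child_rec, void *parent_rec,
                         void *scratch, size_t rec_size)
{
    memcpy(scratch, child_rec, rec_size);      /* save the child's record */
    memcpy(child_rec, parent_rec, rec_size);   /* move the parent's record down */
    memcpy(parent_rec, scratch, rec_size);     /* move the saved record up */
}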
to a 1->2 node split) */ @@ -1558,11 +1469,11 @@ H5B2__insert_hdr(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata) /* Attempt to insert record into B-tree */ if(hdr->depth > 0) { - if(H5B2__insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, udata) < 0) + if(H5B2__insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, hdr, udata) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node") } /* end if */ else { - if(H5B2__insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata) < 0) + if(H5B2__insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, udata) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node") } /* end else */ @@ -1572,834 +1483,7 @@ H5B2__insert_hdr(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata) done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__insert_hdr() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__insert_leaf - * - * Purpose: Adds a new record to a B-tree leaf node. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 3 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, - H5B2_nodepos_t curr_pos, void *udata) -{ - H5B2_leaf_t *leaf; /* Pointer to leaf node */ - int cmp; /* Comparison value of records */ - unsigned idx; /* Location of record which matches key */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* Must have a leaf node with enough space to insert a record now */ - HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec); - - /* Sanity check number of records */ - HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); - HDassert(leaf->nrec == curr_node_ptr->node_nrec); - - /* Check for inserting into empty leaf */ - if(leaf->nrec == 0) - idx = 0; - else { - /* Find correct location to insert this record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp == 0) - HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") - if(cmp > 0) - idx++; - - /* Make room for new record */ - if(idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size * (leaf->nrec - idx)); - } /* end else */ - - /* Make callback to store record in native form */ - if((hdr->cls->store)(H5B2_LEAF_NREC(leaf, hdr, idx), udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into leaf node") - - /* Update record count for node pointer to current node */ - curr_node_ptr->all_nrec++; - curr_node_ptr->node_nrec++; - - /* Update record count for current node */ - leaf->nrec++; - - /* Check for new record being the min or max for the tree */ - /* (Don't use 'else' for the idx check, to allow for root leaf node) */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - 
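For reference, the leaf insertion being removed in this hunk locates the target index with the client's compare callback, rejects exact duplicates, shifts the tail of the record array up one slot with HDmemmove(), and then lets the client's store callback serialize the new record into the gap. A simplified, self-contained version of that insert-into-sorted-array step; fixed-size records and raw memcmp() ordering in place of the callbacks are assumptions of this sketch.

/* Simplified leaf insert: fixed-size records kept sorted by memcmp().
 * The real code compares and stores through the client's callbacks. */
#include <stdbool.h>
#include <string.h>

static bool insert_sorted(unsigned char *recs, size_t *nrec, size_t rec_size,
                          const void *new_rec)
{
    size_t lo = 0, hi = *nrec;

    /* Binary search for the insertion point */
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        int cmp = memcmp(recs + mid * rec_size, new_rec, rec_size);

        if (cmp == 0)
            return false;                 /* record is already in the node */
        if (cmp < 0)
            lo = mid + 1;
        else
            hi = mid;
    }

    /* Shift the tail up one slot, then store the new record in the gap */
    memmove(recs + (lo + 1) * rec_size, recs + lo * rec_size,
            (*nrec - lo) * rec_size);
    memcpy(recs + lo * rec_size, new_rec, rec_size);
    (*nrec)++;

    return true;
}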
if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->min_native_rec == NULL) - if(NULL == (hdr->min_native_rec = H5MM_malloc(hdr->cls->nrec_size))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree min record info") - HDmemcpy(hdr->min_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); - } /* end if */ - } /* end if */ - if(idx == (unsigned)(leaf->nrec - 1)) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->max_native_rec == NULL) - if(NULL == (hdr->max_native_rec = H5MM_malloc(hdr->cls->nrec_size))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree max record info") - HDmemcpy(hdr->max_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); - } /* end if */ - } /* end if */ - } /* end if */ - -done: - /* Release the B-tree leaf node (marked as dirty) */ - if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, H5AC__DIRTIED_FLAG) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__insert_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__insert_internal - * - * Purpose: Adds a new record to a B-tree node. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 2 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, - H5B2_nodepos_t curr_pos, void *udata) -{ - H5B2_internal_t *internal = NULL; /* Pointer to internal node */ - unsigned internal_flags = H5AC__NO_FLAGS_SET; - unsigned idx; /* Location of record which matches key */ - H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of node */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(depth > 0); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - - /* Sanity check number of records */ - HDassert(internal->nrec == curr_node_ptr->node_nrec); - - /* Split or redistribute child node pointers, if necessary */ - { - int cmp; /* Comparison value of records */ - unsigned retries; /* Number of times to attempt redistribution */ - size_t split_nrec; /* Number of records to split node at */ - - /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp == 0) - HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") - if(cmp > 0) - idx++; - - /* Set the number of redistribution retries */ - /* This takes care of the case where a B-tree node needs to be - * redistributed, but redistributing the node causes the index - * for insertion to move to another node, which also needs to be - * redistributed. 
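The comment above spells out the retry policy used before descending into a full child: first try to redistribute records with a neighbor, but because redistribution can shift the insertion index into another full node, only a bounded number of retries (two) are attempted before forcing a split. A toy model of that control flow is sketched below; the record counts, threshold, and helpers are hypothetical simplifications of the real redistribute/split routines, and at least two children are assumed.

/* Toy model of the pre-emptive split/redistribute loop: nrec[i] is the
 * record count of child i, SPLIT_NREC the split threshold.  The helpers
 * only adjust counts; the real routines move records and node pointers.
 * Assumes at least two children. */
#define SPLIT_NREC 16u

static void redistribute(unsigned *nrec, unsigned idx, unsigned nbr)
{
    nrec[idx]--;          /* shift one record toward the emptier neighbor */
    nrec[nbr]++;
}

static void split(unsigned *nrec, unsigned idx)
{
    nrec[idx] /= 2;       /* the other half goes to a newly created sibling */
}

static void make_room_for_descent(unsigned *nrec, unsigned nchildren, unsigned idx)
{
    unsigned retries = 2;     /* bounded number of redistribution attempts */

    while (nrec[idx] == SPLIT_NREC) {
        unsigned nbr = (idx + 1 < nchildren) ? idx + 1 : idx - 1;

        if (retries > 0 && nrec[nbr] < SPLIT_NREC)
            redistribute(nrec, idx, nbr);
        else
            split(nrec, idx);

        /* The real code re-locates the insertion index here, since the
         * redistribution may have shifted it into a different child. */
        retries--;
    }
}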
Now, we loop trying to redistribute and then - * eventually force a split */ - retries = 2; - - /* Determine the correct number of records to split child node at */ - split_nrec = hdr->node_info[depth - 1].split_nrec; - - /* Preemptively split/redistribute a node we will enter */ - while(internal->node_ptrs[idx].node_nrec == split_nrec) { - /* Attempt to redistribute records among children */ - if(idx == 0) { /* Left-most child */ - if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec < split_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") - } /* end else */ - } /* end if */ - else if(idx == internal->nrec) { /* Right-most child */ - if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec < split_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") - } /* end else */ - } /* end if */ - else { /* Middle child */ - if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec < split_nrec) || - (internal->node_ptrs[idx - 1].node_nrec < split_nrec))) { - if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") - } /* end else */ - } /* end else */ - - /* Locate node pointer for child (after split/redistribute) */ - /* Actually, this can be easily updated (for 2-node redistrib.) 
and shouldn't require re-searching */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp == 0) - HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") - if(cmp > 0) - idx++; - - /* Decrement the number of redistribution retries left */ - retries--; - } /* end while */ - } /* end block */ - - /* Check if this node is left/right-most */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_LEFT; - } /* end if */ - else if(idx == internal->nrec) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_RIGHT; - } /* end else */ - } /* end if */ - - /* Attempt to insert node */ - if(depth > 1) { - if(H5B2__insert_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node") - } /* end if */ - else { - if(H5B2__insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node") - } /* end else */ - - /* Update record count for node pointer to current node */ - curr_node_ptr->all_nrec++; - - /* Mark node as dirty */ - internal_flags |= H5AC__DIRTIED_FLAG; - -done: - /* Release the B-tree internal node */ - if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__insert_internal() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__update_leaf - * - * Purpose: Insert or modify a record in a B-tree leaf node. - * If the record exists already, it is modified as if H5B2_modify - * was called). If it doesn't exist, it is inserted as if - * H5B2_insert was called. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * Dec 23 2015 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__update_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, - H5B2_update_status_t *status, H5B2_nodepos_t curr_pos, void *udata, - H5B2_modify_t op, void *op_data) -{ - H5B2_leaf_t *leaf; /* Pointer to leaf node */ - unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting the leaf node */ - int cmp = -1; /* Comparison value of records */ - unsigned idx; /* Location of record which matches key */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* Sanity check number of records */ - HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); - HDassert(leaf->nrec == curr_node_ptr->node_nrec); - - /* Check for inserting into empty leaf */ - if(leaf->nrec == 0) - idx = 0; - else { - /* Find correct location to insert this record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - - /* Check for inserting a record */ - if(0 != cmp) { - /* Check if the leaf node is full */ - if(curr_node_ptr->node_nrec == hdr->node_info[0].split_nrec) { - /* Indicate that the leaf is full, but we need to insert */ - *status = H5B2_UPDATE_INSERT_CHILD_FULL; - - /* Let calling routine handle insertion */ - HGOTO_DONE(SUCCEED) - } /* end if */ - - /* Adjust index to leave room for record to insert */ - if(cmp > 0) - idx++; - - /* Make room for new record */ - if(idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size * (leaf->nrec - idx)); - } /* end if */ - } /* end else */ - - /* Check for modifying existing record */ - if(0 == cmp) { - hbool_t changed = FALSE; /* Whether the 'modify' callback changed the record */ - - /* Make callback for current record */ - if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data, &changed) < 0) { - /* Make certain that the callback didn't modify the value if it failed */ - HDassert(changed == FALSE); - - HGOTO_ERROR(H5E_BTREE, H5E_CANTMODIFY, FAIL, "'modify' callback failed for B-tree update operation") - } /* end if */ - - /* Mark the node as dirty if it changed */ - leaf_flags |= (changed ? 
H5AC__DIRTIED_FLAG : 0); - - /* Indicate that the record was modified */ - *status = H5B2_UPDATE_MODIFY_DONE; - } /* end if */ - else { - /* Must have a leaf node with enough space to insert a record now */ - HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec); - - /* Make callback to store record in native form */ - if((hdr->cls->store)(H5B2_LEAF_NREC(leaf, hdr, idx), udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into leaf node") - - /* Mark the node as dirty */ - leaf_flags |= H5AC__DIRTIED_FLAG; - - /* Indicate that the record was inserted */ - *status = H5B2_UPDATE_INSERT_DONE; - - /* Update record count for node pointer to current node */ - curr_node_ptr->all_nrec++; - curr_node_ptr->node_nrec++; - - /* Update record count for current node */ - leaf->nrec++; - } /* end else */ - - /* Check for new record being the min or max for the tree */ - /* (Don't use 'else' for the idx check, to allow for root leaf node) */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->min_native_rec == NULL) - if(NULL == (hdr->min_native_rec = H5MM_malloc(hdr->cls->nrec_size))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree min record info") - HDmemcpy(hdr->min_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); - } /* end if */ - } /* end if */ - if(idx == (unsigned)(leaf->nrec - 1)) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->max_native_rec == NULL) - if(NULL == (hdr->max_native_rec = H5MM_malloc(hdr->cls->nrec_size))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree max record info") - HDmemcpy(hdr->max_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); - } /* end if */ - } /* end if */ - } /* end if */ - -done: - /* Release the B-tree leaf node */ - if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__update_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__update_internal - * - * Purpose: Insert or modify a record in a B-tree leaf node. - * If the record exists already, it is modified as if H5B2_modify - * was called). If it doesn't exist, it is inserted as if - * H5B2_insert was called. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * Dec 24 2015 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__update_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, - H5B2_update_status_t *status, H5B2_nodepos_t curr_pos, void *udata, - H5B2_modify_t op, void *op_data) -{ - H5B2_internal_t *internal = NULL; /* Pointer to internal node */ - unsigned internal_flags = H5AC__NO_FLAGS_SET; - int cmp; /* Comparison value of records */ - unsigned idx; /* Location of record which matches key */ - H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of node */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(depth > 0); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - - /* Sanity check number of records */ - HDassert(internal->nrec == curr_node_ptr->node_nrec); - - /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - - /* Check for modifying existing record */ - if(0 == cmp) { - hbool_t changed = FALSE; /* Whether the 'modify' callback changed the record */ - - /* Make callback for current record */ - if((op)(H5B2_INT_NREC(internal, hdr, idx), op_data, &changed) < 0) { - /* Make certain that the callback didn't modify the value if it failed */ - HDassert(changed == FALSE); - - HGOTO_ERROR(H5E_BTREE, H5E_CANTMODIFY, FAIL, "'modify' callback failed for B-tree update operation") - } /* end if */ - - /* Mark the node as dirty if it changed */ - internal_flags |= (changed ? H5AC__DIRTIED_FLAG : 0); - - /* Indicate that the record was modified */ - *status = H5B2_UPDATE_MODIFY_DONE; - } /* end if */ - else { - /* Adjust index to leave room for node to insert */ - if(cmp > 0) - idx++; - - /* Check if this node is left/right-most */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_LEFT; - } /* end if */ - else if(idx == internal->nrec) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_RIGHT; - } /* end else */ - } /* end if */ - - /* Attempt to update record in child */ - if(depth > 1) { - if(H5B2__update_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], status, next_pos, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in internal B-tree node") - } /* end if */ - else { - if(H5B2__update_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], status, next_pos, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in leaf B-tree node") - } /* end else */ - - /* Take actions based on child's status report */ - switch(*status) { - case H5B2_UPDATE_MODIFY_DONE: - /* No action */ - break; - - case H5B2_UPDATE_INSERT_DONE: - /* Mark node as dirty */ - internal_flags |= H5AC__DIRTIED_FLAG; - - /* Update total record count for node pointer to current node */ - curr_node_ptr->all_nrec++; - break; - - case H5B2_UPDATE_INSERT_CHILD_FULL: - /* Split/redistribute this node */ - if(internal->nrec == hdr->node_info[depth].split_nrec) { - hbool_t could_split = FALSE; /* Whether the child node could split */ - -#ifdef QAK -HDfprintf(stderr, "%s: idx = %u, internal->nrec = %u\n", FUNC, idx, internal->nrec); -HDfprintf(stderr, "%s: hdr->node_info[%u].split_nrec = %u\n", FUNC, (unsigned)depth, (unsigned)hdr->node_info[depth].split_nrec); -HDfprintf(stderr, "%s: hdr->node_info[%u].split_nrec = %u\n", FUNC, (unsigned)(depth - 1), (unsigned)hdr->node_info[depth - 1].split_nrec); -#endif /* QAK */ - if(idx == 0) { /* Left-most child */ -#ifdef QAK -HDfprintf(stderr, "%s: Left-most child\n", FUNC); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)idx, 
(unsigned)internal->node_ptrs[idx].node_nrec); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)(idx + 1), (unsigned)internal->node_ptrs[idx + 1].node_nrec); -#endif /* QAK */ - /* Check for left-most child and its neighbor being close to full */ - if((internal->node_ptrs[idx].node_nrec + internal->node_ptrs[idx + 1].node_nrec) - >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) - could_split = TRUE; - } /* end if */ - else if(idx == internal->nrec) { /* Right-most child */ -#ifdef QAK -HDfprintf(stderr, "%s: Right-most child\n", FUNC); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)(idx - 1), (unsigned)internal->node_ptrs[idx - 1].node_nrec); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)idx, (unsigned)internal->node_ptrs[idx].node_nrec); -#endif /* QAK */ - /* Check for right-most child and its neighbor being close to full */ - if((internal->node_ptrs[idx - 1].node_nrec + internal->node_ptrs[idx].node_nrec) - >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) - could_split = TRUE; - } /* end else-if */ - else { /* Middle child */ -#ifdef QAK -HDfprintf(stderr, "%s: Middle child\n", FUNC); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)(idx - 1), (unsigned)internal->node_ptrs[idx - 1].node_nrec); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)idx, (unsigned)internal->node_ptrs[idx].node_nrec); -HDfprintf(stderr, "%s: internal->node_ptrs[%u].node_nrec = %u\n", FUNC, (unsigned)(idx + 1), (unsigned)internal->node_ptrs[idx + 1].node_nrec); -#endif /* QAK */ - /* Check for middle child and its left neighbor being close to full */ - if((internal->node_ptrs[idx - 1].node_nrec + internal->node_ptrs[idx].node_nrec) - >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) - could_split = TRUE; - /* Check for middle child and its right neighbor being close to full */ - else if((internal->node_ptrs[idx].node_nrec + internal->node_ptrs[idx + 1].node_nrec) - >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) - could_split = TRUE; - } /* end if */ - - /* If this node is full and the child node insertion could - * cause a split, punt back up to caller, leaving the - * "insert child full" status. 
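When a child reports back H5B2_UPDATE_INSERT_CHILD_FULL, the removed H5B2__update_internal() checks whether this node is itself at its split threshold and whether the full child together with a neighbor is close enough to full that servicing the insertion here could cascade into another split; if so, it unprotects the node and punts the same status up to its caller. A condensed sketch of that test, with simplified field names (nrec[], nchildren, split_nrec) standing in for the H5B2 structures:

/* nrec[i] is the record count of child i; nchildren and split_nrec are
 * simplified stand-ins for internal->nrec + 1 and
 * hdr->node_info[depth - 1].split_nrec. */
#include <stdbool.h>

static bool child_insert_could_split(const unsigned *nrec, unsigned nchildren,
                                     unsigned idx, unsigned split_nrec)
{
    unsigned threshold = 2 * split_nrec - 1;   /* "both nearly full" test */

    if (idx == 0)                              /* left-most child */
        return nrec[0] + nrec[1] >= threshold;
    if (idx == nchildren - 1)                  /* right-most child */
        return nrec[idx - 1] + nrec[idx] >= threshold;

    /* middle child: either neighbor being nearly full is enough */
    return (nrec[idx - 1] + nrec[idx] >= threshold) ||
           (nrec[idx] + nrec[idx + 1] >= threshold);
}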
- */ - if(could_split) { - /* Release the internal B-tree node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - internal = NULL; - -#ifdef QAK -HDfprintf(stderr, "%s: Punting back to caller\n", FUNC); -#endif /* QAK */ - /* Punt back to caller */ - HGOTO_DONE(SUCCEED); - } /* end if */ - } /* end if */ - - /* Release the internal B-tree node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - internal = NULL; - - /* Indicate that the record was inserted */ - *status = H5B2_UPDATE_INSERT_DONE; - - /* Dodge sideways into inserting a record into this node */ - if(H5B2__insert_internal(hdr, dxpl_id, depth, parent_cache_info_flags_ptr, curr_node_ptr, curr_pos, udata) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into internal B-tree node") - break; - - case H5B2_UPDATE_UNKNOWN: - default: - HDassert(0 && "Invalid update status"); - HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "invalid update status") - } /* end switch */ - } /* end else */ - -done: - /* Release the internal B-tree node */ - if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__update_internal() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__create_leaf - * - * Purpose: Creates empty leaf node of a B-tree and update node pointer - * to point to it. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 2 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr) -{ - H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(node_ptr); - - /* Allocate memory for leaf information */ - if(NULL == (leaf = H5FL_MALLOC(H5B2_leaf_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree leaf info") - - /* Set metadata cache info */ - HDmemset(&leaf->cache_info, 0, sizeof(H5AC_info_t)); - - /* Increment ref. count on B-tree header */ - if(H5B2__hdr_incr(hdr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. 
count on B-tree header") - - /* Share B-tree header information */ - leaf->hdr = hdr; - - /* Allocate space for the native keys in memory */ - if(NULL == (leaf->leaf_native = (uint8_t *)H5FL_FAC_MALLOC(hdr->node_info[0].nat_rec_fac))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree leaf native keys") - HDmemset(leaf->leaf_native, 0, hdr->cls->nrec_size * hdr->node_info[0].max_nrec); - - /* Set number of records */ - leaf->nrec = 0; - - /* Allocate space on disk for the leaf */ - if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree leaf node") - - /* Cache the new B-tree node */ - if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_BT2_LEAF, node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree leaf to cache") - -done: - if(ret_value < 0) { - if(leaf) - if(H5B2__leaf_free(leaf) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree leaf node") - } /* end if */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__create_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__protect_leaf - * - * Purpose: "Protect" an leaf node in the metadata cache - * - * Return: Pointer to leaf node on success/NULL on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * May 5 2010 - * - *------------------------------------------------------------------------- - */ -H5B2_leaf_t * -H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec, - unsigned flags) -{ - H5B2_leaf_cache_ud_t udata; /* User-data for callback */ - H5B2_leaf_t *ret_value = NULL; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(H5F_addr_defined(addr)); - - /* only H5AC__READ_ONLY_FLAG may appear in flags */ - HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0); - - /* Set up user data for callback */ - udata.f = hdr->f; - udata.hdr = hdr; - H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned) - - /* Protect the leaf node */ - if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, flags))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree leaf node") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__protect_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__create_internal - * - * Purpose: Creates empty internal node of a B-tree and update node pointer - * to point to it. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 3 2005 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr, - uint16_t depth) -{ - H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_STATIC - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(node_ptr); - HDassert(depth > 0); - - /* Allocate memory for internal node information */ - if(NULL == (internal = H5FL_MALLOC(H5B2_internal_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal info") - - /* Set metadata cache info */ - HDmemset(&internal->cache_info, 0, sizeof(H5AC_info_t)); - - /* Increment ref. count on B-tree header */ - if(H5B2__hdr_incr(hdr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. count on B-tree header") - - /* Share B-tree header information */ - internal->hdr = hdr; - - /* Allocate space for the native keys in memory */ - if(NULL == (internal->int_native = (uint8_t *)H5FL_FAC_MALLOC(hdr->node_info[depth].nat_rec_fac))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal native keys") - HDmemset(internal->int_native, 0, hdr->cls->nrec_size * hdr->node_info[depth].max_nrec); - - /* Allocate space for the node pointers in memory */ - if(NULL == (internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_MALLOC(hdr->node_info[depth].node_ptr_fac))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal node pointers") - HDmemset(internal->node_ptrs, 0, sizeof(H5B2_node_ptr_t) * (hdr->node_info[depth].max_nrec + 1)); - - /* Set number of records & depth of the node */ - internal->nrec = 0; - internal->depth = depth; - - /* Allocate space on disk for the internal node */ - if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree internal node") - - /* Cache the new B-tree node */ - if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_BT2_INT, node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree internal node to cache") - -done: - if(ret_value < 0) { - if(internal) - if(H5B2__internal_free(internal) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node") - } /* end if */ - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__create_internal() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__protect_internal - * - * Purpose: "Protect" an internal node in the metadata cache - * - * Return: Pointer to internal node on success/NULL on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * Aug 25 2006 - * - *------------------------------------------------------------------------- - */ -H5B2_internal_t * -H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, - uint16_t nrec, uint16_t depth, unsigned flags) -{ - H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */ - H5B2_internal_t *ret_value = NULL; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(H5F_addr_defined(addr)); - HDassert(depth > 0); - - /* only H5AC__READ_ONLY_FLAG may appear in flags */ - HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0); - - /* Set up user data for callback */ - udata.f = hdr->f; - udata.hdr = hdr; - udata.nrec = nrec; - udata.depth = depth; - - /* Protect the internal node */ - if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, flags))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree internal node") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__protect_internal() */ +} /* H5B2__insert() */ /*------------------------------------------------------------------------- @@ -2421,13 +1505,15 @@ done: */ herr_t H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data) + const H5B2_node_ptr_t *curr_node, void *parent, H5B2_operator_t op, + void *op_data) { const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */ void *node = NULL; /* Pointers to current node */ uint8_t *node_native; /* Pointers to node's native records */ uint8_t *native = NULL; /* Pointers to copy of node's native records */ H5B2_node_ptr_t *node_ptrs = NULL; /* Pointers to node's node pointers */ + hbool_t node_pinned = FALSE; /* Whether node is pinned */ unsigned u; /* Local index */ herr_t ret_value = H5_ITER_CONT; /* Iterator return value */ @@ -2443,7 +1529,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_internal_t *internal; /* Pointer to internal node */ /* Lock the current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, (H5B2_node_ptr_t *)curr_node, depth, FALSE, H5AC__READ_ONLY_FLAG))) /* Casting away const OK -QAK */ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set up information about current node */ @@ -2462,7 +1548,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_leaf_t *leaf; /* Pointer to leaf node */ /* Lock the current B-tree node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__READ_ONLY_FLAG))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, (H5B2_node_ptr_t *)curr_node, FALSE, H5AC__READ_ONLY_FLAG))) /* Casting away const OK -QAK */ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Set up information about current node */ @@ -2479,15 +1565,18 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDmemcpy(native, node_native, (hdr->cls->nrec_size * curr_node->node_nrec)); /* Unlock the node */ - if(H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, (unsigned)(hdr->swmr_write ? 
H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") - node = NULL; + if(hdr->swmr_write) + node_pinned = TRUE; + else + node = NULL; /* Iterate through records, in order */ for(u = 0; u < curr_node->node_nrec && !ret_value; u++) { /* Descend into child node, if current node is an internal node */ if(depth > 0) { - if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0) + if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), node, op, op_data)) < 0) HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed"); } /* end if */ @@ -2500,11 +1589,15 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Descend into last child node, if current node is an internal node */ if(!ret_value && depth > 0) { - if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0) + if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), node, op, op_data)) < 0) HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed"); } /* end if */ done: + /* Unpin the node if it was pinned */ + if(node_pinned && H5AC_unpin_entry(node) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "can't unpin node") + /* Release the node pointers & native records, if they were copied */ if(node_ptrs) node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(hdr->node_info[depth].node_ptr_fac, node_ptrs); @@ -2516,867 +1609,6 @@ done: /*------------------------------------------------------------------------- - * Function: H5B2__remove_leaf - * - * Purpose: Removes a record from a B-tree leaf node. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 3 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, - H5B2_nodepos_t curr_pos, void *udata, H5B2_remove_t op, void *op_data) -{ - H5B2_leaf_t *leaf; /* Pointer to leaf node */ - haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ - unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ - unsigned idx; /* Location of record which matches key */ - int cmp; /* Comparison value of records */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - leaf_addr = curr_node_ptr->addr; - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* Sanity check number of records */ - HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); - HDassert(leaf->nrec == curr_node_ptr->node_nrec); - - /* Find correct location to remove this record */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp != 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "record is not in B-tree") - - /* Check for invalidating the min/max record for the tree */ - if(H5B2_POS_MIDDLE != curr_pos) { - /* (Don't use 'else' for the idx check, to allow for root leaf node) */ - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->min_native_rec) - hdr->min_native_rec = H5MM_xfree(hdr->min_native_rec); - } /* end if */ - } /* end if */ - if(idx == (unsigned)(leaf->nrec - 1)) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->max_native_rec) - hdr->max_native_rec = H5MM_xfree(hdr->max_native_rec); - } /* end if */ - } /* end if */ - } /* end if */ - - /* Make 'remove' callback if there is one */ - if(op) - if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record into leaf node") - - /* Update number of records in node */ - leaf->nrec--; - - /* Mark leaf node as dirty also */ - leaf_flags |= H5AC__DIRTIED_FLAG; - - if(leaf->nrec > 0) { - /* Pack record out of leaf */ - if(idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx)); - } /* end if */ - else { - /* Let the cache know that the object is deleted */ - leaf_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; - - /* Reset address of parent node pointer */ - curr_node_ptr->addr = HADDR_UNDEF; - } /* end else */ - - /* Update record count for parent of leaf node */ - curr_node_ptr->node_nrec--; - -done: - /* Release the B-tree leaf node */ - if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__remove_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__remove_internal - * - * Purpose: Removes a record from a B-tree node. 
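The H5B2__remove_leaf() body removed just above deletes a record by packing the remaining records down over it with HDmemmove(); if the leaf becomes empty, the cache entry is flagged as deleted and the parent's node pointer address is reset. A simplified standalone version of that remove-at-index step; it assumes the record exists, as the real code has already verified via H5B2__locate_record().

/* Simplified remove-at-index; assumes idx < *nrec on entry. */
#include <stdbool.h>
#include <string.h>

static bool remove_record_at(unsigned char *recs, size_t *nrec,
                             size_t rec_size, size_t idx)
{
    (*nrec)--;
    if (idx < *nrec)    /* pack the remaining records down over the hole */
        memmove(recs + idx * rec_size, recs + (idx + 1) * rec_size,
                (*nrec - idx) * rec_size);

    /* true => node is now empty; the caller deletes the cache entry and
     * resets the parent's node pointer, as the removed code above does. */
    return *nrec == 0;
}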
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 3 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased, - void *swap_loc, uint16_t depth, H5AC_info_t *parent_cache_info, - unsigned *parent_cache_info_flags_ptr, H5B2_nodepos_t curr_pos, - H5B2_node_ptr_t *curr_node_ptr, void *udata, H5B2_remove_t op, void *op_data) -{ - H5AC_info_t *new_cache_info; /* Pointer to new cache info */ - unsigned *new_cache_info_flags_ptr = NULL; - H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */ - H5B2_internal_t *internal; /* Pointer to internal node */ - H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */ - unsigned internal_flags = H5AC__NO_FLAGS_SET; - haddr_t internal_addr; /* Address of internal node */ - size_t merge_nrec; /* Number of records to merge node at */ - hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(depth > 0); - HDassert(parent_cache_info); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - internal_addr = curr_node_ptr->addr; - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - - /* Determine the correct number of records to merge at */ - merge_nrec = hdr->node_info[depth - 1].merge_nrec; - - /* Check for needing to collapse the root node */ - /* (The root node is the only internal node allowed to have 1 record) */ - if(internal->nrec == 1 && - ((internal->node_ptrs[0].node_nrec + internal->node_ptrs[1].node_nrec) <= ((merge_nrec * 2) + 1))) { - - /* Merge children of root node */ - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - - /* Let the cache know that the object is deleted */ - internal_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; - - /* Reset information in header's root node pointer */ - curr_node_ptr->addr = internal->node_ptrs[0].addr; - curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec; - - /* Indicate that the level of the B-tree decreased */ - *depth_decreased = TRUE; - - /* Set pointers for advancing to child node */ - new_cache_info = parent_cache_info; - new_cache_info_flags_ptr = parent_cache_info_flags_ptr; - new_node_ptr = curr_node_ptr; - - /* Set flag to indicate root was collapsed */ - collapsed_root = TRUE; - - /* Indicate position of next node */ - next_pos = H5B2_POS_ROOT; - } /* end if */ - /* Merge or redistribute child node pointers, if necessary */ - else { - unsigned idx; /* Location of record which matches key */ - int cmp = 0; /* Comparison value of records */ - unsigned retries; /* Number of times to attempt redistribution */ - - /* Locate node pointer for child */ - if(swap_loc) - idx = 0; - else { - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp >= 0) - idx++; - } /* end else */ - - /* Set the number of redistribution retries 
*/ - /* This takes care of the case where a B-tree node needs to be - * redistributed, but redistributing the node causes the index - * for removal to move to another node, which also needs to be - * redistributed. Now, we loop trying to redistribute and then - * eventually force a merge */ - retries = 2; - - /* Preemptively merge/redistribute a node we will enter */ - while(internal->node_ptrs[idx].node_nrec == merge_nrec) { - /* Attempt to redistribute records among children */ - /* (NOTE: These 2-node redistributions should actually get the - * record to promote from the node with more records. - QAK) - */ - /* (NOTE: This code is the same in both H5B2__remove_internal() and - * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK) - */ - if(idx == 0) { /* Left-most child */ - if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end if */ - else if(idx == internal->nrec) { /* Right-most child */ - if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end if */ - else { /* Middle child */ - if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) || - (internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) { - if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end else */ - - /* Locate node pointer for child (after merge/redistribute) */ - if(swap_loc) - idx = 0; - else { -/* Actually, this can be easily updated (for 2-node redistrib.) 
and shouldn't require re-searching */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp >= 0) - idx++; - } /* end else */ - - /* Decrement the number of redistribution retries left */ - retries--; - } /* end while */ - - /* Handle deleting a record from an internal node */ - if(!swap_loc && cmp == 0) - swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1); - - /* Swap record to delete with record from leaf, if we are the last internal node */ - if(swap_loc && depth == 1) - if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "Can't swap records in B-tree") - - /* Set pointers for advancing to child node */ - new_cache_info_flags_ptr = &internal_flags; - new_cache_info = &internal->cache_info; - new_node_ptr = &internal->node_ptrs[idx]; - - /* Indicate position of next node */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_LEFT; - } /* end if */ - else if(idx == internal->nrec) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_RIGHT; - } /* end if */ - } /* end if */ - } /* end else */ - - /* Attempt to remove record from child node */ - if(depth > 1) { - if(H5B2__remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1), - new_cache_info, new_cache_info_flags_ptr, next_pos, new_node_ptr, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") - } /* end if */ - else { - if(H5B2__remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") - } /* end else */ - - /* Update record count for node pointer to current node */ - if(!collapsed_root) - new_node_ptr->all_nrec--; - - /* Mark node as dirty */ - internal_flags |= H5AC__DIRTIED_FLAG; - -#ifdef H5B2_DEBUG - H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal); -#endif /* H5B2_DEBUG */ - -done: - /* Release the B-tree internal node */ - if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__remove_internal() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__remove_leaf_by_idx - * - * Purpose: Removes a record from a B-tree leaf node, according to the - * offset in the B-tree records. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * Nov 14 2006 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, - unsigned idx, H5B2_remove_t op, void *op_data) -{ - H5B2_leaf_t *leaf; /* Pointer to leaf node */ - haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ - unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. 
*/ - HDassert(hdr); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock B-tree leaf node */ - leaf_addr = curr_node_ptr->addr; - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* Sanity check number of records */ - HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); - HDassert(leaf->nrec == curr_node_ptr->node_nrec); - HDassert(idx < leaf->nrec); - - /* Check for invalidating the min/max record for the tree */ - if(H5B2_POS_MIDDLE != curr_pos) { - /* (Don't use 'else' for the idx check, to allow for root leaf node) */ - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->min_native_rec) - hdr->min_native_rec = H5MM_xfree(hdr->min_native_rec); - } /* end if */ - } /* end if */ - if(idx == (unsigned)(leaf->nrec - 1)) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { - if(hdr->max_native_rec) - hdr->max_native_rec = H5MM_xfree(hdr->max_native_rec); - } /* end if */ - } /* end if */ - } /* end if */ - - /* Make 'remove' callback if there is one */ - if(op) - if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record into leaf node") - - /* Update number of records in node */ - leaf->nrec--; - - /* Mark leaf node as dirty also */ - leaf_flags |= H5AC__DIRTIED_FLAG; - - if(leaf->nrec > 0) { - /* Pack record out of leaf */ - if(idx < leaf->nrec) - HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx)); - } /* end if */ - else { - /* Let the cache know that the object is deleted */ - leaf_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; - - /* Reset address of parent node pointer */ - curr_node_ptr->addr = HADDR_UNDEF; - } /* end else */ - - /* Update record count for parent of leaf node */ - curr_node_ptr->node_nrec--; - -done: - /* Release the B-tree leaf node */ - if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__remove_leaf_by_idx() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__remove_internal_by_idx - * - * Purpose: Removes a record from a B-tree node, according to the offset - * in the B-tree records - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * Nov 14 2006 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, - hbool_t *depth_decreased, void *swap_loc, uint16_t depth, - H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n, - H5B2_remove_t op, void *op_data) -{ - H5AC_info_t *new_cache_info; /* Pointer to new cache info */ - unsigned *new_cache_info_flags_ptr = NULL; - H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */ - H5B2_internal_t *internal; /* Pointer to internal node */ - H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */ - unsigned internal_flags = H5AC__NO_FLAGS_SET; - haddr_t internal_addr; /* Address of internal node */ - size_t merge_nrec; /* Number of records to 
merge node at */ - hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(depth > 0); - HDassert(parent_cache_info); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - - /* Lock current B-tree node */ - internal_addr = curr_node_ptr->addr; - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - HDassert(internal->nrec == curr_node_ptr->node_nrec); - HDassert(depth == hdr->depth || internal->nrec > 1); - - /* Determine the correct number of records to merge at */ - merge_nrec = hdr->node_info[depth - 1].merge_nrec; - - /* Check for needing to collapse the root node */ - /* (The root node is the only internal node allowed to have 1 record) */ - if(internal->nrec == 1 && - ((internal->node_ptrs[0].node_nrec + internal->node_ptrs[1].node_nrec) <= ((merge_nrec * 2) + 1))) { - HDassert(depth == hdr->depth); - - /* Merge children of root node */ - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - - /* Let the cache know that the object is deleted */ - internal_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; - - /* Reset information in header's root node pointer */ - curr_node_ptr->addr = internal->node_ptrs[0].addr; - curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec; - - /* Indicate that the level of the B-tree decreased */ - *depth_decreased = TRUE; - - /* Set pointers for advancing to child node */ - new_cache_info = parent_cache_info; - new_cache_info_flags_ptr = parent_cache_info_flags_ptr; - new_node_ptr = curr_node_ptr; - - /* Set flag to indicate root was collapsed */ - collapsed_root = TRUE; - - /* Indicate position of next node */ - next_pos = H5B2_POS_ROOT; - } /* end if */ - /* Merge or redistribute child node pointers, if necessary */ - else { - hsize_t orig_n = n; /* Original index looked for */ - unsigned idx; /* Location of record which matches key */ - hbool_t found = FALSE; /* Comparison value of records */ - unsigned retries; /* Number of times to attempt redistribution */ - - /* Locate node pointer for child */ - if(swap_loc) - idx = 0; - else { - /* Search for record with correct index */ - for(idx = 0; idx < internal->nrec; idx++) { - /* Check which child node contains indexed record */ - if(internal->node_ptrs[idx].all_nrec >= n) { - /* Check if record is in this node */ - if(internal->node_ptrs[idx].all_nrec == n) { - /* Indicate the record was found and that the index - * in child nodes is zero from now on - */ - found = TRUE; - n = 0; - - /* Increment to next record */ - idx++; - } /* end if */ - - /* Break out of loop early */ - break; - } /* end if */ - - /* Decrement index we are looking for to account for the node we - * just advanced past. - */ - n -= (internal->node_ptrs[idx].all_nrec + 1); - } /* end for */ - } /* end else */ - - /* Set the number of redistribution retries */ - /* This takes care of the case where a B-tree node needs to be - * redistributed, but redistributing the node causes the index - * for removal to move to another node, which also needs to be - * redistributed. 
Now, we loop trying to redistribute and then - * eventually force a merge */ - retries = 2; - - /* Preemptively merge/redistribute a node we will enter */ - while(internal->node_ptrs[idx].node_nrec == merge_nrec) { - /* Attempt to redistribute records among children */ - /* (NOTE: These 2-node redistributions should actually get the - * record to promote from the node with more records. - QAK) - */ - /* (NOTE: This code is the same in both H5B2__remove_internal() and - * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK) - */ - if(idx == 0) { /* Left-most child */ - if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end if */ - else if(idx == internal->nrec) { /* Right-most child */ - if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) { - if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end if */ - else { /* Middle child */ - if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) || - (internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) { - if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") - } /* end if */ - else { - if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr, - parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") - } /* end else */ - } /* end else */ - - /* Locate node pointer for child (after merge/redistribute) */ - if(swap_loc) - idx = 0; - else { - /* Count from the orginal index value again */ - n = orig_n; - - /* Reset "found" flag - the record may have shifted during the - * redistribute/merge - */ - found = FALSE; - - /* Search for record with correct index */ - for(idx = 0; idx < internal->nrec; idx++) { - /* Check which child node contains indexed record */ - if(internal->node_ptrs[idx].all_nrec >= n) { - /* Check if record is in this node */ - if(internal->node_ptrs[idx].all_nrec == n) { - /* Indicate the record was found and that the index - * in child nodes is zero from now on - */ - found = TRUE; - n = 0; - - /* Increment to next record */ - idx++; - } /* end if */ - - /* Break out of loop early */ - break; - } /* end if */ - - /* Decrement index we are looking for to account for the node we - * just advanced past. 
- */ - n -= (internal->node_ptrs[idx].all_nrec + 1); - } /* end for */ - } /* end else */ - - /* Decrement the number of redistribution retries left */ - retries--; - } /* end while */ - - /* Handle deleting a record from an internal node */ - if(!swap_loc && found) - swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1); - - /* Swap record to delete with record from leaf, if we are the last internal node */ - if(swap_loc && depth == 1) - if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "can't swap records in B-tree") - - /* Set pointers for advancing to child node */ - new_cache_info_flags_ptr = &internal_flags; - new_cache_info = &internal->cache_info; - new_node_ptr = &internal->node_ptrs[idx]; - - /* Indicate position of next node */ - if(H5B2_POS_MIDDLE != curr_pos) { - if(idx == 0) { - if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_LEFT; - } /* end if */ - else if(idx == internal->nrec) { - if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) - next_pos = H5B2_POS_RIGHT; - } /* end if */ - } /* end if */ - } /* end else */ - - /* Attempt to remove record from child node */ - if(depth > 1) { - if(H5B2__remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1), - new_cache_info, new_cache_info_flags_ptr, new_node_ptr, next_pos, n, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") - } /* end if */ - else { - if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, (unsigned)n, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") - } /* end else */ - - /* Update record count for node pointer to child node */ - if(!collapsed_root) - new_node_ptr->all_nrec--; - - /* Mark node as dirty */ - internal_flags |= H5AC__DIRTIED_FLAG; - -#ifdef H5B2_DEBUG - H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal); -#endif /* H5B2_DEBUG */ - -done: - /* Release the B-tree internal node */ - if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__remove_internal_by_idx() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__neighbor_leaf - * - * Purpose: Locate a record relative to the specified information in a - * B-tree leaf node and return that information by filling in - * fields of the - * caller-supplied UDATA pointer depending on the type of leaf node - * requested. The UDATA can point to additional data passed - * to the key comparison function. - * - * The 'OP' routine is called with the record found and the - * OP_DATA pointer, to allow caller to return information about - * the record. - * - * The RANGE indicates whether to search for records less than or - * equal to, or greater than or equal to the information passed - * in with UDATA. - * - * Return: Non-negative on success, negative on failure. 
- * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 9 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, - void *neighbor_loc, H5B2_compare_t comp, void *udata, H5B2_found_t op, - void *op_data) -{ - H5B2_leaf_t *leaf; /* Pointer to leaf node */ - unsigned idx; /* Location of record which matches key */ - int cmp = 0; /* Comparison value of records */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - HDassert(op); - - /* Lock current B-tree node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") - - /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp > 0) - idx++; - else - if(cmp == 0 && comp == H5B2_COMPARE_GREATER) - idx++; - - /* Set the neighbor location, if appropriate */ - if(comp == H5B2_COMPARE_LESS) { - if(idx > 0) - neighbor_loc = H5B2_LEAF_NREC(leaf, hdr, idx - 1); - } /* end if */ - else { - HDassert(comp == H5B2_COMPARE_GREATER); - - if(idx < leaf->nrec) - neighbor_loc = H5B2_LEAF_NREC(leaf, hdr, idx); - } /* end else */ - - /* Make callback if neighbor record has been found */ - if(neighbor_loc) { - /* Make callback for current record */ - if((op)(neighbor_loc, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree neighbor operation") - } /* end if */ - else - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree") - -done: - /* Release the B-tree internal node */ - if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__neighbor_leaf() */ - - -/*------------------------------------------------------------------------- - * Function: H5B2__neighbor_internal - * - * Purpose: Locate a record relative to the specified information in a - * B-tree internal node and return that information by filling in - * fields of the - * caller-supplied UDATA pointer depending on the type of leaf node - * requested. The UDATA can point to additional data passed - * to the key comparison function. - * - * The 'OP' routine is called with the record found and the - * OP_DATA pointer, to allow caller to return information about - * the record. - * - * The RANGE indicates whether to search for records less than or - * equal to, or greater than or equal to the information passed - * in with UDATA. - * - * Return: Non-negative on success, negative on failure. 
- * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Mar 9 2005 - * - *------------------------------------------------------------------------- - */ -herr_t -H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp, - void *udata, H5B2_found_t op, void *op_data) -{ - H5B2_internal_t *internal; /* Pointer to internal node */ - unsigned idx; /* Location of record which matches key */ - int cmp = 0; /* Comparison value of records */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Check arguments. */ - HDassert(hdr); - HDassert(depth > 0); - HDassert(curr_node_ptr); - HDassert(H5F_addr_defined(curr_node_ptr->addr)); - HDassert(op); - - /* Lock current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") - - /* Locate node pointer for child */ - if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, - udata, &idx, &cmp) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") - if(cmp > 0) - idx++; - - /* Set the neighbor location, if appropriate */ - if(comp == H5B2_COMPARE_LESS) { - if(idx > 0) - neighbor_loc = H5B2_INT_NREC(internal, hdr, idx - 1); - } /* end if */ - else { - HDassert(comp == H5B2_COMPARE_GREATER); - - if(idx < internal->nrec) - neighbor_loc = H5B2_INT_NREC(internal, hdr, idx); - } /* end else */ - - /* Attempt to find neighboring record */ - if(depth > 1) { - if(H5B2__neighbor_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node") - } /* end if */ - else { - if(H5B2__neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node") - } /* end else */ - -done: - /* Release the B-tree internal node */ - if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) - HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") - - FUNC_LEAVE_NOAPI(ret_value) -} /* H5B2__neighbor_internal() */ - - -/*------------------------------------------------------------------------- * Function: H5B2__delete_node * * Purpose: Iterate over all the nodes in a B-tree node deleting them @@ -3392,7 +1624,8 @@ done: */ herr_t H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data) + const H5B2_node_ptr_t *curr_node, void *parent, H5B2_remove_t op, + void *op_data) { const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */ void *node = NULL; /* Pointers to current node */ @@ -3410,7 +1643,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned u; /* Local index */ /* Lock the current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__NO_FLAGS_SET))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, (H5B2_node_ptr_t *)curr_node, depth, FALSE, H5AC__NO_FLAGS_SET))) /* Casting away const OK -QAK */ HGOTO_ERROR(H5E_BTREE, 
H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Set up information about current node */ @@ -3420,14 +1653,14 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Descend into children */ for(u = 0; u < internal->nrec + (unsigned)1; u++) - if(H5B2__delete_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), op, op_data) < 0) + if(H5B2__delete_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), internal, op, op_data) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node descent failed") } /* end if */ else { H5B2_leaf_t *leaf; /* Pointer to leaf node */ /* Lock the current B-tree node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__NO_FLAGS_SET))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, (H5B2_node_ptr_t *)curr_node, FALSE, H5AC__NO_FLAGS_SET))) /* Casting away const OK -QAK */ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") /* Set up information about current node */ @@ -3450,7 +1683,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, done: /* Unlock & delete current node */ - if(node && H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) + if(node && H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, (unsigned)(H5AC__DELETED_FLAG | (hdr->swmr_write ? 0 : H5AC__FREE_FILE_SPACE_FLAG))) < 0) HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") FUNC_LEAVE_NOAPI(ret_value) @@ -3472,7 +1705,7 @@ done: */ herr_t H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - const H5B2_node_ptr_t *curr_node, hsize_t *btree_size) + const H5B2_node_ptr_t *curr_node, void *parent, hsize_t *btree_size) { H5B2_internal_t *internal = NULL; /* Pointer to internal node */ herr_t ret_value = SUCCEED; /* Iterator return value */ @@ -3486,7 +1719,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, HDassert(depth > 0); /* Lock the current B-tree node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, (H5B2_node_ptr_t *)curr_node, depth, FALSE, H5AC__READ_ONLY_FLAG))) /* Casting away const OK -QAK */ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") /* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */ @@ -3495,7 +1728,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, /* Descend into children */ for(u = 0; u < internal->nrec + (unsigned)1; u++) - if(H5B2__node_size(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), btree_size) < 0) + if(H5B2__node_size(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), internal, btree_size) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed") } /* end if */ else /* depth is 1: count all the leaf nodes from this node */ @@ -3513,219 +1746,205 @@ done: /*------------------------------------------------------------------------- - * Function: H5B2__internal_free + * Function: H5B2__create_flush_depend * - * Purpose: Destroys a B-tree internal node in memory. 
+ * Purpose: Create a flush dependency between two data structure components * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED/FAIL * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 2 2005 + * Programmer: Dana Robinson + * Fall 2012 * *------------------------------------------------------------------------- */ herr_t -H5B2__internal_free(H5B2_internal_t *internal) +H5B2__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry) { herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE - - /* - * Check arguments. - */ - HDassert(internal); - - /* Release internal node's native key buffer */ - if(internal->int_native) - internal->int_native = (uint8_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].nat_rec_fac, internal->int_native); - - /* Release internal node's node pointer buffer */ - if(internal->node_ptrs) - internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].node_ptr_fac, internal->node_ptrs); - - /* Decrement ref. count on B-tree header */ - if(H5B2__hdr_decr(internal->hdr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header") + FUNC_ENTER_NOAPI_NOINIT + + /* Sanity check */ + HDassert(parent_entry); + HDassert(child_entry); - /* Free B-tree internal node info */ - internal = H5FL_FREE(H5B2_internal_t, internal); + /* Create a flush dependency between parent and child entry */ + if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5B2__internal_free() */ +} /* end H5B2__create_flush_depend() */ /*------------------------------------------------------------------------- - * Function: H5B2__leaf_free + * Function: H5B2__update_flush_depend * - * Purpose: Destroys a B-tree leaf node in memory. + * Purpose: Update flush dependencies for children of a node * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED/FAIL * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 2 2005 + * Programmer: Quincey Koziol + * koziol@lbl.gov + * Dec 1 2016 * *------------------------------------------------------------------------- */ herr_t -H5B2__leaf_free(H5B2_leaf_t *leaf) +H5B2__update_flush_depend(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, + const H5B2_node_ptr_t *node_ptr, void *old_parent, void *new_parent) { + const H5AC_class_t *child_class; /* Pointer to child node's class info */ + void *child = NULL; /* Pointer to child node */ + unsigned node_status = 0; /* Node's status in the metadata cache */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE + + /* Sanity checks */ + HDassert(hdr); + HDassert(depth > 0); + HDassert(node_ptr); + HDassert(old_parent); + HDassert(new_parent); - /* - * Check arguments. 
- */ - HDassert(leaf); + /* Check the node's entry status in the metadata cache */ + if(H5AC_get_entry_status(hdr->f, node_ptr->addr, &node_status) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "unable to check status of B-tree node") - /* Release leaf's native key buffer */ - if(leaf->leaf_native) - leaf->leaf_native = (uint8_t *)H5FL_FAC_FREE(leaf->hdr->node_info[0].nat_rec_fac, leaf->leaf_native); + /* If the node is in the cache, check for retargeting its parent */ + if(node_status & H5AC_ES__IN_CACHE) { + void **parent_ptr; /* Pointer to child node's parent */ + hbool_t update_deps = FALSE; /* Whether to update flush dependencies */ - /* Decrement ref. count on B-tree header */ - if(H5B2__hdr_decr(leaf->hdr) < 0) - HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header") + /* Get child node pointer */ + if(depth > 1) { + H5B2_internal_t *child_int; - /* Free B-tree leaf node info */ - leaf = H5FL_FREE(H5B2_leaf_t, leaf); + /* Protect child */ + if(NULL == (child_int = H5B2__protect_internal(hdr, dxpl_id, new_parent, (H5B2_node_ptr_t *)node_ptr, (uint16_t)(depth - 1), FALSE, H5AC__NO_FLAGS_SET))) /* Casting away const OK -QAK */ + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + child_class = H5AC_BT2_INT; + child = child_int; -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5B2__leaf_free() */ + if(child_int->parent == old_parent) { + parent_ptr = &child_int->parent; + update_deps = TRUE; + } /* end if */ + else + HDassert(child_int->parent == new_parent); + } /* end if */ + else { + H5B2_leaf_t *child_leaf; -#ifdef H5B2_DEBUG - -/*------------------------------------------------------------------------- - * Function: H5B2__assert_leaf - * - * Purpose: Verify than a leaf node is mostly sane - * - * Return: Non-negative on success, negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 19 2005 - * - *------------------------------------------------------------------------- - */ -H5_ATTR_PURE static herr_t -H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf) -{ - /* General sanity checking on node */ - HDassert(leaf->nrec <= hdr->node_info->split_nrec); + /* Protect child */ + if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, new_parent, (H5B2_node_ptr_t *)node_ptr, FALSE, H5AC__NO_FLAGS_SET))) /* Casting away const OK -QAK */ + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + child_class = H5AC_BT2_LEAF; + child = child_leaf; - return(0); -} /* end H5B2__assert_leaf() */ + if(child_leaf->parent == old_parent) { + parent_ptr = &child_leaf->parent; + update_deps = TRUE; + } /* end if */ + else + HDassert(child_leaf->parent == new_parent); + } /* end else */ - -/*------------------------------------------------------------------------- - * Function: H5B2__assert_leaf2 - * - * Purpose: Verify than a leaf node is mostly sane - * - * Return: Non-negative on success, negative on failure - * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 19 2005 - * - *------------------------------------------------------------------------- - */ -H5_ATTR_PURE static herr_t -H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t H5_ATTR_UNUSED *leaf2) -{ - /* General sanity checking on node */ - HDassert(leaf->nrec <= hdr->node_info->split_nrec); + /* Update flush dependencies if necessary */ + if(update_deps) { + /* Sanity check */ + HDassert(parent_ptr); + + /* Switch the flush dependency for the 
node */ + if(H5B2__destroy_flush_depend((H5AC_info_t *)old_parent, (H5AC_info_t *)child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + *parent_ptr = new_parent; + if(H5B2__create_flush_depend((H5AC_info_t *)new_parent, (H5AC_info_t *)child) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + } /* end if */ + } /* end if */ - return(0); -} /* end H5B2__assert_leaf2() */ +done: + /* Unprotect the child */ + if(child) + if(H5AC_unprotect(hdr->f, dxpl_id, child_class, node_ptr->addr, child, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__update_flush_depend() */ /*------------------------------------------------------------------------- - * Function: H5B2__assert_internal + * Function: H5B2__update_child_flush_depends * - * Purpose: Verify than an internal node is mostly sane + * Purpose: Update flush dependencies for children of a node * - * Return: Non-negative on success, negative on failure + * Return: SUCCEED/FAIL * - * Programmer: Quincey Koziol - * koziol@ncsa.uiuc.edu - * Feb 19 2005 + * Programmer: Quincey Koziol + * koziol@lbl.gov + * Dec 1 2016 * *------------------------------------------------------------------------- */ -H5_ATTR_PURE static herr_t -H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal) +static herr_t +H5B2__update_child_flush_depends(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth, + const H5B2_node_ptr_t *node_ptrs, unsigned start_idx, unsigned end_idx, + void *old_parent, void *new_parent) { - hsize_t tot_all_nrec; /* Total number of records at or below this node */ - uint16_t u, v; /* Local index variables */ - - /* General sanity checking on node */ - HDassert(internal->nrec <= hdr->node_info->split_nrec); - - /* Sanity checking on node pointers */ - tot_all_nrec = internal->nrec; - for(u = 0; u < internal->nrec + 1; u++) { - tot_all_nrec += internal->node_ptrs[u].all_nrec; - - HDassert(H5F_addr_defined(internal->node_ptrs[u].addr)); - HDassert(internal->node_ptrs[u].addr > 0); - for(v = 0; v < u; v++) - HDassert(internal->node_ptrs[u].addr != internal->node_ptrs[v].addr); - } /* end for */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ - /* Sanity check all_nrec total in parent */ - if(parent_all_nrec > 0) - HDassert(tot_all_nrec == parent_all_nrec); + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(hdr); + HDassert(depth > 1); + HDassert(node_ptrs); + HDassert(start_idx <= end_idx); + HDassert(old_parent); + HDassert(new_parent); + + /* Loop over children */ + for(u = start_idx; u < end_idx; u++) + /* Update parent for children */ + if(H5B2__update_flush_depend(hdr, dxpl_id, depth - 1, &node_ptrs[u], old_parent, new_parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child node to new parent") - return(0); -} /* end H5B2__assert_internal() */ +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__update_child_flush_depends() */ /*------------------------------------------------------------------------- - * Function: H5B2__assert_internal2 + * Function: H5B2__destroy_flush_depend * - * Purpose: Verify than internal nodes are mostly sane + * Purpose: Destroy a flush dependency between two data structure components * - * Return: Non-negative on success, negative on failure + * Return: SUCCEED/FAIL * - * Programmer: Quincey Koziol - * 
koziol@ncsa.uiuc.edu
- *		Feb 19 2005
+ * Programmer:  Dana Robinson
+ *              Fall 2012
 *
 *-------------------------------------------------------------------------
 */
-H5_ATTR_PURE static herr_t
-H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2)
+herr_t
+H5B2__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
 {
-    hsize_t tot_all_nrec;       /* Total number of records at or below this node */
-    uint16_t u, v;              /* Local index variables */
-
-    /* General sanity checking on node */
-    HDassert(internal->nrec <= hdr->node_info->split_nrec);
-
-    /* Sanity checking on node pointers */
-    tot_all_nrec =internal->nrec;
-    for(u =0; u < internal->nrec + 1; u++) {
-        tot_all_nrec += internal->node_ptrs[u].all_nrec;
-
-        HDassert(H5F_addr_defined(internal->node_ptrs[u].addr));
-        HDassert(internal->node_ptrs[u].addr > 0);
-        for(v = 0; v < u; v++)
-            HDassert(internal->node_ptrs[u].addr != internal->node_ptrs[v].addr);
-        for(v = 0; v < internal2->nrec + 1; v++)
-            HDassert(internal->node_ptrs[u].addr != internal2->node_ptrs[v].addr);
-    } /* end for */
+    herr_t ret_value = SUCCEED;         /* Return value */
 
-    /* Sanity check all_nrec total in parent */
-    if(parent_all_nrec > 0)
-        HDassert(tot_all_nrec == parent_all_nrec);
+    FUNC_ENTER_NOAPI_NOINIT
+
+    /* Sanity check */
+    HDassert(parent_entry);
+    HDassert(child_entry);
 
-    return(0);
-} /* end H5B2__assert_internal2() */
-#endif /* H5B2_DEBUG */
+    /* Destroy a flush dependency between parent and child entry */
+    if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
+        HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__destroy_flush_depend() */
diff --git a/src/H5B2internal.c b/src/H5B2internal.c
new file mode 100644
index 0000000..e74ae59
--- /dev/null
+++ b/src/H5B2internal.c
@@ -0,0 +1,1443 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group.                                               *
+ * Copyright by the Board of Trustees of the University of Illinois.         *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * This file is part of HDF5.  The full HDF5 copyright notice, including     *
+ * terms governing use, modification, and redistribution, is contained in    *
+ * the files COPYING and Copyright.html.  COPYING can be found at the root   *
+ * of the source code distribution tree; Copyright.html can be found at the  *
+ * root level of an installed copy of the electronic HDF5 document set and   *
+ * is linked from the top-level documents page.  It can also be found at     *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html.  If you do not have          *
+ * access to either file, you may request a copy from help@hdfgroup.org.     *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created:		H5B2internal.c
+ *			Dec 01 2016
+ *			Quincey Koziol <koziol@lbl.gov>
+ *
+ * Purpose:		Routines for managing v2 B-tree internal nodes.
+ * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5B2module.h" /* This source code file is part of the H5B2 module */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5B2pkg.h" /* v2 B-trees */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5MFprivate.h" /* File memory management */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5B2__shadow_internal(H5B2_internal_t *internal, hid_t dxpl_id, + H5B2_node_ptr_t *curr_node_ptr); + + +/*********************/ +/* Package Variables */ +/*********************/ + +/* Declare a free list to manage the H5B2_internal_t struct */ +H5FL_DEFINE(H5B2_internal_t); + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + + + +/*------------------------------------------------------------------------- + * Function: H5B2__create_internal + * + * Purpose: Creates empty internal node of a B-tree and update node pointer + * to point to it. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 3 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, + H5B2_node_ptr_t *node_ptr, uint16_t depth) +{ + H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */ + hbool_t inserted = FALSE; /* Whether the internal node was inserted into cache */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. */ + HDassert(hdr); + HDassert(node_ptr); + HDassert(depth > 0); + + /* Allocate memory for internal node information */ + if(NULL == (internal = H5FL_CALLOC(H5B2_internal_t))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal info") + + /* Increment ref. count on B-tree header */ + if(H5B2__hdr_incr(hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. 
count on B-tree header") + + /* Share B-tree header information */ + internal->hdr = hdr; + + /* Allocate space for the native keys in memory */ + if(NULL == (internal->int_native = (uint8_t *)H5FL_FAC_MALLOC(hdr->node_info[depth].nat_rec_fac))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal native keys") + HDmemset(internal->int_native, 0, hdr->cls->nrec_size * hdr->node_info[depth].max_nrec); + + /* Allocate space for the node pointers in memory */ + if(NULL == (internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_MALLOC(hdr->node_info[depth].node_ptr_fac))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree internal node pointers") + HDmemset(internal->node_ptrs, 0, sizeof(H5B2_node_ptr_t) * (hdr->node_info[depth].max_nrec + 1)); + + /* Set depth of the node */ + internal->depth = depth; + + /* Set parent */ + internal->parent = parent; + + /* Set shadowed epoch to header's epoch */ + internal->shadow_epoch = hdr->shadow_epoch; + + /* Allocate space on disk for the internal node */ + if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree internal node") + + /* Cache the new B-tree node */ + if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_BT2_INT, node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree internal node to cache") + inserted = TRUE; + + /* Add internal node as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, internal) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, FAIL, "unable to add v2 B-tree node as child of proxy") + internal->top_proxy = hdr->top_proxy; + } /* end if */ + +done: + if(ret_value < 0) { + if(internal) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(internal) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, "unable to remove v2 B-tree internal node from cache") + + /* Release internal node's disk space */ + if(H5F_addr_defined(node_ptr->addr) && H5MF_xfree(hdr->f, H5FD_MEM_BTREE, dxpl_id, node_ptr->addr, (hsize_t)hdr->node_size) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release file space for v2 B-tree internal node") + + /* Destroy internal node */ + if(H5B2__internal_free(internal) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node") + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__create_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__protect_internal + * + * Purpose: "Protect" an internal node in the metadata cache + * + * Return: Pointer to internal node on success/NULL on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Aug 25 2006 + * + *------------------------------------------------------------------------- + */ +H5B2_internal_t * +H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, + H5B2_node_ptr_t *node_ptr, uint16_t depth, hbool_t shadow, unsigned flags) +{ + H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */ + H5B2_internal_t *internal = NULL; /* v2 B-tree internal node */ + H5B2_internal_t *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(node_ptr); + HDassert(H5F_addr_defined(node_ptr->addr)); + HDassert(depth > 0); + + /* only H5AC__READ_ONLY_FLAG may appear in flags */ + HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0); + + /* Set up user data for callback */ + udata.f = hdr->f; + udata.hdr = hdr; + udata.parent = parent; + udata.nrec = node_ptr->node_nrec; + udata.depth = depth; + + /* Protect the internal node */ + if(NULL == (internal = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, node_ptr->addr, &udata, flags))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree internal node") + + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == internal->top_proxy) { + /* Add internal node as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, internal) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, NULL, "unable to add v2 B-tree internal node as child of proxy") + internal->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Shadow the node, if requested */ + if(shadow) + if(H5B2__shadow_internal(internal, dxpl_id, node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, NULL, "unable to shadow internal node") + + /* Set return value */ + ret_value = internal; + +done: + /* Clean up on error */ + if(!ret_value) { + /* Release the internal node, if it was protected */ + if(internal) { + /* Remove from v2 B-tree's proxy, if added */ + if(internal->top_proxy) { + if(H5AC_proxy_entry_remove_child(internal->top_proxy, internal) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, NULL, "unable to destroy flush dependency between internal node and v2 B-tree 'top' proxy") + internal->top_proxy = NULL; + } /* end if */ + + /* Unprotect internal node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to unprotect v2 B-tree internal node, address = %llu", (unsigned long long)node_ptr->addr) + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__protect_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__neighbor_internal + * + * Purpose: Locate a record relative to the specified information in a + * B-tree internal node and return that information by filling in + * fields of the + * caller-supplied UDATA pointer depending on the type of leaf node + * requested. The UDATA can point to additional data passed + * to the key comparison function. + * + * The 'OP' routine is called with the record found and the + * OP_DATA pointer, to allow caller to return information about + * the record. + * + * The RANGE indicates whether to search for records less than or + * equal to, or greater than or equal to the information passed + * in with UDATA. + * + * Return: Non-negative on success, negative on failure. 
+ * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 9 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp, + void *parent, void *udata, H5B2_found_t op, void *op_data) +{ + H5B2_internal_t *internal; /* Pointer to internal node */ + unsigned idx = 0; /* Location of record which matches key */ + int cmp = 0; /* Comparison value of records */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. */ + HDassert(hdr); + HDassert(depth > 0); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + HDassert(op); + + /* Lock current B-tree node */ + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, curr_node_ptr, depth, FALSE, H5AC__READ_ONLY_FLAG))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + + /* Locate node pointer for child */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp > 0) + idx++; + + /* Set the neighbor location, if appropriate */ + if(comp == H5B2_COMPARE_LESS) { + if(idx > 0) + neighbor_loc = H5B2_INT_NREC(internal, hdr, idx - 1); + } /* end if */ + else { + HDassert(comp == H5B2_COMPARE_GREATER); + + if(idx < internal->nrec) + neighbor_loc = H5B2_INT_NREC(internal, hdr, idx); + } /* end else */ + + /* Attempt to find neighboring record */ + if(depth > 1) { + if(H5B2__neighbor_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal->node_ptrs[idx], neighbor_loc, comp, internal, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node") + } /* end if */ + else { + if(H5B2__neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, internal, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node") + } /* end else */ + +done: + /* Release the B-tree internal node */ + if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__neighbor_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__insert_internal + * + * Purpose: Adds a new record to a B-tree node. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 2 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, + H5B2_nodepos_t curr_pos, void *parent, void *udata) +{ + H5B2_internal_t *internal = NULL; /* Pointer to internal node */ + unsigned internal_flags = H5AC__NO_FLAGS_SET; + unsigned idx = 0; /* Location of record which matches key */ + H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of node */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(depth > 0); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, curr_node_ptr, depth, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + + /* Sanity check number of records */ + HDassert(internal->nrec == curr_node_ptr->node_nrec); + + /* Split or redistribute child node pointers, if necessary */ + { + int cmp; /* Comparison value of records */ + unsigned retries; /* Number of times to attempt redistribution */ + size_t split_nrec; /* Number of records to split node at */ + + /* Locate node pointer for child */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, + udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp == 0) + HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") + if(cmp > 0) + idx++; + + /* Set the number of redistribution retries */ + /* This takes care of the case where a B-tree node needs to be + * redistributed, but redistributing the node causes the index + * for insertion to move to another node, which also needs to be + * redistributed. Now, we loop trying to redistribute and then + * eventually force a split */ + retries = 2; + + /* Determine the correct number of records to split child node at */ + split_nrec = hdr->node_info[depth - 1].split_nrec; + + /* Preemptively split/redistribute a node we will enter */ + while(internal->node_ptrs[idx].node_nrec == split_nrec) { + /* Attempt to redistribute records among children */ + if(idx == 0) { /* Left-most child */ + if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec < split_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") + } /* end else */ + } /* end if */ + else if(idx == internal->nrec) { /* Right-most child */ + if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec < split_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") + } /* end else */ + } /* end if */ + else { /* Middle child */ + if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec < split_nrec) || + (internal->node_ptrs[idx - 1].node_nrec < split_nrec))) { + if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__split1(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to split child node") + } /* end else */ + } /* end else */ + + /* Locate node pointer for child (after split/redistribute) */ + /* Actually, this can be easily updated (for 2-node 
redistrib.) and shouldn't require re-searching */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, + udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp == 0) + HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") + if(cmp > 0) + idx++; + + /* Decrement the number of redistribution retries left */ + retries--; + } /* end while */ + } /* end block */ + + /* Check if this node is left/right-most */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_LEFT; + } /* end if */ + else if(idx == internal->nrec) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_RIGHT; + } /* end else */ + } /* end if */ + + /* Attempt to insert node */ + if(depth > 1) { + if(H5B2__insert_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, internal, udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node") + } /* end if */ + else { + if(H5B2__insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, internal, udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node") + } /* end else */ + + /* Update record count for node pointer to current node */ + curr_node_ptr->all_nrec++; + + /* Mark node as dirty */ + internal_flags |= H5AC__DIRTIED_FLAG; + +done: + /* Release the B-tree internal node */ + if(internal) { + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write && (internal_flags & H5AC__DIRTIED_FLAG)) + if(H5B2__shadow_internal(internal, dxpl_id, curr_node_ptr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node") + + /* Unprotect node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__insert_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__update_internal + * + * Purpose: Insert or modify a record in a B-tree internal node. + * If the record exists already, it is modified as if H5B2_modify + * was called). If it doesn't exist, it is inserted as if + * H5B2_insert was called. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Dec 24 2015 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__update_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, + H5B2_update_status_t *status, H5B2_nodepos_t curr_pos, void *parent, + void *udata, H5B2_modify_t op, void *op_data) +{ + H5B2_internal_t *internal = NULL; /* Pointer to internal node */ + unsigned internal_flags = H5AC__NO_FLAGS_SET; + int cmp; /* Comparison value of records */ + unsigned idx = 0; /* Location of record which matches key */ + H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of node */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
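
The preemptive split/redistribute loop in H5B2__insert_internal above boils down to a three-way policy on the child that has reached its split threshold: a left-most child can only borrow space from its right sibling, a right-most child only from its left sibling, and a middle child from either, with a small retry budget before a split is forced. The stand-alone sketch below mirrors only that policy; the enum, the per-child occupancy array and make_room() are made up for illustration (and the real middle case redistributes across three nodes at once rather than picking one sibling).

#include <stdio.h>

enum action { REDIST_WITH_RIGHT, REDIST_WITH_LEFT, SPLIT };

/* Decide how to make room in child 'idx' of an internal node whose children
 * hold 'child_nrec[0..nchild-1]' records, when child_nrec[idx] has reached
 * 'split_nrec'.  'retries' is the remaining redistribution budget
 * (0 forces a split). */
static enum action
make_room(const unsigned *child_nrec, unsigned nchild, unsigned idx,
          unsigned split_nrec, unsigned retries)
{
    if (retries == 0)
        return SPLIT;

    if (idx == 0)                                   /* left-most child */
        return (child_nrec[idx + 1] < split_nrec) ? REDIST_WITH_RIGHT : SPLIT;

    if (idx == nchild - 1)                          /* right-most child */
        return (child_nrec[idx - 1] < split_nrec) ? REDIST_WITH_LEFT : SPLIT;

    /* middle child: either neighbor will do */
    if (child_nrec[idx + 1] < split_nrec)
        return REDIST_WITH_RIGHT;
    if (child_nrec[idx - 1] < split_nrec)
        return REDIST_WITH_LEFT;
    return SPLIT;
}

int main(void)
{
    unsigned child_nrec[3] = {8, 8, 5};     /* children of one internal node */

    /* Child 1 is full; its right sibling still has room */
    printf("%d\n", make_room(child_nrec, 3, 1, 8, 2));  /* REDIST_WITH_RIGHT */
    printf("%d\n", make_room(child_nrec, 3, 0, 8, 2));  /* SPLIT: child 1 is full too */
    return 0;
}

The retry budget matters because a redistribution can move the insertion point into a sibling that is itself at the split threshold; without the cap, two nearly full siblings could shuffle records back and forth indefinitely, so the loop eventually forces a split.
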
*/ + HDassert(hdr); + HDassert(depth > 0); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, curr_node_ptr, depth, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + + /* Sanity check number of records */ + HDassert(internal->nrec == curr_node_ptr->node_nrec); + + /* Locate node pointer for child */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + + /* Check for modifying existing record */ + if(0 == cmp) { + hbool_t changed = FALSE; /* Whether the 'modify' callback changed the record */ + + /* Make callback for current record */ + if((op)(H5B2_INT_NREC(internal, hdr, idx), op_data, &changed) < 0) { + /* Make certain that the callback didn't modify the value if it failed */ + HDassert(changed == FALSE); + + HGOTO_ERROR(H5E_BTREE, H5E_CANTMODIFY, FAIL, "'modify' callback failed for B-tree update operation") + } /* end if */ + + /* Mark the node as dirty if it changed */ + internal_flags |= (changed ? H5AC__DIRTIED_FLAG : 0); + + /* Indicate that the record was modified */ + *status = H5B2_UPDATE_MODIFY_DONE; + } /* end if */ + else { + /* Adjust index to leave room for node to insert */ + if(cmp > 0) + idx++; + + /* Check if this node is left/right-most */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_LEFT; + } /* end if */ + else if(idx == internal->nrec) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_RIGHT; + } /* end else */ + } /* end if */ + + /* Attempt to update record in child */ + if(depth > 1) { + if(H5B2__update_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], status, next_pos, internal, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in internal B-tree node") + } /* end if */ + else { + if(H5B2__update_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], status, next_pos, internal, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update record in leaf B-tree node") + } /* end else */ + + /* Take actions based on child's status report */ + switch(*status) { + case H5B2_UPDATE_MODIFY_DONE: + /* No action */ + break; + + case H5B2_UPDATE_SHADOW_DONE: + /* If child node was shadowed (if SWMR is enabled), mark this node dirty */ + if(hdr->swmr_write) + internal_flags |= H5AC__DIRTIED_FLAG; + + /* No further modifications up the tree are necessary though, so downgrade to merely "modified" */ + *status = H5B2_UPDATE_MODIFY_DONE; + break; + + case H5B2_UPDATE_INSERT_DONE: + /* Mark node as dirty */ + internal_flags |= H5AC__DIRTIED_FLAG; + + /* Update total record count for node pointer to current node */ + curr_node_ptr->all_nrec++; + break; + + case H5B2_UPDATE_INSERT_CHILD_FULL: + /* Split/redistribute this node */ + if(internal->nrec == hdr->node_info[depth].split_nrec) { + hbool_t could_split = FALSE; /* Whether the child node could split */ + + if(idx == 0) { /* Left-most child */ + /* Check for left-most child and its neighbor being close to full */ + if((internal->node_ptrs[idx].node_nrec + internal->node_ptrs[idx + 1].node_nrec) + >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) + could_split 
= TRUE; + } /* end if */ + else if(idx == internal->nrec) { /* Right-most child */ + /* Check for right-most child and its neighbor being close to full */ + if((internal->node_ptrs[idx - 1].node_nrec + internal->node_ptrs[idx].node_nrec) + >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) + could_split = TRUE; + } /* end else-if */ + else { /* Middle child */ + /* Check for middle child and its left neighbor being close to full */ + if((internal->node_ptrs[idx - 1].node_nrec + internal->node_ptrs[idx].node_nrec) + >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) + could_split = TRUE; + /* Check for middle child and its right neighbor being close to full */ + else if((internal->node_ptrs[idx].node_nrec + internal->node_ptrs[idx + 1].node_nrec) + >= ((hdr->node_info[depth - 1].split_nrec * 2) - 1)) + could_split = TRUE; + } /* end if */ + + /* If this node is full and the child node insertion could + * cause a split, punt back up to caller, leaving the + * "insert child full" status. + */ + if(could_split) { + /* Release the internal B-tree node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + internal = NULL; + + /* Punt back to caller */ + HGOTO_DONE(SUCCEED); + } /* end if */ + } /* end if */ + + /* Release the internal B-tree node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + internal = NULL; + + /* Indicate that the record was inserted */ + *status = H5B2_UPDATE_INSERT_DONE; + + /* Dodge sideways into inserting a record into this node */ + if(H5B2__insert_internal(hdr, dxpl_id, depth, parent_cache_info_flags_ptr, curr_node_ptr, curr_pos, parent, udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into internal B-tree node") + break; + + case H5B2_UPDATE_UNKNOWN: + default: + HDassert(0 && "Invalid update status"); + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "invalid update status") + } /* end switch */ + } /* end else */ + +done: + /* Release the internal B-tree node */ + if(internal) { + /* Check if we should shadow this node */ + if(hdr->swmr_write && (internal_flags & H5AC__DIRTIED_FLAG)) { + /* Attempt to shadow the node if doing SWMR writes */ + if(H5B2__shadow_internal(internal, dxpl_id, curr_node_ptr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal B-tree node") + + /* Change the state to "shadowed" if only modified currently */ + /* (Triggers parent to be marked dirty) */ + if(*status == H5B2_UPDATE_MODIFY_DONE) + *status = H5B2_UPDATE_SHADOW_DONE; + } /* end if */ + + /* Unprotect node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, internal, internal_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__update_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__shadow_internal + * + * Purpose: "Shadow" an internal node - copy it to a new location, + * leaving the data in the old location intact (for now). + * This is done when writing in SWMR mode to ensure that + * readers do not see nodes that are out of date with + * respect to each other and thereby inconsistent. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Apr 27 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5B2__shadow_internal(H5B2_internal_t *internal, hid_t dxpl_id, + H5B2_node_ptr_t *curr_node_ptr) +{ + H5B2_hdr_t *hdr; /* B-tree header */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* + * Check arguments. + */ + HDassert(internal); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + hdr = internal->hdr; + HDassert(hdr); + HDassert(hdr->swmr_write); + + /* We only need to shadow the node if it has not been shadowed since the + * last time the header was flushed, as otherwise it will be unreachable by + * the readers so there will be no need to shadow. To check if it has been + * shadowed, compare the epoch of this node and the header. If this node's + * epoch is <= to the header's, it hasn't been shadowed yet. */ + if(internal->shadow_epoch <= hdr->shadow_epoch) { + haddr_t new_node_addr; /* Address to move node to */ + + /* + * We must clone the old node so readers with an out-of-date version of + * the parent can still see the correct number of children, via the + * shadowed node. Remove it from cache but do not mark it free on disk. + */ + /* Allocate space for the cloned node */ + if(HADDR_UNDEF == (new_node_addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move B-tree node") + + /* Move the location of the node on the disk */ + if(H5AC_move_entry(hdr->f, H5AC_BT2_INT, curr_node_ptr->addr, new_node_addr, dxpl_id) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTMOVE, FAIL, "unable to move B-tree node") + curr_node_ptr->addr = new_node_addr; + + /* Should free the space in the file, but this is not supported by + * SWMR_WRITE code yet - QAK, 2016/12/01 + */ + + /* Set shadow epoch for node ahead of header */ + internal->shadow_epoch = hdr->shadow_epoch + 1; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__shadow_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__remove_internal + * + * Purpose: Removes a record from a B-tree node. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 3 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased, + void *swap_loc, void *swap_parent, uint16_t depth, H5AC_info_t *parent_cache_info, + unsigned *parent_cache_info_flags_ptr, H5B2_nodepos_t curr_pos, + H5B2_node_ptr_t *curr_node_ptr, void *udata, H5B2_remove_t op, void *op_data) +{ + H5AC_info_t *new_cache_info; /* Pointer to new cache info */ + unsigned *new_cache_info_flags_ptr = NULL; + H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */ + H5B2_internal_t *internal; /* Pointer to internal node */ + H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */ + unsigned internal_flags = H5AC__NO_FLAGS_SET; + haddr_t internal_addr = HADDR_UNDEF; /* Address of internal node */ + size_t merge_nrec; /* Number of records to merge node at */ + hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
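
H5B2__shadow_internal above relocates a node at most once per header flush: the node's shadow_epoch is compared with the header's, and after a move the node's epoch is set one ahead of the header's so further modifications in the same flush window leave it in place. The toy model below (plain integers standing in for file addresses and the metadata cache, not HDF5 code) is one way to see that invariant; it assumes the header's epoch is bumped whenever the header is flushed.

#include <assert.h>
#include <stdint.h>

struct hdr  { uint64_t shadow_epoch; };
struct node { uint64_t shadow_epoch; uint64_t addr; };

static uint64_t next_addr = 1000;   /* stand-in for file-space allocation */

/* Shadow (relocate) the node only if it has not been shadowed since the
 * last header flush; afterwards its epoch is one ahead of the header's. */
static void shadow(struct hdr *h, struct node *n)
{
    if (n->shadow_epoch <= h->shadow_epoch) {
        n->addr = next_addr++;              /* move to a fresh address   */
        n->shadow_epoch = h->shadow_epoch + 1;
    }
}

/* A flush makes the current tree visible to readers; bumping the header
 * epoch forces the next modification of each node to shadow it again. */
static void flush(struct hdr *h) { h->shadow_epoch++; }

int main(void)
{
    struct hdr  h = { 1 };
    struct node n = { 1, 100 };

    shadow(&h, &n);                 /* first dirty after a flush: moves  */
    uint64_t a1 = n.addr;
    shadow(&h, &n);                 /* dirtied again: no second move     */
    assert(n.addr == a1);

    flush(&h);                      /* readers may now hold address a1   */
    shadow(&h, &n);                 /* next dirty must move again        */
    assert(n.addr != a1);
    return 0;
}
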
*/ + HDassert(hdr); + HDassert(depth > 0); + HDassert(parent_cache_info); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent_cache_info, curr_node_ptr, depth, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + internal_addr = curr_node_ptr->addr; + + /* Determine the correct number of records to merge at */ + merge_nrec = hdr->node_info[depth - 1].merge_nrec; + + /* Check for needing to collapse the root node */ + /* (The root node is the only internal node allowed to have 1 record) */ + if(internal->nrec == 1 && + ((internal->node_ptrs[0].node_nrec + internal->node_ptrs[1].node_nrec) <= ((merge_nrec * 2) + 1))) { + + /* Merge children of root node */ + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + + /* Let the cache know that the object is deleted */ + internal_flags |= H5AC__DELETED_FLAG; + if(!hdr->swmr_write) + internal_flags |= H5AC__FREE_FILE_SPACE_FLAG; + + /* Reset information in header's root node pointer */ + curr_node_ptr->addr = internal->node_ptrs[0].addr; + curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec; + + /* Update flush dependency for child, if using SWMR */ + if(hdr->swmr_write) + if(H5B2__update_flush_depend(hdr, dxpl_id, depth, curr_node_ptr, internal, hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child node to new parent") + + /* Indicate that the level of the B-tree decreased */ + *depth_decreased = TRUE; + + /* Set pointers for advancing to child node */ + new_cache_info = parent_cache_info; + new_cache_info_flags_ptr = parent_cache_info_flags_ptr; + new_node_ptr = curr_node_ptr; + + /* Set flag to indicate root was collapsed */ + collapsed_root = TRUE; + + /* Indicate position of next node */ + next_pos = H5B2_POS_ROOT; + } /* end if */ + /* Merge or redistribute child node pointers, if necessary */ + else { + unsigned idx = 0; /* Location of record which matches key */ + int cmp = 0; /* Comparison value of records */ + unsigned retries; /* Number of times to attempt redistribution */ + + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write) { + if(H5B2__shadow_internal(internal, dxpl_id, curr_node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node") + internal_addr = curr_node_ptr->addr; + } /* end if */ + + /* Locate node pointer for child */ + if(swap_loc) + idx = 0; + else { + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp >= 0) + idx++; + } /* end else */ + + /* Set the number of redistribution retries */ + /* This takes care of the case where a B-tree node needs to be + * redistributed, but redistributing the node causes the index + * for removal to move to another node, which also needs to be + * redistributed. 
Now, we loop trying to redistribute and then + * eventually force a merge */ + retries = 2; + + /* Preemptively merge/redistribute a node we will enter */ + while(internal->node_ptrs[idx].node_nrec == merge_nrec) { + /* Attempt to redistribute records among children */ + /* (NOTE: These 2-node redistributions should actually get the + * record to promote from the node with more records. - QAK) + */ + /* (NOTE: This code is the same in both H5B2__remove_internal() and + * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK) + */ + if(idx == 0) { /* Left-most child */ + if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end if */ + else if(idx == internal->nrec) { /* Right-most child */ + if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end if */ + else { /* Middle child */ + if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) || + (internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) { + if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end else */ + + /* Locate node pointer for child (after merge/redistribute) */ + if(swap_loc) + idx = 0; + else { +/* Actually, this can be easily updated (for 2-node redistrib.) 
and shouldn't require re-searching */ + if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp >= 0) + idx++; + } /* end else */ + + /* Decrement the number of redistribution retries left */ + retries--; + } /* end while */ + + /* Handle deleting a record from an internal node */ + if(!swap_loc && cmp == 0) { + swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1); + swap_parent = internal; + } /* end if */ + + /* Swap record to delete with record from leaf, if we are the last internal node */ + if(swap_loc && depth == 1) + if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "Can't swap records in B-tree") + + /* Set pointers for advancing to child node */ + new_cache_info_flags_ptr = &internal_flags; + new_cache_info = &internal->cache_info; + new_node_ptr = &internal->node_ptrs[idx]; + + /* Indicate position of next node */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_LEFT; + } /* end if */ + else if(idx == internal->nrec) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_RIGHT; + } /* end if */ + } /* end if */ + } /* end else */ + + /* Attempt to remove record from child node */ + if(depth > 1) { + if(H5B2__remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, swap_parent, (uint16_t)(depth - 1), + new_cache_info, new_cache_info_flags_ptr, next_pos, new_node_ptr, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") + } /* end if */ + else { + if(H5B2__remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, new_cache_info, udata, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") + } /* end else */ + + /* Update record count for node pointer to current node */ + if(!collapsed_root) + new_node_ptr->all_nrec--; + + /* Mark node as dirty */ + if(!(hdr->swmr_write && collapsed_root)) + internal_flags |= H5AC__DIRTIED_FLAG; + +#ifdef H5B2_DEBUG + H5B2__assert_internal((!collapsed_root ? 
(curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal); +#endif /* H5B2_DEBUG */ + +done: + /* Release the B-tree internal node */ + if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__remove_internal() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__remove_internal_by_idx + * + * Purpose: Removes a record from a B-tree node, according to the offset + * in the B-tree records + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Nov 14 2006 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, + hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth, + H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr, + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n, + H5B2_remove_t op, void *op_data) +{ + H5AC_info_t *new_cache_info; /* Pointer to new cache info */ + unsigned *new_cache_info_flags_ptr = NULL; + H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */ + H5B2_internal_t *internal; /* Pointer to internal node */ + H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */ + unsigned internal_flags = H5AC__NO_FLAGS_SET; + haddr_t internal_addr = HADDR_UNDEF; /* Address of internal node */ + size_t merge_nrec; /* Number of records to merge node at */ + hbool_t collapsed_root = FALSE; /* Whether the root was collapsed */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(depth > 0); + HDassert(parent_cache_info); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent_cache_info, curr_node_ptr, depth, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + internal_addr = curr_node_ptr->addr; + HDassert(internal->nrec == curr_node_ptr->node_nrec); + HDassert(depth == hdr->depth || internal->nrec > 1); + + /* Determine the correct number of records to merge at */ + merge_nrec = hdr->node_info[depth - 1].merge_nrec; + + /* Check for needing to collapse the root node */ + /* (The root node is the only internal node allowed to have 1 record) */ + if(internal->nrec == 1 && + ((internal->node_ptrs[0].node_nrec + internal->node_ptrs[1].node_nrec) <= ((merge_nrec * 2) + 1))) { + HDassert(depth == hdr->depth); + + /* Merge children of root node */ + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, 0) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + + /* Let the cache know that the object is deleted */ + internal_flags |= H5AC__DELETED_FLAG; + if(!hdr->swmr_write) + internal_flags |= H5AC__FREE_FILE_SPACE_FLAG; + + /* Reset information in header's root node pointer */ + curr_node_ptr->addr = internal->node_ptrs[0].addr; + curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec; + + /* Update flush dependency for child, if using SWMR */ + if(hdr->swmr_write) + if(H5B2__update_flush_depend(hdr, dxpl_id, depth, curr_node_ptr, internal, hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUPDATE, FAIL, "unable to update child node to new parent") + + /* Indicate that the level of the B-tree decreased */ + *depth_decreased = TRUE; + + /* Set pointers for advancing to child node */ + new_cache_info = parent_cache_info; + new_cache_info_flags_ptr = parent_cache_info_flags_ptr; + new_node_ptr = curr_node_ptr; + + /* Set flag to indicate root was collapsed */ + collapsed_root = TRUE; + + /* Indicate position of next node */ + next_pos = H5B2_POS_ROOT; + } /* end if */ + /* Merge or redistribute child node pointers, if necessary */ + else { + hsize_t orig_n = n; /* Original index looked for */ + unsigned idx; /* Location of record which matches key */ + hbool_t found = FALSE; /* Comparison value of records */ + unsigned retries; /* Number of times to attempt redistribution */ + + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write) { + if(H5B2__shadow_internal(internal, dxpl_id, curr_node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node") + internal_addr = curr_node_ptr->addr; + } /* end if */ + + /* Locate node pointer for child */ + if(swap_loc) + idx = 0; + else { + /* Search for record with correct index */ + for(idx = 0; idx < internal->nrec; idx++) { + /* Check which child node contains indexed record */ + if(internal->node_ptrs[idx].all_nrec >= n) { + /* Check if record is in this node */ + if(internal->node_ptrs[idx].all_nrec == n) { + /* Indicate the record was found and that the index + * in child nodes is zero from now on + */ + found = TRUE; + n = 0; + + /* Increment to next record */ + idx++; + } /* end if */ + + /* Break out of loop early */ + break; + } /* end if */ + + /* Decrement index we are looking for to account for the node we + * just advanced past. 
+ */ + n -= (internal->node_ptrs[idx].all_nrec + 1); + } /* end for */ + } /* end else */ + + /* Set the number of redistribution retries */ + /* This takes care of the case where a B-tree node needs to be + * redistributed, but redistributing the node causes the index + * for removal to move to another node, which also needs to be + * redistributed. Now, we loop trying to redistribute and then + * eventually force a merge */ + retries = 2; + + /* Preemptively merge/redistribute a node we will enter */ + while(internal->node_ptrs[idx].node_nrec == merge_nrec) { + /* Attempt to redistribute records among children */ + /* (NOTE: These 2-node redistributions should actually get the + * record to promote from the node with more records. - QAK) + */ + /* (NOTE: This code is the same in both H5B2__remove_internal() and + * H5B2__remove_internal_by_idx(), fix bugs in both places! - QAK) + */ + if(idx == 0) { /* Left-most child */ + if(retries > 0 && (internal->node_ptrs[idx + 1].node_nrec > merge_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end if */ + else if(idx == internal->nrec) { /* Right-most child */ + if(retries > 0 && (internal->node_ptrs[idx - 1].node_nrec > merge_nrec)) { + if(H5B2__redistribute2(hdr, dxpl_id, depth, internal, (idx - 1)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge2(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, (idx - 1)) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end if */ + else { /* Middle child */ + if(retries > 0 && ((internal->node_ptrs[idx + 1].node_nrec > merge_nrec) || + (internal->node_ptrs[idx - 1].node_nrec > merge_nrec))) { + if(H5B2__redistribute3(hdr, dxpl_id, depth, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTREDISTRIBUTE, FAIL, "unable to redistribute child node records") + } /* end if */ + else { + if(H5B2__merge3(hdr, dxpl_id, depth, curr_node_ptr, + parent_cache_info_flags_ptr, internal, &internal_flags, idx) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node") + } /* end else */ + } /* end else */ + + /* Locate node pointer for child (after merge/redistribute) */ + if(swap_loc) + idx = 0; + else { + /* Count from the orginal index value again */ + n = orig_n; + + /* Reset "found" flag - the record may have shifted during the + * redistribute/merge + */ + found = FALSE; + + /* Search for record with correct index */ + for(idx = 0; idx < internal->nrec; idx++) { + /* Check which child node contains indexed record */ + if(internal->node_ptrs[idx].all_nrec >= n) { + /* Check if record is in this node */ + if(internal->node_ptrs[idx].all_nrec == n) { + /* Indicate the record was found and that the index + * in child nodes is zero from now on + */ + found = TRUE; + n = 0; + + /* Increment to next record */ + idx++; + } /* end if */ + + /* Break out of loop early */ + break; + } /* end if */ + + /* Decrement index we are looking for to account for the node we + * just advanced past. 
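
The search loops above map a global record index onto this internal node by charging each child subtree its all_nrec records plus one for the separator record stored in the node itself. Below is a self-contained sketch of that mapping with concrete numbers; locate_by_index() and the plain arrays are hypothetical, and the real loop additionally tracks a 'found' flag and re-runs after a redistribution or merge.

#include <assert.h>
#include <stddef.h>

/* Map a 0-based global record index 'n' onto an internal node whose child
 * subtrees hold 'all_nrec[c]' records each (nrec separator records, so
 * nrec + 1 children).  Returns 0 and sets *child/*local when the record
 * lies inside a subtree; returns 1 and sets *child when it is the node's
 * own separator record. */
static int
locate_by_index(const size_t *all_nrec, size_t nrec, size_t n,
                size_t *child, size_t *local)
{
    size_t c;

    for (c = 0; c <= nrec; c++) {
        if (n < all_nrec[c]) {      /* inside child c's subtree */
            *child = c;
            *local = n;
            return 0;
        }
        if (n == all_nrec[c]) {     /* exactly the separator after child c */
            *child = c;
            return 1;
        }
        n -= all_nrec[c] + 1;       /* skip the subtree and its separator */
    }
    assert(!"index out of range");
    return -1;
}

int main(void)
{
    /* Internal node with 2 records and 3 children of 4, 3 and 5 records */
    size_t sizes[3] = {4, 3, 5};
    size_t child, local;

    assert(locate_by_index(sizes, 2, 2, &child, &local) == 0 && child == 0 && local == 2);
    assert(locate_by_index(sizes, 2, 4, &child, &local) == 1 && child == 0);  /* 1st separator */
    assert(locate_by_index(sizes, 2, 6, &child, &local) == 0 && child == 1 && local == 1);
    assert(locate_by_index(sizes, 2, 8, &child, &local) == 1 && child == 1);  /* 2nd separator */
    assert(locate_by_index(sizes, 2, 13, &child, &local) == 0 && child == 2 && local == 4);
    return 0;
}
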
+ */ + n -= (internal->node_ptrs[idx].all_nrec + 1); + } /* end for */ + } /* end else */ + + /* Decrement the number of redistribution retries left */ + retries--; + } /* end while */ + + /* Handle deleting a record from an internal node */ + if(!swap_loc && found) { + swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1); + swap_parent = internal; + } /* end if */ + + /* Swap record to delete with record from leaf, if we are the last internal node */ + if(swap_loc && depth == 1) + if(H5B2__swap_leaf(hdr, dxpl_id, depth, internal, &internal_flags, idx, swap_loc) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSWAP, FAIL, "can't swap records in B-tree") + + /* Set pointers for advancing to child node */ + new_cache_info_flags_ptr = &internal_flags; + new_cache_info = &internal->cache_info; + new_node_ptr = &internal->node_ptrs[idx]; + + /* Indicate position of next node */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_LEFT; + } /* end if */ + else if(idx == internal->nrec) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) + next_pos = H5B2_POS_RIGHT; + } /* end if */ + } /* end if */ + } /* end else */ + + /* Attempt to remove record from child node */ + if(depth > 1) { + if(H5B2__remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, swap_parent, (uint16_t)(depth - 1), + new_cache_info, new_cache_info_flags_ptr, new_node_ptr, next_pos, n, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node") + } /* end if */ + else { + if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, new_cache_info, (unsigned)n, op, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node") + } /* end else */ + + /* Update record count for node pointer to child node */ + if(!collapsed_root) + new_node_ptr->all_nrec--; + + /* Mark node as dirty */ + if(!(hdr->swmr_write && collapsed_root)) + internal_flags |= H5AC__DIRTIED_FLAG; + +#ifdef H5B2_DEBUG + H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal); +#endif /* H5B2_DEBUG */ + +done: + /* Release the B-tree internal node */ + if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__remove_internal_by_idx() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__internal_free + * + * Purpose: Destroys a B-tree internal node in memory. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 2 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__internal_free(H5B2_internal_t *internal) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* + * Check arguments. 
+     */
+    HDassert(internal);
+
+    /* Release internal node's native key buffer */
+    if(internal->int_native)
+        internal->int_native = (uint8_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].nat_rec_fac, internal->int_native);
+
+    /* Release internal node's node pointer buffer */
+    if(internal->node_ptrs)
+        internal->node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(internal->hdr->node_info[internal->depth].node_ptr_fac, internal->node_ptrs);
+
+    /* Decrement ref. count on B-tree header */
+    if(H5B2__hdr_decr(internal->hdr) < 0)
+        HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header")
+
+    /* Sanity check */
+    HDassert(NULL == internal->top_proxy);
+
+    /* Free B-tree internal node info */
+    internal = H5FL_FREE(H5B2_internal_t, internal);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__internal_free() */
+
+#ifdef H5B2_DEBUG
+
+/*-------------------------------------------------------------------------
+ * Function:    H5B2__assert_internal
+ *
+ * Purpose:     Verify that an internal node is mostly sane
+ *
+ * Return:      Non-negative on success, negative on failure
+ *
+ * Programmer:  Quincey Koziol
+ *              koziol@ncsa.uiuc.edu
+ *              Feb 19 2005
+ *
+ *-------------------------------------------------------------------------
+ */
+H5_ATTR_PURE herr_t
+H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal)
+{
+    hsize_t tot_all_nrec;       /* Total number of records at or below this node */
+    uint16_t u, v;              /* Local index variables */
+
+    /* General sanity checking on node */
+    HDassert(internal->nrec <= hdr->node_info->split_nrec);
+
+    /* Sanity checking on node pointers */
+    tot_all_nrec = internal->nrec;
+    for(u = 0; u < internal->nrec + 1; u++) {
+        tot_all_nrec += internal->node_ptrs[u].all_nrec;
+
+        HDassert(H5F_addr_defined(internal->node_ptrs[u].addr));
+        HDassert(internal->node_ptrs[u].addr > 0);
+        for(v = 0; v < u; v++)
+            HDassert(internal->node_ptrs[u].addr != internal->node_ptrs[v].addr);
+    } /* end for */
+
+    /* Sanity check all_nrec total in parent */
+    if(parent_all_nrec > 0)
+        HDassert(tot_all_nrec == parent_all_nrec);
+
+    return(0);
+} /* end H5B2__assert_internal() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5B2__assert_internal2
+ *
+ * Purpose:     Verify that internal nodes are mostly sane
+ *
+ * Return:      Non-negative on success, negative on failure
+ *
+ * Programmer:  Quincey Koziol
+ *              koziol@ncsa.uiuc.edu
+ *              Feb 19 2005
+ *
+ *-------------------------------------------------------------------------
+ */
+H5_ATTR_PURE herr_t
+H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2)
+{
+    hsize_t tot_all_nrec;       /* Total number of records at or below this node */
+    uint16_t u, v;              /* Local index variables */
+
+    /* General sanity checking on node */
+    HDassert(internal->nrec <= hdr->node_info->split_nrec);
+
+    /* Sanity checking on node pointers */
+    tot_all_nrec = internal->nrec;
+    for(u = 0; u < internal->nrec + 1; u++) {
+        tot_all_nrec += internal->node_ptrs[u].all_nrec;
+
+        HDassert(H5F_addr_defined(internal->node_ptrs[u].addr));
+        HDassert(internal->node_ptrs[u].addr > 0);
+        for(v = 0; v < u; v++)
+            HDassert(internal->node_ptrs[u].addr != internal->node_ptrs[v].addr);
+        for(v = 0; v < internal2->nrec + 1; v++)
+            HDassert(internal->node_ptrs[u].addr != internal2->node_ptrs[v].addr);
+    } /* end for */
+
+    /* Sanity check all_nrec total in parent */
+    if(parent_all_nrec > 0)
+ HDassert(tot_all_nrec == parent_all_nrec); + + return(0); +} /* end H5B2__assert_internal2() */ +#endif /* H5B2_DEBUG */ + diff --git a/src/H5B2leaf.c b/src/H5B2leaf.c new file mode 100644 index 0000000..4f8b8e6 --- /dev/null +++ b/src/H5B2leaf.c @@ -0,0 +1,1065 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5B2leaf.c + * Dec 01 2016 + * Quincey Koziol <koziol@lbl.gov> + * + * Purpose: Routines for managing v2 B-tree leaf ndoes. + * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5B2module.h" /* This source code file is part of the H5B2 module */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5B2pkg.h" /* v2 B-trees */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5MMprivate.h" /* Memory management */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5B2__shadow_leaf(H5B2_leaf_t *leaf, hid_t dxpl_id, + H5B2_node_ptr_t *curr_node_ptr); + + +/*********************/ +/* Package Variables */ +/*********************/ + +/* Declare a free list to manage the H5B2_leaf_t struct */ +H5FL_DEFINE(H5B2_leaf_t); + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + + + +/*------------------------------------------------------------------------- + * Function: H5B2__create_leaf + * + * Purpose: Creates empty leaf node of a B-tree and update node pointer + * to point to it. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 2 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, H5B2_node_ptr_t *node_ptr) +{ + H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */ + hbool_t inserted = FALSE; /* Whether the leaf node was inserted into cache */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(node_ptr); + + /* Allocate memory for leaf information */ + if(NULL == (leaf = H5FL_CALLOC(H5B2_leaf_t))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree leaf info") + + /* Increment ref. count on B-tree header */ + if(H5B2__hdr_incr(hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINC, FAIL, "can't increment ref. count on B-tree header") + + /* Share B-tree header information */ + leaf->hdr = hdr; + + /* Allocate space for the native keys in memory */ + if(NULL == (leaf->leaf_native = (uint8_t *)H5FL_FAC_MALLOC(hdr->node_info[0].nat_rec_fac))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree leaf native keys") + HDmemset(leaf->leaf_native, 0, hdr->cls->nrec_size * hdr->node_info[0].max_nrec); + + /* Set parent */ + leaf->parent = parent; + + /* Set shadowed epoch to header's epoch */ + leaf->shadow_epoch = hdr->shadow_epoch; + + /* Allocate space on disk for the leaf */ + if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree leaf node") + + /* Cache the new B-tree node */ + if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_BT2_LEAF, node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't add B-tree leaf to cache") + inserted = TRUE; + + /* Add leaf node as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, leaf) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, FAIL, "unable to add v2 B-tree node as child of proxy") + leaf->top_proxy = hdr->top_proxy; + } /* end if */ + +done: + if(ret_value < 0) { + if(leaf) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(leaf) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTREMOVE, FAIL, "unable to remove v2 B-tree leaf node from cache") + + /* Release leaf node's disk space */ + if(H5F_addr_defined(node_ptr->addr) && H5MF_xfree(hdr->f, H5FD_MEM_BTREE, dxpl_id, node_ptr->addr, (hsize_t)hdr->node_size) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release file space for v2 B-tree leaf node") + + /* Destroy leaf node */ + if(H5B2__leaf_free(leaf) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree leaf node") + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__create_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__protect_leaf + * + * Purpose: "Protect" an leaf node in the metadata cache + * + * Return: Pointer to leaf node on success/NULL on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * May 5 2010 + * + *------------------------------------------------------------------------- + */ +H5B2_leaf_t * +H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, + H5B2_node_ptr_t *node_ptr, hbool_t shadow, unsigned flags) +{ + H5B2_leaf_cache_ud_t udata; /* User-data for callback */ + H5B2_leaf_t *leaf; /* v2 B-tree leaf node */ + H5B2_leaf_t *ret_value = NULL; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
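
The error path of H5B2__create_leaf above undoes only the work that actually happened: the cache entry is removed only if 'inserted' was set, file space is freed only if the address was defined, and the in-memory leaf is always released. A generic rendering of that pattern with fabricated helpers (alloc_space, cache_insert and friends are stand-ins, not the H5MF/H5AC API):

#include <stdbool.h>
#include <stdlib.h>

struct leaf { int dummy; };

/* Hypothetical stand-ins for "allocate file space", "insert into cache" */
static bool alloc_space(long *addr)      { *addr = 4096; return true; }
static bool cache_insert(struct leaf *l) { (void)l; return true; }
static void cache_remove(struct leaf *l) { (void)l; }
static void free_space(long addr)        { (void)addr; }

/* Create a leaf; on any failure, undo exactly the steps that succeeded. */
static struct leaf *create_leaf(long *addr_out)
{
    struct leaf *leaf = NULL;
    long addr = -1;                    /* -1 == "no space allocated yet" */
    bool inserted = false;

    if ((leaf = calloc(1, sizeof(*leaf))) == NULL)
        goto error;
    if (!alloc_space(&addr))
        goto error;
    if (!cache_insert(leaf))
        goto error;
    inserted = true;

    *addr_out = addr;
    return leaf;

error:
    if (inserted)                      /* only undo what actually happened */
        cache_remove(leaf);
    if (addr >= 0)
        free_space(addr);
    free(leaf);
    return NULL;
}

int main(void)
{
    long addr;
    struct leaf *l = create_leaf(&addr);
    free(l);    /* in this toy model nothing else owns the leaf */
    return l ? 0 : 1;
}
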
*/ + HDassert(hdr); + HDassert(node_ptr); + HDassert(H5F_addr_defined(node_ptr->addr)); + + /* only H5AC__READ_ONLY_FLAG may appear in flags */ + HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0); + + /* Set up user data for callback */ + udata.f = hdr->f; + udata.hdr = hdr; + udata.parent = parent; + udata.nrec = node_ptr->node_nrec; + + /* Protect the leaf node */ + if(NULL == (leaf = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, node_ptr->addr, &udata, flags))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree leaf node") + + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == leaf->top_proxy) { + /* Add leaf node as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, leaf) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTSET, NULL, "unable to add v2 B-tree leaf node as child of proxy") + leaf->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Shadow the node, if requested */ + if(shadow) + if(H5B2__shadow_leaf(leaf, dxpl_id, node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, NULL, "unable to shadow leaf node") + + /* Set return value */ + ret_value = leaf; + +done: + /* Clean up on error */ + if(!ret_value) { + /* Release the leaf node, if it was protected */ + if(leaf) { + /* Remove from v2 B-tree's proxy, if added */ + if(leaf->top_proxy) { + if(H5AC_proxy_entry_remove_child(leaf->top_proxy, leaf) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, NULL, "unable to destroy flush dependency between leaf node and v2 B-tree 'top' proxy") + leaf->top_proxy = NULL; + } /* end if */ + + /* Unprotect leaf node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, NULL, "unable to unprotect v2 B-tree leaf node, address = %llu", (unsigned long long)node_ptr->addr) + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__protect_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__neighbor_leaf + * + * Purpose: Locate a record relative to the specified information in a + * B-tree leaf node and return that information by filling in + * fields of the + * caller-supplied UDATA pointer depending on the type of leaf node + * requested. The UDATA can point to additional data passed + * to the key comparison function. + * + * The 'OP' routine is called with the record found and the + * OP_DATA pointer, to allow caller to return information about + * the record. + * + * The RANGE indicates whether to search for records less than or + * equal to, or greater than or equal to the information passed + * in with UDATA. + * + * Return: Non-negative on success, negative on failure. + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 9 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, + void *neighbor_loc, H5B2_compare_t comp, void *parent, void *udata, H5B2_found_t op, + void *op_data) +{ + H5B2_leaf_t *leaf; /* Pointer to leaf node */ + unsigned idx = 0; /* Location of record which matches key */ + int cmp = 0; /* Comparison value of records */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + HDassert(op); + + /* Lock current B-tree node */ + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, curr_node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + + /* Locate node pointer for child */ + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp > 0) + idx++; + else + if(cmp == 0 && comp == H5B2_COMPARE_GREATER) + idx++; + + /* Set the neighbor location, if appropriate */ + if(comp == H5B2_COMPARE_LESS) { + if(idx > 0) + neighbor_loc = H5B2_LEAF_NREC(leaf, hdr, idx - 1); + } /* end if */ + else { + HDassert(comp == H5B2_COMPARE_GREATER); + + if(idx < leaf->nrec) + neighbor_loc = H5B2_LEAF_NREC(leaf, hdr, idx); + } /* end else */ + + /* Make callback if neighbor record has been found */ + if(neighbor_loc) { + /* Make callback for current record */ + if((op)(neighbor_loc, op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "'found' callback failed for B-tree neighbor operation") + } /* end if */ + else + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree") + +done: + /* Release the B-tree leaf node */ + if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__neighbor_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__insert_leaf + * + * Purpose: Adds a new record to a B-tree leaf node. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 3 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, + H5B2_nodepos_t curr_pos, void *parent, void *udata) +{ + H5B2_leaf_t *leaf; /* Pointer to leaf node */ + unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting the leaf node */ + int cmp; /* Comparison value of records */ + unsigned idx = 0; /* Location of record which matches key */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
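
The index arithmetic in H5B2__neighbor_leaf above reduces to a simple rule over a sorted record array: position idx at the first record not less than the key, step past the key itself when a strictly greater neighbor is wanted, and then the LESS neighbor is idx - 1 while the GREATER neighbor is idx. A stand-alone version over plain ints (a linear scan stands in for the binary search done by H5B2__locate_record):

#include <assert.h>
#include <stddef.h>

enum dir { LESS, GREATER };

/* Return a pointer to the greatest record < key (LESS) or the smallest
 * record > key (GREATER) in a sorted array, or NULL if none exists. */
static const int *
neighbor(const int *rec, size_t nrec, int key, enum dir d)
{
    size_t idx = 0;

    while (idx < nrec && rec[idx] < key)
        idx++;
    if (idx < nrec && rec[idx] == key && d == GREATER)
        idx++;                      /* skip the key itself */

    if (d == LESS)
        return (idx > 0) ? &rec[idx - 1] : NULL;
    return (idx < nrec) ? &rec[idx] : NULL;
}

int main(void)
{
    int rec[] = {10, 20, 30, 40};

    assert(*neighbor(rec, 4, 25, LESS) == 20);
    assert(*neighbor(rec, 4, 25, GREATER) == 30);
    assert(*neighbor(rec, 4, 20, LESS) == 10);      /* strictly less     */
    assert(*neighbor(rec, 4, 20, GREATER) == 30);   /* strictly greater  */
    assert(neighbor(rec, 4, 5, LESS) == NULL);      /* no smaller record */
    return 0;
}
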
*/ + HDassert(hdr); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, curr_node_ptr, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + + /* Must have a leaf node with enough space to insert a record now */ + HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec); + + /* Sanity check number of records */ + HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); + HDassert(leaf->nrec == curr_node_ptr->node_nrec); + + /* Check for inserting into empty leaf */ + if(leaf->nrec == 0) + idx = 0; + else { + /* Find correct location to insert this record */ + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp == 0) + HGOTO_ERROR(H5E_BTREE, H5E_EXISTS, FAIL, "record is already in B-tree") + if(cmp > 0) + idx++; + + /* Make room for new record */ + if(idx < leaf->nrec) + HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size * (leaf->nrec - idx)); + } /* end else */ + + /* Make callback to store record in native form */ + if((hdr->cls->store)(H5B2_LEAF_NREC(leaf, hdr, idx), udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into leaf node") + + /* Mark the node as dirty */ + leaf_flags |= H5AC__DIRTIED_FLAG; + + /* Update record count for node pointer to current node */ + curr_node_ptr->all_nrec++; + curr_node_ptr->node_nrec++; + + /* Update record count for current node */ + leaf->nrec++; + + /* Check for new record being the min or max for the tree */ + /* (Don't use 'else' for the idx check, to allow for root leaf node) */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->min_native_rec == NULL) + if(NULL == (hdr->min_native_rec = H5MM_malloc(hdr->cls->nrec_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree min record info") + HDmemcpy(hdr->min_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); + } /* end if */ + } /* end if */ + if(idx == (unsigned)(leaf->nrec - 1)) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->max_native_rec == NULL) + if(NULL == (hdr->max_native_rec = H5MM_malloc(hdr->cls->nrec_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree max record info") + HDmemcpy(hdr->max_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); + } /* end if */ + } /* end if */ + } /* end if */ + +done: + /* Release the B-tree leaf node (marked as dirty) */ + if(leaf) { + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write && (leaf_flags & H5AC__DIRTIED_FLAG)) + if(H5B2__shadow_leaf(leaf, dxpl_id, curr_node_ptr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node") + + /* Unprotect leaf node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, leaf_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__insert_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__update_leaf + * + * Purpose: Insert or modify a record in a B-tree leaf node. 
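
H5B2__insert_leaf above is an ordinary sorted-array insertion: locate the slot, shift the tail of the record array up by one record width with HDmemmove, then store the new record in place. A minimal stand-alone equivalent over a hypothetical fixed-size record type:

#include <assert.h>
#include <string.h>

#define MAX_NREC 8

struct leaf {
    size_t nrec;
    int    rec[MAX_NREC];   /* records kept in ascending order */
};

/* Insert 'key' into a sorted leaf; fails on duplicates or a full node. */
static int leaf_insert(struct leaf *l, int key)
{
    size_t idx = 0;

    if (l->nrec == MAX_NREC)
        return -1;                          /* caller must split first  */

    while (idx < l->nrec && l->rec[idx] < key)
        idx++;
    if (idx < l->nrec && l->rec[idx] == key)
        return -1;                          /* record already in tree   */

    /* Make room: shift the tail up by one record width */
    memmove(&l->rec[idx + 1], &l->rec[idx], (l->nrec - idx) * sizeof(l->rec[0]));
    l->rec[idx] = key;                      /* "store in native form"   */
    l->nrec++;
    return 0;
}

int main(void)
{
    struct leaf l = { 3, {10, 20, 30} };

    assert(leaf_insert(&l, 25) == 0);
    assert(l.nrec == 4 && l.rec[2] == 25 && l.rec[3] == 30);
    assert(leaf_insert(&l, 25) == -1);      /* duplicate rejected */
    return 0;
}
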
+ * If the record exists already, it is modified as if H5B2_modify + * was called). If it doesn't exist, it is inserted as if + * H5B2_insert was called. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Dec 23 2015 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__update_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, + H5B2_update_status_t *status, H5B2_nodepos_t curr_pos, void *parent, + void *udata, H5B2_modify_t op, void *op_data) +{ + H5B2_leaf_t *leaf; /* Pointer to leaf node */ + unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting the leaf node */ + int cmp = -1; /* Comparison value of records */ + unsigned idx = 0; /* Location of record which matches key */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. */ + HDassert(hdr); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, curr_node_ptr, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + + /* Sanity check number of records */ + HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); + HDassert(leaf->nrec == curr_node_ptr->node_nrec); + + /* Check for inserting into empty leaf */ + if(leaf->nrec == 0) + idx = 0; + else { + /* Find correct location to insert this record */ + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + + /* Check for inserting a record */ + if(0 != cmp) { + /* Check if the leaf node is full */ + if(curr_node_ptr->node_nrec == hdr->node_info[0].split_nrec) { + /* Indicate that the leaf is full, but we need to insert */ + *status = H5B2_UPDATE_INSERT_CHILD_FULL; + + /* Let calling routine handle insertion */ + HGOTO_DONE(SUCCEED) + } /* end if */ + + /* Adjust index to leave room for record to insert */ + if(cmp > 0) + idx++; + + /* Make room for new record */ + if(idx < leaf->nrec) + HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx + 1), H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size * (leaf->nrec - idx)); + } /* end if */ + } /* end else */ + + /* Check for modifying existing record */ + if(0 == cmp) { + hbool_t changed = FALSE; /* Whether the 'modify' callback changed the record */ + + /* Make callback for current record */ + if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data, &changed) < 0) { + /* Make certain that the callback didn't modify the value if it failed */ + HDassert(changed == FALSE); + + HGOTO_ERROR(H5E_BTREE, H5E_CANTMODIFY, FAIL, "'modify' callback failed for B-tree update operation") + } /* end if */ + + /* Mark the node as dirty if it changed */ + leaf_flags |= (changed ? 
H5AC__DIRTIED_FLAG : 0); + + /* Indicate that the record was modified */ + *status = H5B2_UPDATE_MODIFY_DONE; + } /* end if */ + else { + /* Must have a leaf node with enough space to insert a record now */ + HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec); + + /* Make callback to store record in native form */ + if((hdr->cls->store)(H5B2_LEAF_NREC(leaf, hdr, idx), udata) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into leaf node") + + /* Mark the node as dirty */ + leaf_flags |= H5AC__DIRTIED_FLAG; + + /* Indicate that the record was inserted */ + *status = H5B2_UPDATE_INSERT_DONE; + + /* Update record count for node pointer to current node */ + curr_node_ptr->all_nrec++; + curr_node_ptr->node_nrec++; + + /* Update record count for current node */ + leaf->nrec++; + } /* end else */ + + /* Check for new record being the min or max for the tree */ + /* (Don't use 'else' for the idx check, to allow for root leaf node) */ + if(H5B2_POS_MIDDLE != curr_pos) { + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->min_native_rec == NULL) + if(NULL == (hdr->min_native_rec = H5MM_malloc(hdr->cls->nrec_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree min record info") + HDmemcpy(hdr->min_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); + } /* end if */ + } /* end if */ + if(idx == (unsigned)(leaf->nrec - 1)) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->max_native_rec == NULL) + if(NULL == (hdr->max_native_rec = H5MM_malloc(hdr->cls->nrec_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "memory allocation failed for v2 B-tree max record info") + HDmemcpy(hdr->max_native_rec, H5B2_LEAF_NREC(leaf, hdr, idx), hdr->cls->nrec_size); + } /* end if */ + } /* end if */ + } /* end if */ + +done: + /* Release the B-tree leaf node */ + if(leaf) { + /* Check if we should shadow this node */ + if(hdr->swmr_write && (leaf_flags & H5AC__DIRTIED_FLAG)) { + /* Attempt to shadow the node if doing SWMR writes */ + if(H5B2__shadow_leaf(leaf, dxpl_id, curr_node_ptr) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf B-tree node") + + /* Change the state to "shadowed" if only modified currently */ + /* (Triggers parent to be marked dirty) */ + if(*status == H5B2_UPDATE_MODIFY_DONE) + *status = H5B2_UPDATE_SHADOW_DONE; + } /* end if */ + + /* Unprotect leaf node */ + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, leaf, leaf_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__update_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__swap_leaf + * + * Purpose: Swap a record in a node with a record in a leaf node + * + * Return: Success: Non-negative + * + * Failure: Negative + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 4 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, + unsigned idx, void *swap_loc) +{ + const H5AC_class_t *child_class; /* Pointer to child node's class info */ + haddr_t child_addr; /* Address of child node */ + void *child = NULL; /* Pointer to child node */ + uint8_t *child_native; /* Pointer to child's native records */ + 
herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. */ + HDassert(hdr); + HDassert(internal); + HDassert(internal_flags_ptr); + HDassert(idx <= internal->nrec); + + /* Check for the kind of B-tree node to swap */ + if(depth > 1) { + H5B2_internal_t *child_internal; /* Pointer to internal node */ + + /* Setup information for unlocking child node */ + child_class = H5AC_BT2_INT; + + /* Lock B-tree child nodes */ + if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, internal, &internal->node_ptrs[idx], (uint16_t)(depth - 1), FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node") + child_addr = internal->node_ptrs[idx].addr; + + /* More setup for accessing child node information */ + child = child_internal; + child_native = child_internal->int_native; + } /* end if */ + else { + H5B2_leaf_t *child_leaf; /* Pointer to leaf node */ + + /* Setup information for unlocking child nodes */ + child_class = H5AC_BT2_LEAF; + + /* Lock B-tree child node */ + if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, internal, &internal->node_ptrs[idx], FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + child_addr = internal->node_ptrs[idx].addr; + + /* More setup for accessing child node information */ + child = child_leaf; + child_native = child_leaf->leaf_native; + } /* end else */ + + /* Swap records (use disk page as temporary buffer) */ + HDmemcpy(hdr->page, H5B2_NAT_NREC(child_native, hdr, 0), hdr->cls->nrec_size); + HDmemcpy(H5B2_NAT_NREC(child_native, hdr, 0), swap_loc, hdr->cls->nrec_size); + HDmemcpy(swap_loc, hdr->page, hdr->cls->nrec_size); + + /* Mark parent as dirty */ + *internal_flags_ptr |= H5AC__DIRTIED_FLAG; + +#ifdef H5B2_DEBUG + H5B2__assert_internal((hsize_t)0, hdr, internal); + if(depth > 1) + H5B2__assert_internal(internal->node_ptrs[idx].all_nrec, hdr, (H5B2_internal_t *)child); + else + H5B2__assert_leaf(hdr, (H5B2_leaf_t *)child); +#endif /* H5B2_DEBUG */ + +done: + /* Unlock child node */ + if(child && H5AC_unprotect(hdr->f, dxpl_id, child_class, child_addr, child, H5AC__DIRTIED_FLAG) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__swap_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__shadow_leaf + * + * Purpose: "Shadow" a leaf node - copy it to a new location, leaving + * the data in the old location intact (for now). This is + * done when writing in SWMR mode to ensure that readers do + * not see nodes that are out of date with respect to each + * other and thereby inconsistent. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Apr 27 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5B2__shadow_leaf(H5B2_leaf_t *leaf, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr) +{ + H5B2_hdr_t *hdr; /* B-tree header */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* + * Check arguments. 
+ */ + HDassert(leaf); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + hdr = leaf->hdr; + HDassert(hdr); + HDassert(hdr->swmr_write); + + /* We only need to shadow the node if it has not been shadowed since the + * last time the header was flushed, as otherwise it will be unreachable by + * the readers so there will be no need to shadow. To check if it has been + * shadowed, compare the epoch of this node and the header. If this node's + * epoch is <= to the header's, it hasn't been shadowed yet. */ + if(leaf->shadow_epoch <= hdr->shadow_epoch) { + haddr_t new_node_addr; /* Address to move node to */ + + /* + * We must clone the old node so readers with an out-of-date version of + * the parent can still see the correct number of children, via the + * shadowed node. Remove it from cache but do not mark it free on disk. + */ + /* Allocate space for the cloned node */ + if(HADDR_UNDEF == (new_node_addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move B-tree node") + + /* Move the location of the old child on the disk */ + if(H5AC_move_entry(hdr->f, H5AC_BT2_LEAF, curr_node_ptr->addr, new_node_addr, dxpl_id) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTMOVE, FAIL, "unable to move B-tree node") + curr_node_ptr->addr = new_node_addr; + + /* Should free the space in the file, but this is not supported by + * SWMR_WRITE code yet - QAK, 2016/12/01 + */ + + /* Set shadow epoch for node ahead of header */ + leaf->shadow_epoch = hdr->shadow_epoch + 1; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__shadow_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__remove_leaf + * + * Purpose: Removes a record from a B-tree leaf node. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Mar 3 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, + H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_remove_t op, void *op_data) +{ + H5B2_leaf_t *leaf; /* Pointer to leaf node */ + haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ + unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ + unsigned idx = 0; /* Location of record which matches key */ + int cmp; /* Comparison value of records */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. 
*/ + HDassert(hdr); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock current B-tree node */ + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, curr_node_ptr, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + leaf_addr = curr_node_ptr->addr; + + /* Sanity check number of records */ + HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); + HDassert(leaf->nrec == curr_node_ptr->node_nrec); + + /* Find correct location to remove this record */ + if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOMPARE, FAIL, "can't compare btree2 records") + if(cmp != 0) + HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "record is not in B-tree") + + /* Check for invalidating the min/max record for the tree */ + if(H5B2_POS_MIDDLE != curr_pos) { + /* (Don't use 'else' for the idx check, to allow for root leaf node) */ + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->min_native_rec) + hdr->min_native_rec = H5MM_xfree(hdr->min_native_rec); + } /* end if */ + } /* end if */ + if(idx == (unsigned)(leaf->nrec - 1)) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->max_native_rec) + hdr->max_native_rec = H5MM_xfree(hdr->max_native_rec); + } /* end if */ + } /* end if */ + } /* end if */ + + /* Make 'remove' callback if there is one */ + if(op) + if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from leaf node") + + /* Update number of records in node */ + leaf->nrec--; + + if(leaf->nrec > 0) { + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write) { + if(H5B2__shadow_leaf(leaf, dxpl_id, curr_node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node") + leaf_addr = curr_node_ptr->addr; + } /* end if */ + + /* Pack record out of leaf */ + if(idx < leaf->nrec) + HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx)); + + /* Mark leaf node as dirty also */ + leaf_flags |= H5AC__DIRTIED_FLAG; + } /* end if */ + else { + /* Let the cache know that the object is deleted */ + leaf_flags |= H5AC__DELETED_FLAG; + if(!hdr->swmr_write) + leaf_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; + + /* Reset address of parent node pointer */ + curr_node_ptr->addr = HADDR_UNDEF; + } /* end else */ + + /* Update record count for parent of leaf node */ + curr_node_ptr->node_nrec--; + +done: + /* Release the B-tree leaf node */ + if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__remove_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__remove_leaf_by_idx + * + * Purpose: Removes a record from a B-tree leaf node, according to the + * offset in the B-tree records.
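+ * + * Note: the record to remove is identified here by its position within the node rather than by a key, so no record comparison callback is invoked on this path.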
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@hdfgroup.org + * Nov 14 2006 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, + unsigned idx, H5B2_remove_t op, void *op_data) +{ + H5B2_leaf_t *leaf; /* Pointer to leaf node */ + haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */ + unsigned leaf_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting leaf node */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check arguments. */ + HDassert(hdr); + HDassert(curr_node_ptr); + HDassert(H5F_addr_defined(curr_node_ptr->addr)); + + /* Lock B-tree leaf node */ + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, curr_node_ptr, FALSE, H5AC__NO_FLAGS_SET))) + HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + leaf_addr = curr_node_ptr->addr; + + /* Sanity check number of records */ + HDassert(curr_node_ptr->all_nrec == curr_node_ptr->node_nrec); + HDassert(leaf->nrec == curr_node_ptr->node_nrec); + HDassert(idx < leaf->nrec); + + /* Check for invalidating the min/max record for the tree */ + if(H5B2_POS_MIDDLE != curr_pos) { + /* (Don't use 'else' for the idx check, to allow for root leaf node) */ + if(idx == 0) { + if(H5B2_POS_LEFT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->min_native_rec) + hdr->min_native_rec = H5MM_xfree(hdr->min_native_rec); + } /* end if */ + } /* end if */ + if(idx == (unsigned)(leaf->nrec - 1)) { + if(H5B2_POS_RIGHT == curr_pos || H5B2_POS_ROOT == curr_pos) { + if(hdr->max_native_rec) + hdr->max_native_rec = H5MM_xfree(hdr->max_native_rec); + } /* end if */ + } /* end if */ + } /* end if */ + + /* Make 'remove' callback if there is one */ + if(op) + if((op)(H5B2_LEAF_NREC(leaf, hdr, idx), op_data) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from leaf node") + + /* Update number of records in node */ + leaf->nrec--; + + if(leaf->nrec > 0) { + /* Shadow the node if doing SWMR writes */ + if(hdr->swmr_write) { + if(H5B2__shadow_leaf(leaf, dxpl_id, curr_node_ptr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node") + leaf_addr = curr_node_ptr->addr; + } /* end if */ + + /* Pack record out of leaf */ + if(idx < leaf->nrec) + HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx)); + + /* Mark leaf node as dirty also */ + leaf_flags |= H5AC__DIRTIED_FLAG; + } /* end if */ + else { + /* Let the cache know that the object is deleted */ + leaf_flags |= H5AC__DELETED_FLAG; + if(!hdr->swmr_write) + leaf_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; + + /* Reset address of parent node pointer */ + curr_node_ptr->addr = HADDR_UNDEF; + } /* end else */ + + /* Update record count for parent of leaf node */ + curr_node_ptr->node_nrec--; + +done: + /* Release the B-tree leaf node */ + if(leaf && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, leaf_addr, leaf, leaf_flags) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release leaf B-tree node") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5B2__remove_leaf_by_idx() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__leaf_free + * + * Purpose: Destroys a B-tree leaf node in memory.
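+ * + * Note: only the in-memory structures for the node are released here; any file space for the node is freed separately (e.g. via the metadata cache's H5AC__FREE_FILE_SPACE_FLAG in the remove paths above).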
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 2 2005 + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2__leaf_free(H5B2_leaf_t *leaf) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* + * Check arguments. + */ + HDassert(leaf); + + /* Release leaf's native key buffer */ + if(leaf->leaf_native) + leaf->leaf_native = (uint8_t *)H5FL_FAC_FREE(leaf->hdr->node_info[0].nat_rec_fac, leaf->leaf_native); + + /* Decrement ref. count on B-tree header */ + if(H5B2__hdr_decr(leaf->hdr) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTDEC, FAIL, "can't decrement ref. count on B-tree header") + + /* Sanity check */ + HDassert(NULL == leaf->top_proxy); + + /* Free B-tree leaf node info */ + leaf = H5FL_FREE(H5B2_leaf_t, leaf); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5B2__leaf_free() */ + +#ifdef H5B2_DEBUG + +/*------------------------------------------------------------------------- + * Function: H5B2__assert_leaf + * + * Purpose: Verify that a leaf node is mostly sane + * + * Return: Non-negative on success, negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 19 2005 + * + *------------------------------------------------------------------------- + */ +H5_ATTR_PURE herr_t +H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf) +{ + /* General sanity checking on node */ + HDassert(leaf->nrec <= hdr->node_info->split_nrec); + + return(0); +} /* end H5B2__assert_leaf() */ + + +/*------------------------------------------------------------------------- + * Function: H5B2__assert_leaf2 + * + * Purpose: Verify that a leaf node is mostly sane + * + * Return: Non-negative on success, negative on failure + * + * Programmer: Quincey Koziol + * koziol@ncsa.uiuc.edu + * Feb 19 2005 + * + *------------------------------------------------------------------------- + */ +H5_ATTR_PURE herr_t +H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t H5_ATTR_UNUSED *leaf2) +{ + /* General sanity checking on node */ + HDassert(leaf->nrec <= hdr->node_info->split_nrec); + + return(0); +} /* end H5B2__assert_leaf2() */ +#endif /* H5B2_DEBUG */ + diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h index 71700fe..7b1ec4d 100644 --- a/src/H5B2pkg.h +++ b/src/H5B2pkg.h @@ -125,6 +125,9 @@ #define H5B2_NUM_INT_REC(h, d) \ (((h)->node_size - (H5B2_INT_PREFIX_SIZE + H5B2_INT_POINTER_SIZE(h, d))) / ((h)->rrec_size + H5B2_INT_POINTER_SIZE(h, d))) +/* Uncomment this macro to enable extra sanity checking */ +/* #define H5B2_DEBUG */ + /****************************/ /* Package Private Typedefs */ @@ -185,6 +188,26 @@ typedef struct H5B2_hdr_t { void *min_native_rec; /* Pointer to minimum native record */ void *max_native_rec; /* Pointer to maximum native record */ + /* SWMR / Flush dependency information (not stored) */ + hbool_t swmr_write; /* Whether we are doing SWMR writes */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all B-tree entries */ + void *parent; /* Pointer to 'top' proxy flush dependency + * parent, if it exists, otherwise NULL. + * If the v2 B-tree is being used to index a + * chunked dataset and the dataset metadata is + * modified by a SWMR writer, this field will + * be set equal to the object header proxy + * that is the flush dependency parent + * of the v2 B-tree header.
+ * + * The field is used to avoid duplicate setups + * of the flush dependency relationship, and to + * allow the v2 B-tree header to destroy the + * flush dependency on receipt of an eviction + * notification from the metadata cache. + */ + uint64_t shadow_epoch; /* Epoch of header, for making shadow copies */ + /* Client information (not stored) */ const H5B2_class_t *cls; /* Class of B-tree client */ void *cb_ctx; /* Client callback context */ @@ -199,6 +222,11 @@ typedef struct H5B2_leaf_t { H5B2_hdr_t *hdr; /* Pointer to the [pinned] v2 B-tree header */ uint8_t *leaf_native; /* Pointer to native records */ uint16_t nrec; /* Number of records in node */ + + /* SWMR / Flush dependency information (not stored) */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all B-tree entries */ + void *parent; /* Flush dependency parent for leaf */ + uint64_t shadow_epoch; /* Epoch of node, for making shadow copies */ } H5B2_leaf_t; /* B-tree internal node information */ @@ -212,6 +240,11 @@ typedef struct H5B2_internal_t { H5B2_node_ptr_t *node_ptrs; /* Pointer to node pointers */ uint16_t nrec; /* Number of records in node */ uint16_t depth; /* Depth of this node in the B-tree */ + + /* SWMR / Flush dependency information (not stored) */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all B-tree entries */ + void *parent; /* Flush dependency parent for internal node */ + uint64_t shadow_epoch; /* Epoch of node, for making shadow copies */ } H5B2_internal_t; /* v2 B-tree */ @@ -232,6 +265,7 @@ typedef enum H5B2_nodepos_t { typedef enum H5B2_update_status_t { H5B2_UPDATE_UNKNOWN, /* Unknown update status (initial state) */ H5B2_UPDATE_MODIFY_DONE, /* Update successfully modified existing record */ + H5B2_UPDATE_SHADOW_DONE, /* Update modified existing record and modified node was shadowed */ H5B2_UPDATE_INSERT_DONE, /* Update inserted record successfully */ H5B2_UPDATE_INSERT_CHILD_FULL /* Update will insert record, but child is full */ } H5B2_update_status_t; @@ -247,6 +281,7 @@ typedef struct H5B2_hdr_cache_ud_t { typedef struct H5B2_internal_cache_ud_t { H5F_t *f; /* File that v2 b-tree header is within */ H5B2_hdr_t *hdr; /* v2 B-tree header */ + void *parent; /* Flush dependency parent */ uint16_t nrec; /* Number of records in node to load */ uint16_t depth; /* Depth of node to load */ } H5B2_internal_cache_ud_t; @@ -255,6 +290,7 @@ typedef struct H5B2_internal_cache_ud_t { typedef struct H5B2_leaf_cache_ud_t { H5F_t *f; /* File that v2 b-tree header is within */ H5B2_hdr_t *hdr; /* v2 B-tree header */ + void *parent; /* Flush dependency parent */ uint16_t nrec; /* Number of records in node to load */ } H5B2_leaf_cache_ud_t; @@ -306,6 +342,30 @@ extern const H5B2_class_t *const H5B2_client_class_g[H5B2_NUM_BTREE_ID]; /* Package Private Prototypes */ /******************************/ +/* Generic routines */ +H5_DLL herr_t H5B2__create_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); +H5_DLL herr_t H5B2__update_flush_depend(H5B2_hdr_t *hdr, hid_t dxpl_id, + unsigned depth, const H5B2_node_ptr_t *node_ptr, void *old_parent, + void *new_parent); +H5_DLL herr_t H5B2__destroy_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); + +/* Internal node management routines */ +H5_DLL herr_t H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); +H5_DLL herr_t H5B2__redistribute2(H5B2_hdr_t 
*hdr, hid_t dxpl_id, uint16_t depth, + H5B2_internal_t *internal, unsigned idx); +H5_DLL herr_t H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); +H5_DLL herr_t H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); +H5_DLL herr_t H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_node_ptr_t *curr_node_ptr, unsigned *parent_cache_info_flags_ptr, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx); + /* Routines for managing B-tree header info */ H5_DLL H5B2_hdr_t *H5B2__hdr_alloc(H5F_t *f); H5_DLL haddr_t H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, @@ -324,17 +384,23 @@ H5_DLL herr_t H5B2__hdr_unprotect(H5B2_hdr_t *hdr, hid_t dxpl_id, H5_DLL herr_t H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id); /* Routines for operating on leaf nodes */ -H5B2_leaf_t *H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, - uint16_t nrec, unsigned flags); +H5_DLL H5B2_leaf_t * H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, + void *parent, H5B2_node_ptr_t *node_ptr, hbool_t shadow, unsigned flags); +H5_DLL herr_t H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, + H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx, + void *swap_loc); /* Routines for operating on internal nodes */ H5_DLL H5B2_internal_t *H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, - haddr_t addr, uint16_t nrec, uint16_t depth, unsigned flags); + void *parent, H5B2_node_ptr_t *node_ptr, uint16_t depth, hbool_t shadow, + unsigned flags); /* Routines for allocating nodes */ H5_DLL herr_t H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id); -H5_DLL herr_t H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, +H5_DLL herr_t H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, H5B2_node_ptr_t *node_ptr); +H5_DLL herr_t H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, + H5B2_node_ptr_t *node_ptr, uint16_t depth); /* Routines for releasing structures */ H5_DLL herr_t H5B2__hdr_free(H5B2_hdr_t *hdr); @@ -342,59 +408,64 @@ H5_DLL herr_t H5B2__leaf_free(H5B2_leaf_t *l); H5_DLL herr_t H5B2__internal_free(H5B2_internal_t *i); /* Routines for inserting records */ -H5_DLL herr_t H5B2__insert_hdr(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata); +H5_DLL herr_t H5B2__insert(H5B2_hdr_t *hdr, hid_t dxpl_id, void *udata); H5_DLL herr_t H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned *parent_cache_info_flags_ptr, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata); + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata); H5_DLL herr_t H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata); + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata); /* Routines for update records */ H5_DLL herr_t H5B2__update_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, H5B2_update_status_t *status, - H5B2_nodepos_t curr_pos, void *udata, H5B2_modify_t op, void *op_data); + H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_modify_t op, + void *op_data); H5_DLL herr_t H5B2__update_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr, 
H5B2_update_status_t *status, - H5B2_nodepos_t curr_pos, void *udata, H5B2_modify_t op, void *op_data); + H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_modify_t op, + void *op_data); /* Routines for iterating over nodes/records */ H5_DLL herr_t H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data); + const H5B2_node_ptr_t *curr_node, void *parent, H5B2_operator_t op, void *op_data); H5_DLL herr_t H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, - uint16_t depth, const H5B2_node_ptr_t *curr_node, hsize_t *op_data); + uint16_t depth, const H5B2_node_ptr_t *curr_node, void *parent, + hsize_t *op_data); /* Routines for locating records */ H5_DLL herr_t H5B2__locate_record(const H5B2_class_t *type, unsigned nrec, size_t *rec_off, const uint8_t *native, const void *udata, unsigned *idx, int *result); H5_DLL herr_t H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, - H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data); + H5B2_compare_t comp, void *parent, void *udata, H5B2_found_t op, + void *op_data); H5_DLL herr_t H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, - H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data); + H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp, + void *parent, void *udata, H5B2_found_t op, void *op_data); /* Routines for removing records */ H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, - hbool_t *depth_decreased, void *swap_loc, uint16_t depth, + hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth, H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr, H5B2_nodepos_t curr_pos, H5B2_node_ptr_t *curr_node_ptr, void *udata, H5B2_remove_t op, void *op_data); H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_remove_t op, void *op_data); H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, - hbool_t *depth_decreased, void *swap_loc, uint16_t depth, + hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth, H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n, H5B2_remove_t op, void *op_data); H5_DLL herr_t H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id, - H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, + H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, unsigned idx, H5B2_remove_t op, void *op_data); /* Routines for deleting nodes */ H5_DLL herr_t H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth, - const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data); + const H5B2_node_ptr_t *curr_node, void *parent, H5B2_remove_t op, + void *op_data); /* Debugging routines for dumping file structures */ H5_DLL herr_t H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, @@ -406,6 +477,15 @@ H5_DLL herr_t H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth, const H5B2_class_t *type, haddr_t hdr_addr, unsigned nrec, haddr_t obj_addr); +/* Sanity checking routines */ +#ifdef H5B2_DEBUG +/* Don't label these with H5_ATTR_PURE or you'll get even more warnings... 
*/ +H5_DLL herr_t H5B2__assert_internal(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal); +H5_DLL herr_t H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B2_internal_t *internal, const H5B2_internal_t *internal2); +H5_DLL herr_t H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf); +H5_DLL herr_t H5B2__assert_leaf2(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf, const H5B2_leaf_t *leaf2); +#endif /* H5B2_DEBUG */ + /* Testing routines */ #ifdef H5B2_TESTING H5_DLL herr_t H5B2_get_root_addr_test(H5B2_t *bt2, haddr_t *root_addr); diff --git a/src/H5B2private.h b/src/H5B2private.h index 3caf41f..161e25e 100644 --- a/src/H5B2private.h +++ b/src/H5B2private.h @@ -31,7 +31,8 @@ #include "H5B2public.h" /* Private headers needed by this file */ -#include "H5Fprivate.h" /* File access */ +#include "H5ACprivate.h" /* Metadata cache */ +#include "H5Fprivate.h" /* File access */ /**************************/ /* Library Private Macros */ @@ -153,6 +154,7 @@ H5_DLL herr_t H5B2_size(H5B2_t *bt2, hid_t dxpl_id, H5_DLL herr_t H5B2_close(H5B2_t *bt2, hid_t dxpl_id); H5_DLL herr_t H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata, H5B2_remove_t op, void *op_data); +H5_DLL herr_t H5B2_depend(H5B2_t *bt2, hid_t dxpl_id, H5AC_proxy_entry_t *parent); H5_DLL herr_t H5B2_patch_file(H5B2_t *fa, H5F_t *f); /* Statistics routines */ diff --git a/src/H5B2stat.c b/src/H5B2stat.c index 10c692e..da721c6 100644 --- a/src/H5B2stat.c +++ b/src/H5B2stat.c @@ -139,7 +139,7 @@ H5B2_size(H5B2_t *bt2, hid_t dxpl_id, hsize_t *btree_size) *btree_size += hdr->node_size; else /* Iterate through nodes */ - if(H5B2__node_size(hdr, dxpl_id, hdr->depth, &hdr->root, btree_size) < 0) + if(H5B2__node_size(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, btree_size) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed") } /* end if */ diff --git a/src/H5B2test.c b/src/H5B2test.c index 24163e0..aec2aba 100644 --- a/src/H5B2test.c +++ b/src/H5B2test.c @@ -518,6 +518,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, { H5B2_hdr_t *hdr; /* Pointer to the B-tree header */ H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */ + void *parent = NULL; /* Parent of current node */ uint16_t depth; /* Current depth of the tree */ int cmp; /* Comparison value of records */ unsigned idx; /* Location of record which matches key */ @@ -537,6 +538,10 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, /* Make copy of the root node pointer to start search with */ curr_node_ptr = hdr->root; + /* Set initial parent, if doing swmr writes */ + if(hdr->swmr_write) + parent = hdr; + /* Current depth of the tree */ depth = hdr->depth; @@ -551,9 +556,16 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */ /* Lock B-tree current node */ - if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG))) + if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, parent, &curr_node_ptr, depth, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate node pointer for child */ 
if(H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx, &cmp) < 0) @@ -567,9 +579,13 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, next_node_ptr = internal->node_ptrs[idx]; /* Unlock current node */ - if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0) + if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0) HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node") + /* Keep track of parent if necessary */ + if(hdr->swmr_write) + parent = internal; + /* Set pointer to next node to load */ curr_node_ptr = next_node_ptr; } /* end if */ @@ -594,9 +610,16 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */ /* Lock B-tree leaf node */ - if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG))) + if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, parent, &curr_node_ptr, FALSE, H5AC__READ_ONLY_FLAG))) HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node") + /* Unpin parent if necessary */ + if(parent) { + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + parent = NULL; + } /* end if */ + /* Locate record */ if(H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx, &cmp) < 0) @@ -616,6 +639,12 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata, ninfo->nrec = curr_node_ptr.node_nrec; done: + if(parent) { + HDassert(ret_value < 0); + if(parent != hdr && H5AC_unpin_entry(parent) < 0) + HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry") + } /* end if */ + FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_get_node_info_test() */ diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c index d92a24b..b22264d 100644 --- a/src/H5Bdbg.c +++ b/src/H5Bdbg.c @@ -77,6 +77,9 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f HDassert(fwidth >= 0); HDassert(type); + /* Currently does not support SWMR access */ + HDassert(!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)); + /* Get shared info for B-tree */ if(NULL == (rc_shared = (type->get_shared)(f, udata))) HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. 
count object") @@ -742,7 +742,39 @@ done: /*------------------------------------------------------------------------- + * Function: H5C_evict * + * Purpose: Evict all except pinned entries in the cache + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Dec 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_evict(H5F_t * f, hid_t dxpl_id) +{ + H5C_t *cache_ptr = f->shared->cache; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity check */ + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + /* Flush and invalidate all cache entries except the pinned entries */ + if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 ) + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_evict() */ + + +/*------------------------------------------------------------------------- * Function: H5C_expunge_entry * * Purpose: Use this function to tell the cache to expunge an entry @@ -1543,6 +1575,85 @@ done: /*------------------------------------------------------------------------- + * Function: H5C_mark_entry_clean + * + * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned. + * + * If the entry is not + * already clean, the function places function marks the entry + * clean and removes it from the skip list. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * 7/23/16 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_mark_entry_clean(void *_thing) +{ + H5C_t * cache_ptr; + H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry_ptr); + HDassert(H5F_addr_defined(entry_ptr->addr)); + cache_ptr = entry_ptr->cache_ptr; + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + + /* Operate on pinned entry */ + if(entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected") + else if(entry_ptr->is_pinned) { + hbool_t was_dirty; /* Whether the entry was previously dirty */ + + /* Remember previous dirty status */ + was_dirty = entry_ptr->is_dirty; + + /* Mark the entry as clean if it isn't already */ + entry_ptr->is_dirty = FALSE; + + /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */ + entry_ptr->flush_marker = FALSE; + + /* Modify cache data structures */ + if(was_dirty) + H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) + if(entry_ptr->in_slist) + H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE) + + /* Update stats for entry being marked clean */ + H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) + + /* Check for entry changing status and do notifications, etc. */ + if(was_dirty) { + /* If the entry's type has a 'notify' callback send a 'entry cleaned' + * notice now that the entry is fully integrated into the cache. 
+ */ + if(entry_ptr->type->notify && + (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared") + + /* Propagate the clean up the flush dependency chain, if appropriate */ + if(entry_ptr->flush_dep_nparents > 0) + if(H5C__mark_flush_dep_clean(entry_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean") + } /* end if */ + } /* end if */ + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_mark_entry_clean() */ + + +/*------------------------------------------------------------------------- * * Function: H5C_move_entry * @@ -1591,9 +1702,6 @@ H5C_move_entry(H5C_t * cache_ptr, HDassert(entry_ptr->addr == old_addr); HDassert(entry_ptr->type == type); - if(entry_ptr->is_protected) - HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target entry is protected.") - H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL) if(test_entry_ptr != NULL) { /* we are hosed */ @@ -4955,17 +5063,19 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags) } /* end while */ /* Invariants, after destroying all entries in the hash table */ - HDassert(cache_ptr->index_size == 0); - HDassert(cache_ptr->clean_index_size == 0); - HDassert(cache_ptr->dirty_index_size == 0); - HDassert(cache_ptr->slist_len == 0); - HDassert(cache_ptr->slist_size == 0); - HDassert(cache_ptr->pel_len == 0); - HDassert(cache_ptr->pel_size == 0); - HDassert(cache_ptr->pl_len == 0); - HDassert(cache_ptr->pl_size == 0); - HDassert(cache_ptr->LRU_list_len == 0); - HDassert(cache_ptr->LRU_list_size == 0); + if(!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) { + HDassert(cache_ptr->index_size == 0); + HDassert(cache_ptr->clean_index_size == 0); + HDassert(cache_ptr->dirty_index_size == 0); + HDassert(cache_ptr->slist_len == 0); + HDassert(cache_ptr->slist_size == 0); + HDassert(cache_ptr->pel_len == 0); + HDassert(cache_ptr->pel_size == 0); + HDassert(cache_ptr->pl_len == 0); + HDassert(cache_ptr->pl_size == 0); + HDassert(cache_ptr->LRU_list_len == 0); + HDassert(cache_ptr->LRU_list_size == 0); + } /* end if */ done: FUNC_LEAVE_NOAPI(ret_value) @@ -5307,12 +5417,16 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring, * into the cache, or moved to a new location * in the file as a side effect of the flush. * - * If this happens, and one of the target - * entries happens to be the next entry in - * the hash bucket, we could find ourselves - * either find ourselves either scanning a - * non-existant entry, scanning through a - * different bucket, or skipping an entry. + * It's also possible that removing a clean + * entry will remove the last child of a proxy + * entry, allowing it to be removed also and + * invalidating the next_entry_ptr. + * + * If either of these happens, and one of the target + * or proxy entries happens to be the next entry in + * the hash bucket, we could find ourselves + * either scanning a non-existent entry, scanning + * through a different bucket, or skipping an entry.
* * Neither of these are good, so restart + * the scan at the head of the hash bucket @@ -5889,6 +6003,11 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) } /* end else if */ + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C_remove_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + /* Update the cache internal data structures. */ if(destroy) { /* Sanity checks */ @@ -7724,3 +7843,120 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5C__generate_image */ + +/*------------------------------------------------------------------------- + * + * Function: H5C_remove_entry + * + * Purpose: Remove an entry from the cache. Must not be protected, pinned, + * dirty, involved in flush dependencies, etc. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * September 17, 2016 + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_remove_entry(void *_entry) +{ + H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */ + H5C_t *cache; /* Cache for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(entry); + HDassert(entry->ring != H5C_RING_UNDEFINED); + cache = entry->cache_ptr; + HDassert(cache); + HDassert(cache->magic == H5C__H5C_T_MAGIC); + + /* Check for error conditions */ + if(entry->is_dirty) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache") + if(entry->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache") + if(entry->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache") + if(entry->flush_dep_nparents > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry with flush dependency parents from cache") + if(entry->flush_dep_nchildren > 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry with flush dependency children from cache") + + /* Additional internal cache consistency checks */ + HDassert(!entry->in_slist); + HDassert(!entry->flush_marker); + HDassert(!entry->flush_in_progress); + + /* Note that the algorithm below is (very) similar to the set of operations + * in H5C__flush_single_entry() and should be kept in sync with changes + * to that code. - QAK, 2016/11/30 + */ + + /* Update stats, as if we are "destroying" and taking ownership of the entry */ + H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE) + + /* If the entry's type has a 'notify' callback, send a 'before eviction' + * notice while the entry is still fully integrated in the cache. + */ + if(entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict") + + /* Update the cache internal data structures as appropriate for a destroy. + * Specifically: + * 1) Delete it from the index + * 2) Update the replacement policy for eviction + * 3) Remove it from the tag list for this object + */ + + H5C__DELETE_FROM_INDEX(cache, entry) + + H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL) + + /* Remove entry from tag list */ + if(H5C__untag_entry(cache, entry) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list") + + /* Increment entries_removed_counter and set last_entry_removed_ptr.
+ * As we may be about to free the entry, recall that last_entry_removed_ptr + * must NEVER be dereferenced. + * + * Recall that these fields are maintained to allow functions that perform + * scans of lists of entries to detect the unexpected removal of entries + * (via expunge, eviction, or take ownership at present), so that they can + * re-start their scans if necessary. + * + * Also check if the entry we are watching for removal is being + * removed (usually the 'next' entry for an iteration) and reset + * it to indicate that it was removed. + */ + cache->entries_removed_counter++; + cache->last_entry_removed_ptr = entry; + if(entry == cache->entry_watched_for_removal) + cache->entry_watched_for_removal = NULL; + + /* Internal cache data structures should now be up to date, and + * consistent with the status of the entry. + * + * Now clean up internal cache fields if appropriate. + */ + + /* Free the buffer for the on disk image */ + if(entry->image_ptr != NULL) + entry->image_ptr = H5MM_xfree(entry->image_ptr); + + /* Reset the pointer to the cache the entry is within */ + entry->cache_ptr = NULL; + + /* Client is taking ownership of the entry. Set bad magic here so the + * cache will choke unless the entry is re-inserted properly + */ + entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C_remove_entry() */ + diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 16efb5c..fae4d74 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -1508,7 +1508,6 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ HDassert( !((entry_ptr)->is_read_only) ); \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ @@ -1548,7 +1547,6 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ HDassert( !((entry_ptr)->is_read_only) ); \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->in_slist ); \ @@ -2398,12 +2396,11 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ HDassert( !((entry_ptr)->is_read_only) ); \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - if ( ! ( (entry_ptr)->is_pinned ) ) { \ + if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \ \ /* modified LRU specific code */ \ \ @@ -2476,12 +2473,11 @@ if ( ( (cache_ptr)->index_size != \ HDassert( (cache_ptr) ); \ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ HDassert( !((entry_ptr)->is_read_only) ); \ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ HDassert( (entry_ptr)->size > 0 ); \ \ - if ( ! ( (entry_ptr)->is_pinned ) ) { \ + if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \ \ /* modified LRU specific code */ \ \ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 923083f..baa80a2 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -41,7 +41,7 @@ /**************************/ /* Cache configuration settings */ -#define H5C__MAX_NUM_TYPE_IDS 28 +#define H5C__MAX_NUM_TYPE_IDS 29 #define H5C__PREFIX_LEN 32 /* This sanity checking constant was picked out of the air.
Increase @@ -169,6 +169,9 @@ * These flags apply to H5C_expunge_entry(): * H5C__FREE_FILE_SPACE_FLAG * + * These flags apply to H5C_evict(): + * H5C__EVICT_ALLOW_LAST_PINS_FLAG + * * These flags apply to H5C_flush_cache(): * H5C__FLUSH_INVALIDATE_FLAG * H5C__FLUSH_CLEAR_ONLY_FLAG @@ -1716,6 +1719,7 @@ H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version, size_t old_max_cache_size, size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size); H5_DLL herr_t H5C_dest(H5F_t *f, hid_t dxpl_id); +H5_DLL herr_t H5C_evict(H5F_t *f, hid_t dxpl_id); H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr, unsigned flags); H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags); @@ -1743,6 +1747,7 @@ H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_pt H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags); H5_DLL herr_t H5C_mark_entry_dirty(void *thing); +H5_DLL herr_t H5C_mark_entry_clean(void *thing); H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr); H5_DLL herr_t H5C_pin_protected_entry(void *thing); @@ -1770,6 +1775,7 @@ H5_DLL hbool_t H5C_get_ignore_tags(const H5C_t *cache_ptr); H5_DLL herr_t H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag); H5_DLL herr_t H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked); H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring); +H5_DLL herr_t H5C_remove_entry(void * thing); #ifdef H5_HAVE_PARALLEL H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id, diff --git a/src/H5Ctag.c b/src/H5Ctag.c index 6d5b454..6b47cc7 100644 --- a/src/H5Ctag.c +++ b/src/H5Ctag.c @@ -627,8 +627,10 @@ H5C_verify_tag(int id, haddr_t tag) * constraints are met. */ if(tag == H5AC__IGNORE_TAG) HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "cannot ignore a tag while doing verification.") - else if(tag == H5AC__INVALID_TAG) - HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "no metadata tag provided") + else if(tag == H5AC__INVALID_TAG) { + if(id != H5AC_PROXY_ENTRY_ID) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "no metadata tag provided") + } /* end else-if */ else { /* Perform some sanity checks on tag value. 
Certain entry * types require certain tag values, so check that these diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index 85238da..8ef14b9 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -153,6 +153,7 @@ static herr_t H5D__btree_idx_dest(const H5D_chk_idx_info_t *idx_info); /* v1 B-tree indexed chunk I/O ops */ const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{ + FALSE, /* v1 B-tree indices do not support SWMR access */ H5D__btree_idx_init, /* insert */ H5D__btree_idx_create, /* create */ H5D__btree_idx_is_space_alloc, /* is_space_alloc */ diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index 3b9b803..f687a5d 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -105,6 +105,7 @@ static herr_t H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, /* Helper routine */ static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info); /* Callback for H5B2_iterate() which is called in H5D__bt2_idx_iterate() */ static int H5D__bt2_idx_iterate_cb(const void *_record, void *_udata); @@ -152,6 +153,7 @@ static herr_t H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Chunked dataset I/O ops for v2 B-tree indexing */ const H5D_chunk_ops_t H5D_COPS_BT2[1] = {{ + TRUE, /* v2 B-tree indices support SWMR access */ H5D__bt2_idx_init, /* init */ H5D__bt2_idx_create, /* create */ H5D__bt2_idx_is_space_alloc, /* is_space_alloc */ @@ -623,6 +625,68 @@ H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, /*------------------------------------------------------------------------- + * Function: H5D__btree2_idx_depend + * + * Purpose: Create flush dependency between v2 B-tree and dataset's + * object header. + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Friday, December 18, 2015 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__btree2_idx_depend(const H5D_chk_idx_info_t *idx_info) +{ + H5O_t *oh = NULL; /* Object header */ + H5O_loc_t oloc; /* Temporary object header location for dataset */ + H5AC_proxy_entry_t *oh_proxy; /* Dataset's object header proxy */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5D_CHUNK_IDX_BT2 == idx_info->storage->idx_type); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(idx_info->storage->u.btree2.bt2); + + /* Set up object header location for dataset */ + H5O_loc_reset(&oloc); + oloc.file = idx_info->f; + oloc.addr = idx_info->storage->u.btree.dset_ohdr_addr; + + /* Get header */ + if(NULL == (oh = H5O_protect(&oloc, idx_info->dxpl_id, H5AC__READ_ONLY_FLAG, TRUE))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTPROTECT, FAIL, "unable to protect object header") + + /* Retrieve the dataset's object header proxy */ + if(NULL == (oh_proxy = H5O_get_proxy(oh))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy") + + /* Make the v2 B-tree a child flush dependency of the dataset's object header proxy */ + if(H5B2_depend(idx_info->storage->u.btree2.bt2, idx_info->dxpl_id, oh_proxy) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy") + +done: + /* Release the 
object header from the cache */ + if(oh && H5O_unprotect(&oloc, idx_info->dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTUNPROTECT, FAIL, "unable to release object header") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__btree2_idx_depend() */ + + +/*------------------------------------------------------------------------- * Function: H5D__bt2_idx_open() * * Purpose: Opens an existing v2 B-tree. @@ -667,6 +731,11 @@ H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) if(NULL == (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &u_ctx))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__btree2_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__bt2_idx_open() */ @@ -738,6 +807,11 @@ H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) if(H5B2_get_addr(idx_info->storage->u.btree2.bt2, &(idx_info->storage->idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get v2 B-tree address for tracking chunked dataset") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__btree2_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__bt2_idx_create() */ @@ -1186,7 +1260,7 @@ H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *u /* Remove the record for the "dataset chunk" object from the v2 B-tree */ /* (space in the file for the object is freed in the 'remove' callback) */ - if(H5B2_remove(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_remove_cb, &remove_udata) < 0) + if(H5B2_remove(bt2, idx_info->dxpl_id, &bt2_udata, (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) ? NULL : H5D__bt2_remove_cb, &remove_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree") done: @@ -1243,8 +1317,11 @@ H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info) remove_udata.f = idx_info->f; remove_udata.dxpl_id = idx_info->dxpl_id; - /* Set remove operation. */ - remove_op = H5D__bt2_remove_cb; + /* Set remove operation. 
Do not remove chunks in SWMR_WRITE mode */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + remove_op = NULL; + else + remove_op = H5D__bt2_remove_cb; /* Delete the v2 B-tree */ /*(space in the file for each object is freed in the 'remove' callback) */ diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7637339..7a646af 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -6292,8 +6292,13 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old /* Check for chunk being same size */ if(new_chunk->length != old_chunk->length) { /* Release previous chunk */ - if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, old_chunk->offset, old_chunk->length) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + /* Only free the old location if not doing SWMR writes - otherwise + * we must keep the old chunk around in case a reader has an + * outdated version of the B-tree node + */ + if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, old_chunk->offset, old_chunk->length) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") alloc_chunk = TRUE; } /* end if */ else { diff --git a/src/H5Dearray.c b/src/H5Dearray.c index 3f12e97..e9dbd0d 100644 --- a/src/H5Dearray.c +++ b/src/H5Dearray.c @@ -148,6 +148,7 @@ static herr_t H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Generic extensible array routines */ static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info); /*********************/ @@ -156,6 +157,7 @@ static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); /* Extensible array indexed chunk I/O ops */ const H5D_chunk_ops_t H5D_COPS_EARRAY[1] = {{ + TRUE, /* Extensible array indices support SWMR access */ H5D__earray_idx_init, /* init */ H5D__earray_idx_create, /* create */ H5D__earray_idx_is_space_alloc, /* is_space_alloc */ @@ -716,6 +718,68 @@ H5D__earray_dst_dbg_context(void *_dbg_ctx) /*------------------------------------------------------------------------- + * Function: H5D__earray_idx_depend + * + * Purpose: Create flush dependency between extensible array and dataset's + * object header. 
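+ * + * Note: making the array a flush dependency child of the header proxy keeps the metadata cache from writing the object header out while the index array is still dirty, so a SWMR reader that follows the header to the index should never find index metadata that has not yet reached the file.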
+ * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Tuesday, June 2, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info) +{ + H5O_t *oh = NULL; /* Object header */ + H5O_loc_t oloc; /* Temporary object header location for dataset */ + H5AC_proxy_entry_t *oh_proxy; /* Dataset's object header proxy */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(idx_info->storage->u.earray.ea); + + /* Set up object header location for dataset */ + H5O_loc_reset(&oloc); + oloc.file = idx_info->f; + oloc.addr = idx_info->storage->u.earray.dset_ohdr_addr; + + /* Get header */ + if(NULL == (oh = H5O_protect(&oloc, idx_info->dxpl_id, H5AC__READ_ONLY_FLAG, TRUE))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTPROTECT, FAIL, "unable to protect object header") + + /* Retrieve the dataset's object header proxy */ + if(NULL == (oh_proxy = H5O_get_proxy(oh))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy") + + /* Make the extensible array a child flush dependency of the dataset's object header */ + if(H5EA_depend(idx_info->storage->u.earray.ea, idx_info->dxpl_id, oh_proxy) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy") + +done: + /* Release the object header from the cache */ + if(oh && H5O_unprotect(&oloc, idx_info->dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTUNPROTECT, FAIL, "unable to release object header") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_depend() */ + + +/*------------------------------------------------------------------------- * Function: H5D__earray_idx_open * * Purpose: Opens an existing extensible array. 
@@ -760,6 +824,11 @@ H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) if(NULL == (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__earray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_open() */ @@ -910,6 +979,11 @@ H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) if(H5EA_get_addr(idx_info->storage->u.earray.ea, &(idx_info->storage->idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array address") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__earray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__earray_idx_create() */ @@ -1355,11 +1429,13 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") - /* Remove raw data chunk from file */ + /* Remove raw data chunk from file if not doing SWMR writes */ HDassert(H5F_addr_defined(elmt.addr)); - H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); - if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { + H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + } /* end if */ /* Reset the info about the chunk for the index */ elmt.addr = HADDR_UNDEF; @@ -1375,11 +1451,13 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t if(H5EA_get(ea, idx_info->dxpl_id, idx, &addr) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") - /* Remove raw data chunk from file */ + /* Remove raw data chunk from file if not doing SWMR writes */ HDassert(H5F_addr_defined(addr)); - H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); - if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { + H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + } /* end if */ /* Reset the address of the chunk for the index */ addr = HADDR_UNDEF; diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c index 212f1f1..6b95e12 100644 --- a/src/H5Dfarray.c +++ b/src/H5Dfarray.c @@ -147,6 +147,7 @@ static herr_t H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info); /* Generic fixed array routines */ static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__farray_idx_depend(const 
H5D_chk_idx_info_t *idx_info); /*********************/ /* Package Variables */ @@ -154,6 +155,7 @@ static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info); /* Fixed array indexed chunk I/O ops */ const H5D_chunk_ops_t H5D_COPS_FARRAY[1] = {{ + TRUE, /* Fixed array indices support SWMR access */ H5D__farray_idx_init, /* init */ H5D__farray_idx_create, /* create */ H5D__farray_idx_is_space_alloc, /* is_space_alloc */ @@ -716,6 +718,68 @@ H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, /*------------------------------------------------------------------------- + * Function: H5D__farray_idx_depend + * + * Purpose: Create flush dependency between fixed array and dataset's + * object header. + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info) +{ + H5O_t *oh = NULL; /* Object header */ + H5O_loc_t oloc; /* Temporary object header location for dataset */ + H5AC_proxy_entry_t *oh_proxy; /* Dataset's object header proxy */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(idx_info->storage->u.farray.fa); + + /* Set up object header location for dataset */ + H5O_loc_reset(&oloc); + oloc.file = idx_info->f; + oloc.addr = idx_info->storage->u.farray.dset_ohdr_addr; + + /* Get header */ + if(NULL == (oh = H5O_protect(&oloc, idx_info->dxpl_id, H5AC__READ_ONLY_FLAG, TRUE))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTPROTECT, FAIL, "unable to protect object header") + + /* Retrieve the dataset's object header proxy */ + if(NULL == (oh_proxy = H5O_get_proxy(oh))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get dataset object header proxy") + + /* Make the fixed array a child flush dependency of the dataset's object header proxy */ + if(H5FA_depend(idx_info->storage->u.farray.fa, idx_info->dxpl_id, oh_proxy) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header proxy") + +done: + /* Release the object header from the cache */ + if(oh && H5O_unprotect(&oloc, idx_info->dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTUNPROTECT, FAIL, "unable to release object header") + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_depend() */ + + +/*------------------------------------------------------------------------- * Function: H5D__farray_idx_init * * Purpose: Initialize the indexing information for a dataset. 
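
The fixed-array chunk index gets the same treatment as the extensible array above: it becomes a flush-dependency child of the dataset's object header proxy under SWMR writes, and chunks that a reader might still reference are no longer freed. On the consumer side, the point of this ordering is that a reader can keep the file open with H5F_ACC_RDONLY | H5F_ACC_SWMR_READ while the writer is active and re-read metadata on demand. A minimal polling sketch follows; the file and dataset names are placeholders and the polling policy is up to the application.

    #include <hdf5.h>

    /* Illustrative SWMR reader helper: re-read a dataset's current extent
     * while a concurrent writer appends to it. */
    static herr_t
    swmr_poll_extent(const char *fname, const char *dname, hsize_t *size_out)
    {
        hid_t  fid = -1, did = -1, fspace = -1;
        herr_t ret = -1;

        if((fid = H5Fopen(fname, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
            goto done;
        if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
            goto done;
        if(H5Drefresh(did) < 0)                 /* drop cached metadata, re-read from the file */
            goto done;
        if((fspace = H5Dget_space(did)) < 0)
            goto done;
        if(H5Sget_simple_extent_dims(fspace, size_out, NULL) < 0)
            goto done;
        ret = 0;

    done:
        if(fspace >= 0) H5Sclose(fspace);
        if(did >= 0)    H5Dclose(did);
        if(fid >= 0)    H5Fclose(fid);
        return ret;
    }

Note that H5F_ACC_SWMR_READ is only valid together with read-only access; the H5Fopen() checks added further down in this patch reject SWMR read access combined with H5F_ACC_RDWR, and SWMR write access without it.
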
@@ -784,6 +848,11 @@ H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) if(NULL == (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__farray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__farray_idx_open() */ @@ -860,6 +929,11 @@ H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) if(H5FA_get_addr(idx_info->storage->u.farray.fa, &(idx_info->storage->idx_addr)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array address") + /* Check for SWMR writes to the file */ + if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) + if(H5D__farray_idx_depend(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__farray_idx_create() */ @@ -1224,11 +1298,13 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t if(H5FA_get(fa, idx_info->dxpl_id, idx, &elmt) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") - /* Remove raw data chunk from file */ + /* Remove raw data chunk from file if not doing SWMR writes */ HDassert(H5F_addr_defined(elmt.addr)); - H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); - if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { + H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + } /* end if */ /* Reset the info about the chunk for the index */ elmt.addr = HADDR_UNDEF; @@ -1244,11 +1320,13 @@ H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t if(H5FA_get(fa, idx_info->dxpl_id, idx, &addr) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") - /* Remove raw data chunk from file */ + /* Remove raw data chunk from file if not doing SWMR writes */ HDassert(H5F_addr_defined(addr)); - H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); - if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) { + H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + } /* end if */ /* Reset the address of the chunk for the index */ addr = HADDR_UNDEF; diff --git a/src/H5Dnone.c b/src/H5Dnone.c index 677ca67..0cadac2 100644 --- a/src/H5Dnone.c +++ b/src/H5Dnone.c @@ -82,6 +82,7 @@ static herr_t H5D__none_idx_dump(const H5O_storage_chunk_t *storage, FILE *strea /* Non Index chunk I/O ops */ const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{ + FALSE, /* Non-indexed chunking don't current support SWMR access */ NULL, /* init */ 
H5D__none_idx_create, /* create */ H5D__none_idx_is_space_alloc, /* is_space_alloc */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 12966f2..f54a9f2 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -325,6 +325,7 @@ typedef herr_t (*H5D_chunk_dest_func_t)(const H5D_chk_idx_info_t *idx_info); /* Typedef for grouping chunk I/O routines */ typedef struct H5D_chunk_ops_t { + hbool_t can_swim; /* Flag to indicate that the index supports SWMR access */ H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */ H5D_chunk_create_func_t create; /* Routine to create chunk index */ H5D_chunk_is_space_alloc_func_t is_space_alloc; /* Query routine to determine if storage/index is allocated */ diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c index e09a5cf..04b8971 100644 --- a/src/H5Dsingle.c +++ b/src/H5Dsingle.c @@ -84,6 +84,7 @@ static herr_t H5D__single_idx_dump(const H5O_storage_chunk_t *storage, FILE *str /* Non Index chunk I/O ops */ const H5D_chunk_ops_t H5D_COPS_SINGLE[1] = {{ + FALSE, /* Single Chunk indexing doesn't current support SWMR access */ H5D__single_idx_init, /* init */ H5D__single_idx_create, /* create */ H5D__single_idx_is_space_alloc, /* is_space_alloc */ diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index 3780b88..c85b1e9 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -768,7 +768,7 @@ H5D__virtual_open_source_dset(const H5D_t *vdset, /* Check if we need to open the source file */ if(HDstrcmp(source_dset->file_name, ".")) { /* Open the source file */ - if(NULL == (src_file = H5F_open(source_dset->file_name, H5F_INTENT(vdset->oloc.file) & H5F_ACC_RDWR, H5P_FILE_CREATE_DEFAULT, vdset->shared->layout.storage.u.virt.source_fapl, dxpl_id))) + if(NULL == (src_file = H5F_open(source_dset->file_name, H5F_INTENT(vdset->oloc.file) & (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ), H5P_FILE_CREATE_DEFAULT, vdset->shared->layout.storage.u.virt.source_fapl, dxpl_id))) H5E_clear_stack(NULL); /* Quick hack until proper support for H5Fopen with missing file is implemented */ else src_file_open = TRUE; @@ -795,7 +795,7 @@ END_FUNC(PRIV) /* end H5EA_get() */ */ BEGIN_FUNC(PRIV, ERR, herr_t, SUCCEED, FAIL, -H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea)) +H5EA_depend(H5EA_t *ea, hid_t dxpl_id, H5AC_proxy_entry_t *parent)) /* Local variables */ H5EA_hdr_t *hdr = ea->hdr; /* Header for EA */ @@ -805,13 +805,25 @@ H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea)) */ HDassert(ea); HDassert(hdr); + HDassert(parent); - /* Set the shared array header's file context for this operation */ - hdr->f = ea->f; + /* + * Check to see if a flush dependency between the extensible array + * and another data structure in the file has already been set up. + * If it hasn't, do so now. 
+ */ + if(NULL == hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); - /* Set up flush dependency between parent entry and extensible array header */ - if(H5EA__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0) - H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on file metadata") + /* Set the shared array header's file context for this operation */ + hdr->f = ea->f; + + /* Add the extensible array as a child of the parent (proxy) */ + if(H5AC_proxy_entry_add_child(parent, hdr->f, dxpl_id, hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array as child of proxy") + hdr->parent = parent; + } /* end if */ CATCH diff --git a/src/H5EAcache.c b/src/H5EAcache.c index b7846e9..1119e39 100644 --- a/src/H5EAcache.c +++ b/src/H5EAcache.c @@ -80,6 +80,7 @@ static void *H5EA__cache_hdr_deserialize(const void *image, size_t len, static herr_t H5EA__cache_hdr_image_len(const void *thing, size_t *image_len); static herr_t H5EA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5EA__cache_hdr_notify(H5AC_notify_action_t action, void *thing); static herr_t H5EA__cache_hdr_free_icr(void *thing); static herr_t H5EA__cache_iblock_get_initial_load_size(void *udata, size_t *image_len); @@ -142,7 +143,7 @@ const H5AC_class_t H5AC_EARRAY_HDR[1] = {{ H5EA__cache_hdr_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5EA__cache_hdr_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5EA__cache_hdr_notify, /* 'notify' callback */ H5EA__cache_hdr_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -521,6 +522,80 @@ END_FUNC(STATIC) /* end H5EA__cache_hdr_serialize() */ /*------------------------------------------------------------------------- + * Function: H5EA__cache_hdr_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 11/30/15 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(STATIC, ERR, +herr_t, SUCCEED, FAIL, +H5EA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)) + + /* Local variables */ + H5EA_hdr_t *hdr = (H5EA_hdr_t *)_thing; /* Pointer to the object */ + + /* Sanity check */ + HDassert(hdr); + + /* Check if the file was opened with SWMR-write access */ + if(hdr->swmr_write) { + /* Determine which action to take */ + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* If hdr->parent != NULL, hdr->parent is used to destroy + * the flush dependency before the header is evicted. 
+ */ + if(hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); + + /* Destroy flush dependency on object header proxy */ + if(H5AC_proxy_entry_remove_child((H5AC_proxy_entry_t *)hdr->parent, (void *)hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between extensible array and proxy") + hdr->parent = NULL; + } /* end if */ + + /* Detach from 'top' proxy for extensible array */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_remove_child(hdr->top_proxy, hdr) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between header and extensible array 'top' proxy") + /* Don't reset hdr->top_proxy here, it's destroyed when the header is freed -QAK */ + } /* end if */ + break; + + default: +#ifdef NDEBUG + H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + else + HDassert(NULL == hdr->parent); + +CATCH + +END_FUNC(STATIC) /* end H5EA__cache_hdr_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5EA__cache_hdr_free_icr * * Purpose: Destroy/release an "in core representation" of a data @@ -890,6 +965,13 @@ H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *_thing)) /* Destroy flush dependency on extensible array header */ if(H5EA__destroy_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0) H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr) + + /* Detach from 'top' proxy for extensible array */ + if(iblock->top_proxy) { + if(H5AC_proxy_entry_remove_child(iblock->top_proxy, iblock) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between index block and extensible array 'top' proxy") + iblock->top_proxy = NULL; + } /* end if */ break; default: @@ -1286,6 +1368,13 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *_thing)) H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and header, address = %llu", (unsigned long long)sblock->addr) sblock->has_hdr_depend = FALSE; } /* end if */ + + /* Detach from 'top' proxy for extensible array */ + if(sblock->top_proxy) { + if(H5AC_proxy_entry_remove_child(sblock->top_proxy, sblock) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and extensible array 'top' proxy") + sblock->top_proxy = NULL; + } /* end if */ break; case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: @@ -1690,6 +1779,13 @@ H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing)) H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and header, address = %llu", (unsigned long long)dblock->addr) dblock->has_hdr_depend = FALSE; } /* end if */ + + /* Detach from 'top' proxy for extensible array */ + if(dblock->top_proxy) { + if(H5AC_proxy_entry_remove_child(dblock->top_proxy, dblock) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and extensible array 'top' proxy") + dblock->top_proxy = NULL; + } /* end if */ break; case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: @@ -2062,6 +2158,13 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing)) H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and header, address = %llu", (unsigned long long)dblk_page->addr) dblk_page->has_hdr_depend = FALSE; } /* end if */ + + /* 
Detach from 'top' proxy for extensible array */ + if(dblk_page->top_proxy) { + if(H5AC_proxy_entry_remove_child(dblk_page->top_proxy, dblk_page) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and extensible array 'top' proxy") + dblk_page->top_proxy = NULL; + } /* end if */ break; case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: diff --git a/src/H5EAdblkpage.c b/src/H5EAdblkpage.c index b903a8a..927dcb6 100644 --- a/src/H5EAdblkpage.c +++ b/src/H5EAdblkpage.c @@ -155,6 +155,7 @@ H5EA__dblk_page_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent, /* Local variables */ H5EA_dblk_page_t *dblk_page = NULL; /* Extensible array data block page */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ /* Sanity check */ HDassert(hdr); @@ -174,10 +175,23 @@ H5EA__dblk_page_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent, /* Cache the new extensible array data block page */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page->addr, dblk_page, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add extensible array data block page to cache") + inserted = TRUE; + + /* Add data block page as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblk_page) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + dblk_page->top_proxy = hdr->top_proxy; + } /* end if */ CATCH if(ret_value < 0) if(dblk_page) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(dblk_page) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove extensible array data block page from cache") + /* Destroy data block page */ if(H5EA__dblk_page_dest(dblk_page) < 0) H5E_THROW(H5E_CANTFREE, "unable to destroy extensible array data block page") @@ -206,6 +220,7 @@ H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent, haddr_t dblk_page_addr, unsigned flags)) /* Local variables */ + H5EA_dblk_page_t *dblk_page = NULL; /* Extensible array data block page */ H5EA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */ /* Sanity check */ @@ -221,10 +236,27 @@ H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent, udata.dblk_page_addr = dblk_page_addr; /* Protect the data block page */ - if(NULL == (ret_value = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags))) + if(NULL == (dblk_page = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block page, address = %llu", (unsigned long long)dblk_page_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == dblk_page->top_proxy) { + /* Add data block page as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblk_page) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + dblk_page->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = dblk_page; + CATCH + /* Clean up on error */ + if(!ret_value) { + /* Release the data block page, if it was protected */ + if(dblk_page && H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page->addr, dblk_page, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect extensible array data block page, address = %llu", (unsigned 
long long)dblk_page->addr) + } /* end if */ END_FUNC(PKG) /* end H5EA__dblk_page_protect() */ @@ -299,6 +331,9 @@ H5EA__dblk_page_dest(H5EA_dblk_page_t *dblk_page)) dblk_page->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == dblk_page->top_proxy); + /* Free the data block page itself */ dblk_page = H5FL_FREE(H5EA_dblk_page_t, dblk_page); diff --git a/src/H5EAdblock.c b/src/H5EAdblock.c index 9511a9d..c288c69 100644 --- a/src/H5EAdblock.c +++ b/src/H5EAdblock.c @@ -168,6 +168,7 @@ H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent, /* Local variables */ H5EA_dblock_t *dblock = NULL; /* Extensible array data block */ haddr_t dblock_addr; /* Extensible array data block address */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ /* Sanity check */ HDassert(hdr); @@ -198,6 +199,14 @@ H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent, /* Cache the new extensible array data block */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblock_addr, dblock, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add extensible array data block to cache") + inserted = TRUE; + + /* Add data block as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + dblock->top_proxy = hdr->top_proxy; + } /* end if */ /* Update extensible array data block statistics */ hdr->stats.stored.ndata_blks++; @@ -215,6 +224,11 @@ H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent, CATCH if(!H5F_addr_defined(ret_value)) if(dblock) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(dblock) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove extensible array data block from cache") + /* Release data block's disk space */ if(H5F_addr_defined(dblock->addr) && H5MF_xfree(hdr->f, H5FD_MEM_EARRAY_DBLOCK, dxpl_id, dblock->addr, (hsize_t)dblock->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to release extensible array data block") @@ -284,7 +298,8 @@ H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent, haddr_t dblk_addr, size_t dblk_nelmts, unsigned flags)) /* Local variables */ - H5EA_dblock_cache_ud_t udata; /* Information needed for loading data block */ + H5EA_dblock_t *dblock; /* Extensible array data block */ + H5EA_dblock_cache_ud_t udata; /* Information needed for loading data block */ /* Sanity check */ HDassert(hdr); @@ -301,11 +316,29 @@ H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent, udata.dblk_addr = dblk_addr; /* Protect the data block */ - if(NULL == (ret_value = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, flags))) + if(NULL == (dblock = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)dblk_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == dblock->top_proxy) { + /* Add data block as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + dblock->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = dblock; + CATCH + /* Clean up on error */ + if(!ret_value) { + /* Release the data block, if it was protected */ + 
if(dblock && H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblock->addr, dblock, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect extensible array data block, address = %llu", (unsigned long long)dblock->addr) + } /* end if */ + END_FUNC(PKG) /* end H5EA__dblock_protect() */ @@ -442,6 +475,9 @@ H5EA__dblock_dest(H5EA_dblock_t *dblock)) dblock->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == dblock->top_proxy); + /* Free the data block itself */ dblock = H5FL_FREE(H5EA_dblock_t, dblock); diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c index d8be3cb..76d8733 100644 --- a/src/H5EAhdr.c +++ b/src/H5EAhdr.c @@ -135,6 +135,7 @@ H5EA__hdr_alloc(H5F_t *f)) /* Set the internal parameters for the array */ hdr->f = f; + hdr->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0; hdr->sizeof_addr = H5F_SIZEOF_ADDR(f); hdr->sizeof_size = H5F_SIZEOF_SIZE(f); @@ -361,6 +362,7 @@ H5EA__hdr_create(H5F_t *f, hid_t dxpl_id, const H5EA_create_t *cparam, /* Local variables */ H5EA_hdr_t *hdr = NULL; /* Extensible array header */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ /* Check arguments */ HDassert(f); @@ -418,9 +420,20 @@ H5EA__hdr_create(H5F_t *f, hid_t dxpl_id, const H5EA_create_t *cparam, if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_EARRAY_HDR, dxpl_id, (hsize_t)hdr->size))) H5E_THROW(H5E_CANTALLOC, "file allocation failed for extensible array header") + /* Create 'top' proxy for extensible array entries */ + if(hdr->swmr_write) + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + H5E_THROW(H5E_CANTCREATE, "can't create extensible array entry proxy") + /* Cache the new extensible array header */ if(H5AC_insert_entry(f, dxpl_id, H5AC_EARRAY_HDR, hdr->addr, hdr, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add extensible array header to cache") + inserted = TRUE; + + /* Add header as child of 'top' proxy */ + if(hdr->top_proxy) + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") /* Set address of array header to return */ ret_value = hdr->addr; @@ -428,6 +441,11 @@ H5EA__hdr_create(H5F_t *f, hid_t dxpl_id, const H5EA_create_t *cparam, CATCH if(!H5F_addr_defined(ret_value)) if(hdr) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(hdr) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove extensible array header from cache") + /* Release header's disk space */ if(H5F_addr_defined(hdr->addr) && H5MF_xfree(f, H5FD_MEM_EARRAY_HDR, dxpl_id, hdr->addr, (hsize_t)hdr->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to free extensible array header") @@ -614,6 +632,7 @@ H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata, unsigned flags)) /* Local variables */ + H5EA_hdr_t *hdr; /* Extensible array header */ H5EA_hdr_cache_ud_t udata; /* User data for cache callbacks */ /* Sanity check */ @@ -629,9 +648,23 @@ H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata, udata.ctx_udata = ctx_udata; /* Protect the header */ - if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, &udata, flags))) + if(NULL == (hdr = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr) - ret_value->f = f; /* (Must be set again here, in case the header was already in the cache -QAK) 
*/ + hdr->f = f; /* (Must be set again here, in case the header was already in the cache -QAK) */ + + /* Create top proxy, if it doesn't exist */ + if(hdr->swmr_write && NULL == hdr->top_proxy) { + /* Create 'top' proxy for extensible array entries */ + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + H5E_THROW(H5E_CANTCREATE, "can't create extensible array entry proxy") + + /* Add header as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + } /* end if */ + + /* Set return value */ + ret_value = hdr; CATCH @@ -779,6 +812,13 @@ H5EA__hdr_dest(H5EA_hdr_t *hdr)) if(hdr->sblk_info) hdr->sblk_info = (H5EA_sblk_info_t *)H5FL_SEQ_FREE(H5EA_sblk_info_t, hdr->sblk_info); + /* Destroy the 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_dest(hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTRELEASE, "unable to destroy extensible array 'top' proxy") + hdr->top_proxy = NULL; + } /* end if */ + /* Free the shared info itself */ hdr = H5FL_FREE(H5EA_hdr_t, hdr); diff --git a/src/H5EAiblock.c b/src/H5EAiblock.c index 48b52a4..a3723c5 100644 --- a/src/H5EAiblock.c +++ b/src/H5EAiblock.c @@ -183,6 +183,7 @@ H5EA__iblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, hbool_t *stats_changed)) /* Local variables */ H5EA_iblock_t *iblock = NULL; /* Extensible array index block */ haddr_t iblock_addr; /* Extensible array index block address */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ #ifdef QAK HDfprintf(stderr, "%s: Called\n", FUNC); @@ -233,6 +234,14 @@ HDfprintf(stderr, "%s: iblock->size = %Zu\n", FUNC, iblock->size); /* Cache the new extensible array index block */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, iblock_addr, iblock, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add extensible array index block to cache") + inserted = TRUE; + + /* Add index block as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, iblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + iblock->top_proxy = hdr->top_proxy; + } /* end if */ /* Update extensible array index block statistics */ HDassert(0 == hdr->stats.computed.nindex_blks); @@ -252,6 +261,11 @@ HDfprintf(stderr, "%s: iblock->size = %Zu\n", FUNC, iblock->size); CATCH if(!H5F_addr_defined(ret_value)) if(iblock) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(iblock) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove extensible array index block from cache") + /* Release index block's disk space */ if(H5F_addr_defined(iblock->addr) && H5MF_xfree(hdr->f, H5FD_MEM_EARRAY_IBLOCK, dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to release file space for extensible array index block") @@ -281,6 +295,9 @@ BEGIN_FUNC(PKG, ERR, H5EA_iblock_t *, NULL, NULL, H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned flags)) + /* Local variables */ + H5EA_iblock_t *iblock = NULL; /* Pointer to index block */ + #ifdef QAK HDfprintf(stderr, "%s: Called\n", FUNC); #endif /* QAK */ @@ -292,10 +309,27 @@ HDfprintf(stderr, "%s: Called\n", FUNC); HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0); /* Protect the index block */ - if(NULL == (ret_value = (H5EA_iblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, flags))) + if(NULL == (iblock = (H5EA_iblock_t 
*)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == iblock->top_proxy) { + /* Add index block as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, iblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + iblock->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = iblock; + CATCH + /* Clean up on error */ + if(!ret_value) { + /* Release the index block, if it was protected */ + if(iblock && H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, iblock->addr, iblock, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect extensible array index block, address = %llu", (unsigned long long)iblock->addr) + } /* end if */ END_FUNC(PKG) /* end H5EA__iblock_protect() */ @@ -470,6 +504,9 @@ H5EA__iblock_dest(H5EA_iblock_t *iblock)) iblock->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == iblock->top_proxy); + /* Free the index block itself */ iblock = H5FL_FREE(H5EA_iblock_t, iblock); diff --git a/src/H5EApkg.h b/src/H5EApkg.h index 70e6a55..093403c 100644 --- a/src/H5EApkg.h +++ b/src/H5EApkg.h @@ -206,6 +206,28 @@ typedef struct H5EA_hdr_t { /* Client information (not stored) */ void *cb_ctx; /* Callback context */ + + /* SWMR / Flush dependency information (not stored) */ + hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all array entries */ + void *parent; /* Pointer to 'top' proxy flush dependency + * parent, if it exists, otherwise NULL. + * If the extensible array is being used + * to index a chunked dataset and the + * dataset metadata is modified by a + * SWMR writer, this field will be set + * equal to the object header proxy + * that is the flush dependency parent + * of the extensible array header. + * + * The field is used to avoid duplicate + * setups of the flush dependency + * relationship, and to allow the + * extensible array header to destroy + * the flush dependency on receipt of + * an eviction notification from the + * metadata cache. 
+ */ } H5EA_hdr_t; /* The extensible array index block information */ @@ -223,6 +245,9 @@ typedef struct H5EA_iblock_t { haddr_t addr; /* Address of this index block on disk */ size_t size; /* Size of index block on disk */ + /* SWMR / Flush dependency information (not stored) */ + H5AC_proxy_entry_t *top_proxy; /* "Top" proxy cache entry for all array entries */ + /* Computed/cached values (not stored) */ size_t nsblks; /* # of super blocks whose data block addresses are in index block */ size_t ndblk_addrs; /* Number of pointers to data blocks in index block */ @@ -244,8 +269,9 @@ typedef struct H5EA_sblock_t { haddr_t addr; /* Address of this index block on disk */ size_t size; /* Size of index block on disk */ - /* Flush dependency information (not stored) */ + /* SWMR / Flush dependency information (not stored) */ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */ + H5AC_proxy_entry_t *top_proxy; /* "Top" proxy cache entry for all array entries */ H5EA_iblock_t *parent; /* Parent object for super block (index block) */ /* Computed/cached values (not stored) */ @@ -271,8 +297,9 @@ typedef struct H5EA_dblock_t { haddr_t addr; /* Address of this data block on disk */ size_t size; /* Size of data block on disk */ - /* Flush dependency information (not stored) */ + /* SWMR / Flush dependency information (not stored) */ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all array entries */ void *parent; /* Parent object for data block (index or super block) */ /* Computed/cached values (not stored) */ @@ -293,8 +320,9 @@ typedef struct H5EA_dbk_page_t { haddr_t addr; /* Address of this data block page on disk */ size_t size; /* Size of data block page on disk */ - /* Flush dependency information (not stored) */ + /* SWMR / Flush dependency information (not stored) */ hbool_t has_hdr_depend; /* Whether this object has a flush dependency on the header */ + H5AC_proxy_entry_t *top_proxy; /* "Top" proxy cache entry for all array entries */ H5EA_sblock_t *parent; /* Parent object for data block page (super block) */ /* Computed/cached values (not stored) */ diff --git a/src/H5EAprivate.h b/src/H5EAprivate.h index 0a1b945..66d88e0 100644 --- a/src/H5EAprivate.h +++ b/src/H5EAprivate.h @@ -142,7 +142,7 @@ H5_DLL herr_t H5EA_get_nelmts(const H5EA_t *ea, hsize_t *nelmts); H5_DLL herr_t H5EA_get_addr(const H5EA_t *ea, haddr_t *addr); H5_DLL herr_t H5EA_set(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, const void *elmt); H5_DLL herr_t H5EA_get(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, void *elmt); -H5_DLL herr_t H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea); +H5_DLL herr_t H5EA_depend(H5EA_t *ea, hid_t dxpl_id, H5AC_proxy_entry_t *parent); H5_DLL herr_t H5EA_iterate(H5EA_t *fa, hid_t dxpl_id, H5EA_operator_t op, void *udata); H5_DLL herr_t H5EA_close(H5EA_t *ea, hid_t dxpl_id); H5_DLL herr_t H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata); diff --git a/src/H5EAsblock.c b/src/H5EAsblock.c index 4f153cd..2fa87e0 100644 --- a/src/H5EAsblock.c +++ b/src/H5EAsblock.c @@ -195,6 +195,7 @@ H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent, H5EA_sblock_t *sblock = NULL; /* Extensible array super block */ haddr_t sblock_addr; /* Extensible array super block address */ haddr_t tmp_addr = HADDR_UNDEF; /* Address value to fill data block addresses with */ + hbool_t inserted = FALSE; /* Whether the header was inserted into 
cache */ /* Sanity check */ HDassert(hdr); @@ -221,6 +222,14 @@ H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent, /* Cache the new extensible array super block */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblock_addr, sblock, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add extensible array super block to cache") + inserted = TRUE; + + /* Add super block as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, sblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + sblock->top_proxy = hdr->top_proxy; + } /* end if */ /* Update extensible array super block statistics */ hdr->stats.stored.nsuper_blks++; @@ -235,6 +244,11 @@ H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent, CATCH if(!H5F_addr_defined(ret_value)) if(sblock) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(sblock) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove extensible array super block from cache") + /* Release super block's disk space */ if(H5F_addr_defined(sblock->addr) && H5MF_xfree(hdr->f, H5FD_MEM_EARRAY_SBLOCK, dxpl_id, sblock->addr, (hsize_t)sblock->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to release extensible array super block") @@ -266,6 +280,7 @@ H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent, haddr_t sblk_addr, unsigned sblk_idx, unsigned flags)) /* Local variables */ + H5EA_sblock_t *sblock = NULL; /* Pointer to super block */ H5EA_sblock_cache_ud_t udata; /* Information needed for loading super block */ /* Sanity check */ @@ -282,10 +297,27 @@ H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent, udata.sblk_addr = sblk_addr; /* Protect the super block */ - if(NULL == (ret_value = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, flags))) + if(NULL == (sblock = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)sblk_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == sblock->top_proxy) { + /* Add super block as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, sblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add extensible array entry as child of array proxy") + sblock->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = sblock; + CATCH + /* Clean up on error */ + if(!ret_value) { + /* Release the super block, if it was protected */ + if(sblock && H5AC_unprotect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblock->addr, sblock, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect extensible array super block, address = %llu", (unsigned long long)sblock->addr) + } /* end if */ END_FUNC(PKG) /* end H5EA__sblock_protect() */ @@ -409,6 +441,9 @@ H5EA__sblock_dest(H5EA_sblock_t *sblock)) sblock->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == sblock->top_proxy); + /* Free the super block itself */ sblock = H5FL_FREE(H5EA_sblock_t, sblock); @@ -456,9 +456,9 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id) if(!filename || !*filename) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file name") /* In this routine, we only accept the following flags: - * H5F_ACC_EXCL and H5F_ACC_TRUNC + * 
H5F_ACC_EXCL, H5F_ACC_TRUNC and H5F_ACC_SWMR_WRITE */ - if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC)) + if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags") /* The H5F_ACC_EXCL and H5F_ACC_TRUNC flags are mutually exclusive */ if((flags & H5F_ACC_EXCL) && (flags & H5F_ACC_TRUNC)) @@ -563,6 +563,12 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id) if((flags & ~H5F_ACC_PUBLIC_FLAGS) || (flags & H5F_ACC_TRUNC) || (flags & H5F_ACC_EXCL)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file open flags") + /* Asking for SWMR write access on a read-only file is invalid */ + if((flags & H5F_ACC_SWMR_WRITE) && 0 == (flags & H5F_ACC_RDWR)) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "SWMR write access on a file open for read-only access is not allowed") + /* Asking for SWMR read access on a non-read-only file is invalid */ + if((flags & H5F_ACC_SWMR_READ) && (flags & H5F_ACC_RDWR)) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "SWMR read access on a file open for read-write access is not allowed") /* Verify access property list and get correct dxpl */ if(H5P_verify_apl_and_dxpl(&fapl_id, H5P_CLS_FACC, &dxpl_id, H5I_INVALID_HID, TRUE) < 0) @@ -857,10 +863,20 @@ H5Fget_intent(hid_t file_id, unsigned *intent_flags) * Simplify things for them so that they only get either H5F_ACC_RDWR * or H5F_ACC_RDONLY. */ - if(H5F_INTENT(file) & H5F_ACC_RDWR) + if(H5F_INTENT(file) & H5F_ACC_RDWR) { *intent_flags = H5F_ACC_RDWR; - else + + /* Check for SWMR write access on the file */ + if(H5F_INTENT(file) & H5F_ACC_SWMR_WRITE) + *intent_flags |= H5F_ACC_SWMR_WRITE; + } /* end if */ + else { *intent_flags = H5F_ACC_RDONLY; + + /* Check for SWMR read access on the file */ + if(H5F_INTENT(file) & H5F_ACC_SWMR_READ) + *intent_flags |= H5F_ACC_SWMR_READ; + } /* end else */ } /* end if */ done: @@ -1541,6 +1557,222 @@ done: /*------------------------------------------------------------------------- + * Function: H5Fstart_swmr_write + * + * Purpose: To enable SWMR writing mode for the file + * 1) Refresh opened objects: part 1 + * 2) Flush & reset accumulator + * 3) Mark the file in SWMR writing mode + * 4) Set metadata read attempts and retries info + * 5) Disable accumulator + * 6) Evict all cache entries except the superblock + * 7) Refresh opened objects (part 2) + * 8) Unlock the file + * + * Pre-conditions: + * 1) The file being opened has v3 superblock + * 2) The file is opened with H5F_ACC_RDWR + * 3) The file is not already marked for SWMR writing + * 4) Current implementaion for opened objects: + * --only allow datasets and groups without attributes + * --disallow named datatype with/without attributes + * --disallow opened attributes attached to objects + * NOTE: Currently, only opened groups and datasets are allowed + * when enabling SWMR via H5Fstart_swmr_write(). + * Will later implement a different approach-- + * set up flush dependency/proxy even for file opened without + * SWMR to resolve issues with opened objects. 
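 *
 *              Illustrative usage from an application (the file name and
 *              property-list setup are placeholders; the latest-format
 *              bounds provide the version 3 superblock required above):
 *
 *                  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
 *                  H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
 *                  hid_t fid = H5Fcreate("swmr.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
 *                  ... create groups and chunked datasets; close any named
 *                      datatypes and attributes ...
 *                  if(H5Fstart_swmr_write(fid) < 0)
 *                      ... handle the error ...
 *                  ... continue writing under SWMR; readers may now open the
 *                      file with H5F_ACC_RDONLY | H5F_ACC_SWMR_READ ...
 *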
+ * + * Return: Non-negative on success/negative on failure + * + * Programmer: + * Vailin Choi; Feb 2014 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Fstart_swmr_write(hid_t file_id) +{ + H5F_t *file = NULL; /* File info */ + size_t grp_dset_count=0; /* # of open objects: groups & datasets */ + size_t nt_attr_count=0; /* # of opened named datatypes + opened attributes */ + hid_t *obj_ids=NULL; /* List of ids */ + H5G_loc_t *obj_glocs=NULL; /* Group location of the object */ + H5O_loc_t *obj_olocs=NULL; /* Object location */ + H5G_name_t *obj_paths=NULL; /* Group hierarchy path */ + size_t u; /* Local index variable */ + hbool_t setup = FALSE; /* Boolean flag to indicate whether SWMR setting is enabled */ + H5F_io_info_t fio_info; /* I/O info for operation */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE1("e", "i", file_id); + + /* check args */ + if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") + + /* Should have write permission */ + if((H5F_INTENT(file) & H5F_ACC_RDWR) == 0) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "no write intent on file") + + if(file->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_3) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "file superblock version should be at least 3") + HDassert(file->shared->latest_flags == H5F_LATEST_ALL_FLAGS); + + /* Should not be marked for SWMR writing mode already */ + if(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "file already in SWMR writing mode") + + HDassert(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS); + + /* Flush data buffers */ + if(H5F_flush(file, H5AC_ind_read_dxpl_id, FALSE) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information") + + /* Get the # of opened named datatypes and attributes */ + if(H5F_get_obj_count(file, H5F_OBJ_DATATYPE|H5F_OBJ_ATTR, FALSE, &nt_attr_count) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADITER, FAIL, "H5F_get_obj_count failed") + if(nt_attr_count) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "named datatypes and/or attributes opened in the file") + + /* Get the # of opened datasets and groups */ + if(H5F_get_obj_count(file, H5F_OBJ_GROUP|H5F_OBJ_DATASET, FALSE, &grp_dset_count) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADITER, FAIL, "H5F_get_obj_count failed") + + if(grp_dset_count) { + /* Allocate space for group and object locations */ + if((obj_ids = (hid_t *) H5MM_malloc(grp_dset_count * sizeof(hid_t))) == NULL) + HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for hid_t") + if((obj_glocs = (H5G_loc_t *) H5MM_malloc(grp_dset_count * sizeof(H5G_loc_t))) == NULL) + HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5G_loc_t") + if((obj_olocs = (H5O_loc_t *) H5MM_malloc(grp_dset_count * sizeof(H5O_loc_t))) == NULL) + HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5O_loc_t") + if((obj_paths = (H5G_name_t *) H5MM_malloc(grp_dset_count * sizeof(H5G_name_t))) == NULL) + HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5G_name_t") + + /* Get the list of opened object ids (groups & datasets) */ + if(H5F_get_obj_ids(file, H5F_OBJ_GROUP|H5F_OBJ_DATASET, grp_dset_count, obj_ids, FALSE, &grp_dset_count) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "H5F_get_obj_ids failed") + + /* Refresh opened objects (groups, datasets) in the file */ + for(u = 0; u 
< grp_dset_count; u++) { + H5O_loc_t *oloc; /* object location */ + H5G_loc_t tmp_loc; + + /* Set up the id's group location */ + obj_glocs[u].oloc = &obj_olocs[u]; + obj_glocs[u].path = &obj_paths[u]; + H5G_loc_reset(&obj_glocs[u]); + + /* get the id's object location */ + if((oloc = H5O_get_loc(obj_ids[u])) == NULL) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an object") + + /* Make deep local copy of object's location information */ + H5G_loc(obj_ids[u], &tmp_loc); + H5G_loc_copy(&obj_glocs[u], &tmp_loc, H5_COPY_DEEP); + + /* Close the object */ + if(H5I_dec_ref(obj_ids[u]) < 0) + HGOTO_ERROR(H5E_ATOM, H5E_CANTCLOSEOBJ, FAIL, "decrementing object ID failed") + } /* end for */ + } /* end if */ + + /* Set up I/O info for operation */ + fio_info.f = file; + if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list") + + /* Flush and reset the accumulator */ + if(H5F__accum_reset(&fio_info, TRUE) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator") + + /* Turn on SWMR write in shared file open flags */ + file->shared->flags |= H5F_ACC_SWMR_WRITE; + + /* Mark the file in SWMR writing mode */ + file->shared->sblock->status_flags |= H5F_SUPER_SWMR_WRITE_ACCESS; + + /* Set up metadata read attempts */ + file->shared->read_attempts = H5F_SWMR_METADATA_READ_ATTEMPTS; + + /* Initialize "retries" and "retries_nbins" */ + if(H5F_set_retries(file) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "can't set retries and retries_nbins") + + /* Turn off usage of accumulator */ + file->shared->feature_flags &= ~(unsigned)H5FD_FEAT_ACCUMULATE_METADATA; + if(H5FD_set_feature_flags(file->shared->lf, file->shared->feature_flags) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "can't set feature_flags in VFD") + + setup = TRUE; + + /* Mark superblock as dirty */ + if(H5F_super_dirty(file) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty") + + /* Flush the superblock */ + if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, H5AC_ind_read_dxpl_id) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush superblock") + + /* Evict all flushed entries in the cache except the pinned superblock */ + if(H5F__evict_cache_entries(file, H5AC_ind_read_dxpl_id) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to evict file's cached information") + + /* Refresh (reopen) the objects (groups & datasets) in the file */ + for(u = 0; u < grp_dset_count; u++) + if(H5O_refresh_metadata_reopen(obj_ids[u], &obj_glocs[u], H5AC_ind_read_dxpl_id, TRUE) < 0) + HGOTO_ERROR(H5E_ATOM, H5E_CLOSEERROR, FAIL, "can't refresh-close object") + + /* Unlock the file */ + if(H5FD_unlock(file->shared->lf) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to unlock the file") + +done: + if(ret_value < 0 && setup) { + HDassert(file); + + /* Re-enable accumulator */ + file->shared->feature_flags |= (unsigned)H5FD_FEAT_ACCUMULATE_METADATA; + if(H5FD_set_feature_flags(file->shared->lf, file->shared->feature_flags) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "can't set feature_flags in VFD") + + /* Reset the # of read attempts */ + file->shared->read_attempts = H5F_METADATA_READ_ATTEMPTS; + if(H5F_set_retries(file) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "can't set retries and retries_nbins") + + /* Un-set H5F_ACC_SWMR_WRITE in shared open flags */ + file->shared->flags &= ~H5F_ACC_SWMR_WRITE; + + /* Unmark the file: not in SWMR writing mode */ + 
file->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_SWMR_WRITE_ACCESS); + + /* Mark superblock as dirty */ + if(H5F_super_dirty(file) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty") + + /* Flush the superblock */ + if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, H5AC_ind_read_dxpl_id) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush superblock") + } /* end if */ + + /* Free memory */ + if(obj_ids) + H5MM_xfree(obj_ids); + if(obj_glocs) + H5MM_xfree(obj_glocs); + if(obj_olocs) + H5MM_xfree(obj_olocs); + if(obj_paths) + H5MM_xfree(obj_paths); + + FUNC_LEAVE_API(ret_value) +} /* end H5Fstart_swmr_write() */ + + +/*------------------------------------------------------------------------- * Function: H5Fstart_mdc_logging * * Purpose: Start metadata cache logging operations for a file. @@ -737,6 +737,56 @@ END_FUNC(PRIV) /* end H5FA_iterate() */ /*------------------------------------------------------------------------- + * Function: H5FA_depend + * + * Purpose: Make a child flush dependency between the fixed array + * and another piece of metadata in the file. + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PRIV, ERR, +herr_t, SUCCEED, FAIL, +H5FA_depend(H5FA_t *fa, hid_t dxpl_id, H5AC_proxy_entry_t *parent)) + + /* Local variables */ + H5FA_hdr_t *hdr = fa->hdr; /* Header for FA */ + + /* + * Check arguments. + */ + HDassert(fa); + HDassert(hdr); + HDassert(parent); + + /* + * Check to see if a flush dependency between the fixed array + * and another data structure in the file has already been set up. + * If it hasn't, do so now. + */ + if(NULL == hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); + + /* Set the shared array header's file context for this operation */ + hdr->f = fa->f; + + /* Add the fixed array as a child of the parent (proxy) */ + if(H5AC_proxy_entry_add_child(parent, hdr->f, dxpl_id, hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array as child of proxy") + hdr->parent = parent; + } /* end if */ + +CATCH + +END_FUNC(PRIV) /* end H5FA_depend() */ + + +/*------------------------------------------------------------------------- * Function: H5FA_patch_file * * Purpose: Patch the top-level file pointer contained in fa diff --git a/src/H5FAcache.c b/src/H5FAcache.c index 82b87dd..11e8571 100644 --- a/src/H5FAcache.c +++ b/src/H5FAcache.c @@ -78,6 +78,7 @@ static void *H5FA__cache_hdr_deserialize(const void *image, size_t len, static herr_t H5FA__cache_hdr_image_len(const void *thing, size_t *image_len); static herr_t H5FA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5FA__cache_hdr_notify(H5AC_notify_action_t action, void *thing); static herr_t H5FA__cache_hdr_free_icr(void *thing); static herr_t H5FA__cache_dblock_get_initial_load_size(void *udata, size_t *image_len); @@ -87,6 +88,7 @@ static void *H5FA__cache_dblock_deserialize(const void *image, size_t len, static herr_t H5FA__cache_dblock_image_len(const void *thing, size_t *image_len); static herr_t H5FA__cache_dblock_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5FA__cache_dblock_notify(H5AC_notify_action_t action, void *thing); static herr_t H5FA__cache_dblock_free_icr(void *thing); static herr_t H5FA__cache_dblock_fsf_size(const void *thing, size_t *fsf_size); @@ -97,6 +99,7 @@ static void 
*H5FA__cache_dblk_page_deserialize(const void *image, size_t len, static herr_t H5FA__cache_dblk_page_image_len(const void *thing, size_t *image_len); static herr_t H5FA__cache_dblk_page_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5FA__cache_dblk_page_notify(H5AC_notify_action_t action, void *thing); static herr_t H5FA__cache_dblk_page_free_icr(void *thing); @@ -117,7 +120,7 @@ const H5AC_class_t H5AC_FARRAY_HDR[1] = {{ H5FA__cache_hdr_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5FA__cache_hdr_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5FA__cache_hdr_notify, /* 'notify' callback */ H5FA__cache_hdr_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -135,7 +138,7 @@ const H5AC_class_t H5AC_FARRAY_DBLOCK[1] = {{ H5FA__cache_dblock_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5FA__cache_dblock_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5FA__cache_dblock_notify, /* 'notify' callback */ H5FA__cache_dblock_free_icr, /* 'free_icr' callback */ H5FA__cache_dblock_fsf_size, /* 'fsf_size' callback */ }}; @@ -153,7 +156,7 @@ const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1] = {{ H5FA__cache_dblk_page_image_len, /* 'image_len' callback */ NULL, /* 'pre_serialize' callback */ H5FA__cache_dblk_page_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5FA__cache_dblk_page_notify, /* 'notify' callback */ H5FA__cache_dblk_page_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -440,6 +443,80 @@ END_FUNC(STATIC) /* end H5FA__cache_hdr_serialize() */ /*------------------------------------------------------------------------- + * Function: H5FA__cache_hdr_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Dana Robinson + * December 2015 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(STATIC, ERR, +herr_t, SUCCEED, FAIL, +H5FA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)) + + /* Local variables */ + H5FA_hdr_t *hdr = (H5FA_hdr_t *)_thing; /* Pointer to the object */ + + /* Sanity check */ + HDassert(hdr); + + /* Check if the file was opened with SWMR-write access */ + if(hdr->swmr_write) { + /* Determine which action to take */ + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* If hdr->parent != NULL, hdr->parent is used to destroy + * the flush dependency before the header is evicted. 
+ */ + if(hdr->parent) { + /* Sanity check */ + HDassert(hdr->top_proxy); + + /* Destroy flush dependency on object header proxy */ + if(H5AC_proxy_entry_remove_child((H5AC_proxy_entry_t *)hdr->parent, (void *)hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between fixed array and proxy") + hdr->parent = NULL; + } /* end if */ + + /* Detach from 'top' proxy for fixed array */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_remove_child(hdr->top_proxy, hdr) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between header and fixed array 'top' proxy") + /* Don't reset hdr->top_proxy here, it's destroyed when the header is freed -QAK */ + } /* end if */ + break; + + default: +#ifdef NDEBUG + H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + else + HDassert(NULL == hdr->parent); + +CATCH + +END_FUNC(STATIC) /* end H5FA__cache_hdr_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5FA__cache_hdr_free_icr * * Purpose: Destroy/release an "in core representation" of a data @@ -765,6 +842,75 @@ END_FUNC(STATIC) /* end H5FA__cache_dblock_serialize() */ /*------------------------------------------------------------------------- + * Function: H5FA__cache_dblock_notify + * + * Purpose: Handle cache action notifications + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(STATIC, ERR, +herr_t, SUCCEED, FAIL, +H5FA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing)) + + /* Local variables */ + H5FA_dblock_t *dblock = (H5FA_dblock_t *)_thing; + + /* Sanity check */ + HDassert(dblock); + + /* Check if the file was opened with SWMR-write access */ + if(dblock->hdr->swmr_write) { + /* Determine which action to take */ + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + /* Create flush dependency on parent */ + if(H5FA__create_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0) + H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and header, address = %llu", (unsigned long long)dblock->addr) + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Destroy flush dependency on parent */ + if(H5FA__destroy_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency") + + /* Detach from 'top' proxy for fixed array */ + if(dblock->top_proxy) { + if(H5AC_proxy_entry_remove_child(dblock->top_proxy, dblock) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and fixed array 'top' proxy") + dblock->top_proxy = NULL; + } /* end if */ + break; + + default: +#ifdef NDEBUG + H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + +CATCH + +END_FUNC(STATIC) /* end H5FA__cache_dblock_notify() */ + + + +/*------------------------------------------------------------------------- * Function: 
H5FA__cache_dblock_free_icr * * Purpose: Destroy/release an "in core representation" of a data @@ -1065,6 +1211,66 @@ END_FUNC(STATIC) /* end H5FA__cache_dblk_page_serialize() */ /*------------------------------------------------------------------------- + * Function: H5FA__cache_dblk_page_notify + * + * Purpose: Handle cache action notifications + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * koziol@lbl.gov + * Oct 17 2016 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(STATIC, ERR, +herr_t, SUCCEED, FAIL, +H5FA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing)) + + /* Local variables */ + H5FA_dblk_page_t *dblk_page = (H5FA_dblk_page_t *)_thing; /* Pointer to the object */ + + /* Sanity check */ + HDassert(dblk_page); + + /* Determine which action to take */ + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Detach from 'top' proxy for fixed array */ + if(dblk_page->top_proxy) { + if(H5AC_proxy_entry_remove_child(dblk_page->top_proxy, dblk_page) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and fixed array 'top' proxy") + dblk_page->top_proxy = NULL; + } /* end if */ + break; + + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + default: +#ifdef NDEBUG + H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + +CATCH + +END_FUNC(STATIC) /* end H5FA__cache_dblk_page_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5FA__cache_dblk_page_free_icr * * Purpose: Destroy/release an "in core representation" of a data diff --git a/src/H5FAdblkpage.c b/src/H5FAdblkpage.c index 054ffb9..09278f8 100644 --- a/src/H5FAdblkpage.c +++ b/src/H5FAdblkpage.c @@ -153,6 +153,7 @@ H5FA__dblk_page_create(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, size_t nelm /* Local variables */ H5FA_dblk_page_t *dblk_page = NULL; /* Fixed array data block page */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ #ifdef H5FA_DEBUG HDfprintf(stderr, "%s: Called, addr = %a\n", FUNC, addr); @@ -179,10 +180,23 @@ HDfprintf(stderr, "%s: dblk_page->size = %Zu\n", FUNC, dblk_page->size); /* Cache the new fixed array data block page */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page->addr, dblk_page, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add fixed array data block page to cache") + inserted = TRUE; + + /* Add data block page as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblk_page) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") + dblk_page->top_proxy = hdr->top_proxy; + } /* end if */ CATCH if(ret_value < 0) if(dblk_page) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(dblk_page) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove fixed array data block page from cache") + /* Destroy data block page */ if(H5FA__dblk_page_dest(dblk_page) < 0) H5E_THROW(H5E_CANTFREE, "unable to destroy fixed array data 
block page") @@ -210,6 +224,7 @@ H5FA__dblk_page_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_page_addr, size_t dblk_page_nelmts, unsigned flags)) /* Local variables */ + H5FA_dblk_page_t *dblk_page = NULL; /* Fixed array data block page */ H5FA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */ #ifdef H5FA_DEBUG @@ -229,11 +244,29 @@ HDfprintf(stderr, "%s: Called\n", FUNC); udata.dblk_page_addr = dblk_page_addr; /* Protect the data block page */ - if(NULL == (ret_value = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags))) + if(NULL == (dblk_page = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == dblk_page->top_proxy) { + /* Add data block page as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblk_page) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") + dblk_page->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = dblk_page; + CATCH + /* Clean up on error */ + if(!ret_value) { + /* Release the data block page, if it was protected */ + if(dblk_page && H5AC_unprotect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page->addr, dblk_page, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect fixed array data block page, address = %llu", (unsigned long long)dblk_page->addr) + } /* end if */ + END_FUNC(PKG) /* end H5FA__dblk_page_protect() */ @@ -306,6 +339,9 @@ H5FA__dblk_page_dest(H5FA_dblk_page_t *dblk_page)) dblk_page->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == dblk_page->top_proxy); + /* Free the data block page itself */ dblk_page = H5FL_FREE(H5FA_dblk_page_t, dblk_page); diff --git a/src/H5FAdblock.c b/src/H5FAdblock.c index f597c8e..440447d 100644 --- a/src/H5FAdblock.c +++ b/src/H5FAdblock.c @@ -191,6 +191,7 @@ H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty)) /* Local variables */ H5FA_dblock_t *dblock = NULL; /* Fixed array data block */ haddr_t dblock_addr; /* Fixed array data block address */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ /* Sanity check */ HDassert(hdr); @@ -217,6 +218,14 @@ H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty)) /* Cache the new fixed array data block */ if(H5AC_insert_entry(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblock_addr, dblock, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add fixed array data block to cache") + inserted = TRUE; + + /* Add data block as child of 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") + dblock->top_proxy = hdr->top_proxy; + } /* end if */ /* Mark the header dirty (for updating statistics) */ *hdr_dirty = TRUE; @@ -228,6 +237,11 @@ CATCH if(!H5F_addr_defined(ret_value)) if(dblock) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(dblock) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove fixed array data block from cache") + /* Release data block's disk space */ if(H5F_addr_defined(dblock->addr) && H5MF_xfree(hdr->f, H5FD_MEM_FARRAY_DBLOCK, dxpl_id, 
dblock->addr, (hsize_t)dblock->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to release fixed array data block") @@ -258,6 +272,7 @@ H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr, unsigned flags)) /* Local variables */ + H5FA_dblock_t *dblock; /* Fixed array data block */ H5FA_dblock_cache_ud_t udata; /* Information needed for loading data block */ /* Sanity check */ @@ -272,11 +287,28 @@ H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr, udata.dblk_addr = dblk_addr; /* Protect the data block */ - if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, flags))) + if(NULL == (dblock = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr) + /* Create top proxy, if it doesn't exist */ + if(hdr->top_proxy && NULL == dblock->top_proxy) { + /* Add data block as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, hdr->f, dxpl_id, dblock) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") + dblock->top_proxy = hdr->top_proxy; + } /* end if */ + + /* Set return value */ + ret_value = dblock; + CATCH + /* Clean up on error */ + if(!ret_value) + /* Release the data block, if it was protected */ + if(dblock && H5AC_unprotect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblock->addr, dblock, H5AC__NO_FLAGS_SET) < 0) + H5E_THROW(H5E_CANTUNPROTECT, "unable to unprotect fixed array data block, address = %llu", (unsigned long long)dblock->addr) + END_FUNC(PKG) /* end H5FA__dblock_protect() */ @@ -408,6 +440,9 @@ H5FA__dblock_dest(H5FA_dblock_t *dblock)) dblock->hdr = NULL; } /* end if */ + /* Sanity check */ + HDassert(NULL == dblock->top_proxy); + /* Free the data block itself */ dblock = H5FL_FREE(H5FA_dblock_t, dblock); diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c index d6e32dc..52b90d1 100644 --- a/src/H5FAhdr.c +++ b/src/H5FAhdr.c @@ -112,6 +112,7 @@ H5FA__hdr_alloc(H5F_t *f)) /* Set the internal parameters for the array */ hdr->f = f; + hdr->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0; hdr->sizeof_addr = H5F_SIZEOF_ADDR(f); hdr->sizeof_size = H5F_SIZEOF_SIZE(f); @@ -184,6 +185,7 @@ H5FA__hdr_create(H5F_t *f, hid_t dxpl_id, const H5FA_create_t *cparam, /* Local variables */ H5FA_hdr_t *hdr = NULL; /* Fixed array header */ + hbool_t inserted = FALSE; /* Whether the header was inserted into cache */ /* Check arguments */ HDassert(f); @@ -218,9 +220,20 @@ H5FA__hdr_create(H5F_t *f, hid_t dxpl_id, const H5FA_create_t *cparam, if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_FARRAY_HDR, dxpl_id, (hsize_t)hdr->size))) H5E_THROW(H5E_CANTALLOC, "file allocation failed for Fixed Array header") + /* Create 'top' proxy for extensible array entries */ + if(hdr->swmr_write) + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + H5E_THROW(H5E_CANTCREATE, "can't create fixed array entry proxy") + /* Cache the new Fixed Array header */ if(H5AC_insert_entry(f, dxpl_id, H5AC_FARRAY_HDR, hdr->addr, hdr, H5AC__NO_FLAGS_SET) < 0) H5E_THROW(H5E_CANTINSERT, "can't add fixed array header to cache") + inserted = TRUE; + + /* Add header as child of 'top' proxy */ + if(hdr->top_proxy) + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") /* Set address of array header to return */ ret_value = hdr->addr; @@ 
-229,6 +242,11 @@ CATCH if(!H5F_addr_defined(ret_value)) if(hdr) { + /* Remove from cache, if inserted */ + if(inserted) + if(H5AC_remove_entry(hdr) < 0) + H5E_THROW(H5E_CANTREMOVE, "unable to remove fixed array header from cache") + /* Release header's disk space */ if(H5F_addr_defined(hdr->addr) && H5MF_xfree(f, H5FD_MEM_FARRAY_HDR, dxpl_id, hdr->addr, (hsize_t)hdr->size) < 0) H5E_THROW(H5E_CANTFREE, "unable to free Fixed Array header") @@ -409,6 +427,7 @@ H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata, unsigned flags)) /* Local variables */ + H5FA_hdr_t *hdr; /* Fixed array header */ H5FA_hdr_cache_ud_t udata; /* User data for cache callbacks */ /* Sanity check */ @@ -424,9 +443,23 @@ H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata, udata.ctx_udata = ctx_udata; /* Protect the header */ - if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, &udata, flags))) + if(NULL == (hdr = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, &udata, flags))) H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr) - ret_value->f = f; /* (Must be set again here, in case the header was already in the cache -QAK) */ + hdr->f = f; /* (Must be set again here, in case the header was already in the cache -QAK) */ + + /* Create top proxy, if it doesn't exist */ + if(hdr->swmr_write && NULL == hdr->top_proxy) { + /* Create 'top' proxy for fixed array entries */ + if(NULL == (hdr->top_proxy = H5AC_proxy_entry_create())) + H5E_THROW(H5E_CANTCREATE, "can't create fixed array entry proxy") + + /* Add header as child of 'top' proxy */ + if(H5AC_proxy_entry_add_child(hdr->top_proxy, f, dxpl_id, hdr) < 0) + H5E_THROW(H5E_CANTSET, "unable to add fixed array entry as child of array proxy") + } /* end if */ + + /* Set return value */ + ret_value = hdr; CATCH @@ -547,6 +580,13 @@ H5FA__hdr_dest(H5FA_hdr_t *hdr)) } /* end if */ hdr->cb_ctx = NULL; + /* Destroy the 'top' proxy */ + if(hdr->top_proxy) { + if(H5AC_proxy_entry_dest(hdr->top_proxy) < 0) + H5E_THROW(H5E_CANTRELEASE, "unable to destroy fixed array 'top' proxy") + hdr->top_proxy = NULL; + } /* end if */ + /* Free the shared info itself */ hdr = H5FL_FREE(H5FA_hdr_t, hdr); diff --git a/src/H5FAint.c b/src/H5FAint.c new file mode 100644 index 0000000..331227b --- /dev/null +++ b/src/H5FAint.c @@ -0,0 +1,139 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5FAint.c + * Fall 2012 + * Dana Robinson <derobins@hdfgroup.org> + * + * Purpose: Internal routines for fixed arrays. + * + *------------------------------------------------------------------------- + */ + +/**********************/ +/* Module Declaration */ +/**********************/ + +#include "H5FAmodule.h" /* This source code file is part of the H5FA module */ + + +/***********************/ +/* Other Packages Used */ +/***********************/ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error Handling */ +#include "H5FApkg.h" /* Fixed Arrays */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ + + +/*********************/ +/* Package Variables */ +/*********************/ + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + + + +/*------------------------------------------------------------------------- + * Function: H5FA__create_flush_depend + * + * Purpose: Create a flush dependency between two data structure components + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PKG, ERR, +herr_t, SUCCEED, FAIL, +H5FA__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)) + + /* Sanity check */ + HDassert(parent_entry); + HDassert(child_entry); + + /* Create a flush dependency between parent and child entry */ + if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0) + H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency") + +CATCH + +END_FUNC(PKG) /* end H5FA__create_flush_depend() */ + + +/*------------------------------------------------------------------------- + * Function: H5FA__destroy_flush_depend + * + * Purpose: Destroy a flush dependency between two data structure components + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PKG, ERR, +herr_t, SUCCEED, FAIL, +H5FA__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)) + + /* Sanity check */ + HDassert(parent_entry); + HDassert(child_entry); + + /* Destroy a flush dependency between parent and child entry */ + if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0) + H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency") + +CATCH + +END_FUNC(PKG) /* end H5FA__destroy_flush_depend() */ + diff --git a/src/H5FApkg.h b/src/H5FApkg.h index c29322a..ccef562 100644 --- a/src/H5FApkg.h +++ b/src/H5FApkg.h @@ -144,6 +144,28 @@ typedef struct H5FA_hdr_t { /* Client information (not stored) */ void *cb_ctx; /* Callback context */ + + /* SWMR / Flush dependency information (not stored) */ + hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all array entries */ + void *parent; /* Pointer to 'top' proxy flush dependency + * 
parent, if it exists, otherwise NULL. + * If the fixed array is being used + * to index a chunked dataset and the + * dataset metadata is modified by a + * SWMR writer, this field will be set + * equal to the object header proxy + * that is the flush dependency parent + * of the fixed array header. + * + * The field is used to avoid duplicate + * setups of the flush dependency + * relationship, and to allow the + * fixed array header to destroy + * the flush dependency on receipt of + * an eviction notification from the + * metadata cache. + */ } H5FA_hdr_t; /* The fixed array data block information */ @@ -158,6 +180,9 @@ typedef struct H5FA_dblock_t { /* Internal array information (not stored) */ H5FA_hdr_t *hdr; /* Shared array header info */ + /* SWMR / Flush dependency information (not stored) */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all array entries */ + /* Computed/cached values (not stored) */ haddr_t addr; /* Address of this data block on disk */ hsize_t size; /* Size of data block on disk */ @@ -181,6 +206,9 @@ typedef struct H5FA_dbk_page_t { /* Internal array information (not stored) */ H5FA_hdr_t *hdr; /* Shared array header info */ + /* SWMR / Flush dependency information (not stored) */ + H5AC_proxy_entry_t *top_proxy; /* 'Top' proxy cache entry for all array entries */ + /* Computed/cached values (not stored) */ haddr_t addr; /* Address of this data block page on disk */ size_t size; /* Size of data block page on disk */ @@ -241,6 +269,12 @@ H5_DLLVAR const H5FA_class_t *const H5FA_client_class_g[H5FA_NUM_CLS_ID]; /* Package Private Prototypes */ /******************************/ +/* Generic routines */ +H5_DLL herr_t H5FA__create_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); +H5_DLL herr_t H5FA__destroy_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); + /* Header routines */ H5_DLL H5FA_hdr_t *H5FA__hdr_alloc(H5F_t *f); H5_DLL herr_t H5FA__hdr_init(H5FA_hdr_t *hdr, void *ctx_udata); diff --git a/src/H5FAprivate.h b/src/H5FAprivate.h index 1e76468..612f3a2 100644 --- a/src/H5FAprivate.h +++ b/src/H5FAprivate.h @@ -32,6 +32,7 @@ #endif /* NOT_YET */ /* Private headers needed by this file */ +#include "H5ACprivate.h" /* Metadata cache */ #include "H5Fprivate.h" /* File access */ @@ -126,6 +127,7 @@ H5_DLL herr_t H5FA_get_nelmts(const H5FA_t *fa, hsize_t *nelmts); H5_DLL herr_t H5FA_get_addr(const H5FA_t *fa, haddr_t *addr); H5_DLL herr_t H5FA_set(const H5FA_t *fa, hid_t dxpl_id, hsize_t idx, const void *elmt); H5_DLL herr_t H5FA_get(const H5FA_t *fa, hid_t dxpl_id, hsize_t idx, void *elmt); +H5_DLL herr_t H5FA_depend(H5FA_t *fa, hid_t dxpl_id, H5AC_proxy_entry_t *parent); H5_DLL herr_t H5FA_iterate(H5FA_t *fa, hid_t dxpl_id, H5FA_operator_t op, void *udata); H5_DLL herr_t H5FA_close(H5FA_t *fa, hid_t dxpl_id); H5_DLL herr_t H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata); @@ -811,6 +811,9 @@ H5FD_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr) if(NULL == (file = (driver->open)(name, flags, fapl_id, maxaddr))) HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, NULL, "open failed") + /* Set the file access flags */ + file->access_flags = flags; + /* * Fill in public fields. We must increment the reference count on the * driver ID to prevent it from being freed while this file is open. 
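
To illustrate the hdr->parent bookkeeping described in the comment above, here is a minimal caller-side sketch of the new H5FA_depend() routine from this patch. The helper name and the chunk-index framing are illustrative assumptions, not part of the change.

/* Hypothetical chunk-index helper: make the fixed array that indexes a
 * dataset's chunks a flush-dependency child of the dataset's object header
 * proxy.  Because H5FA_depend() records the parent in hdr->parent and skips
 * the H5AC_proxy_entry_add_child() call when a parent is already recorded,
 * it is safe to call this every time the index is opened. */
static herr_t
my_farray_idx_depend(H5FA_t *fa, hid_t dxpl_id, H5AC_proxy_entry_t *oh_proxy)
{
    herr_t ret_value = SUCCEED;

    HDassert(fa);
    HDassert(oh_proxy);

    /* Register the array (via its 'top' proxy) under the object header proxy */
    if(H5FA_depend(fa, dxpl_id, oh_proxy) < 0)
        ret_value = FAIL;

    return ret_value;
}
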
@@ -1441,6 +1444,32 @@ H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags) /*------------------------------------------------------------------------- + * Function: H5FD_set_feature_flags + * + * Purpose: Set the feature flags for the VFD + * + * Return: Success: Non-negative + * Failure: Negative + * + * Programmer: Vailin Choi; Oct 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + HDassert(file); + + /* Set the file's feature flags */ + file->feature_flags = feature_flags; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5FD_set_feature_flags() */ + + +/*------------------------------------------------------------------------- * Function: H5FD_get_fs_type_map * * Purpose: Retrieve the free space type mapping for the VFD diff --git a/src/H5FDint.c b/src/H5FDint.c index cd48197..744c3d1 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -207,9 +207,16 @@ H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr, if(HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") - if((addr + file->base_addr + size) > eoa) - HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size=%llu, eoa=%llu", - (unsigned long long)(addr+ file->base_addr), (unsigned long long)size, (unsigned long long)eoa) + + /* + * If the file is open for SWMR read access, allow access to data past + * the end of the allocated space (the 'eoa'). This is done because the + * eoa stored in the file's superblock might be out of sync with the + * objects being written within the file by the application performing + * SWMR write operations. 
+ */ + if(!(file->access_flags & H5F_ACC_SWMR_READ) && ((addr + file->base_addr + size) > eoa)) + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size = %llu, eoa = %llu", (unsigned long long)(addr + file->base_addr), (unsigned long long)size, (unsigned long long)eoa) /* Dispatch to driver */ if((file->cls->read)(file, type, H5P_PLIST_ID(dxpl), addr + file->base_addr, size, buf) < 0) diff --git a/src/H5FDlog.c b/src/H5FDlog.c index a2891cb..03228d2 100644 --- a/src/H5FDlog.c +++ b/src/H5FDlog.c @@ -898,6 +898,7 @@ H5FD_log_query(const H5FD_t *_file, unsigned long *flags /* out */) *flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */ *flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */ *flags |= H5FD_FEAT_POSIX_COMPAT_HANDLE; /* VFD handle is POSIX I/O call compatible */ + *flags |= H5FD_FEAT_SUPPORTS_SWMR_IO; /* VFD supports the single-writer/multiple-readers (SWMR) pattern */ /* Check for flags that are set by h5repart */ if(file && file->fam_to_sec2) diff --git a/src/H5FDpkg.h b/src/H5FDpkg.h index a0c1b3a..45bcfd8 100644 --- a/src/H5FDpkg.h +++ b/src/H5FDpkg.h @@ -59,6 +59,7 @@ H5_DLL herr_t H5FD_free_real(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, /* Testing functions */ #ifdef H5FD_TESTING +H5_DLL hbool_t H5FD_supports_swmr_test(const char *vfd_name); #endif /* H5FD_TESTING */ #endif /* _H5FDpkg_H */ diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h index c40dc54..427b42c 100644 --- a/src/H5FDprivate.h +++ b/src/H5FDprivate.h @@ -157,6 +157,7 @@ H5_DLL herr_t H5FD_set_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t addr); H5_DLL haddr_t H5FD_get_eof(const H5FD_t *file, H5FD_mem_t type); H5_DLL haddr_t H5FD_get_maxaddr(const H5FD_t *file); H5_DLL herr_t H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags); +H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags); H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map); H5_DLL herr_t H5FD_read(H5FD_t *file, #ifndef H5_DEBUG_BUILD diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h index 4931e0f..436be10 100644 --- a/src/H5FDpublic.h +++ b/src/H5FDpublic.h @@ -234,6 +234,11 @@ typedef enum H5F_mem_t H5FD_mem_t; * image to store in memory. */ #define H5FD_FEAT_CAN_USE_FILE_IMAGE_CALLBACKS 0x00000800 + /* + * Defining H5FD_FEAT_SUPPORTS_SWMR_IO for a VFL driver means that the + * driver supports the single-writer/multiple-readers I/O pattern. 
+ */ +#define H5FD_FEAT_SUPPORTS_SWMR_IO 0x00001000 /* Forward declaration */ typedef struct H5FD_t H5FD_t; @@ -294,6 +299,7 @@ struct H5FD_t { hid_t driver_id; /*driver ID for this file */ const H5FD_class_t *cls; /*constant class info */ unsigned long fileno; /* File 'serial' number */ + unsigned access_flags; /* File access flags (from create or open) */ unsigned long feature_flags; /* VFL Driver feature Flags */ haddr_t maxaddr; /* For this file, overrides class */ haddr_t base_addr; /* Base address for HDF5 data w/in file */ diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 04a0790..0ca5efb 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -530,6 +530,7 @@ H5FD_sec2_query(const H5FD_t *_file, unsigned long *flags /* out */) *flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */ *flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */ *flags |= H5FD_FEAT_POSIX_COMPAT_HANDLE; /* VFD handle is POSIX I/O call compatible */ + *flags |= H5FD_FEAT_SUPPORTS_SWMR_IO; /* VFD supports the single-writer/multiple-readers (SWMR) pattern */ /* Check for flags that are set by h5repart */ if(file && file->fam_to_sec2) diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c index 7056f7f..4c62bcc 100644 --- a/src/H5FDstdio.c +++ b/src/H5FDstdio.c @@ -550,7 +550,11 @@ H5FD_stdio_query(const H5FD_t *_f, unsigned long /*OUT*/ *flags) /* Quiet the compiler */ _f=_f; - /* Set the VFL feature flags that this driver supports */ + /* Set the VFL feature flags that this driver supports. + * + * Note that this VFD does not support SWMR due to the unpredictable + * nature of the buffering layer. + */ if(flags) { *flags = 0; *flags|=H5FD_FEAT_AGGREGATE_METADATA; /* OK to aggregate metadata allocations */ diff --git a/src/H5FDtest.c b/src/H5FDtest.c new file mode 100644 index 0000000..9d2f2e9 --- /dev/null +++ b/src/H5FDtest.c @@ -0,0 +1,119 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5FDtest.c + * Fall 2014 + * + * Purpose: File driver testing routines. 
+ * + *------------------------------------------------------------------------- + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5FDmodule.h" /* This source code file is part of the H5FD module */ +#define H5FD_TESTING /* Suppress warning about H5FD testing funcs */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5FDpkg.h" /* File Drivers */ + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ + + +/*********************/ +/* Package Variables */ +/*********************/ + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + + +/*------------------------------------------------------------------------- + * Function: H5FD_supports_swmr_test() + * + * Purpose: Determines if a VFD supports SWMR. + * + * The function determines SWMR support by inspecting the + * HDF5_DRIVER environment variable, not by checking the + * VFD feature flags (which do not exist until the driver + * is instantiated). + * + * See test/Makefile.am for a list of the VFD strings. + * + * This function is only intended for use in the test code. + * + * Return: TRUE (1) if the VFD supports SWMR I/O or vfd_name is + * NULL or the empty string (which implies the default VFD). + * + * FALSE (0) if it does not + * + * This function cannot fail at this time so there is no + * error return value. + * + * Programmer: Dana Robinson + * Fall 2014 + * + *------------------------------------------------------------------------- + */ +hbool_t +H5FD_supports_swmr_test(const char *vfd_name) +{ + hbool_t ret_value = FALSE; + + FUNC_ENTER_NOAPI_NOINIT_NOERR + + if(!vfd_name || !HDstrcmp(vfd_name, "")) + ret_value = TRUE; + else + ret_value = !HDstrcmp(vfd_name, "direct") + || !HDstrcmp(vfd_name, "log") + || !HDstrcmp(vfd_name, "sec2"); + + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_supports_swmr_test() */ + @@ -132,6 +132,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl fspace->expand_percent = fs_create->expand_percent; fspace->max_sect_addr = fs_create->max_sect_addr; fspace->max_sect_size = fs_create->max_sect_size; + fspace->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0; fspace->alignment = alignment; fspace->threshold = threshold; diff --git a/src/H5FScache.c b/src/H5FScache.c index c1a9e0a..f43676d 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -100,6 +100,7 @@ static herr_t H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id, unsigned *flags); static herr_t H5FS__cache_sinfo_serialize(const H5F_t *f, void *image, size_t len, void *thing); +static herr_t H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *thing); static herr_t H5FS__cache_sinfo_free_icr(void *thing); @@ -138,7 +139,7 @@ const H5AC_class_t H5AC_FSPACE_SINFO[1] = {{ H5FS__cache_sinfo_image_len, /* 'image_len' callback */ H5FS__cache_sinfo_pre_serialize, /* 'pre_serialize' callback */ H5FS__cache_sinfo_serialize, /* 'serialize' callback */ - NULL, /* 'notify' callback */ + H5FS__cache_sinfo_notify, /* 'notify' callback */ H5FS__cache_sinfo_free_icr, /* 'free_icr' callback */ NULL, /* 'fsf_size' callback */ }}; @@ -1235,6 +1236,68 @@ done: 
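
The H5FD_supports_swmr_test() routine added in H5FDtest.c above is meant to let test programs skip SWMR cases when the HDF5_DRIVER environment variable selects a VFD without SWMR support. A minimal sketch of that usage follows; it assumes the usual h5test.h TESTING()/SKIPPED()/PASSED() conventions and that the test source defines H5FD_TESTING so the prototype in H5FDpkg.h is visible.

/* Sketch of a test case that bails out cleanly under a non-SWMR VFD */
static int
test_swmr_open(void)
{
    const char *env_h5_drvr = HDgetenv("HDF5_DRIVER");  /* VFD name, e.g. "sec2" */

    TESTING("file open with SWMR write access");

    if(!H5FD_supports_swmr_test(env_h5_drvr)) {
        SKIPPED();
        HDputs("    The configured VFD does not support SWMR I/O.");
        return 0;
    }

    /* ... create the file with H5F_ACC_SWMR_WRITE and exercise it here ... */

    PASSED();
    return 0;
}
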
/*------------------------------------------------------------------------- + * Function: H5FS__cache_sinfo_notify + * + * Purpose: Handle cache action notifications + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +herr_t +H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *_thing) +{ + H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT + + /* Sanity check */ + HDassert(sinfo); + + /* Check if the file was opened with SWMR-write access */ + if(sinfo->fspace->swmr_write) { + /* Determine which action to take */ + switch(action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + /* Create flush dependency on parent */ + if(H5FS__create_flush_depend((H5AC_info_t *)sinfo->fspace, (H5AC_info_t *)sinfo) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency between data block and header, address = %llu", (unsigned long long)sinfo->fspace->sect_addr) + break; + + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + /* Destroy flush dependency on parent */ + if(H5FS__destroy_flush_depend((H5AC_info_t *)sinfo->fspace, (H5AC_info_t *)sinfo) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + break; + + default: +#ifdef NDEBUG + HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, "unknown action from metadata cache") +#else /* NDEBUG */ + HDassert(0 && "Unknown action?!?"); +#endif /* NDEBUG */ + } /* end switch */ + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FS__cache_sinfo_notify() */ + + +/*------------------------------------------------------------------------- * Function: H5FS__cache_sinfo_free_icr * * Purpose: Free the memory used for the in core representation of the diff --git a/src/H5FSint.c b/src/H5FSint.c new file mode 100644 index 0000000..60cedd5 --- /dev/null +++ b/src/H5FSint.c @@ -0,0 +1,145 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*------------------------------------------------------------------------- + * + * Created: H5FSint.c + * Fall 2012 + * Dana Robinson <derobins@hdfgroup.org> + * + * Purpose: Internal routines for free space managers. 
+ * + *------------------------------------------------------------------------- + */ + +/**********************/ +/* Module Declaration */ +/**********************/ + +#include "H5FSmodule.h" /* This source code file is part of the H5FS module */ + + +/***********************/ +/* Other Packages Used */ +/***********************/ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Eprivate.h" /* Error Handling */ +#include "H5FSpkg.h" /* Free Space Managers */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Package Typedefs */ +/********************/ + + +/********************/ +/* Local Prototypes */ +/********************/ + + +/*********************/ +/* Package Variables */ +/*********************/ + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + + + +/*------------------------------------------------------------------------- + * Function: H5FS__create_flush_depend + * + * Purpose: Create a flush dependency between two data structure components + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +herr_t +H5FS__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT + + /* Sanity check */ + HDassert(parent_entry); + HDassert(child_entry); + + /* Create a flush dependency between parent and child entry */ + if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FS__create_flush_depend() */ + + +/*------------------------------------------------------------------------- + * Function: H5FS__destroy_flush_depend + * + * Purpose: Destroy a flush dependency between two data structure components + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * Fall 2012 + * + *------------------------------------------------------------------------- + */ +herr_t +H5FS__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOINIT + + /* Sanity check */ + HDassert(parent_entry); + HDassert(child_entry); + + /* Destroy a flush dependency between parent and child entry */ + if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0) + HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FS__destroy_flush_depend() */ + diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h index 7e52b5f..b3b1548 100644 --- a/src/H5FSpkg.h +++ b/src/H5FSpkg.h @@ -179,6 +179,7 @@ struct H5FS_t { haddr_t addr; /* Address of free space header on disk */ size_t hdr_size; /* Size of free space header on disk */ H5FS_sinfo_t *sinfo; /* Section information */ + hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */ unsigned sinfo_lock_count; /* # of times the section info has been locked */ hbool_t sinfo_protected; /* Whether the section info was protected when locked */ hbool_t sinfo_modified; /* Whether the section info has been modified while locked */ @@ -222,6 +223,12 @@ 
H5FL_EXTERN(H5FS_t); /* Package Private Prototypes */ /******************************/ +/* Generic routines */ +H5_DLL herr_t H5FS__create_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); +H5_DLL herr_t H5FS__destroy_flush_depend(H5AC_info_t *parent_entry, + H5AC_info_t *child_entry); + /* Free space manager header routines */ H5_DLL H5FS_t *H5FS__new(const H5F_t *f, uint16_t nclasses, const H5FS_section_class_t *classes[], void *cls_init_udata); diff --git a/src/H5Faccum.c b/src/H5Faccum.c index 3fac184..48f9bdd 100644 --- a/src/H5Faccum.c +++ b/src/H5Faccum.c @@ -726,6 +726,12 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add } /* end else */ } /* end if */ else { + /* Make certain that data in accumulator is visible before new write */ + if((H5F_INTENT(fio_info->f) & H5F_ACC_SWMR_WRITE) > 0) + /* Flush if dirty and reset accumulator */ + if(H5F__accum_reset(fio_info, TRUE) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator") + /* Write the data */ if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed") diff --git a/src/H5Fint.c b/src/H5Fint.c index 9cf129c..363812d 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -641,8 +641,8 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get sieve buffer size") if(H5P_get(plist, H5F_ACS_LATEST_FORMAT_NAME, &latest_format) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'latest format' flag") - /* For latest format, activate all latest version support */ - if(latest_format) + /* For latest format or SWMR_WRITE, activate all latest version support */ + if(latest_format || (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)) f->shared->latest_flags |= H5F_LATEST_ALL_FLAGS; if(H5P_get(plist, H5F_ACS_USE_MDC_LOGGING_NAME, &(f->shared->use_mdc_logging)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'use mdc logging' flag") @@ -672,6 +672,15 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad maximum address from VFD") if(H5FD_get_feature_flags(lf, &f->shared->feature_flags) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get feature flags from VFD") + + /* Require the SWMR feature flag if SWMR I/O is desired */ + if(!H5F_HAS_FEATURE(f, H5FD_FEAT_SUPPORTS_SWMR_IO) && (H5F_INTENT(f) & (H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ))) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "must use a SWMR-compatible VFD when SWMR is specified") + + /* Require a POSIX compatible VFD to use SWMR feature */ + /* (It's reasonable to try to expand this to other VFDs eventually -QAK) */ + if(!H5F_HAS_FEATURE(f, H5FD_FEAT_POSIX_COMPAT_HANDLE) && (H5F_INTENT(f) & (H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ))) + HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "must use POSIX compatible VFD with SWMR write access") if(H5FD_get_fs_type_map(lf, f->shared->fs_type_map) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get free space type mapping from VFD") if(H5MF_init_merge_flags(f) < 0) @@ -692,9 +701,23 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t if(H5P_get(plist, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, &f->shared->read_attempts) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get the # of read attempts") - /* If no value for read attempts has been set, use the default */ - if(!f->shared->read_attempts) - 
f->shared->read_attempts = H5F_METADATA_READ_ATTEMPTS; + /* When opening file with SWMR access, the # of read attempts is H5F_SWMR_METADATA_READ_ATTEMPTS if not set */ + /* When opening file without SWMR access, the # of read attempts is always H5F_METADATA_READ_ATTEMPTS (set or not set) */ + if(H5F_INTENT(f) & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)) { + /* If no value for read attempts has been set, use the default */ + if(!f->shared->read_attempts) + f->shared->read_attempts = H5F_SWMR_METADATA_READ_ATTEMPTS; + + /* Turn off accumulator with SWMR */ + f->shared->feature_flags &= ~(unsigned)H5FD_FEAT_ACCUMULATE_METADATA; + if(H5FD_set_feature_flags(f->shared->lf, f->shared->feature_flags) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "can't set feature_flags in VFD") + } /* end if */ + else { + /* If no value for read attempts has been set, use the default */ + if(!f->shared->read_attempts) + f->shared->read_attempts = H5F_METADATA_READ_ATTEMPTS; + } /* end else */ /* Determine the # of bins for metdata read retries */ if(H5F_set_retries(f) < 0) @@ -831,10 +854,19 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush) /* Flush the file again (if requested), as shutting down the * free space manager may dirty some data structures again. */ - if(flush) + if(flush) { + /* Clear status_flags */ + f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_WRITE_ACCESS); + f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_SWMR_WRITE_ACCESS); + /* Mark superblock dirty in cache, so change will get encoded */ + /* Push error, but keep going*/ + if(H5F_super_dirty(f) < 0) + HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty") + if(H5F_flush(f, dxpl_id, TRUE) < 0) /* Push error, but keep going*/ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache") + } /* end if */ } /* end if */ /* if it exists, unpin the driver information block cache entry, @@ -979,6 +1011,37 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush) * The ACCESS_PARMS argument is optional. A null pointer will * cause the default file access parameters to be used. * + * The following two tables show results of file opens for single and concurrent access: + * + * SINGLE PROCESS ACCESS CONCURRENT ACCESS + * + * #1st open# #1st open# + * -- SR SR -- -- SR SR -- -- SR SR -- -- SR SR -- + * -- -- SW SW SW SW -- -- -- -- SW SW SW SW -- -- + * W W W W R R R R W W W W R R R R + * #2nd open# #2nd open# + * -------------------------- -------------------------- + * -- -- W | s x x s x x f f | -- -- W | f x x f x x f f | + * SR -- W | x x x x x x x x | SR -- W | x x x x x x x x | + * SR SW W | x x x x x x x x | SR SW W | x x x x x x x x | + * -- SW W | f x x s x x f f | -- SW W | f x x f x x f f | + * -- SW R | x x x x x x x x | -- SW R | x x x x x x x x | + * SR SW R | x x x x x x x x | SR SW R | x x x x x x x x | + * SR -- R | s x x s x x s f | SR -- R | f x x s x x s s | + * -- -- R | s x x s x x s s | -- -- R | f x x f x x s s | + * -------------------------- -------------------------- + * + * Notations: + * W: H5F_ACC_RDWR + * R: H5F_ACC_RDONLY + * SW: H5F_ACC_SWMR_WRITE + * SR: H5F_ACC_SWMR_READ + * + * x: the first open or second open itself fails due to invalid flags combination + * f: the open fails with flags combination from both the first and second opens + * s: the open succeeds with flags combination from both the first and second opens + * + * * Return: Success: A new file pointer. 
* Failure: NULL * @@ -998,6 +1061,8 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_class_t *drvr; /*file driver class info */ H5P_genplist_t *a_plist; /*file access property list */ H5F_close_degree_t fc_degree; /*file close degree */ + hbool_t set_flag = FALSE; /*set the status_flags in the superblock */ + hbool_t clear = FALSE; /*clear the status_flags */ hbool_t evict_on_close; /* evict on close value from plist */ H5F_t *ret_value = NULL; /*actual return value */ char *lock_env_var = NULL;/*env var pointer */ @@ -1074,7 +1139,8 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, * file (since we can't do that while the file is open), or if the * request was to create a non-existent file (since the file already * exists), or if the new request adds write access (since the - * readers don't expect the file to change under them). + * readers don't expect the file to change under them), or if the + * SWMR write/read access flags don't agree. */ if(H5FD_close(lf) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to close low-level file info") @@ -1085,6 +1151,11 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, if((flags & H5F_ACC_RDWR) && 0 == (shared->flags & H5F_ACC_RDWR)) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for read-only") + if((flags & H5F_ACC_SWMR_WRITE) && 0 == (shared->flags & H5F_ACC_SWMR_WRITE)) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "SWMR write access flag not the same for file that is already open") + if((flags & H5F_ACC_SWMR_READ) && !((shared->flags & H5F_ACC_SWMR_WRITE) || (shared->flags & H5F_ACC_SWMR_READ) || (shared->flags & H5F_ACC_RDWR))) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "SWMR read access flag not the same for file that is already open") + /* Allocate new "high-level" file struct */ if((file = H5F_new(shared, flags, fcpl_id, fapl_id, NULL)) == NULL) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to create new file object") @@ -1116,6 +1187,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, /* Create the 'top' file structure */ if(NULL == (file = H5F_new(NULL, flags, fcpl_id, fapl_id, lf))) HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to initialize file structure") + + /* Need to set status_flags in the superblock if the driver has a 'lock' method */ + if(drvr->lock) + set_flag = TRUE; } /* end else */ /* Retain the name the file was opened with */ @@ -1171,6 +1246,15 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, if(H5P_get(a_plist, H5F_ACS_CLOSE_DEGREE_NAME, &fc_degree) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get file close degree") + /* This is a private property to clear the status_flags in the super block */ + /* Use by h5clear and a routine in test/flush2.c to clear the test file's status_flags */ + if(H5P_exist_plist(a_plist, H5F_ACS_CLEAR_STATUS_FLAGS_NAME) > 0) { + if(H5P_get(a_plist, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get clearance for status_flags") + else if(clear) + file->shared->sblock->status_flags = 0; + } /* end if */ + if(shared->nrefs == 1) { if(fc_degree == H5F_CLOSE_DEFAULT) shared->fc_degree = lf->cls->fc_degree; @@ -1207,6 +1291,53 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id, if(H5F_build_actual_name(file, a_plist, name, &file->actual_name) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to build actual name") + 
if(set_flag) { + if(H5F_INTENT(file) & H5F_ACC_RDWR) { /* Set and check consistency of status_flags */ + /* Skip check of status_flags for file with < superblock version 3 */ + if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) { + + if(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS || + file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write/SWMR write (may use <h5clear file> to clear file consistency flags)") + } /* version 3 superblock */ + + file->shared->sblock->status_flags |= H5F_SUPER_WRITE_ACCESS; + if(H5F_INTENT(file) & H5F_ACC_SWMR_WRITE) + file->shared->sblock->status_flags |= H5F_SUPER_SWMR_WRITE_ACCESS; + + /* Flush the superblock */ + if(H5F_super_dirty(file) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, NULL, "unable to mark superblock as dirty") + if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, H5AC_ind_read_dxpl_id) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, NULL, "unable to flush superblock") + + /* Remove the file lock for SWMR_WRITE */ + if(use_file_locking && (H5F_INTENT(file) & H5F_ACC_SWMR_WRITE)) { + if(H5FD_unlock(file->shared->lf) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to unlock the file") + } /* end if */ + } /* end if */ + else { /* H5F_ACC_RDONLY: check consistency of status_flags */ + /* Skip check of status_flags for file with < superblock version 3 */ + if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) { + + if(H5F_INTENT(file) & H5F_ACC_SWMR_READ) { + if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS && + !(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS)) + || + (!(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) && + file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS)) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is not already open for SWMR writing") + + } /* end if */ + else if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) || + (file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS)) + HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write (may use <h5clear file> to clear file consistency flags)") + + } /* version 3 superblock */ + } /* end else */ + } /* end if set_flag */ + /* Success */ ret_value = file; @@ -2149,6 +2280,8 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id) /* test to see if a buffer was provided -- if not, we are done */ if(buf_ptr != NULL) { size_t space_needed; /* size of file image */ + hsize_t tmp; + size_t tmp_size; H5P_genplist_t *xfer_plist= NULL; /* Dataset transfer property list object */ /* Check for buffer too small */ @@ -2165,6 +2298,15 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id) /* (Note compensation for base address addition in internal routine) */ if(H5FD_read(fd_ptr, xfer_plist, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0) HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "file image read request failed") + + /* Offset to "status_flags" in the superblock */ + tmp = H5F_SUPER_STATUS_FLAGS_OFF(file->shared->sblock->super_vers); + /* Size of "status_flags" depends on the superblock version */ + tmp_size = H5F_SUPER_STATUS_FLAGS_SIZE(file->shared->sblock->super_vers); + + /* Clear "status_flags" */ + HDmemset((uint8_t *)(buf_ptr) + tmp, 0, tmp_size); + } /* end if */ done: diff --git a/src/H5Fio.c b/src/H5Fio.c index 273ee74..e215666 100644 --- a/src/H5Fio.c +++ b/src/H5Fio.c @@ 
-264,26 +264,70 @@ H5F_evict_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id) FUNC_ENTER_NOAPI(FAIL) - /* Unpin the superblock, as this will be marked for eviction and it can't - be pinned. */ - if(H5AC_unpin_entry(f->shared->sblock) < 0) - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock") - f->shared->sblock = NULL; - /* Evict the object's metadata */ if(H5AC_evict_tagged_metadata(f, tag, TRUE, dxpl_id) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "unable to evict tagged metadata") - /* Re-read the superblock. */ - if(H5F__super_read(f, dxpl_id, FALSE) < 0) - HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "unable to read superblock") - done: FUNC_LEAVE_NOAPI(ret_value); } /* end H5F_evict_tagged_metadata */ /*------------------------------------------------------------------------- + * Function: H5F__evict_cache_entries + * + * Purpose: To evict all cache entries except the pinned superblock entry + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Dec 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5F__evict_cache_entries(H5F_t *f, hid_t dxpl_id) +{ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_PACKAGE + + HDassert(f); + HDassert(f->shared); + + /* Evict all except pinned entries in the cache */ + if(H5AC_evict(f, dxpl_id) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "unable to evict all except pinned entries") + +#ifndef NDEBUG +{ + unsigned status = 0; + int32_t cur_num_entries; + + /* Retrieve status of the superblock */ + if(H5AC_get_entry_status(f, (haddr_t)0, &status) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to get entry status") + + /* Verify status of the superblock entry in the cache */ + if(!(status & H5AC_ES__IN_CACHE) || !(status & H5AC_ES__IS_PINNED)) + HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to get entry status") + + /* Get the number of cache entries */ + if(H5AC_get_cache_size(f->shared->cache, NULL, NULL, NULL, &cur_num_entries) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_get_cache_size() failed.") + + /* Should be the only one left in the cache (the superblock) */ + if(cur_num_entries != 1) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "number of cache entries is not correct") +} +#endif /* NDEBUG */ + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5F__evict_cache_entries() */ + + +/*------------------------------------------------------------------------- * Function: H5F_get_checksums * * Purpose: Decode checksum stored in the buffer diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index e9c45f3..1adf74b 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -56,10 +56,11 @@ /* Superblock status flags */ #define H5F_SUPER_WRITE_ACCESS 0x01 #define H5F_SUPER_FILE_OK 0x02 -#define H5F_SUPER_ALL_FLAGS (H5F_SUPER_WRITE_ACCESS | H5F_SUPER_FILE_OK) +#define H5F_SUPER_SWMR_WRITE_ACCESS 0x04 +#define H5F_SUPER_ALL_FLAGS (H5F_SUPER_WRITE_ACCESS | H5F_SUPER_FILE_OK | H5F_SUPER_SWMR_WRITE_ACCESS) /* Mask for removing private file access flags */ -#define H5F_ACC_PUBLIC_FLAGS 0x001fu +#define H5F_ACC_PUBLIC_FLAGS 0x007fu /* Free space section+aggregator merge flags */ #define H5F_FS_MERGE_METADATA 0x01 /* Section can merge with metadata aggregator */ @@ -131,12 +132,28 @@ #define H5F_SUPERBLOCK_VARLEN_SIZE(v, sizeof_addr, sizeof_size) ( \ (v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(sizeof_addr, sizeof_size) : 0) \ + (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(sizeof_addr, sizeof_size) : 0) \ - + (v == 2 ? 
H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0)) + + (v >= 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0)) /* Total size of superblock, depends on superblock version */ #define H5F_SUPERBLOCK_SIZE(s) ( H5F_SUPERBLOCK_FIXED_SIZE \ + H5F_SUPERBLOCK_VARLEN_SIZE((s)->super_vers, (s)->sizeof_addr, (s)->sizeof_size)) +/* For superblock version 0 & 1: + Offset to the file consistency flags (status_flags) in the superblock (excluding H5F_SUPERBLOCK_FIXED_SIZE) */ +#define H5F_SUPER_STATUS_OFF_V01 \ + (2 /* freespace, and root group versions */ \ + + 1 /* reserved */ \ + + 3 /* shared header vers, size of address, size of lengths */ \ + + 1 /* reserved */ \ + + 4) /* group leaf k, group internal k */ + +#define H5F_SUPER_STATUS_OFF(v) (v >= 2 ? 2 : H5F_SUPER_STATUS_OFF_V01) + +/* Offset to the file consistency flags (status_flags) in the superblock */ +#define H5F_SUPER_STATUS_FLAGS_OFF(v) (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPER_STATUS_OFF(v)) + +/* Size of file consistency flags (status_flags) in the superblock */ +#define H5F_SUPER_STATUS_FLAGS_SIZE(v) (v >= 2 ? 1 : 4) /* Forward declaration external file cache struct used below (defined in * H5Fefc.c) */ @@ -406,6 +423,9 @@ H5_DLL herr_t H5F_efc_try_close(H5F_t *f); H5_DLL herr_t H5F__set_eoa(const H5F_t *f, H5F_mem_t type, haddr_t addr); H5_DLL herr_t H5F__set_base_addr(const H5F_t *f, haddr_t addr); +/* Functions that flush or evict */ +H5_DLL herr_t H5F__evict_cache_entries(H5F_t *f, hid_t dxpl_id); + /* Testing functions */ #ifdef H5F_TESTING H5_DLL herr_t H5F_get_sohm_mesg_count_test(hid_t fid, unsigned type_id, diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 50cd763..a6d1c4a 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -473,6 +473,7 @@ #define H5F_ACS_OBJECT_FLUSH_CB_NAME "object_flush_cb" /* Object flush callback */ #define H5F_ACS_EFC_SIZE_NAME "efc_size" /* Size of external file cache */ #define H5F_ACS_FILE_IMAGE_INFO_NAME "file_image_info" /* struct containing initial file image and callback info */ +#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags" /* Whether to clear superblock status_flags (private property only used by h5clear) */ #define H5F_ACS_USE_MDC_LOGGING_NAME "use_mdc_logging" /* Whether to use metadata cache logging */ #define H5F_ACS_MDC_LOG_LOCATION_NAME "mdc_log_location" /* Name of metadata cache log location */ #define H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME "start_mdc_log_on_access" /* Whether logging starts on file create/open */ @@ -498,7 +499,8 @@ #define HDF5_SUPERBLOCK_VERSION_DEF 0 /* The default super block format */ #define HDF5_SUPERBLOCK_VERSION_1 1 /* Version with non-default B-tree 'K' value */ #define HDF5_SUPERBLOCK_VERSION_2 2 /* Revised version with superblock extension and checksum */ -#define HDF5_SUPERBLOCK_VERSION_LATEST HDF5_SUPERBLOCK_VERSION_2 /* The maximum super block format */ +#define HDF5_SUPERBLOCK_VERSION_3 3 /* With file locking and consistency flags (at least this version for SWMR support) */ +#define HDF5_SUPERBLOCK_VERSION_LATEST HDF5_SUPERBLOCK_VERSION_3 /* The maximum super block format */ #define HDF5_SUPERBLOCK_VERSION_V18_LATEST HDF5_SUPERBLOCK_VERSION_2 /* The latest superblock version for v18 */ #define HDF5_FREESPACE_VERSION 0 /* of the Free-Space Info */ #define HDF5_OBJECTDIR_VERSION 0 /* of the Object Directory format */ @@ -523,6 +525,7 @@ /* Metadata read attempt values */ #define H5F_METADATA_READ_ATTEMPTS 1 /* Default # of read attempts for non-SWMR access */ +#define H5F_SWMR_METADATA_READ_ATTEMPTS 100 /* Default # of read attempts for 
SWMR access */ /* Macros to define signatures of all objects in the file */ diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 018618e..a79da75 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -55,6 +55,23 @@ #define H5F_ACC_EXCL (H5CHECK H5OPEN 0x0004u) /*fail if file already exists*/ /* NOTE: 0x0008u was H5F_ACC_DEBUG, now deprecated */ #define H5F_ACC_CREAT (H5CHECK H5OPEN 0x0010u) /*create non-existing files */ +#define H5F_ACC_SWMR_WRITE (H5CHECK 0x0020u) /*indicate that this file is + * open for writing in a + * single-writer/multi-reader (SWMR) + * scenario. Note that the + * process(es) opening the file + * for reading must open the file + * with RDONLY access, and use + * the special "SWMR_READ" access + * flag. */ +#define H5F_ACC_SWMR_READ (H5CHECK 0x0040u) /*indicate that this file is + * open for reading in a + * single-writer/multi-reader (SWMR) + * scenario. Note that the + * process(es) opening the file + * for SWMR reading must also + * open the file with the RDONLY + * flag. */ /* Value passed to H5Pset_elink_acc_flags to cause flags to be taken from the * parent file. */ @@ -225,6 +242,7 @@ H5_DLL herr_t H5Freset_mdc_hit_rate_stats(hid_t file_id); H5_DLL ssize_t H5Fget_name(hid_t obj_id, char *name, size_t size); H5_DLL herr_t H5Fget_info2(hid_t obj_id, H5F_info2_t *finfo); H5_DLL herr_t H5Fget_metadata_read_retry_info(hid_t file_id, H5F_retry_info_t *info); +H5_DLL herr_t H5Fstart_swmr_write(hid_t file_id); H5_DLL ssize_t H5Fget_free_sections(hid_t file_id, H5F_mem_t type, size_t nsects, H5F_sect_info_t *sect_info/*out*/); H5_DLL herr_t H5Fclear_elink_file_cache(hid_t file_id); diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 489a149..2a82618 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -250,6 +250,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read) haddr_t super_addr; /* Absolute address of superblock */ haddr_t eof; /* End of file address */ unsigned rw_flags; /* Read/write permissions for file */ + hbool_t skip_eof_check = FALSE; /* Whether to skip checking the EOF value */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL) @@ -317,6 +318,14 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read) if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags))) HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock") + if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) + if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_3) + HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "invalid superblock version for SWMR_WRITE") + + /* Enable all latest version support when file has v3 superblock */ + if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) + f->shared->latest_flags |= H5F_LATEST_ALL_FLAGS; + /* Pin the superblock in the cache */ if(H5AC_pin_protected_entry(sblock) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTPIN, FAIL, "unable to pin superblock") @@ -410,7 +419,22 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read) * Note: the aggregator is changed again after being reset * earlier before H5AC_flush due to allocation of tmp addresses. */ - if(initial_read) { + /* The EOF check must be skipped when the file is opened for SWMR read, + * as the file can appear truncated if only part of it has been + * flushed to disk by the SWMR writer process. 
+ */ + if(H5F_INTENT(f) & H5F_ACC_SWMR_READ) { + /* + * When the file is opened for SWMR read access, skip the check if: + * --the file is already marked for SWMR writing and + * --the file has version 3 superblock for SWMR support + */ + if((sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS) && + (sblock->status_flags & H5F_SUPER_WRITE_ACCESS) && + sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) + skip_eof_check = TRUE; + } /* end if */ + if(!skip_eof_check && initial_read) { if(HADDR_UNDEF == (eof = H5FD_get_eof(f->shared->lf, H5FD_MEM_DEFAULT))) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to determine file size") diff --git a/src/H5Gent.c b/src/H5Gent.c index 6020028..f64eaf0 100644 --- a/src/H5Gent.c +++ b/src/H5Gent.c @@ -460,7 +460,7 @@ H5G__ent_convert(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, const char *name, targ_oloc.addr = lnk->u.hard.addr; /* Get the object header */ - if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect target object header") /* Check if a symbol table message exists */ diff --git a/src/H5Gtest.c b/src/H5Gtest.c index d69a804..0e0a897 100644 --- a/src/H5Gtest.c +++ b/src/H5Gtest.c @@ -701,7 +701,7 @@ H5G_verify_cached_stabs_test_cb(H5F_t *f, hid_t dxpl_id, targ_oloc.addr = sn->entry[i].header; /* Load target object header */ - if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to protect target object header") /* Check if a symbol table message exists */ diff --git a/src/H5Lexternal.c b/src/H5Lexternal.c index 12cce73..7f59e50 100644 --- a/src/H5Lexternal.c +++ b/src/H5Lexternal.c @@ -323,7 +323,7 @@ H5L_extern_traverse(const char H5_ATTR_UNUSED *link_name, hid_t cur_group, */ /* Simplify intent flags for open calls */ - intent = ((intent & H5F_ACC_RDWR) ? 
H5F_ACC_RDWR : H5F_ACC_RDONLY); + intent &= (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ); /* Copy the file name to use */ if(NULL == (temp_file_name = H5MM_strdup(file_name))) @@ -1281,6 +1281,7 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc, oh->version = H5O_VERSION_1; oh->sizeof_size = H5F_SIZEOF_SIZE(f); oh->sizeof_addr = H5F_SIZEOF_ADDR(f); + oh->swmr_write = !!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE); #ifdef H5O_ENABLE_BAD_MESG_COUNT /* Check whether the "bad message count" property is set */ if(H5P_exist_plist(oc_plist, H5O_BAD_MESG_COUNT_NAME) > 0) { @@ -1290,6 +1291,15 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc, } /* end if */ #endif /* H5O_ENABLE_BAD_MESG_COUNT */ + /* Create object header proxy if doing SWMR writes */ + if(oh->swmr_write) { + /* Create virtual entry, for use as proxy */ + if(NULL == (oh->proxy = H5AC_proxy_entry_create())) + HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "can't create object header proxy") + } /* end if */ + else + oh->proxy = NULL; + /* Set initial status flags */ oh->flags = oh_flags; @@ -1356,6 +1366,7 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc, /* (including space for serializing the object header prefix */ if(NULL == (oh->chunk[0].image = H5FL_BLK_CALLOC(chunk_image, oh_size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") + oh->chunk[0].chunk_proxy = NULL; /* Put magic # for object header in first chunk */ if(oh->version > H5O_VERSION_1) @@ -1791,7 +1802,8 @@ done: *------------------------------------------------------------------------- */ H5O_t * -H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags) +H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags, + hbool_t pin_all_chunks) { H5O_t *oh = NULL; /* Object header protected */ H5O_cache_ud_t udata; /* User data for protecting object header */ @@ -1915,6 +1927,39 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags) H5O_assert(oh); #endif /* H5O_DEBUG */ + /* Pin the other chunks also when requested, so that the object header + * proxy can be set up. 
+ */ + if(pin_all_chunks && oh->nchunks > 1) { + unsigned u; /* Local index variable */ + + /* Sanity check */ + HDassert(oh->swmr_write); + + /* Iterate over chunks > 0 */ + for(u = 1; u < oh->nchunks; u++) { + H5O_chunk_proxy_t *chk_proxy; /* Chunk proxy */ + + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(loc->file, dxpl_id, oh, u))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header chunk") + + /* Pin chunk proxy*/ + if(H5AC_pin_protected_entry(chk_proxy) < 0 ) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, NULL, "unable to pin object header chunk") + + /* Unprotect chunk */ + if(H5O_chunk_unprotect(loc->file, dxpl_id, chk_proxy, FALSE) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect object header chunk") + + /* Preserve chunk proxy pointer for later */ + oh->chunk[u].chunk_proxy = chk_proxy; + } /* end for */ + + /* Set the flag for the unprotect */ + oh->chunks_pinned = TRUE; + } /* end if */ + /* Set return value */ ret_value = oh; @@ -1956,7 +2001,7 @@ H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id) HDassert(loc); /* Get header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header") /* Increment the reference count on the object header */ @@ -2038,6 +2083,27 @@ H5O_unprotect(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, unsigned oh_flags) HDassert(loc); HDassert(oh); + /* Unpin the other chunks */ + if(oh->chunks_pinned && oh->nchunks > 1) { + unsigned u; /* Local index variable */ + + /* Sanity check */ + HDassert(oh->swmr_write); + + /* Iterate over chunks > 0 */ + for(u = 1; u < oh->nchunks; u++) { + if(NULL != oh->chunk[u].chunk_proxy) { + /* Release chunk proxy */ + if(H5AC_unpin_entry(oh->chunk[u].chunk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin object header chunk") + oh->chunk[u].chunk_proxy = NULL; + } /* end if */ + } /* end for */ + + /* Reset the flag from the unprotect */ + oh->chunks_pinned = FALSE; + } /* end if */ + /* Unprotect the object header */ if(H5AC_unprotect(loc->file, dxpl_id, H5AC_OHDR, oh->chunk[0].addr, oh, oh_flags) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header") @@ -2169,7 +2235,7 @@ H5O_touch(const H5O_loc_t *loc, hbool_t force, hid_t dxpl_id) HDassert(loc); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Create/Update the modification time message */ @@ -2295,7 +2361,7 @@ H5O_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr) loc.holding_file = FALSE; /* Get the object header information */ - if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__NO_FLAGS_SET))) + if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__NO_FLAGS_SET, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Delete object */ @@ -2386,7 +2452,7 @@ H5O_obj_type(const H5O_loc_t *loc, H5O_type_t *obj_type, hid_t dxpl_id) FUNC_ENTER_NOAPI_TAG(dxpl_id, loc->addr, FAIL) /* Load the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Retrieve the type of the object */ @@ -2464,7 
+2530,7 @@ H5O_obj_class(const H5O_loc_t *loc, hid_t dxpl_id) FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, loc->addr, NULL) /* Load the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header") /* Test whether entry qualifies as a particular type of object */ @@ -2761,7 +2827,7 @@ H5O_get_hdr_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_hdr_info_t *hdr) HDmemset(hdr, 0, sizeof(*hdr)); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to load object header") /* Get the information for the object header */ @@ -2885,7 +2951,7 @@ H5O_get_info(const H5O_loc_t *loc, hid_t dxpl_id, hbool_t want_ih_info, HDassert(oinfo); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Reset the object info structure */ @@ -3006,7 +3072,7 @@ H5O_get_create_plist(const H5O_loc_t *loc, hid_t dxpl_id, H5P_genplist_t *oc_pli HDassert(oc_plist); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Set property values, if they were used for the object */ @@ -3061,7 +3127,7 @@ H5O_get_nlinks(const H5O_loc_t *loc, hid_t dxpl_id, hsize_t *nlinks) HDassert(nlinks); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Retrieve the # of link messages seen when the object header was loaded */ @@ -3178,7 +3244,7 @@ H5O_get_rc_and_type(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc, H5O_type_ HDassert(loc); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Set the object's reference count */ @@ -3551,7 +3617,7 @@ H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id) HDassert(loc); /* Get header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Decrement the reference count on the object header */ @@ -3569,6 +3635,30 @@ done: /*------------------------------------------------------------------------- + * Function: H5O_get_proxy + * + * Purpose: Retrieve the proxy for the object header. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * July 24 2016 + * + *------------------------------------------------------------------------- + */ +H5AC_proxy_entry_t * +H5O_get_proxy(const H5O_t *oh) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* Check args */ + HDassert(oh); + + FUNC_LEAVE_NOAPI(oh->proxy) +} /* end H5O_get_proxy() */ + + +/*------------------------------------------------------------------------- * Function: H5O__free * * Purpose: Destroys an object header. @@ -3585,8 +3675,9 @@ herr_t H5O__free(H5O_t *oh) { unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE_NOERR + FUNC_ENTER_PACKAGE /* check args */ HDassert(oh); @@ -3621,9 +3712,15 @@ H5O__free(H5O_t *oh) oh->mesg = (H5O_mesg_t *)H5FL_SEQ_FREE(H5O_mesg_t, oh->mesg); } /* end if */ + /* Destroy the proxy */ + if(oh->proxy) + if(H5AC_proxy_entry_dest(oh->proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy virtual entry used for proxy") + /* destroy object header */ oh = H5FL_FREE(H5O_t, oh); - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__free() */ diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c index d22fd92..4f98cfa 100644 --- a/src/H5Oalloc.c +++ b/src/H5Oalloc.c @@ -745,9 +745,8 @@ H5O__alloc_find_best_nonnull(const H5F_t *f, const H5O_t *oh, size_t *size, size_t cont_size; /* Continuation message size */ size_t multi_size; /* Size of all the messages in the last chunk */ unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_STATIC_NOERR /* Check args */ HDassert(f); @@ -865,8 +864,7 @@ H5O__alloc_find_best_nonnull(const H5F_t *f, const H5O_t *oh, size_t *size, else *size += (size_t)H5O_SIZEOF_MSGHDR_OH(oh) + oh->mesg[found_msg->msgno].raw_size; -done: - FUNC_LEAVE_NOAPI(ret_value) + FUNC_LEAVE_NOAPI(SUCCEED) } /* H5O__alloc_find_best_nonnull() */ @@ -949,6 +947,7 @@ H5O__alloc_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size, oh->chunk[chunkno].gap = 0; if(NULL == (oh->chunk[chunkno].image = p = H5FL_BLK_CALLOC(chunk_image, size))) HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "can't allocate image for chunk, size = %zu", size) + oh->chunk[chunkno].chunk_proxy = NULL; /* If this is a later version of the object header format, put the magic * # at the beginning of the chunk image. 
@@ -1221,10 +1220,9 @@ static herr_t H5O__alloc_find_best_null(const H5O_t *oh, size_t size, size_t *mesg_idx) { size_t idx; /* Index of message which fits allocation */ - int found_null; /* Best fit null message */ - herr_t ret_value = SUCCEED; /* Return value */ + ssize_t found_null; /* Best fit null message */ - FUNC_ENTER_STATIC + FUNC_ENTER_STATIC_NOERR /* check args */ HDassert(oh); @@ -1239,11 +1237,11 @@ H5O__alloc_find_best_null(const H5O_t *oh, size_t size, size_t *mesg_idx) if(oh->mesg[idx].raw_size == size) { /* Keep first exact fit */ if(found_null < 0) - found_null = idx; + found_null = (ssize_t)idx; else /* If we've got more than one exact fit, choose the one in the earliest chunk */ if(oh->mesg[idx].chunkno < oh->mesg[found_null].chunkno) { - found_null = idx; + found_null = (ssize_t)idx; /* If we found an exact fit in object header chunk #0, we can get out */ /* (Could extend this to look for earliest message in @@ -1257,29 +1255,26 @@ H5O__alloc_find_best_null(const H5O_t *oh, size_t size, size_t *mesg_idx) else if(oh->mesg[idx].raw_size > size) { /* Keep first one found */ if(found_null < 0) - found_null = idx; + found_null = (ssize_t)idx; else /* Check for better fit */ if(oh->mesg[idx].raw_size < oh->mesg[found_null].raw_size) - found_null = idx; + found_null = (ssize_t)idx; else { /* If they are the same size, choose the one in the earliest chunk */ if(oh->mesg[idx].raw_size == oh->mesg[found_null].raw_size) { if(oh->mesg[idx].chunkno < oh->mesg[found_null].chunkno) - found_null = idx; + found_null = (ssize_t)idx; } /* end if */ } /* end else */ } /* end else-if */ - /* Ignore too-small null messages */ - else - ; + /* else: Ignore too-small null messages */ } /* end if */ } /* end for */ if(found_null >= 0) - *mesg_idx = found_null; + *mesg_idx = (size_t)found_null; -done: - FUNC_LEAVE_NOAPI(ret_value) + FUNC_LEAVE_NOAPI(SUCCEED) } /* H5O__alloc_find_best_null() */ @@ -1627,6 +1622,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh) { H5O_chunk_proxy_t *null_chk_proxy = NULL; /* Chunk that null message is in */ H5O_chunk_proxy_t *curr_chk_proxy = NULL; /* Chunk that message is in */ + H5O_chunk_proxy_t *cont_targ_chk_proxy = NULL; /* Chunk that continuation message points to */ hbool_t null_chk_dirtied = FALSE; /* Flags for unprotecting null chunk */ hbool_t curr_chk_dirtied = FALSE; /* Flags for unprotecting curr chunk */ hbool_t packed_msg; /* Flag to indicate that messages were packed */ @@ -1741,6 +1737,60 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh) if(NULL == (curr_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* If the message being moved is a continuation + * message and we are doing SWMR writes, we must + * update the flush dependencies */ + if(oh->swmr_write && (H5O_CONT_ID == curr_msg->type->id)) { + void *null_chk_mdc_obj; /* The metadata cache object for the null_msg chunk */ + + /* Point to the metadata cache object for the + * null message chunk, oh if in chunk 0 or the + * proxy otherwise */ + null_chk_mdc_obj = (null_msg->chunkno == 0 + ? 
(void *)oh + : (void *)null_chk_proxy); + + /* The other chunks involved should never be + * chunk 0 */ + HDassert(curr_msg->chunkno > 0); + HDassert(((H5O_cont_t *)(curr_msg->native))->chunkno > 0); + + /* Protect continuation message target chunk */ + if(NULL == (cont_targ_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, ((H5O_cont_t *)(curr_msg->native))->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + + /* Remove flush dependency on old continuation + * message chunk */ + HDassert(cont_targ_chk_proxy); + HDassert(cont_targ_chk_proxy->parent); + HDassert(curr_chk_proxy); + HDassert((void *)curr_chk_proxy == cont_targ_chk_proxy->parent); + + if(H5AC_destroy_flush_dependency(curr_chk_proxy, cont_targ_chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + + cont_targ_chk_proxy->parent = NULL; + + /* Create flush dependency on new continuation + * message chunk */ + if(H5AC_create_flush_dependency(null_chk_mdc_obj, cont_targ_chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + + HDassert(null_chk_mdc_obj); + HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->type); + HDassert((((H5C_cache_entry_t *)null_chk_mdc_obj)->type->id == H5AC_OHDR_ID) || + (((H5C_cache_entry_t *)null_chk_mdc_obj)->type->id == H5AC_OHDR_CHK_ID)); + + cont_targ_chk_proxy->parent = null_chk_mdc_obj; + + /* Unprotect continuation message target chunk + */ + if(H5O_chunk_unprotect(f, dxpl_id, cont_targ_chk_proxy, FALSE) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + cont_targ_chk_proxy = NULL; + } /* end if */ + /* Copy raw data for non-null message to new chunk */ HDmemcpy(null_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw_size + (size_t)H5O_SIZEOF_MSGHDR_OH(oh)); @@ -1889,9 +1939,11 @@ done: HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect null object header chunk") if(curr_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_dirtied) < 0) HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect current object header chunk") + if(cont_targ_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, cont_targ_chk_proxy, FALSE) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect continuation message target object header chunk") } /* end if */ else - HDassert(!null_chk_proxy && !curr_chk_proxy); + HDassert(!null_chk_proxy && !curr_chk_proxy && !cont_targ_chk_proxy); FUNC_LEAVE_NOAPI(ret_value) } /* H5O_move_msgs_forward() */ diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c index 9b494a1..71f54eb 100644 --- a/src/H5Oattribute.c +++ b/src/H5Oattribute.c @@ -484,7 +484,7 @@ H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id) HDassert(name); /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header") /* Check for attribute info stored */ @@ -633,7 +633,7 @@ H5O_attr_open_by_idx(const H5O_loc_t *loc, H5_index_t idx_type, HGOTO_ERROR(H5E_ATTR, H5E_BADITER, NULL, "can't locate attribute") /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL 
== (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header") /* Find out whether it has already been opened. If it has, close the object @@ -1284,7 +1284,7 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id, HDassert(attr_op); /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -1847,7 +1847,7 @@ H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id) HDassert(name); /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -2001,7 +2001,7 @@ H5O_attr_count(const H5O_loc_t *loc, hid_t dxpl_id) HDassert(loc); /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Retrieve # of attributes on object */ diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 88a4d85..94498ee 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -441,6 +441,18 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata, /* Retrieve partially deserialized object header from user data */ oh = udata->oh; + /* Set SWMR flag, if appropriate */ + oh->swmr_write = !!(H5F_INTENT(udata->common.f) & H5F_ACC_SWMR_WRITE); + + /* Create object header proxy if doing SWMR writes */ + if(oh->swmr_write) { + /* Create virtual entry, for use as proxy */ + if(NULL == (oh->proxy = H5AC_proxy_entry_create())) + HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, NULL, "can't create object header proxy") + } /* end if */ + else + oh->proxy = NULL; + /* Parse the first chunk */ if(H5O__chunk_deserialize(oh, udata->common.addr, udata->chunk0_size, (const uint8_t *)_image, &(udata->common), dirty) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk") @@ -667,6 +679,16 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) switch(action) { case H5AC_NOTIFY_ACTION_AFTER_INSERT: case H5AC_NOTIFY_ACTION_AFTER_LOAD: + if(oh->swmr_write) { + /* Sanity check */ + HDassert(oh->proxy); + + /* Register the object header as a parent of the virtual entry */ + if(H5AC_proxy_entry_add_parent(oh->proxy, oh) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add object header as parent of proxy") + } /* end if */ + break; + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: /* do nothing */ @@ -689,10 +711,17 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing) case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: case H5AC_NOTIFY_ACTION_CHILD_CLEANED: - case H5AC_NOTIFY_ACTION_BEFORE_EVICT: /* do nothing */ break; + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + if(oh->swmr_write) { + /* Unregister the object header as a parent of the virtual entry */ + if(H5AC_proxy_entry_remove_parent(oh->proxy, oh) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't remove object header as parent of proxy") + } /* end if */ + break; + default: #ifdef NDEBUG 
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache") @@ -998,7 +1027,6 @@ static herr_t H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) { H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing; - void *parent = NULL; /* Chunk containing continuation message that points to this chunk */ H5O_chunk_proxy_t *cont_chk_proxy = NULL; /* Proxy for chunk containing continuation message that points to this chunk, if not chunk 0 */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1013,6 +1041,51 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) switch(action) { case H5AC_NOTIFY_ACTION_AFTER_INSERT: case H5AC_NOTIFY_ACTION_AFTER_LOAD: + if(chk_proxy->oh->swmr_write) { + /* Add flush dependency on chunk parent */ + { + void *parent; /* Chunk containing continuation message that points to this chunk */ + + /* Determine the parent of the chunk */ + if(chk_proxy->cont_chunkno == 0) + parent = chk_proxy->oh; + else { + if(NULL == (cont_chk_proxy = H5O_chunk_protect(chk_proxy->f, H5AC_ind_read_dxpl_id, chk_proxy->oh, chk_proxy->cont_chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + parent = cont_chk_proxy; + } /* end else */ + + /* Sanity checks */ + HDassert(parent); + HDassert(((H5C_cache_entry_t *)parent)->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(((H5C_cache_entry_t *)parent)->type); + HDassert((((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) + || (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_CHK_ID)); + + /* Add flush dependency from chunk containing the continuation message + * that points to this chunk (either oh or another chunk proxy object) + */ + if(H5AC_create_flush_dependency(parent, chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency") + + /* Make note of the address and pointer of the flush dependency + * parent so we can take the dependency down on eviction. 
+ */ + chk_proxy->parent = parent; + } + + /* Add flush dependency on object header proxy, if proxy exists */ + { + /* Sanity check */ + HDassert(chk_proxy->oh->proxy); + + /* Register the object header chunk as a parent of the virtual entry */ + if(H5AC_proxy_entry_add_parent(chk_proxy->oh->proxy, chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't add object header chunk as parent of proxy") + } + } /* end if */ + break; + case H5AC_NOTIFY_ACTION_AFTER_FLUSH: case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: /* do nothing */ @@ -1031,10 +1104,29 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing) case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: case H5AC_NOTIFY_ACTION_CHILD_CLEANED: - case H5AC_NOTIFY_ACTION_BEFORE_EVICT: /* do nothing */ break; + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + if(chk_proxy->oh->swmr_write) { + /* Remove flush dependency on parent object header chunk */ + { + /* Sanity checks */ + HDassert(chk_proxy->parent != NULL); + HDassert(((H5C_cache_entry_t *)(chk_proxy->parent))->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(((H5C_cache_entry_t *)(chk_proxy->parent))->type); + HDassert((((H5C_cache_entry_t *)(chk_proxy->parent))->type->id == H5AC_OHDR_ID) || (((H5C_cache_entry_t *)(chk_proxy->parent))->type->id == H5AC_OHDR_CHK_ID)); + + if(H5AC_destroy_flush_dependency(chk_proxy->parent, chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency") + } + + /* Unregister the object header chunk as a parent of the virtual entry */ + if(H5AC_proxy_entry_remove_parent(chk_proxy->oh->proxy, chk_proxy) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't remove object header chunk as parent of proxy") + } /* end if */ + break; + default: #ifdef NDEBUG HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache") @@ -1200,6 +1292,7 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image oh->chunk[chunkno].size = len; if(NULL == (oh->chunk[chunkno].image = H5FL_BLK_MALLOC(chunk_image, oh->chunk[chunkno].size))) HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "memory allocation failed") + oh->chunk[chunkno].chunk_proxy = NULL; /* Copy disk image into chunk's image */ HDmemcpy(oh->chunk[chunkno].image, image, oh->chunk[chunkno].size); diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c index f6eb135..50be171 100644 --- a/src/H5Ochunk.c +++ b/src/H5Ochunk.c @@ -403,7 +403,9 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx) HDassert(chk_proxy->oh == oh); HDassert(chk_proxy->chunkno == idx); - cache_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; + /* Only free file space if not doing SWMR writes */ + if(!oh->swmr_write) + cache_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG; /* Release the chunk proxy from the cache, marking it deleted */ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, chk_proxy, cache_flags) < 0) diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index c1c72cf..ddf375c 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -381,7 +381,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, } /* end if */ /* Get source object header */ - if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Retrieve user data for particular type of object to copy */ @@ -450,6 +450,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, 
H5O_loc_t *oloc_dst /*out*/, oh_dst->attr_msgs_seen = oh_src->attr_msgs_seen; oh_dst->sizeof_size = H5F_SIZEOF_SIZE(oloc_dst->file); oh_dst->sizeof_addr = H5F_SIZEOF_ADDR(oloc_dst->file); + oh_dst->swmr_write = !!(H5F_INTENT(oloc_dst->file) & H5F_ACC_SWMR_WRITE); /* Copy time fields */ oh_dst->atime = oh_src->atime; @@ -461,6 +462,15 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, oh_dst->max_compact = oh_src->max_compact; oh_dst->min_dense = oh_src->min_dense; + /* Create object header proxy if doing SWMR writes */ + if(oh_dst->swmr_write) { + /* Create virtual entry, for use as proxy */ + if(NULL == (oh_dst->proxy = H5AC_proxy_entry_create())) + HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "can't create object header proxy") + } /* end if */ + else + oh_dst->proxy = NULL; + /* Initialize size of chunk array. Start off with zero chunks so this field * is consistent with the current state of the chunk array. This is * important if an error occurs. diff --git a/src/H5Odbg.c b/src/H5Odbg.c index d9d4b8b..827f40c 100644 --- a/src/H5Odbg.c +++ b/src/H5Odbg.c @@ -578,7 +578,7 @@ H5O_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f loc.addr = addr; loc.holding_file = FALSE; - if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* debug */ diff --git a/src/H5Oflush.c b/src/H5Oflush.c index 4fc9c8e..e399c6c 100644 --- a/src/H5Oflush.c +++ b/src/H5Oflush.c @@ -162,7 +162,7 @@ H5O_oh_tag(const H5O_loc_t *oloc, hid_t dxpl_id, haddr_t *tag) HDassert(oloc); /* Get object header for object */ - if(NULL == (oh = H5O_protect(oloc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(oloc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object's object header") /* Get object header's address (i.e. 
the tag value for this object) */ @@ -262,7 +262,7 @@ H5O_refresh_metadata(hid_t oid, H5O_loc_t oloc, hid_t dxpl_id) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object") /* Re-open the object, re-fetching its metadata */ - if((H5O_refresh_metadata_reopen(oid, &obj_loc, dxpl_id)) < 0) + if((H5O_refresh_metadata_reopen(oid, &obj_loc, dxpl_id, FALSE)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object") } /* end if */ @@ -360,7 +360,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id) +H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id, hbool_t start_swmr) { void *object = NULL; /* Dataset for this operation */ H5I_type_t type; /* Type of object for the ID */ @@ -388,8 +388,9 @@ H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id) /* Re-open the dataset */ if(NULL == (object = H5D_open(obj_loc, H5P_DATASET_ACCESS_DEFAULT, dxpl_id))) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open dataset") - if(H5D_mult_refresh_reopen((H5D_t *)object, dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to finish refresh for dataset") + if(!start_swmr) /* No need to handle multiple opens when H5Fstart_swmr_write() */ + if(H5D_mult_refresh_reopen((H5D_t *)object, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to finish refresh for dataset") break; case(H5I_UNINIT): diff --git a/src/H5Omessage.c b/src/H5Omessage.c index 7063ef6..ee21e49 100644 --- a/src/H5Omessage.c +++ b/src/H5Omessage.c @@ -477,7 +477,7 @@ H5O_msg_read(const H5O_loc_t *loc, unsigned type_id, void *mesg, HDassert(type_id < NELMTS(H5O_msg_class_g)); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header") /* Call the "real" read routine */ @@ -803,7 +803,7 @@ H5O_msg_count(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id) HDassert(type); /* Load the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Count the messages of the correct type */ @@ -885,7 +885,7 @@ H5O_msg_exists(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id) HDassert(type_id < NELMTS(H5O_msg_class_g)); /* Load the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Call the "real" exists routine */ @@ -1225,7 +1225,7 @@ H5O_msg_iterate(const H5O_loc_t *loc, unsigned type_id, HDassert(op); /* Protect the object header to iterate over */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Call the "real" iterate routine */ diff --git a/src/H5Opkg.h b/src/H5Opkg.h index b80f736..e778f90 100644 --- a/src/H5Opkg.h +++ b/src/H5Opkg.h @@ -269,6 +269,7 @@ typedef struct H5O_chunk_t { size_t size; /*chunk size */ size_t gap; /*space at end of chunk too small for null message */ 
uint8_t *image; /*image of file */ + struct H5O_chunk_proxy_t *chunk_proxy; /* Pointer to a chunk's proxy when chunk protected */ } H5O_chunk_t; struct H5O_t { @@ -278,6 +279,7 @@ struct H5O_t { /* File-specific information (not stored) */ size_t sizeof_size; /* Size of file sizes */ size_t sizeof_addr; /* Size of file addresses */ + hbool_t swmr_write; /* Whether we are doing SWMR writes */ /* Debugging information (not stored) */ #ifdef H5O_ENABLE_BAD_MESG_COUNT @@ -320,6 +322,10 @@ struct H5O_t { size_t nchunks; /*number of chunks */ size_t alloc_nchunks; /*chunks allocated */ H5O_chunk_t *chunk; /*array of chunks */ + hbool_t chunks_pinned; /* Whether chunks are pinned from ohdr protect */ + + /* Object header proxy information (not stored) */ + H5AC_proxy_entry_t *proxy; /* Proxy cache entry for all ohdr entries */ }; /* Class for types of objects in file */ @@ -381,6 +387,20 @@ typedef struct H5O_chunk_proxy_t { H5O_t *oh; /* Object header for this chunk */ unsigned chunkno; /* Chunk number for this chunk */ unsigned cont_chunkno; /* Chunk number for the chunk containing the continuation message that points to this chunk */ + + /* Flush dependency parent information (not stored) + * + * The following field is used to store a pointer + * to the in-core representation of the chunk proxy's flush dependency + * parent -- if it exists. If it does not exist, this field will + * contain NULL. + * + * If the file is opened in SWMR write mode, the flush dependency + * parent of the chunk proxy will be either its object header + * (if cont_chunkno == 0) or the chunk proxy indicated by the + * cont_chunkno field (if cont_chunkno > 0). + */ + void *parent; /* Pointer to flush dependency parent */ } H5O_chunk_proxy_t; /* Callback information for loading object header chunk from disk */ @@ -621,9 +641,6 @@ H5_DLL herr_t H5O_attr_link(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, void *_mesg H5_DLL herr_t H5O_attr_count_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh, hsize_t *nattrs); -/* These functions operate on object locations */ -H5_DLL H5O_loc_t *H5O_get_loc(hid_t id); /* Testing functions */ #ifdef H5O_TESTING H5_DLL htri_t H5O_is_attr_empty_test(hid_t oid); @@ -633,6 +650,9 @@ H5_DLL herr_t H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t * H5_DLL herr_t H5O_check_msg_marked_test(hid_t oid, hbool_t flag_val); H5_DLL herr_t H5O_expunge_chunks_test(const H5O_loc_t *oloc, hid_t dxpl_id); H5_DLL herr_t H5O_get_rc(const H5O_loc_t *oloc, hid_t dxpl_id, unsigned *rc); +H5_DLL herr_t H5O_msg_get_chunkno_test(hid_t oid, unsigned msg_type, + unsigned *chunk_num); +H5_DLL herr_t H5O_msg_move_to_new_chunk_test(hid_t oid, unsigned msg_type); #endif /* H5O_TESTING */ /* Object header debugging routines */ diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 2169906..e430b1f 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -829,7 +829,8 @@ H5_DLL herr_t H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, H5_DLL herr_t H5O_open(H5O_loc_t *loc); H5_DLL herr_t H5O_close(H5O_loc_t *loc, hbool_t *file_closed/*out*/); H5_DLL int H5O_link(const H5O_loc_t *loc, int adjust, hid_t dxpl_id); -H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags); +H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, + unsigned prot_flags, hbool_t pin_all_chunks); H5_DLL H5O_t *H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id); H5_DLL herr_t H5O_unpin(H5O_t *oh); H5_DLL herr_t H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id); @@ -852,6 +853,7 @@ H5_DLL herr_t 
H5O_get_nlinks(const H5O_loc_t *loc, hid_t dxpl_id, hsize_t *nlink H5_DLL void *H5O_obj_create(H5F_t *f, H5O_type_t obj_type, void *crt_info, H5G_loc_t *obj_loc, hid_t dxpl_id); H5_DLL haddr_t H5O_get_oh_addr(const H5O_t *oh); H5_DLL herr_t H5O_get_rc_and_type(const H5O_loc_t *oloc, hid_t dxpl_id, unsigned *rc, H5O_type_t *otype); +H5_DLL H5AC_proxy_entry_t *H5O_get_proxy(const H5O_t *oh); /* Object header message routines */ H5_DLL herr_t H5O_msg_create(const H5O_loc_t *loc, unsigned type_id, unsigned mesg_flags, @@ -903,7 +905,7 @@ H5_DLL herr_t H5O_msg_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5_DLL herr_t H5O_flush_common(H5O_loc_t *oloc, hid_t obj_id, hid_t dxpl_id); H5_DLL herr_t H5O_refresh_metadata(hid_t oid, H5O_loc_t oloc, hid_t dxpl_id); H5_DLL herr_t H5O_refresh_metadata_close(hid_t oid, H5O_loc_t oloc, H5G_loc_t *obj_loc, hid_t dxpl_id); -H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id); +H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id, hbool_t start_swmr); /* Object copying routines */ H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */, @@ -923,6 +925,7 @@ H5_DLL herr_t H5O_loc_reset(H5O_loc_t *loc); H5_DLL herr_t H5O_loc_copy(H5O_loc_t *dst, H5O_loc_t *src, H5_copy_depth_t depth); H5_DLL herr_t H5O_loc_hold_file(H5O_loc_t *loc); H5_DLL herr_t H5O_loc_free(H5O_loc_t *loc); +H5_DLL H5O_loc_t *H5O_get_loc(hid_t id); /* EFL operators */ H5_DLL hsize_t H5O_efl_total_size(H5O_efl_t *efl); diff --git a/src/H5Otest.c b/src/H5Otest.c index c3e487d..8f8980a 100644 --- a/src/H5Otest.c +++ b/src/H5Otest.c @@ -108,7 +108,7 @@ H5O_is_attr_dense_test(hid_t oid) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found") /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -173,7 +173,7 @@ H5O_is_attr_empty_test(hid_t oid) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found") /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -266,7 +266,7 @@ H5O_num_attrs_test(hid_t oid, hsize_t *nattrs) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found") /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -361,7 +361,7 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count) H5_BEGIN_TAG(H5AC_ind_read_dxpl_id, loc->addr, FAIL); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Check for attribute info stored */ @@ -452,7 +452,7 @@ H5O_check_msg_marked_test(hid_t oid, hbool_t flag_val) HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, 
"object not found") /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") /* Locate "unknown" message */ @@ -511,7 +511,7 @@ H5O_expunge_chunks_test(const H5O_loc_t *loc, hid_t dxpl_id) FUNC_ENTER_NOAPI(FAIL) /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Safety check */ @@ -571,7 +571,7 @@ H5O_get_rc(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc) HDassert(rc); /* Get the object header */ - if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header") /* Save the refcount for the object header */ @@ -585,3 +585,165 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5O_expunge_chunks_test() */ + +/*-------------------------------------------------------------------------- + NAME + H5O_msg_get_chunkno_test + PURPOSE + Retrieve the chunk number for an object header message of a given type. + USAGE + herr_t H5O_check_msg_marked_test(oid, chunk_num) + hid_t oid; IN: Object to check + unsigned msg_type; IN: Object header message type to check + unsigned *chunk_num; OUT: Object header chunk that the message is in + RETURNS + Non-negative on success, negative on failure + DESCRIPTION + Retrieves the chunk number for the first object header message of a given + type found in an object's header. 
+ GLOBAL VARIABLES + COMMENTS, BUGS, ASSUMPTIONS + DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING + EXAMPLES + REVISION LOG +--------------------------------------------------------------------------*/ +herr_t +H5O_msg_get_chunkno_test(hid_t oid, unsigned msg_type, unsigned *chunk_num) +{ + H5O_t *oh = NULL; /* Object header */ + H5O_loc_t *loc; /* Pointer to object's location */ + H5O_mesg_t *idx_msg; /* Pointer to message */ + unsigned idx; /* Index of message */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Get object location for object */ + if(NULL == (loc = H5O_get_loc(oid))) + HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found") + + /* Get the object header */ + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") + + /* Locate first message of given type */ + for(idx = 0, idx_msg = &oh->mesg[0]; idx < oh->nmesgs; idx++, idx_msg++) + if(idx_msg->type->id == msg_type) { + /* Set the chunk number for the message */ + *chunk_num = idx_msg->chunkno; + + /* Break out of loop, to indicate that the message was found */ + break; + } /* end if */ + + /* Check for not finding a message of the given type*/ + if(idx == oh->nmesgs) + HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "message of type not found") + +done: + if(oh && H5O_unprotect(loc, H5AC_ind_read_dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5O_msg_get_chunkno_test() */ + + +/*-------------------------------------------------------------------------- + NAME + H5O_msg_move_to_new_chunk_test + PURPOSE + Move a message into a new chunk + USAGE + herr_t H5O_msg_move_to_new_chunk_test(oid, msg_type) + hid_t oid; IN: Object to check + unsigned msg_type; IN: Object header message type to check + RETURNS + Non-negative on success, negative on failure + DESCRIPTION + Moves the first message of the given type to a new object header chunk. 
+ GLOBAL VARIABLES + COMMENTS, BUGS, ASSUMPTIONS + DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING + EXAMPLES + REVISION LOG +--------------------------------------------------------------------------*/ +herr_t +H5O_msg_move_to_new_chunk_test(hid_t oid, unsigned msg_type) +{ + H5O_t *oh = NULL; /* Object header */ + H5O_loc_t *loc; /* Pointer to object's location */ + H5O_mesg_t *curr_msg; /* Pointer to current message */ + unsigned idx; /* Index of message */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Get object location for object */ + if(NULL == (loc = H5O_get_loc(oid))) + HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found") + + /* Get the object header */ + if(NULL == (oh = H5O_protect(loc, H5AC_ind_read_dxpl_id, H5AC__NO_FLAGS_SET, FALSE))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header") + + /* Locate first message of given type */ + for(idx = 0, curr_msg = &oh->mesg[0]; idx < oh->nmesgs; idx++, curr_msg++) + if(curr_msg->type->id == msg_type) { + H5O_msg_alloc_info_t found_msg; /* Information about message to move */ + unsigned msg_chunkno = curr_msg->chunkno; /* Chunk that the message is in */ + uint8_t *end_chunk_data = (oh->chunk[msg_chunkno].image + oh->chunk[msg_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[msg_chunkno].gap); /* End of message data in chunk */ + uint8_t *end_msg = curr_msg->raw + curr_msg->raw_size; /* End of current message */ + size_t gap_size = 0; /* Size of gap after current message */ + size_t null_size = 0; /* Size of NULL message after current message */ + unsigned null_msgno = 0; /* Index of NULL message after current message */ + size_t total_size; /* Total size of available space "around" current message */ + size_t new_idx; /* Index of new null message */ + + /* Check if the message is the last one in the chunk */ + if(end_msg == end_chunk_data) + gap_size = oh->chunk[msg_chunkno].gap; + else { + H5O_mesg_t *tmp_msg; /* Temp. pointer to message to operate on */ + unsigned v; /* Local index variable */ + + /* Check for null message after this message, in same chunk */ + for(v = 0, tmp_msg = &oh->mesg[0]; v < oh->nmesgs; v++, tmp_msg++) { + if(tmp_msg->type->id == H5O_NULL_ID && (tmp_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh)) == end_msg) { + null_msgno = v; + null_size = (size_t)H5O_SIZEOF_MSGHDR_OH(oh) + tmp_msg->raw_size; + break; + } /* end if */ + + /* XXX: Should also check for NULL message in front of current message... 
*/ + + } /* end for */ + } /* end else */ + + /* Add up current message's total available space */ + total_size = curr_msg->raw_size + gap_size + null_size; + + /* Set up "found message" info for moving the message */ + found_msg.msgno = (int)idx; + found_msg.id = curr_msg->type->id; + found_msg.chunkno = msg_chunkno; + found_msg.gap_size = gap_size; + found_msg.null_size = null_size; + found_msg.total_size = total_size; + found_msg.null_msgno = null_msgno; + + /* Allocate and initialize new chunk in the file, moving the found message */ + /* (*new_idx returned from this routine is unused here) */ + if(H5O__alloc_chunk(loc->file, H5AC_ind_read_dxpl_id, oh, 40, oh->nmesgs, &found_msg, &new_idx) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "can't allocate new object header chunk") + + /* Break out of loop, the message was found */ + break; + } /* end if */ + +done: + if(oh && H5O_unprotect(loc, H5AC_ind_read_dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header") + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5O_msg_move_to_new_chunk_test() */ + diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index 81cd260..91a0e03 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -186,6 +186,9 @@ /* Definition for object flush callback */ #define H5F_ACS_OBJECT_FLUSH_CB_SIZE sizeof(H5F_object_flush_t) #define H5F_ACS_OBJECT_FLUSH_CB_DEF {NULL, NULL} +/* Definition for status_flags in the superblock */ +#define H5F_ACS_CLEAR_STATUS_FLAGS_SIZE sizeof(hbool_t) +#define H5F_ACS_CLEAR_STATUS_FLAGS_DEF FALSE /* Definition for 'use metadata cache logging' flag */ #define H5F_ACS_USE_MDC_LOGGING_SIZE sizeof(hbool_t) #define H5F_ACS_USE_MDC_LOGGING_DEF FALSE @@ -334,6 +337,7 @@ static const hbool_t H5F_def_core_write_tracking_flag_g = H5F_ACS_CORE_WRITE_TRA static const size_t H5F_def_core_write_tracking_page_size_g = H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_DEF; /* Default core VFD write tracking page size */ static const unsigned H5F_def_metadata_read_attempts_g = H5F_ACS_METADATA_READ_ATTEMPTS_DEF; /* Default setting for the # of metadata read attempts */ static const H5F_object_flush_t H5F_def_object_flush_cb_g = H5F_ACS_OBJECT_FLUSH_CB_DEF; /* Default setting for object flush callback */ +static const hbool_t H5F_def_clear_status_flags_g = H5F_ACS_CLEAR_STATUS_FLAGS_DEF; /* Default to clear the superblock status_flags */ static const hbool_t H5F_def_use_mdc_logging_g = H5F_ACS_USE_MDC_LOGGING_DEF; /* Default metadata cache logging flag */ static const char *H5F_def_mdc_log_location_g = H5F_ACS_MDC_LOG_LOCATION_DEF; /* Default mdc log location */ static const hbool_t H5F_def_start_mdc_log_on_access_g = H5F_ACS_START_MDC_LOG_ON_ACCESS_DEF; /* Default mdc log start on access flag */ @@ -510,6 +514,11 @@ H5P__facc_reg_prop(H5P_genclass_t *pclass) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + /* Register the private property of whether to clear the superblock status_flags. It's used by h5clear only. */ + if(H5P_register_real(pclass, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, H5F_ACS_CLEAR_STATUS_FLAGS_SIZE, &H5F_def_clear_status_flags_g, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + /* Register the metadata cache logging flag. 
*/ if(H5P_register_real(pclass, H5F_ACS_USE_MDC_LOGGING_NAME, H5F_ACS_USE_MDC_LOGGING_SIZE, &H5F_def_use_mdc_logging_g, NULL, NULL, NULL, H5F_ACS_USE_MDC_LOGGING_ENC, H5F_ACS_USE_MDC_LOGGING_DEC, NULL, NULL, NULL, NULL) < 0) diff --git a/src/H5Plapl.c b/src/H5Plapl.c index 266dc63..a0ec7f1 100644 --- a/src/H5Plapl.c +++ b/src/H5Plapl.c @@ -1168,7 +1168,9 @@ H5Pset_elink_acc_flags(hid_t lapl_id, unsigned flags) H5TRACE2("e", "iIu", lapl_id, flags); /* Check that flags are valid */ - if((flags != H5F_ACC_RDWR) && (flags != H5F_ACC_RDONLY) && (flags != H5F_ACC_DEFAULT)) + if(( flags != H5F_ACC_RDWR) && (flags != (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE)) + && (flags != H5F_ACC_RDONLY) && (flags != (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ)) + && (flags != H5F_ACC_DEFAULT)) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file open flags") /* Get the plist structure */ @@ -2436,7 +2436,7 @@ H5SM_read_mesg(H5F_t *f, const H5SM_sohm_t *mesg, H5HF_t *fheap, HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, FAIL, "unable to open object header") /* Load the object header from the cache */ - if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC__READ_ONLY_FLAG))) + if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load object header") } /* end if */ else diff --git a/src/H5err.txt b/src/H5err.txt index a156316..9531df9 100644 --- a/src/H5err.txt +++ b/src/H5err.txt @@ -169,7 +169,9 @@ MINOR, CACHE, H5E_CANTUNPROTECT, Unable to unprotect metadata MINOR, CACHE, H5E_CANTPIN, Unable to pin cache entry MINOR, CACHE, H5E_CANTUNPIN, Unable to un-pin cache entry MINOR, CACHE, H5E_CANTMARKDIRTY, Unable to mark a pinned entry as dirty +MINOR, CACHE, H5E_CANTMARKCLEAN, Unable to mark a pinned entry as clean MINOR, CACHE, H5E_CANTDIRTY, Unable to mark metadata as dirty +MINOR, CACHE, H5E_CANTCLEAN, Unable to mark metadata as clean MINOR, CACHE, H5E_CANTEXPUNGE, Unable to expunge a metadata cache entry MINOR, CACHE, H5E_CANTRESIZE, Unable to resize a metadata cache entry MINOR, CACHE, H5E_CANTDEPEND, Unable to create a flush dependency diff --git a/src/Makefile.am b/src/Makefile.am index ff845d1..939e151 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -42,9 +42,10 @@ DISTCLEANFILES=H5pubconf.h # library sources libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5A.c H5Abtree2.c H5Adense.c H5Adeprec.c H5Aint.c H5Atest.c \ - H5AC.c H5ACdbg.c H5AClog.c \ + H5AC.c H5ACdbg.c H5AClog.c H5ACproxy_entry.c \ H5B.c H5Bcache.c H5Bdbg.c \ - H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2stat.c H5B2test.c \ + H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2internal.c \ + H5B2leaf.c H5B2stat.c H5B2test.c \ H5C.c H5Cdbg.c H5Cepoch.c H5Clog.c H5Cquery.c H5Ctag.c H5Ctest.c \ H5CS.c \ H5D.c H5Dbtree.c H5Dbtree2.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \ @@ -60,11 +61,12 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5Fmount.c H5Fquery.c \ H5Fsfile.c H5Fsuper.c H5Fsuper_cache.c H5Ftest.c \ H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \ - H5FAstat.c H5FAtest.c \ + H5FAint.c H5FAstat.c H5FAtest.c \ H5FD.c H5FDcore.c \ H5FDfamily.c H5FDint.c H5FDlog.c \ - H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c \ - H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSsection.c H5FSstat.c H5FStest.c \ + H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \ + H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSint.c H5FSsection.c \ + H5FSstat.c H5FStest.c \ H5G.c H5Gbtree2.c H5Gcache.c \ 
H5Gcompact.c H5Gdense.c H5Gdeprec.c H5Gent.c \ H5Gint.c H5Glink.c \ diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 22774cd..5a55616 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -251,6 +251,7 @@ set (H5_TESTS unregister cache_logging cork + swmr ) foreach (test ${H5_TESTS}) diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake index 5cca2d2..cd2e989 100644 --- a/test/CMakeTests.cmake +++ b/test/CMakeTests.cmake @@ -562,6 +562,7 @@ set (H5TEST_TESTS unregister cache_logging cork + swmr ) foreach (test ${H5TEST_TESTS}) diff --git a/test/Makefile.am b/test/Makefile.am index b6be746..380043a 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -48,7 +48,7 @@ TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \ big mtime fillval mount flush1 flush2 app_ref enum \ set_extent ttsafe enc_dec_plist enc_dec_plist_cross_platform\ getname vfd ntypes dangle dtransform reserved cross_read \ - freespace mf vds file_image unregister cache_logging cork + freespace mf vds file_image unregister cache_logging cork swmr # List programs to be built when testing here. # error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh. @@ -136,7 +136,8 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse max_compact_dataset.h5 simple.h5 set_local.h5 random_chunks.h5 \ huge_chunks.h5 chunk_cache.h5 big_chunk.h5 chunk_fast.h5 chunk_expand.h5 \ chunk_fixed.h5 copy_dcpl_newfile.h5 partial_chunks.h5 layout_extend.h5 \ - zero_chunk.h5 chunk_single.h5 \ + zero_chunk.h5 chunk_single.h5 swmr_non_latest.h5 \ + earray_hdr_fd.h5 farray_hdr_fd.h5 bt2_hdr_fd.h5 \ storage_size.h5 dls_01_strings.h5 \ extend.h5 istore.h5 extlinks*.h5 frspace.h5 links*.h5 \ sys_file1 tfile[1-7].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \ @@ -161,8 +162,8 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \ file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \ vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \ - cache_logging.h5 cache_logging.out - + cache_logging.h5 cache_logging.out \ + swmr[0-2].h5 # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ tgenprop.c th5o.c th5s.c tcoords.c theap.c tid.c titerate.c tmeta.c tmisc.c \ diff --git a/test/dsets.c b/test/dsets.c index 7d9f331..31ef88a 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -23,11 +23,16 @@ #define H5D_FRIEND /*suppress error about including H5Dpkg */ #define H5D_TESTING +#define H5FD_FRIEND /*suppress error about including H5FDpkg */ +#define H5FD_TESTING + #define H5Z_FRIEND /*suppress error about including H5Zpkg */ #include "h5test.h" #include "H5srcdir.h" #include "H5Dpkg.h" +#include "H5FDpkg.h" +#include "H5VMprivate.h" #include "H5Zpkg.h" #ifdef H5_HAVE_SZLIB_H # include "szlib.h" @@ -52,8 +57,12 @@ const char *FILENAME[] = { "layout_extend", /* 15 */ "zero_chunk", /* 16 */ "chunk_single", /* 17 */ - "storage_size", /* 18 */ - "dls_01_strings", /* 19 */ + "swmr_non_latest", /* 18 */ + "earray_hdr_fd", /* 19 */ + "farray_hdr_fd", /* 20 */ + "bt2_hdr_fd", /* 21 */ + "storage_size", /* 22 */ + "dls_01_strings", /* 23 */ NULL }; #define FILENAME_BUF_SIZE 1024 @@ -127,6 +136,11 @@ const char *FILENAME[] = { #define POINTS 72 #define POINTS_BIG 2500 +/* Dataset names used for testing header flush dependencies */ +#define DSET_EARRAY_HDR_FD "earray_hdr_fd" +#define DSET_FARRAY_HDR_FD 
"farray_hdr_fd" +#define DSET_BT2_HDR_FD "bt2_hdr_fd" + /* Dataset names for testing Implicit Indexing */ #define DSET_SINGLE_MAX "DSET_SINGLE_MAX" #define DSET_SINGLE_NOMAX "DSET_SINGLE_NOMAX" @@ -8219,7 +8233,7 @@ error: *------------------------------------------------------------------------- */ static herr_t -test_chunk_fast(hid_t fapl) +test_chunk_fast(const char *env_h5_driver, hid_t fapl) { char filename[FILENAME_BUF_SIZE]; hid_t fid = -1; /* File ID */ @@ -8232,6 +8246,7 @@ test_chunk_fast(hid_t fapl) hsize_t hs_size[EARRAY_MAX_RANK]; /* Hyperslab size */ hsize_t chunk_dim[EARRAY_MAX_RANK]; /* Chunk dimensions */ H5F_libver_t low; /* File format low bound */ + unsigned swmr; /* Whether file should be written with SWMR access enabled */ TESTING("datasets w/extensible array as chunk index"); @@ -8266,9 +8281,19 @@ test_chunk_fast(hid_t fapl) fill = 1; H5VM_array_fill(hs_size, &fill, sizeof(fill), EARRAY_MAX_RANK); - { + /* Loop over using SWMR access to write */ + for(swmr = 0; swmr <= 1; swmr++) { int compress; /* Whether chunks should be compressed */ + /* SWMR is now supported with/without latest format: */ + /* (1) write+latest-format (2) SWMR-write+non-latest-format */ + + /* Skip this iteration if SWMR I/O is not supported for the VFD specified + * by the environment variable. + */ + if(swmr && !H5FD_supports_swmr_test(env_h5_driver)) + continue; + #ifdef H5_HAVE_FILTER_DEFLATE /* Loop over compressing chunks */ for(compress = 0; compress <= 1; compress++) @@ -8318,7 +8343,7 @@ test_chunk_fast(hid_t fapl) hsize_t u; /* Local index variable */ /* Create file */ - if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0) FAIL_STACK_ERROR + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr ? H5F_ACC_SWMR_WRITE : 0), H5P_DEFAULT, my_fapl)) < 0) FAIL_STACK_ERROR /* Create n-D dataspace */ fill = EARRAY_DSET_DIM; @@ -8345,7 +8370,7 @@ test_chunk_fast(hid_t fapl) if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR /* Chunk index type expected depends on whether we are using the latest version of the format */ - if(low == H5F_LIBVER_LATEST) { + if(low == H5F_LIBVER_LATEST || swmr) { /* Verify index type */ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); } /* end if */ @@ -8445,7 +8470,7 @@ test_chunk_fast(hid_t fapl) /* Re-open file & dataset */ - if((fid = H5Fopen(filename, H5F_ACC_RDONLY, my_fapl)) < 0) FAIL_STACK_ERROR + if((fid = H5Fopen(filename, H5F_ACC_RDONLY | (swmr ? H5F_ACC_SWMR_READ : 0), my_fapl)) < 0) FAIL_STACK_ERROR /* Open dataset */ if((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR @@ -8454,7 +8479,7 @@ test_chunk_fast(hid_t fapl) if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR /* Chunk index tyepe expected depends on whether we are using the latest version of the format */ - if(low == H5F_LIBVER_LATEST) { + if(low == H5F_LIBVER_LATEST || swmr) { /* Verify index type */ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index"); } /* end if */ @@ -10316,6 +10341,615 @@ error: /*------------------------------------------------------------------------- + * Function: test_swmr_non_latest + * + * Purpose: Checks that a file created with either: + * (a) SWMR-write + non-latest-format + * (b) write + latest format + * will generate datset with latest chunk indexing type. 
+ * + * Return: Success: 0 + * Failure: -1 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_swmr_non_latest(const char *env_h5_driver, hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; + hid_t fid = -1; /* File ID */ + hid_t gid = -1; /* Group ID */ + hid_t dcpl = -1; /* Dataset creation property list ID */ + hid_t sid = -1; /* Dataspace ID */ + hid_t did = -1; /* Dataset ID */ + hsize_t dim[1], dims2[2]; /* Size of dataset */ + hsize_t max_dim[1], max_dims2[2]; /* Maximum size of dataset */ + hsize_t chunk_dim[1], chunk_dims2[2]; /* Chunk dimensions */ + H5D_chunk_index_t idx_type; /* Chunk index type */ + int data; /* Data to be written to the dataset */ + H5F_libver_t low; /* File format low bound */ + + TESTING("File created with write+latest-format/SWMR-write+non-latest-format: dataset with latest chunk index"); + + /* Skip this test if SWMR I/O is not supported for the VFD specified + * by the environment variable. + */ + if(!H5FD_supports_swmr_test(env_h5_driver)) { + SKIPPED(); + HDputs(" Test skipped due to VFD not supporting SWMR I/O."); + return 0; + } /* end if */ + + /* Check if we are using the latest version of the format */ + if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) + FAIL_STACK_ERROR + + h5_fixname(FILENAME[18], fapl, filename, sizeof filename); + + if(low == H5F_LIBVER_LATEST) { + /* Create file with write+latest-format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } else { + /* Create file with SWMR-write+non-latest-format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } /* end else */ + + /* Create a chunked dataset: this will use extensible array chunk indexing */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + + chunk_dim[0] = 6; + if(H5Pset_chunk(dcpl, 1, chunk_dim) < 0) + FAIL_STACK_ERROR + + dim[0] = 1; + max_dim[0] = H5S_UNLIMITED; + if((sid = H5Screate_simple(1, dim, max_dim)) < 0) + FAIL_STACK_ERROR + + if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Write to the dataset */ + data = 100; + if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("created dataset not indexed by extensible array") + + /* Closing */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + /* Open the file again */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset in the file */ + if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("created dataset not indexed by extensible array") + + /* Read from the dataset and verify data read is correct */ + if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) + FAIL_STACK_ERROR + if(data != 100) + TEST_ERROR + + /* Close the dataset */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + + /* Create a group in the file */ + if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, 
H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Create a chunked dataset in the group: this will use v2 B-tree chunk indexing */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + + chunk_dims2[0] = chunk_dims2[1] = 10; + if(H5Pset_chunk(dcpl, 2, chunk_dims2) < 0) + FAIL_STACK_ERROR + + dims2[0] = dims2[1] = 1; + max_dims2[0] = max_dims2[1] = H5S_UNLIMITED; + if((sid = H5Screate_simple(2, dims2, max_dims2)) < 0) + FAIL_STACK_ERROR + + if((did = H5Dcreate2(gid, DSET_CHUNKED_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree") + + /* Closing */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Gclose(gid) < 0) FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + /* Open the file again */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the group */ + if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset in the group */ + if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree") + + /* Closing */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + if(H5Gclose(gid) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + + /* Reopen the file with SWMR-write */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset in the file */ + if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("created dataset not indexed by extensible array") + + /* Close the dataset */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + + /* Open the group */ + if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset in the group */ + if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Verify the dataset's indexing type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR + if(idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree") + + /* Write to the dataset in the group */ + data = 99; + if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) + FAIL_STACK_ERROR + + /* Closing */ + if(H5Dclose(did) < 0) FAIL_STACK_ERROR + if(H5Gclose(gid) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + /* Open the file again with SWMR read access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Read from the dataset and verify data read is correct */ + data = 0; + if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) + FAIL_STACK_ERROR + if(data != 99) + TEST_ERROR + + /* Closing */ + 
if(H5Dclose(did) < 0) FAIL_STACK_ERROR + if(H5Gclose(gid) < 0) FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dcpl); + H5Dclose(did); + H5Sclose(sid); + H5Gclose(gid); + H5Fclose(fid); + } H5E_END_TRY; + return -1; +} /* test_swmr_non_latest() */ + + +/*------------------------------------------------------------------------- + * Function: test_earray_hdr_fd + * + * Purpose: Tests that extensible array header flush dependencies + * are created and torn down correctly when used as a + * chunk index. + * + * Return: Success: 0 + * Failure: -1 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_earray_hdr_fd(const char *env_h5_driver, hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; + hid_t fid = -1; + hid_t sid = -1; + hid_t did = -1; + hid_t tid = -1; + hid_t dcpl = -1; + hid_t msid = -1; + H5D_chunk_index_t idx_type; + const hsize_t shape[1] = { 8 }; + const hsize_t maxshape[1] = { H5S_UNLIMITED }; + const hsize_t chunk[1] = { 8 }; + const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + H5O_info_t info; + + TESTING("Extensible array chunk index header flush dependencies handled correctly"); + + /* Skip this test if SWMR I/O is not supported for the VFD specified + * by the environment variable. + */ + if(!H5FD_supports_swmr_test(env_h5_driver)) { + SKIPPED(); + HDputs(" Test skipped due to VFD not supporting SWMR I/O."); + return 0; + } /* end if */ + + if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR; + + h5_fixname(FILENAME[19], fapl, filename, sizeof(filename)); + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Create a dataset with one unlimited dimension */ + if((sid = H5Screate_simple(1, shape, maxshape)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_chunk(dcpl, 1, chunk) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, DSET_EARRAY_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0) + FAIL_STACK_ERROR; + + /* Verify the chunk index type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR; + if(idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("should be using extensible array as index"); + + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + if(H5Fstart_swmr_write(fid) < 0) + FAIL_STACK_ERROR; + + /* Write data to the dataset */ + if((did = H5Dopen2(fid, DSET_EARRAY_HDR_FD, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Dget_type(did)) < 0) + FAIL_STACK_ERROR; + if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + + /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */ + if(H5Oget_info_by_name(fid, DSET_EARRAY_HDR_FD, &info, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + if(H5Oget_info_by_name(fid, DSET_EARRAY_HDR_FD, &info, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + H5Dclose(did); + H5Pclose(dcpl); + 
H5Tclose(tid); + H5Sclose(sid); + H5Sclose(msid); + } H5E_END_TRY; + return -1; +} /* test_earray_hdr_fd() */ + + +/*------------------------------------------------------------------------- + * Function: test_farray_hdr_fd + * + * Purpose: Tests that fixed array header flush dependencies + * are created and torn down correctly when used as a + * chunk index. + * + * Return: Success: 0 + * Failure: -1 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_farray_hdr_fd(const char *env_h5_driver, hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; + hid_t fid = -1; + hid_t sid = -1; + hid_t did = -1; + hid_t tid = -1; + hid_t dcpl = -1; + hid_t msid = -1; + H5D_chunk_index_t idx_type; + const hsize_t shape[1] = { 8 }; + const hsize_t maxshape[1] = { 64 }; + const hsize_t chunk[1] = { 8 }; + const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + H5O_info_t info; + + TESTING("Fixed array chunk index header flush dependencies handled correctly"); + + /* Skip this test if SWMR I/O is not supported for the VFD specified + * by the environment variable. + */ + if(!H5FD_supports_swmr_test(env_h5_driver)) { + SKIPPED(); + HDputs(" Test skipped due to VFD not supporting SWMR I/O."); + return 0; + } /* end if */ + + if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR; + + h5_fixname(FILENAME[20], fapl, filename, sizeof(filename)); + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Create a chunked dataset with fixed dimensions */ + if((sid = H5Screate_simple(1, shape, maxshape)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_chunk(dcpl, 1, chunk) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, DSET_FARRAY_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0) + FAIL_STACK_ERROR; + + /* Verify the chunk index type */ + if(H5D__layout_idx_type_test(did, &idx_type) < 0) + FAIL_STACK_ERROR; + if(idx_type != H5D_CHUNK_IDX_FARRAY) + FAIL_PUTS_ERROR("should be using fixed array as index"); + + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + if(H5Fstart_swmr_write(fid) < 0) + FAIL_STACK_ERROR; + + /* Write data to the dataset */ + if((did = H5Dopen2(fid, DSET_FARRAY_HDR_FD, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Dget_type(did)) < 0) + FAIL_STACK_ERROR; + if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR; + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + + /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */ + if(H5Oget_info_by_name(fid, DSET_FARRAY_HDR_FD, &info, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + if(H5Oget_info_by_name(fid, DSET_FARRAY_HDR_FD, &info, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + TEST_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + H5Dclose(did); + H5Pclose(dcpl); + H5Tclose(tid); + H5Sclose(sid); + H5Sclose(msid); + } H5E_END_TRY; + return -1; +} /* test_farray_hdr_fd() */ + + +/*------------------------------------------------------------------------- + * Function: test_bt2_hdr_fd + * + * Purpose: Tests that 
version 2 B-tree header flush dependencies
+ * are created and torn down correctly when used as a
+ * chunk index.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_bt2_hdr_fd(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t did = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ hid_t msid = -1;
+ H5D_chunk_index_t idx_type;
+ const hsize_t shape[2] = { 8, 8 };
+ const hsize_t maxshape[2] = { H5S_UNLIMITED, H5S_UNLIMITED };
+ const hsize_t chunk[2] = { 8, 8 };
+ const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ H5O_info_t info;
+
+ TESTING("Version 2 B-tree chunk index header flush dependencies handled correctly");
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ } /* end if */
+
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR;
+
+ h5_fixname(FILENAME[21], fapl, filename, sizeof(filename));
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create a chunked dataset with two unlimited dimensions */
+ if((sid = H5Screate_simple(2, shape, maxshape)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk) < 0)
+ FAIL_STACK_ERROR;
+ if((did = H5Dcreate2(fid, DSET_BT2_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the chunk index type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Fstart_swmr_write(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Write data to the dataset */
+ if((did = H5Dopen2(fid, DSET_BT2_HDR_FD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Dget_type(did)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */
+ if(H5Oget_info_by_name(fid, DSET_BT2_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Oget_info_by_name(fid, DSET_BT2_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Sclose(msid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_bt2_hdr_fd() */
+
+
 /*-------------------------------------------------------------------------
 * Function: test_storage_size
 *
 * Purpose: Tests results from querying the storage size of a dataset,
@@ -10345,7 +10979,7 @@ test_storage_size(hid_t fapl)
 TESTING("querying storage size");
- h5_fixname(FILENAME[18], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[22], fapl,
filename, sizeof filename); /* Create file */ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR @@ -11943,7 +12577,7 @@ dls_01_main( void ) { TESTING("Testing DLS bugfix 1"); - if ( NULL == h5_fixname(FILENAME[19], H5P_DEFAULT, filename, + if ( NULL == h5_fixname(FILENAME[23], H5P_DEFAULT, filename, sizeof(filename)) ) TEST_ERROR @@ -12127,7 +12761,7 @@ main(void) nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0); nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0); nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0); - nerrors += (test_chunk_fast(my_fapl) < 0 ? 1 : 0); + nerrors += (test_chunk_fast(envval, my_fapl) < 0 ? 1 : 0); nerrors += (test_reopen_chunk_fast(my_fapl) < 0 ? 1 : 0); nerrors += (test_chunk_fast_bug1(my_fapl) < 0 ? 1 : 0); nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0); @@ -12138,6 +12772,10 @@ main(void) nerrors += (test_single_chunk(my_fapl) < 0 ? 1 : 0); nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0); nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0); + nerrors += (test_swmr_non_latest(envval, my_fapl) < 0 ? 1 : 0); + nerrors += (test_earray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0); + nerrors += (test_farray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0); + nerrors += (test_bt2_hdr_fd(envval, my_fapl) < 0 ? 1 : 0); nerrors += (test_storage_size(my_fapl) < 0 ? 1 : 0); if(H5Fclose(file) < 0) diff --git a/test/swmr.c b/test/swmr.c new file mode 100644 index 0000000..780eb0f --- /dev/null +++ b/test/swmr.c @@ -0,0 +1,7131 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*********************************************************** +* +* Test program: swmr +* +* To test new public routines from SWMR project: +* H5Pget/set_metadata_read_attempts() +* H5Fget_metadata_read_retry_info() +* H5Fstart_swmr_write() +* H5Pget/set_object_flush_cb() +* H5Pget/set_append_flush() +* +*************************************************************/ + +#include "hdf5.h" +#include "h5test.h" +#include "H5Iprivate.h" + +/* + * This file needs to access private information from the H5F package. + * This file also needs to access the file, file driver, dataset, + * and object header testing code. 
+ */ +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" /* File access */ + +#define H5D_FRIEND /*suppress error about including H5Dpkg */ +#define H5D_TESTING +#include "H5Dpkg.h" /* Datasets */ + +#define H5FD_FRIEND /*suppress error about including H5FDpkg */ +#define H5FD_TESTING +#include "H5FDpkg.h" /* File drivers */ + +#define H5O_FRIEND /*suppress error about including H5Opkg */ +#define H5O_TESTING +#include "H5Opkg.h" /* Object headers */ + + +const char *FILENAME[] = { + "swmr0", /* 0 */ + "swmr1", /* 1 */ + "swmr2", /* 2 */ + NULL +}; + + +#define NAME_BUF_SIZE 1024 /* Length of file name */ + +/* Tests for H5Pget/set_metadata_read_attempts(), H5Fget_metadata_read_retry_info */ +static int test_metadata_read_attempts(hid_t in_fapl); +static int test_metadata_read_retry_info(hid_t in_fapl); + +/* Tests for H5Fstart_swmr_write() */ +static int test_start_swmr_write(hid_t in_fapl, hbool_t new_format); +static int test_err_start_swmr_write(hid_t in_fapl, hbool_t new_format); +static int test_start_swmr_write_concur(hid_t in_fapl, hbool_t new_format); +static int test_start_swmr_write_stress_ohdr(hid_t in_fapl); + +/* Tests for H5Pget/set_object_flush_cb() */ +static herr_t flush_cb(hid_t obj_id, void *_udata); +static int test_object_flush_cb(hid_t in_fapl); + +/* Tests for H5Pget/set_append_flush() */ +static herr_t append_cb(hid_t dset_id, hsize_t *cur_dims, void *_udata); +static herr_t append_cb2(hid_t dset_id, hsize_t *cur_dims, void *_udata); +static int test_append_flush_generic(void); +static int test_append_flush_dataset_chunked(hid_t in_fapl); +static int test_append_flush_dataset_fixed(hid_t in_fapl); +static int test_append_flush_dataset_multiple(hid_t in_fapl); + +/* Tests for file open flags/SWMR flags: single process access */ +static int test_file_lock_same(hid_t fapl); +static int test_file_lock_swmr_same(hid_t fapl); + +/* Tests for file open flags/SWMR flags: concurrent process access */ +static int test_file_lock_concur(hid_t fapl); +static int test_file_lock_swmr_concur(hid_t fapl); + +/* Test file lock environment variable */ +static int test_file_lock_env_var(hid_t fapl); + +/* Tests for SWMR VFD flag */ +static int test_swmr_vfd_flag(void); + +/* Tests for H5Drefresh: concurrent access */ +static int test_refresh_concur(hid_t in_fapl, hbool_t new_format); + +/* Tests for multiple opens of files and datasets with H5Drefresh() & H5Fstart_swmr_write(): same process */ +static int test_multiple_same(hid_t in_fapl, hbool_t new_format); + +/* + * Tests for H5Pget/set_metadata_read_attemps(), H5Fget_metadata_read_retry_info() + */ + +/* + * test_metadata_read_attempts(): + * + * Checks the following two public routines work as specified: + * H5Pset_metadata_read_attempts() + * H5Pget_metadata_read_attempts() + */ +static int +test_metadata_read_attempts(hid_t in_fapl) +{ + hid_t fapl; /* File access property list */ + hid_t file_fapl; /* The file's access property list */ + hid_t fid, fid1, fid2; /* File IDs */ + unsigned attempts; /* The # of read attempts */ + char filename[NAME_BUF_SIZE]; /* File name */ + herr_t ret; /* Generic return value */ + + /* Output message about test being performed */ + TESTING("H5Pget/set_metadata_read_attempts()"); + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* + * Set A: + * Tests on verifying 
the # of read attempts when: + * --setting/getting read attempts from a + * file access property list. + */ + /* Get # of read attempts -- should be the default: 1 */ + if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 1) + TEST_ERROR + + /* Set the # of read attempts to 0--should fail */ + H5E_BEGIN_TRY { + ret = H5Pset_metadata_read_attempts(fapl, 0); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Set the # of read attempts to a # > 0--should succeed */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + TEST_ERROR + + /* Retrieve the # of read attempts -- should be 9 */ + if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 9) + TEST_ERROR + + /* Set the # of read attempts to the default for non-SWMR access: H5F_METADATA_READ_ATTEMPTS --should succeed */ + if(H5Pset_metadata_read_attempts(fapl, H5F_METADATA_READ_ATTEMPTS) < 0) + TEST_ERROR + + /* Retrieve the # of read attempts -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Set the # of read attempts to the default for SWMR access: H5F_SWMR_METADATA_READ_ATEMPTS --should succeed */ + if(H5Pset_metadata_read_attempts(fapl, H5F_SWMR_METADATA_READ_ATTEMPTS) < 0) + TEST_ERROR + + /* Retrieve the # of read attempts -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* + * Set B: + * Tests on verifying read attempts when: + * --create a file with non-SWMR access + * --opening files with SWMR access + * --using default or non-default file access property list + */ + /* Test 1 */ + /* Create a file with non-SWMR access and default fapl */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 2 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access and default read attempts */ + if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 3 */ + /* Get a copy of the parameter fapl */ + 
if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access and fapl (non-default & set to 9) */ + if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be 9 */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 9) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 4 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 1) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access and fapl (non-default & set to 1) */ + if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be 1 */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 1) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 5 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR_READ and fapl (non-default read attempts but unset) */ + if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* + * Set C: + * Tests on verifying read attempts when: + * --create a file with SWMR access + * --opening files with non-SWMR access + * --using default or non-default file access property list + */ + /* Test 1 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create a file with non-SWMR access and default read attempts */ + if((fid = H5Fcreate(filename, 
H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 2 */ + /* Open the file with non-SWMR access and default fapl */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 3 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + FAIL_STACK_ERROR + + /* Open the file with non-SWMR access and fapl (non-default & set to 9) */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be 9 */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 9) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 4 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 1) < 0) + FAIL_STACK_ERROR + + /* Open the file with non-SWMR access and fapl (non-default & set to 1) */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be 1 */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != 1) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Test 5 */ + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the file with non-SWMR_READ and fapl (non-default but unset) */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + 
if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + FAIL_STACK_ERROR + + /* Create a file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Open file again with non-SWMR access and default fapl */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Open file again with SWMR access and default read attempts */ + if((fid = H5Fopen(filename, H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Set D: + * Tests on verifying read attempts when: + * --create with non-SWMR access + * --opening files with SWMR access + * --H5reopen the files + */ + + /* Create a file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Open file again with SWMR access and default read attempts */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + FAIL_STACK_ERROR + + /* Open file again with SWMR access and fapl (non-default & set to 9) */ + if((fid2 = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + 
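+ /*
+ * A minimal illustrative sketch (an aside, not part of the test flow below;
+ * it assumes fid2 is the SWMR-read file ID opened above with a fapl whose
+ * read attempts were set to 9): H5Freopen() returns a new ID for the
+ * already-open file, so a reopened ID is expected to report the read
+ * attempts in effect for that open file (the SWMR default), not the value
+ * carried by the fapl used for the later open:
+ *
+ *     hid_t    rid  = H5Freopen(fid2);
+ *     hid_t    rapl = H5Fget_access_plist(rid);
+ *     unsigned n    = 0;
+ *     H5Pget_metadata_read_attempts(rapl, &n);
+ *     => n == H5F_SWMR_METADATA_READ_ATTEMPTS, not 9
+ *     H5Pclose(rapl);
+ *     H5Fclose(rid);
+ */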
+ /* Re-open fid1 */ + if((fid = H5Freopen(fid1)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Re-open fid2 */ + if((fid = H5Freopen(fid2)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS, not 9 */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close all the files */ + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Set E: + * Tests on verifying read attempts when: + * --create with SWMR access + * --opening files with non-SWMR access + * --H5reopen the files + */ + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create a file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Open file again with non-SWMR access and default fapl */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the parameter fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the # of read attempts */ + if(H5Pset_metadata_read_attempts(fapl, 9) < 0) + FAIL_STACK_ERROR + + /* Open file again with non-SWMR access and fapl (non-default & set to 9) */ + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close fapl */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + /* Re-open fid1 */ + if((fid = H5Freopen(fid1)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Re-open fid2 */ + if((fid = H5Freopen(fid2)) < 0) + FAIL_STACK_ERROR + + /* Get file's fapl */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR + + /* Close the file's fapl */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Close the 
file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close all the files */
+ if(H5Fclose(fid1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid2) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(file_fapl);
+ H5Fclose(fid);
+ H5Fclose(fid1);
+ H5Fclose(fid2);
+ } H5E_END_TRY;
+
+ return -1;
+
+
+} /* test_metadata_read_attempts() */
+
+/*
+ * test_metadata_read_retry_info():
+ *
+ * Checks whether the public routine H5Fget_metadata_read_retry_info
+ * works as specified.
+ *
+ */
+static int
+test_metadata_read_retry_info(hid_t in_fapl)
+{
+ hid_t fapl, new_fapl; /* File access property list */
+ hid_t fid, fid1; /* File IDs */
+ H5F_retry_info_t info, info1; /* The collection of metadata retries */
+ H5F_t *f = NULL, *f1 = NULL; /* Internal file object pointers */
+ unsigned i, j, n; /* Local index variables */
+ hid_t did1, did2; /* Dataset IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t dims[2] = {6, 10}; /* Dataset dimensions */
+ char filename[NAME_BUF_SIZE]; /* File name */
+ int buf[6][10], chkbuf1[6][10], chkbuf2[6][10]; /* Buffers for data */
+ hsize_t max_dims_1un[2] = {500, H5S_UNLIMITED}; /* Dataset maximum dimensions: 1 unlimited dimension */
+ hsize_t max_dims_2un[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Dataset maximum dimensions: 2 unlimited dimensions */
+ hsize_t chunk_dims[2] = {2, 2}; /* Chunk dimensions */
+
+ /* Output message about test being performed */
+ TESTING("H5Fget_metadata_read_retry_info()");
+
+ /* Get a copy of the parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a file without SWMR access */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset with 1 unlimited dimension: extensible array indexing will be used */
+ if((sid = H5Screate_simple(2, dims, max_dims_1un)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR
+ if((did1 = H5Dcreate2(fid, "DSET_1UNLIM", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset with 2 unlimited dimensions: v2 B-tree indexing will be used */
+ if((sid = H5Screate_simple(2, dims, max_dims_2un)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR
+ if((did2 = H5Dcreate2(fid, "DSET_2UNLIM", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Initialize buffer data */
+ for(i = n = 0; i < 6; i++)
+ for(j = 0; j < 10; j++)
+ buf[i][j] = (int)n++;
+
+ /* Write to the 2 datasets */
+ if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Closing */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 1: tests on nbins
+ */
+ /*
+ * Open a file without SWMR access, default # of read attempts--
+
* info.nbins should be 0 + * info.retries should all be NULL + */ + /* Open the file without SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did1 = H5Dopen2(fid, "DSET_1UNLIM", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf1) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did2 = H5Dopen2(fid, "DSET_2UNLIM", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf2) < 0) + FAIL_STACK_ERROR + + /* Retrieve retries information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 0 */ + if(info.nbins != 0) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + + /* + * Open a file with SWMR access, default # of read attempts-- + * info.nbins should be 2 + * info.retries should all be NULL + */ + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retries information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 2 */ + if(info.nbins != 2) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Open a file with SWMR access, # of read_attempts is 10: + * info.nbins should be 1 + * info.retries should all be NULL + */ + if((new_fapl = H5Pcopy(fapl)) < 0) + FAIL_STACK_ERROR + + if(H5Pset_metadata_read_attempts(new_fapl, 10) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retry information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 1 */ + if(info.nbins != 1) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Open a file with SWMR access, # of read attempts is 101: + * info.nbins should be 3 + * info.retries should all be NULL + */ + if((new_fapl = H5Pcopy(fapl)) < 0) + FAIL_STACK_ERROR + if(H5Pset_metadata_read_attempts(new_fapl, 101) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retry information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 3 */ + if(info.nbins != 3) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Open a file with SWMR access, # of read_attempts is 10000: + * info.nbins should be 4 + * info.retries should all be NULL + */ + if((new_fapl = H5Pcopy(fapl)) < 0) + FAIL_STACK_ERROR + + if(H5Pset_metadata_read_attempts(new_fapl, 10000) < 0) 
+ FAIL_STACK_ERROR + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retry information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 4 */ + if(info.nbins != 4) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Open a file with SWMR access, # of read_attempts is 1: + * info.nbins should be 0 + * info.retries should all be NULL + */ + if((new_fapl = H5Pcopy(fapl)) < 0) + FAIL_STACK_ERROR + + if(H5Pset_metadata_read_attempts(new_fapl, 1) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retry information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 0 */ + if(info.nbins != 0) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Closing */ + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + + /* + * Case 2: tests on retries info + */ + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did1 = H5Dopen2(fid, "DSET_1UNLIM", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Read data from the dataset */ + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf1) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did2 = H5Dopen2(fid, "DSET_2UNLIM", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Read data from the dataset */ + if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf2) < 0) + FAIL_STACK_ERROR + + /* Retrieve retry information */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 2 */ + if(info.nbins != 2) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + TEST_ERROR + + /* Get a pointer to the internal file object */ + if((f = (H5F_t *)H5I_object(fid)) == NULL) + FAIL_STACK_ERROR + + /* + * Increment 1st set of retries for metadata items: + * a) v2 B-tree leaf node--retries[4][1] + * b) Extensive array data block--retries[15][1] + * c) File's superblock--retries[20][0] + */ + + /* v2 B-tree leaf node: log retry 99 for 500 times */ + for(i = 0; i < 500; i++) { + if(H5F_track_metadata_read_retries(f, H5AC_BT2_LEAF_ID, 99) < 0) + FAIL_STACK_ERROR + } + + /* Extensive array data block: log retry 10 for 1000 times */ + for(i = 0; i < 1000; i++) + if(H5F_track_metadata_read_retries(f, H5AC_EARRAY_DBLOCK_ID, 10) < 0) + FAIL_STACK_ERROR + + /* File's superblock: log retry 1 for 1 time */ + if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0) + FAIL_STACK_ERROR + + /* Retrieve the collection of metadata read retries */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Verify retries for v2 B-tree leaf node */ + if(info.retries[4][0] != 0) + TEST_ERROR + if(info.retries[4][1] != 500) + TEST_ERROR + + /* Verify retries for extensive array data block */ + if(info.retries[15][0] != 0) + TEST_ERROR + if(info.retries[15][1] != 
1000) + TEST_ERROR + + /* Verify retries for file's superblock */ + if(info.retries[20][0] != 1) + TEST_ERROR + if(info.retries[20][1] != 0) + TEST_ERROR + + /* Free memory for info.retries */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) { + if(info.retries[i] != NULL) + H5free_memory(info.retries[i]); + } + + /* + * Increment 2nd set of retries for metadata items: + * a) Object header--retries[0][0] + * b) Extensive array datablock--retries[15][0] + * c) Fixed array header--retries[17][1] + * d) File's superblock--retries[20][0] + */ + + /* Object header: log retry 5 for 5 times */ + for(i = 0; i < 5; i++) { + if(H5F_track_metadata_read_retries(f, H5AC_OHDR_ID, 5) < 0) + TEST_ERROR + } + + /* Extensive array data block: log retry 4 for 1 time */ + if(H5F_track_metadata_read_retries(f, H5AC_EARRAY_DBLOCK_ID, 4) < 0) + TEST_ERROR + + /* Fixed array header : log retry 50 for 10000 times */ + for(i = 0; i < 10000; i++) { + if(H5F_track_metadata_read_retries(f, H5AC_FARRAY_HDR_ID, 50) < 0) + TEST_ERROR + } + + /* File's superblock: log retry 1 for 1 more time */ + if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0) + FAIL_STACK_ERROR + + /* Retrieve the collection of metadata read retries */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* + * Verify info has both previous + current retries information: + */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) { + switch(i) { + case 0: /* Object header */ + if(info.retries[i][0] != 5) + TEST_ERROR + if(info.retries[i][1] != 0) + TEST_ERROR + break; + + case 4: /* v2 B-tree leaf node */ + if(info.retries[i][0] != 0) + TEST_ERROR + if(info.retries[i][1] != 500) + TEST_ERROR + break; + + case 15: /* Extensive array data block */ + if(info.retries[i][0] != 1) + TEST_ERROR + if(info.retries[i][1] != 1000) + TEST_ERROR + break; + + case 17: /* Fixed array header */ + if(info.retries[i][0] != 0) + TEST_ERROR + if(info.retries[i][1] != 10000) + TEST_ERROR + break; + + case 20: /* File's superblock */ + if(info.retries[i][0] != 2) + TEST_ERROR + if(info.retries[i][1] != 0) + TEST_ERROR + break; + + default: + if(info.retries[i] != NULL) + TEST_ERROR + break; + } + } + + /* Free memory for info.retries */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + H5free_memory(info.retries[i]); + + /* Closing */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the file access property list */ + if((new_fapl = H5Pcopy(fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the number of metadata read attempts to 101 */ + if(H5Pset_metadata_read_attempts(new_fapl, 101) < 0) + FAIL_STACK_ERROR + + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0) + FAIL_STACK_ERROR + + /* Get a pointer to the internal file object */ + if((f = (H5F_t *)H5I_object(fid)) == NULL) + FAIL_STACK_ERROR + + /* File's superblock: log retry 1 for 1 time */ + if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0) + FAIL_STACK_ERROR + + /* Retrieve the collection of metadata read retries */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Should be 3 */ + if(info.nbins != 3) + TEST_ERROR + + /* Verify retries info */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) { + switch(i) { + case 20: /* File's superblock */ + if(info.retries[i][0] != 1) + TEST_ERROR + if(info.retries[i][1] != 0) + 
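/*
 * Illustrative sketch (not part of this patch): the caller-side pattern the
 * checks above rely on -- H5Fget_metadata_read_retry_info() fills in nbins and
 * one counter array per metadata type (NULL when nothing was retried, otherwise
 * nbins counters), and the caller releases every non-NULL array with
 * H5free_memory(), as this test does.  Names are hypothetical; only calls
 * already used in this test are assumed.
 *
 *    H5F_retry_info_t stats;
 *    unsigned t, b;
 *    if(H5Fget_metadata_read_retry_info(file_id, &stats) < 0)
 *        return -1;
 *    for(t = 0; t < H5F_NUM_METADATA_READ_RETRY_TYPES; t++) {
 *        if(stats.retries[t] == NULL)
 *            continue;                                  // no retries logged for this type
 *        for(b = 0; b < stats.nbins; b++)
 *            printf("type %u, bin %u: %u retries\n", t, b, (unsigned)stats.retries[t][b]);
 *        H5free_memory(stats.retries[t]);               // caller frees each non-NULL array
 *    }
 */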
TEST_ERROR + if(info.retries[i][2] != 0) + TEST_ERROR + break; + + default: + if(info.retries[i] != NULL) + TEST_ERROR + break; + } + } + + /* Free memory */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) + if(info.retries[i] != NULL) + H5free_memory(info.retries[i]); + + /* Closing */ + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 3: Tests on retrieving the collection of retries + * when H5Fopen and H5Freopen the same file. + */ + + /* + * Open a file without SWMR access, default # of read attempts-- + * H5Freopen the same file-- + * Both files should: + * nbins should be 0 + * retries should all be NULL + */ + /* Open the file without SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Re-open fid */ + if((fid1 = H5Freopen(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve retries information for fid */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Retrieve retries information for fid1*/ + if(H5Fget_metadata_read_retry_info(fid1, &info1)< 0) + FAIL_STACK_ERROR + + /* Should be 0 */ + if(info.nbins != 0) + TEST_ERROR + if(info1.nbins != 0) + TEST_ERROR + + /* Should be all NULL */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) { + if(info.retries[i] != NULL) + TEST_ERROR + if(info1.retries[i] != NULL) + TEST_ERROR + } + + /* Closing */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + + /* + * Open a file with SWMR access, default # of read attempts: + * --increment retries for metadata item: fixed array data block page (retries[19][0]) + * H5Freopen the same file: + * --increment retries for metadata item: free-space sections (retries[9][1])-- + */ + /* Open the file with SWMR access */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get a pointer to the internal file object for fid */ + if((f = (H5F_t *)H5I_object(fid)) == NULL) + FAIL_STACK_ERROR + + /* Re-open fid */ + if((fid1 = H5Freopen(fid)) < 0) + FAIL_STACK_ERROR + + /* Get a pointer to the internal file object for fid1 */ + if((f1 = (H5F_t *)H5I_object(fid1)) == NULL) + FAIL_STACK_ERROR + + /* For fid: fixed array data block page--log retry 9 for 500 times */ + for(i = 0; i < 500; i++) { + if(H5F_track_metadata_read_retries(f, H5AC_FARRAY_DBLK_PAGE_ID, 9) < 0) + FAIL_STACK_ERROR + } + + /* For fid1: free-space sections--log retry 99 for 1000 times */ + for(i = 0; i < 1000; i++) { + if(H5F_track_metadata_read_retries(f1, H5AC_FSPACE_SINFO_ID, 99) < 0) + FAIL_STACK_ERROR + } + + /* Retrieve the collection of metadata read retries for fid */ + if(H5Fget_metadata_read_retry_info(fid, &info) < 0) + FAIL_STACK_ERROR + + /* Retrieve the collection of metadata read retries for fid1 */ + if(H5Fget_metadata_read_retry_info(fid1, &info1) < 0) + FAIL_STACK_ERROR + + /* Verify nbins for fid & fid1: should be 2 */ + if(info.nbins != 2) + TEST_ERROR + if(info1.nbins != 2) + TEST_ERROR + + /* Verify retries for fid: fixed array data block page */ + if(info.retries[19][0] != 500) + TEST_ERROR + if(info.retries[19][1] != 0) + TEST_ERROR + + /* Verify retries for fid: free-space sections */ + /* (Since file was re-opened) */ + if(info.retries[9][0] != 0) + TEST_ERROR + if(info.retries[9][1] != 1000) + TEST_ERROR + + /* Verify retries for fid1: free-space sections */ + if(info1.retries[9][0] != 0) + TEST_ERROR + if(info1.retries[9][1] != 1000) + TEST_ERROR + + /* Verify retries for fid1: fixed array 
data block page */ + /* (Since file was re-opened) */ + if(info1.retries[19][0] != 500) + TEST_ERROR + if(info1.retries[19][1] != 0) + TEST_ERROR + + /* Free memory for info.retries and info1.retries */ + for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) { + if(info.retries[i] != NULL) + H5free_memory(info.retries[i]); + if(info1.retries[i] != NULL) + H5free_memory(info1.retries[i]); + } /* end for */ + + /* Closing */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Pclose(new_fapl); + H5Dclose(did1); + H5Dclose(did2); + H5Sclose(sid); + H5Pclose(dcpl); + H5Fclose(fid); + H5Fclose(fid1); + } H5E_END_TRY; + + return -1; + +} /* test_metadata_read_retry_info() */ + + + +/* + * Tests for H5Fstart_swmr_write() + */ + +/* + * test_start_swmr_write(): + * + * Verify SWMR writing is enabled via H5Fstart_swmr_write(): + * Mainly test for file created with SWMR_WRITE + with/without latest format: + * --file will have v3 superblock and all latest version support enabled + * + * (A) Creating a file + * Create a file with SWMR_WRITE + non-latest-format + * Create a chunked dataset "dataset1" in the file -- should be using latest chunk indexing + * Should fail to enable SWMR as the file is already in SWMR writing mode + * Close the file + * + * (B) Opening a file + * Open the file with write + non-latest-format + * --file has v3 superblock and all latest version support enabled + * Open dataset "dataset1" 3 times--keep it open + * Write to "dataset1" + * Create a group in the file + * Create a chunked dataset "dataset2" in the group--should be using latest chunk indexing--keep it open + * Should succeed in enabling SWMR + * Should succeed in reading from multiple opens of "dataset1" + * Close multiple opens of "dataset1" + * Close "dataset2" + * Create "dataset3"--should be using latest chunk indexing + * Close "dataset3" + * Close the group and file + */ +static int +test_start_swmr_write(hid_t in_fapl, hbool_t new_format) +{ + hid_t fid = -1; /* File ID */ + hid_t fapl = -1; /* File access property */ + hid_t gid = -1; /* Group ID */ + hid_t dcpl = -1; /* Dataset creation property */ + hid_t file_fapl = -1; /* File access property for the file */ + hid_t did1 = -1, did2 = -1, did3 = -1; /* Dataset IDs */ + hid_t did1_a = -1, did1_b = -1; + hid_t sid1 = -1, sid2 = -1, sid3 = -1; /* Dataspace IDs */ + hsize_t dim[1] = {1}; /* Dimension sizes */ + hsize_t max_dim[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */ + hsize_t chunk_dim[1] = {2}; /* Chunk dimension sizes */ + hsize_t dim2[2] = {5, 10}; /* Dimension sizes */ + hsize_t max_dim2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */ + hsize_t chunk_dim2[2] = {2, 7}; /* Chunk dimension sizes */ + H5D_chunk_index_t idx_type; /* Dataset chunk index type */ + int wdata = 99; /* Data to write */ + int rdata; /* Data read */ + unsigned attempts; /* The retrieved # of read attempts */ + char filename[NAME_BUF_SIZE]; /* File name */ + herr_t ret; /* Return value */ + + + /* Get a copy of the parameter fapl (non-latest-format) */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + if(new_format) { + TESTING("H5Fstart_swmr_write() when creating/opening a file with latest format"); + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + } else { + TESTING("H5Fstart_swmr_write() when 
creating/opening a file without latest format"); + } /* end if */ + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* + * Case A: when creating a file + */ + + /* Create the file with SWMR write + non-latest-format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get the file's access_property list */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts from the file's fapl */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + + /* Should be 100 */ + if(attempts != (new_format ? H5F_METADATA_READ_ATTEMPTS : H5F_SWMR_METADATA_READ_ATTEMPTS)) + TEST_ERROR; + + /* Close the property list */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR + + /* Create "dataset1" */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 1, chunk_dim) < 0) + FAIL_STACK_ERROR + if((sid1 = H5Screate_simple(1, dim, max_dim)) < 0) + FAIL_STACK_ERROR; + if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Get the chunk index type */ + if(H5D__layout_idx_type_test(did1, &idx_type) < 0) + FAIL_STACK_ERROR; + if(idx_type != H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("should be using extensible array as index"); + + /* Write to the dataset */ + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR for non-latest-format */ + /* Should succeed in enabling SWMR for latest format */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(new_format) { + if(ret < 0) TEST_ERROR + } else if(ret >= 0) + TEST_ERROR + + /* Read from the dataset */ + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + FAIL_STACK_ERROR; + + /* Verify the data is correct */ + if(wdata != rdata) + TEST_ERROR + + /* Close "dataset1", dataspace, dataset creation property list */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid1) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + + /* Get the file's access_property list */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + + /* Should be 100 */ + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR; + + /* Close the file access property list */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR; + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* + * Case B: when opening a file + */ + + /* Open the file again with write + non-latest-format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Get the file's access_property list */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + + /* Should be 1 */ + if(attempts != H5F_METADATA_READ_ATTEMPTS) + TEST_ERROR; + + /* Close the property list */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR; + + /* open "dataset1", keep it open */ + if((did1 = H5Dopen2(fid, "dataset1", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* open "dataset1" second time */ + if((did1_a = H5Dopen2(fid, "dataset1", 
H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* open "dataset1" third time */ + if((did1_b = H5Dopen2(fid, "dataset1", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Write to "dataset1" */ + wdata = 88; + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) + FAIL_STACK_ERROR; + + /* Create a group */ + if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Create "dataset2" in the group, keep it open */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 2, chunk_dim2) < 0) + FAIL_STACK_ERROR + if((sid2 = H5Screate_simple(2, dim2, max_dim2)) < 0) + FAIL_STACK_ERROR; + if((did2 = H5Dcreate2(gid, "dataset2", H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Get the chunk index type for "dataset2" */ + if(H5D__layout_idx_type_test(did2, &idx_type) < 0) + FAIL_STACK_ERROR; + if(idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("should be using v2 B-tree chunk indexing"); + + /* Should succeed in enabling SWMR writing */ + if(H5Fstart_swmr_write(fid) < 0) + FAIL_STACK_ERROR; + + /* Get the file's access_property list */ + if((file_fapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the # of read attempts */ + if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0) + FAIL_STACK_ERROR + + /* Should be 100 */ + if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS) + TEST_ERROR; + + /* Close the property list */ + if(H5Pclose(file_fapl) < 0) + FAIL_STACK_ERROR; + + rdata = 0; + /* Read from "dataset1" via did1_b (multiple opens) */ + if(H5Dread(did1_b, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + FAIL_STACK_ERROR; + if(wdata != rdata) + FAIL_STACK_ERROR; + if(H5Dclose(did1_b) < 0) + FAIL_STACK_ERROR; + + /* Read from "dataset1" */ + rdata = 0; + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + FAIL_STACK_ERROR; + if(wdata != rdata) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + + rdata = 0; + /* Read from "dataset1" via did1_a (multiple opens) */ + if(H5Dread(did1_a, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + FAIL_STACK_ERROR; + if(wdata != rdata) + FAIL_STACK_ERROR; + if(H5Dclose(did1_a) < 0) + FAIL_STACK_ERROR; + + /* Close "dataset2", dataspace, dataset creation property list */ + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid2) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + + /* Create "dataset3" */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 2, chunk_dim2) < 0) + FAIL_STACK_ERROR + if((sid3 = H5Screate_simple(2, dim2, max_dim2)) < 0) + FAIL_STACK_ERROR; + if((did3 = H5Dcreate2(fid, "dataset3", H5T_NATIVE_INT, sid3, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Get the chunk index type for "dataset3" */ + if(H5D__layout_idx_type_test(did3, &idx_type) < 0) + FAIL_STACK_ERROR; + if(idx_type != H5D_CHUNK_IDX_BT2) + FAIL_PUTS_ERROR("should be using v2 B-tree as index"); + + /* Close "dataset3", dataspace, dataset creation property list */ + if(H5Dclose(did3) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid3) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + + /* Close the group */ + if(H5Gclose(gid) < 0) + FAIL_STACK_ERROR; + + /* Close the file access property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + 
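/*
 * Illustrative sketch (not part of this patch): the minimal writer-side sequence
 * that cases (A) and (B) above exercise.  A file created with the latest-format
 * bounds (or with H5F_ACC_SWMR_WRITE) has a version-3 superblock, so SWMR
 * writing can be switched on later with H5Fstart_swmr_write(); afterwards the
 * file's fapl reports H5F_SWMR_METADATA_READ_ATTEMPTS (100) read attempts.
 * The file name is hypothetical; only calls appearing in this test are assumed.
 *
 *    hid_t wfapl = H5Pcreate(H5P_FILE_ACCESS);
 *    H5Pset_libver_bounds(wfapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
 *    hid_t wfid = H5Fcreate("swmr_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, wfapl);
 *    // ... create groups/datasets while still in regular writing mode ...
 *    if(H5Fstart_swmr_write(wfid) < 0)                  // switch to SWMR writing mode
 *        goto error;
 *    // ... write data, then H5Fflush() so concurrent SWMR readers can see it ...
 *    H5Fclose(wfid);
 *    H5Pclose(wfapl);
 */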
H5E_BEGIN_TRY { + H5Fclose(fid); + H5Pclose(fapl); + H5Pclose(file_fapl); + H5Gclose(gid); + H5Pclose(dcpl); + H5Dclose(did1); + H5Dclose(did1_a); + H5Dclose(did1_b); + H5Dclose(did2); + H5Dclose(did3); + H5Sclose(sid1); + H5Sclose(sid2); + H5Sclose(sid3); + } H5E_END_TRY; + + return -1; +} /* test_start_swmr_write() */ + +/* + * test_err_start_swmr_write(): + * + * Verify failure conditions in enabling SWMR writing mode via H5Fstart_swmr_write(): + * (A) When creating a file: + * (1) Create a file with SWMR write + with/without latest format + * --fail to enable SWMR because the file is already in SWMR writing mode + * If (latest-format): + * (2a) Create a file with write + latest format and with opened named datatype + * --fail to enable SWMR because there are opened datatype + * If !(latest-format): + * (2b) Create a file with write + non-latest-format + * --fail to enable SWMR because superblock version is not at least 3 + * + * (B) When opening a file which is created with write + latest format: + * (1) Open the file with SWMR write + with/without latest format + * --fail to enable SWMR because the file is already in SWMR writing mode + * (2) Open the file with read only + with/without latest format + * --fail to enable SWMR because the file is not opened with write + * (3) Open the file with SWMR read only + with/without latest format + * --fail to enable SWMR because the file is not opened with write + * (4) Open the file with write + with/without latest format and with opened named datatype/attribute + * --fail to enable SWMR because there are opened datatype/attribute + * + * (C) When doing multiple opens for a file: + * Create a file with (a) write + latest format or (b) SMWR write + non-latest-format + * Close the file + * (1) --Open the file with write + with/without latest format + * --Enable SWMR writing mode twice + * --First time succeed, second time fail + * --Close the file + (2) --Open the file with write + with/without latest format + * --Succeed to enable SWMR writing mode + * --Reopen the same file + * --fail to enable SWMR writing mode for the reopened file + * --Close the file + (3) --Open the file with write + with/without latest format + * --Open the same file again + * --succeed to enable SWMR for the first opened file + * --fail to enable SWMR for the second opened file + * --Close the file + * + * (D) (!new_format): When opening a file which is created with write + non-latest-format: + * (1) Open the file with SWMR write+latest format + * --fail to open due to superblock version not 3 + * (2) Open the file with SWMR write+non-latest-format + * --fail to open due to superblock version not 3 + + * (3) Open the file with write+latest format + * --fail to enable SWMR due to superblock version not 3 + * (4) Open the file with write+non-latest-format + * --fail to enable SWMR due to superblock version not 3 + */ +static int +test_err_start_swmr_write(hid_t in_fapl, hbool_t new_format) +{ + hid_t fid = -1; /* File ID */ + hid_t fid2 = -1; /* File ID */ + hid_t fapl = -1; /* A copy of file access property */ + hid_t new_fapl = -1; /* A copy of file access property */ + hid_t gid = -1; /* Group ID */ + hid_t did = -1; /* Dataset ID */ + hid_t sid = -1; /* Dataspace ID */ + hid_t aid = -1; /* Attribute ID */ + hid_t tid = -1; /* Datatype ID */ + hid_t bad_fid = -1; /* Test fid (should never represent a real ID) */ + herr_t ret; /* Return value */ + char filename[NAME_BUF_SIZE]; /* File name */ + + /* Create a copy of the input parameter in_fapl */ + if((fapl = 
H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + if((new_fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + if(new_format) { + TESTING("H5Fstart_swmr_write() on failure conditions for latest format"); + + if((fapl = H5Pcopy(new_fapl)) < 0) + FAIL_STACK_ERROR + } else { + TESTING("H5Fstart_swmr_write() on failure conditions for without latest format"); + } + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + + /* + * (A) When creating a file: + */ + + /* Case 1 */ + + /* Create the file with SWMR_WRITE + with/without latest format */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl); + + /* Should fail to enable SWMR writing when the file is already in SWMR writing mode */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 2 */ + + if(new_format) { + + /* Create the file with write + latest format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Create and commit a named datatype */ + if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0) + FAIL_STACK_ERROR; + if(H5Tcommit2(fid, "TID", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing when there is an opened named datatype */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the datatype */ + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in enabling SWMR writing */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR; + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + } else { + + /* Create a file with write + non-latest-format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR + + /* Should fail to enable SWMR writing because the file's superblock version is not at least 3 */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + } /* end if */ + + /* + * (B) When opening a file which is created with the latest format + */ + + /* Create a file with write + latest format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl)) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 1 */ + + /* Open the file with SWMR write + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing when the file is already in SWMR writing mode */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 2 */ + + /* Open the file with read only access + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing when the file is opened with read only access */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 3 */ + + /* Open the file file with SWMR read access + with/without latest format */ + if((fid = 
H5Fopen(filename, H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing when the file is opened with SWMR read access only */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 4 */ + + /* Open the file with write + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Create and commit a named datatype */ + if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0) + FAIL_STACK_ERROR; + if(H5Tcommit2(fid, "TID", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) + FAIL_STACK_ERROR; + + /* Create dataspace */ + if((sid = H5Screate(H5S_SCALAR)) < 0) + FAIL_STACK_ERROR; + + /* Attach an attribute to the named datatype */ + if((aid = H5Acreate2(tid, "attr", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing when there are opened named datatype and attribute */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the datatype */ + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR; + + /* Still fail to enable SWMR writing when the attribute is still opened */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the attribute */ + if(H5Aclose(aid) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in enabling SWMR writing */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR; + + /* Close the dataspace */ + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + + /* + * (C) Failure cases for multiple opens + */ + + /* Case 1 */ + + /* Create a file with write + with/without latest format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (new_format ? 
0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Open the file with write + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in enabling SWMR writing mode */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR + + /* Should fail for a second call to enable SWMR writing mode */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + + /* Case 2 */ + + /* Open the file with write + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in enabling SWMR writing mode */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR + + /* Re-open the same file */ + if((fid2 = H5Freopen(fid)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing mode for fid2 */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid2); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the files */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR; + + /* Case 3 */ + + /* Open the file with write + with/without latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Open the same file */ + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in enabling SWMR writing for fid */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR + + /* Should fail to enable SWMR writing for fid2 */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid2); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Close the files */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR; + + if(!new_format) { + + /* + * (D) When opening a file which is created without the latest format: + */ + + /* Create a file with write + without latest format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Case 1 */ + + /* Should fail to open the file with SWMR write + latest format due to superblock version not at least 3 */ + H5E_BEGIN_TRY { + bad_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, new_fapl); + } H5E_END_TRY; + if(bad_fid >= 0) + TEST_ERROR + + /* Case 2 */ + + /* Should fail to open the file with SWMR write + non-latest-format due to superblock version not at least 3 */ + H5E_BEGIN_TRY { + bad_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(bad_fid >= 0) + TEST_ERROR + + /* Case 3 */ + + /* Open the file with write + latest format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, new_fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing due to superblock version not at least 3 */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + + /* Case 4 */ + + /* Open the file with write + non-latest-format */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Should fail to enable SWMR writing because the file's superblock version is not at least 3 */ + H5E_BEGIN_TRY { + ret = H5Fstart_swmr_write(fid); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + } /* not new */ + + /* Close the file access property list */ + 
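/*
 * Illustrative sketch (not part of this patch): the two creation recipes the
 * failure cases above contrast.  Either route yields the version-3 superblock
 * that H5Fstart_swmr_write() requires; a plain create without either option
 * (case D) does not, and enabling SWMR on such a file is expected to fail.
 * File names are hypothetical.
 *
 *    // Route (a): write access + latest-format bounds on the fapl
 *    hid_t lf_fapl = H5Pcopy(in_fapl);
 *    H5Pset_libver_bounds(lf_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
 *    hid_t fid_a = H5Fcreate("swmr_a.h5", H5F_ACC_TRUNC, H5P_DEFAULT, lf_fapl);
 *
 *    // Route (b): request SWMR writing at creation time, non-latest fapl
 *    hid_t fid_b = H5Fcreate("swmr_b.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
 *                            H5P_DEFAULT, in_fapl);
 */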
if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(new_fapl) < 0) + FAIL_STACK_ERROR; + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Sclose(sid); + H5Gclose(gid); + H5Dclose(did); + H5Fclose(fid); + H5Fclose(fid2); + H5Pclose(fapl); + H5Pclose(new_fapl); + /* bad_fid should only represent a read ID in the error case. + * It never needs to be closed in the normal case. + */ + H5Fclose(bad_fid); + } H5E_END_TRY; + + return -1; +} /* test_err_start_swmr_write() */ + +/* + * test_start_swmr_write_concur(): + * + * The "new_format" parameter indicates whether to create the file with latest format or not. + * To have SWMR support, can use either one of the following in creating a file: + * (a) Create the file with write + latest format: + * --result in v3 superblock with latest chunk indexing types + * (b) Create the file with SWMR write + non-latest-format: + * --result in v3 superblock with latest chunk indexing types + * Create a chunked dataset with 1 extendible dimension in the file + * + * Verify concurrent access for H5Fstart_swmr_write()-- + * (1) Parent: open a file with write access + * Child: concurrent open of the file with read & SWMR read (fail) + * (2) Parent: open a file with write access; enable SWMR writing mode + * Child: open the file 2 times with read & SWMR read (succeed) + * open the dataset 2 times with the 2 file opens + * verify data read from multiple opens of the dataset is correct + * (3) Parent: open a file with write access; enable SWMR writing mode + * Child: Concurrent open of the file with read only (fail) + * (4) Parent: open a file with write access; enable SWMR writing mode + * Child: concurrent open of the file with write access (fail) + * (5) Parent: open a file with write access; enable SWMR writing mode + * Child: concurrent open of the file with write and SWMR write access (fail) + */ +#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) + +static int +test_start_swmr_write_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t H5_ATTR_UNUSED new_format) +{ + SKIPPED(); + HDputs(" Test skipped due to fork or waitpid not defined."); + return 0; +} /* test_start_swmr_write_concur() */ + +#else /* defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID) */ + +static int +test_start_swmr_write_concur(hid_t in_fapl, hbool_t new_format) +{ + hid_t fid = -1, fid1 = -1, fid2 = -1; /* File IDs */ + hid_t fapl; /* File access property list */ + pid_t childpid=0; /* Child process ID */ + pid_t tmppid; /* Child process ID returned by waitpid */ + int child_status; /* Status passed to waitpid */ + int child_wait_option=0; /* Options passed to waitpid */ + int child_exit_val; /* Exit status of the child */ + char filename[NAME_BUF_SIZE]; /* File name */ + + hid_t did = -1, did1 = -1, did2 = -1, did3 = -1; + hid_t sid = -1; + hid_t dcpl = -1; + hsize_t chunk_dims[1] = {1}; + hsize_t maxdims[1] = {H5S_UNLIMITED}; + hsize_t dims[1] = {1}; + int wdata = 0; + + int out_pdf[2]; + int in_pdf[2]; + int notify = 0; + + /* Output message about test being performed */ + if(new_format) { + TESTING("H5Fstart_swmr_write()--concurrent access for latest format"); + } else { + TESTING("H5Fstart_swmr_write()--concurrent access for non-latest-format"); + } /* end if */ + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if(new_format) { + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, 
H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } else { + /* Create the test file without latest format but with SWMR write */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } /* end if */ + + /* Create a chunked dataset with 1 extendible dimension */ + if((sid = H5Screate_simple(1, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Close the dataset */ + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* + * Case (1): + * Verify concurrent file open with H5F_ACC_RDONLY|H5F_ACC_SWMR_READ + * will fail without H5Fstart_swmr_write() + */ + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Should fail */ + H5E_BEGIN_TRY { + /* Open the test file */ + fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + if(fid >= 0) + HDexit(EXIT_FAILURE); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* child process terminated abnormally */ + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case (2): + * Verify concurrent file open with H5F_ACC_RDONLY|H5F_ACC_SWMR_READ + * will succeed with H5Fstart_swmr_write() + */ + + /* Create 2 pipes */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + if(HDpipe(in_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid1 = -1, child_fid2; /* File IDs */ + hid_t child_did1 = -1, child_did2 = -1; /* Dataset IDs */ + int child_notify = 0; + int rdata = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + /* close unused read end for in_pdf */ + if(HDclose(in_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + 
} + + /* Should succeed in opening the test file 2 times */ + if((child_fid1 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + HDexit(EXIT_FAILURE); + if((child_fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + HDexit(EXIT_FAILURE); + + /* open "dataset" 2 times */ + if((child_did1 = H5Dopen2(child_fid1, "dataset", H5P_DEFAULT)) < 0) + HDexit(EXIT_FAILURE); + if((child_did2 = H5Dopen2(child_fid2, "dataset", H5P_DEFAULT)) < 0) + HDexit(EXIT_FAILURE); + + /* Read from "dataset" via child_did1 */ + rdata = 0; + if(H5Dread(child_did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + HDexit(EXIT_FAILURE); + if(rdata != 88) + HDexit(EXIT_FAILURE); + + /* Notify parent process */ + child_notify = 2; + if(HDwrite(in_pdf[1], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 3) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Refresh "dataset" via child_did2 */ + if(H5Drefresh(child_did2) < 0) + HDexit(EXIT_FAILURE); + + /* Read from "dataset" child_did2 */ + rdata = 0; + if(H5Dread(child_did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + HDexit(EXIT_FAILURE); + if(rdata != 99) + HDexit(EXIT_FAILURE); + + /* Read from "dataset" child_did1 */ + rdata = 0; + if(H5Dread(child_did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0) + HDexit(EXIT_FAILURE); + if(rdata != 99) + HDexit(EXIT_FAILURE); + + /* Close the dataset */ + if(H5Dclose(child_did1)) + HDexit(EXIT_FAILURE); + if(H5Dclose(child_did2)) + HDexit(EXIT_FAILURE); + + /* Close the file */ + if(H5Fclose(child_fid1) < 0) + HDexit(EXIT_FAILURE); + if(H5Fclose(child_fid2) < 0) + HDexit(EXIT_FAILURE); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + if(HDclose(in_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + /* Close unused write end for in_pdf */ + if(HDclose(in_pdf[1]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* open "dataset", keep it open */ + if((did1 = H5Dopen2(fid1, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((did2 = H5Dopen2(fid2, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((did3 = H5Dopen2(fid1, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Write to "dataset" */ + wdata = 88; + if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) + FAIL_STACK_ERROR; + + /* Flush to disk */ + if(H5Fflush(fid1, H5F_SCOPE_LOCAL) < 0) + FAIL_STACK_ERROR; + + /* Enable SWMR writing mode */ + if(H5Fstart_swmr_write(fid1) < 0) + TEST_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Wait for notification from child process */ + while(notify != 2) { + if(HDread(in_pdf[0], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + } + + /* Write to "dataset" */ + wdata = 99; + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0) + FAIL_STACK_ERROR; + + /* Flush to disk */ + if(H5Fflush(fid1, H5F_SCOPE_LOCAL) < 0) + FAIL_STACK_ERROR; + + /* Notify child process */ + notify = 3; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the dataset */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR
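/*
 * Illustrative sketch (not part of this patch): the reader-side pattern the
 * child process above follows once the writer has called H5Fstart_swmr_write().
 * The reader opens the file with H5F_ACC_RDONLY | H5F_ACC_SWMR_READ and uses
 * H5Drefresh() to pick up data the writer has written and flushed since the
 * dataset was opened.  Names are hypothetical; only calls used by this test
 * are assumed.
 *
 *    hid_t rfid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl);
 *    hid_t rdid = H5Dopen2(rfid, "dataset", H5P_DEFAULT);
 *    int   val  = 0;
 *    H5Dread(rdid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);   // first snapshot
 *    // ... writer updates the dataset and flushes the file ...
 *    H5Drefresh(rdid);                                   // re-load the open dataset's metadata
 *    H5Dread(rdid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);   // sees the new value
 *    H5Dclose(rdid);
 *    H5Fclose(rfid);
 */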
+ if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR + if(H5Dclose(did3) < 0) + FAIL_STACK_ERROR + + /* Close the pipes */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + if(HDclose(in_pdf[0]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* Child process terminated abnormally */ + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + + /* + * Case (3): + * Verify concurrent file open with H5F_ACC_RDONLY + * will fail with H5Fstart_swmr_write() + */ + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Should fail in opening the test file */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + } H5E_END_TRY; + if(fid >= 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } /* end if */ + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Enable SWMR writing mode */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* Child process terminated abnormally */ + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case (4): + * Verify concurrent file open with H5F_ACC_RDWR + * will fail with H5Fstart_swmr_write() + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Should fail in opening the test file */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); + } H5E_END_TRY; + if(fid >= 0) + HDexit(EXIT_FAILURE); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } /* end if */ + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Enable SWMR writing mode */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR
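/*
 * Illustrative sketch (not part of this patch): what cases (3)-(5) of this test
 * check from the second process' point of view -- once a writer holds the file
 * open and has enabled SWMR writing, a concurrent open that lacks the matching
 * SWMR flags is expected to fail, so the attempt is wrapped in
 * H5E_BEGIN_TRY/H5E_END_TRY to suppress the expected error stack.
 *
 *    hid_t probe_fid = -1;
 *    H5E_BEGIN_TRY {
 *        probe_fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);   // no H5F_ACC_SWMR_READ
 *    } H5E_END_TRY;
 *    if(probe_fid >= 0)
 *        HDexit(EXIT_FAILURE);                           // open unexpectedly succeeded
 */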
+ + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* Child process terminated abnormally */ + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case (5): + * Verify concurrent file open with H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE + * will fail with H5Fstart_swmr_write() + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Should fail in opening the test file */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(fid >= 0) + HDexit(EXIT_FAILURE); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Enable SWMR writing mode */ + if(H5Fstart_swmr_write(fid) < 0) + TEST_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], &notify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* Child process terminated abnormally */ + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; + +} /* test_start_swmr_write_concur() */ +#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */ + +/* + * test_start_swmr_write_stress_ohdr(): + * + * Verify that H5Fstart_swmr_write() works correctly when the dataspace header + * message is not located in chunk #0 of the object header. 
+ * + */ +static int +test_start_swmr_write_stress_ohdr(hid_t in_fapl) +{ + hid_t fid = -1; /* File IDs */ + hid_t fapl; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* File name */ + hid_t did = -1, did2 = -1; /* Dataset IDs */ + hid_t sid = -1; /* Dataspace ID */ + hid_t tid = -1; /* Datatype ID */ + hid_t dcpl = -1; /* Dataset creation property list ID */ + hid_t aid = -1; /* Attribute ID */ + hsize_t chunk_dims[2] = {10, 10}; + hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; + char fill[256]; /* Fill value for dataset */ + char attr_data[32]; /* Data value for attribute */ + hsize_t dims[2] = {1,1}; + unsigned chunk_num; /* Object header chunk # for dataspace message */ + + /* Output message about test being performed */ + TESTING("H5Fstart_swmr_write()--stress object header messages"); + + /* Initialize buffers */ + HDmemset(fill, 0, sizeof(fill)); + HDmemset(attr_data, 0, sizeof(attr_data)); + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Create a chunked dataset with 2 extendible dimensions */ + if((sid = H5Screate_simple(1, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Tcopy(H5T_C_S1)) < 0) + FAIL_STACK_ERROR; + if(H5Tset_size(tid, 256) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) + FAIL_STACK_ERROR; + if(H5Pset_fill_value(dcpl, tid, &fill) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, "dataset", tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Retrieve the chunk # for the dataspace message */ + chunk_num = UINT_MAX; + if(H5O_msg_get_chunkno_test(did, H5O_SDSPACE_ID, &chunk_num) < 0) + FAIL_STACK_ERROR; + /* Should be in chunk #0 for now */ + if(0 != chunk_num) + TEST_ERROR; + + /* Create a second chunked dataset with 2 extendible dimensions */ + /* (So that the original dataset's object header can't be extended) */ + if((sid = H5Screate_simple(1, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Tcopy(H5T_C_S1)) < 0) + FAIL_STACK_ERROR; + if(H5Tset_size(tid, 256) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) + FAIL_STACK_ERROR; + if(H5Pset_fill_value(dcpl, tid, &fill) < 0) + FAIL_STACK_ERROR; + if((did2 = H5Dcreate2(fid, "dataset2", tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Close the second dataset */ + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR + + /* Close the objects for the dataset creation */ + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR + + /* Create attribute on original dataset, to push dataspace header message out of header chunk #0 */ + if((sid = H5Screate(H5S_SCALAR)) < 0) + FAIL_STACK_ERROR; + if((tid = H5Tcopy(H5T_C_S1)) < 0) + FAIL_STACK_ERROR; + if(H5Tset_size(tid, 32) < 0) + FAIL_STACK_ERROR; + if(H5Tset_strpad(tid, H5T_STR_NULLTERM) < 0) + FAIL_STACK_ERROR; + if((aid = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if(H5Awrite(aid, tid, attr_data) < 0) + 
FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR + if(H5Tclose(tid) < 0) + FAIL_STACK_ERROR + if(H5Aclose(aid) < 0) + FAIL_STACK_ERROR + + /* Retrieve the chunk # for the dataspace message */ + chunk_num = UINT_MAX; + if(H5O_msg_get_chunkno_test(did, H5O_SDSPACE_ID, &chunk_num) < 0) + FAIL_STACK_ERROR; + /* Should be in chunk #0 for now */ + if(1 != chunk_num) + TEST_ERROR; + + /* Enable SWMR write */ + if(H5Fstart_swmr_write(fid) < 0) + FAIL_STACK_ERROR; + + /* Close the dataset */ + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the FAPL */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Sclose(aid); + H5Sclose(tid); + H5Sclose(sid); + H5Sclose(did); + H5Sclose(did2); + H5Pclose(dcpl); + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_start_swmr_write_stress_ohdr() */ + +/* + * Tests for H5Pset/get_object_flush_cb() + */ + +/* The callback function for object flush property */ +static herr_t +flush_cb(hid_t H5_ATTR_UNUSED obj_id, void *_udata) +{ + unsigned *flush_ct = (unsigned*)_udata; + ++(*flush_ct); + return 0; +} + +/* + * test_object_flush_cb() + * + * Verify the public routines H5Pget/set_object_flush_cb() work as specified: + * 1) To verify the failure condition in setting object flush property + * 2) To verify the object flush property values retrieved from a default + * file access property list. + * 3) To verify the object flush property values retrieved from a non-default + * file access property list. + * 4) To verify the object flush property values retrieved from a default + * file access property list of a file + * 5) To verify the object flush property values retrieved from a non-default + * file access property list of a file + * To verify the object flush callback is invoked when doing H5Oflush(), + * H5Dflush(), H5Gflush() and H5Tflush(). + */ +static int +test_object_flush_cb(hid_t in_fapl) +{ + hid_t fapl = -1; /* A copy of file access property list */ + hid_t ffapl = -1; /* A file's file access property list */ + hid_t fid = -1; /* File ID */ + hid_t gid = -1; /* Group ID */ + hid_t did1 = -1, did2 = -1; /* Dataset IDs */ + hid_t sid = -1; /* Dataspace ID */ + hsize_t dims[2] = {5, 10}; /* Dataset dimension sizes */ + int buf[50]; /* Data buffer */ + H5F_flush_cb_t ret_cb; /* The callback function set in object flush property */ + void *ret_ct; /* The user data set in object flush property */ + unsigned flush_ct = 0; /* The user data for object flush property */ + char filename[NAME_BUF_SIZE]; /* File name */ + int i; /* Local index variable */ + herr_t ret; /* Generic return value */ + + TESTING("H5Pget/set_obj_flush_cb()"); + + /* + * Case (1) + * To verify the failure condition in setting object flush property + */ + /* Should fail if the callback function is not defined but user data is defined */ + H5E_BEGIN_TRY { + ret = H5Pset_object_flush_cb(fapl, NULL, &flush_ct); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* + * Case (2) + * To verify the object flush property values retrieved from a + * default file access property list. 
+ */ + + /* Create a copy of file access property list */ + if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + FAIL_STACK_ERROR + + /* Retrieve object flush property values for the default file access property list */ + if(H5Pget_object_flush_cb(fapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + /* Should be null */ + if(ret_cb != NULL || ret_ct != NULL) + TEST_ERROR + + /* + * Case (3) + * To verify the object flush property values retrieved from a + * non-default file access property list. + */ + /* Set the object flush property */ + if(H5Pset_object_flush_cb(fapl, flush_cb, &flush_ct) < 0) + TEST_ERROR + + /* Increment the counter */ + ++flush_ct; + + /* Retrieve object flush property values for the non-default file access property list */ + if(H5Pget_object_flush_cb(fapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != flush_cb || *(unsigned *)ret_ct != 1) + TEST_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + + /* + * Case (4) + * To verify the object flush property values retrieved from a + * default file access property list of a file + */ + + /* Reset values */ + flush_ct = 0; + ret_cb = NULL; + ret_ct = NULL; + + /* Make a copy of the input parameter in_fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* Create the test file: without setting object flush property in fapl */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get the file's file access property list */ + if((ffapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR; + + /* Retrieve the object flush property values */ + if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != NULL || ret_ct != NULL) + TEST_ERROR + + /* Closing */ + if(H5Pclose(ffapl) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* + * Cases (5) + * To verify the object flush property values retrieved from a non-default + * file access property list of a file. + * To verify the object flush callback is invoked when doing H5Oflush(), + * H5Dflush(), H5Gflush() and H5Tflush(). 
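+ *
+ * (Illustrative sketch only -- not part of the test flow below.  Assuming the
+ *  same flush_cb/flush_ct pair defined above, a typical application usage is:
+ *      H5Pset_object_flush_cb(fapl, flush_cb, &flush_ct);
+ *      fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ *      ...
+ *      H5Dflush(did);    -- flush_cb runs and increments flush_ct
+ *  i.e. the callback registered on the file access property list is invoked
+ *  each time an object in that file is flushed.)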
+ */ + /* Reset values */ + flush_ct = 0; + ret_cb = NULL; + ret_ct = NULL; + + /* Set the object flush property */ + if(H5Pset_object_flush_cb(fapl, flush_cb, &flush_ct) < 0) + FAIL_STACK_ERROR + + /* Open the test file: with object flush property setting in fapl */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR; + + /* Create a dataset */ + if((sid = H5Screate_simple(2, dims, dims)) < 0) + FAIL_STACK_ERROR; + + /* Create a dataset */ + if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Initialize data buffer */ + for(i = 0; i < 50; i++) + buf[i] = i + 1; + + /* Write to the dataset */ + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) + FAIL_STACK_ERROR; + + /* Flush the dataset object */ + if(H5Oflush(did1) < 0) + FAIL_STACK_ERROR; + + /* Get the file's file access property list */ + if((ffapl = H5Fget_access_plist(fid)) < 0) + FAIL_STACK_ERROR; + + /* Retrieve the object flush property values */ + if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != flush_cb || *(unsigned *)ret_ct != 1) + TEST_ERROR + + /* Create a group */ + if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Flush the group */ + if(H5Gflush(gid) < 0) + TEST_ERROR + + /* Retrieve the object flush property values */ + if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != flush_cb || *(unsigned *)ret_ct != 2) + TEST_ERROR + + /* Create a dataset */ + if((did2 = H5Dcreate2(gid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Flush the dataset */ + if(H5Dflush(did2) < 0) + FAIL_STACK_ERROR; + + /* Retrieve the object flush property values */ + if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != flush_cb || *(unsigned *)ret_ct != 3) + TEST_ERROR + + /* Closing */ + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Gclose(gid) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(ffapl) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Pclose(ffapl); + H5Sclose(sid); + H5Dclose(did1); + H5Dclose(did2); + H5Gclose(gid); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_object_flush_cb() */ + +/* + * Tests for H5Pset/get_append_flush() + */ + + +/* The callback function for append flush property */ +static herr_t +append_cb(hid_t H5_ATTR_UNUSED dset_id, hsize_t H5_ATTR_UNUSED *cur_dims, void *_udata) +{ + unsigned *count = (unsigned *)_udata; + ++(*count++); + return 0; +} /* append_cb() */ + + +/* The callback function for append flush property */ +static herr_t +append_cb2(hid_t H5_ATTR_UNUSED dset_id, hsize_t H5_ATTR_UNUSED *cur_dims, void *_udata) +{ + unsigned *count = (unsigned *)_udata; + ++(*count++); + return 0; +} /* append_cb2() */ + + + +/* + * test_append_flush_generic() + * + * Verify H5Pget/set_append_flush() work as specified for a generic dataset + * access property list: + * 1) To verify the append flush property values retrieved from a default + * access property list. 
+ * -- zero boundary, null callback function, null user data + * 2) To verify the failure conditions in setting append flush property: + * -- an invalid dataset rank: <= 0, > H5S_MAX_RANK + * -- undefined callback but defined user data + * -- no boundary specified + * -- invalid boundary size: H5S_UNLIMITED, negative value + * 3) To verify the append flush property values retrieved from a non-default + * access property list. + * -- the set callback function, the set user data + * -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set) + */ +static int +test_append_flush_generic(void) +{ + hid_t dapl = -1; /* A copy of dataset access property */ + hsize_t boundary[3]; /* The boundary for append flush property */ + unsigned count = 0; /* The user data for append flush property */ + hsize_t ret_boundary[3]; /* The boundary set in append flush property */ + H5D_append_cb_t ret_cb; /* The callback function set in append flush property */ + unsigned *ret_count; /* The user data set in append flush property */ + herr_t ret; /* The return value */ + + TESTING("H5Fget/set_append_flush() for a generic dataset access property list"); + + + /* + * Case (1) + * To verify the retrieved append flush property values: + * -- zero boundary, null callback function, null user data + */ + + /* Create a copy of dataset access property list */ + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(dapl, 2, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + FAIL_STACK_ERROR + + /* Verify expected values */ + if(ret_boundary[0] != 0 || ret_boundary[1] != 0) + TEST_ERROR; + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + + /* Close the property list */ + if(H5Pclose(dapl) < 0) + FAIL_STACK_ERROR; + + /* + * Case (2) + * To verify the failure conditions in setting append flush property: + * -- an invalid dataset rank: <= 0, > H5S_MAX_RANK + * -- no boundary specified + * -- undefined callback but defined user data + * -- invalid boundary size: H5S_UNLIMITED, negative value + */ + + /* Create a copy of dataset access property list */ + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + + /* Invalid dataset rank: zero value */ + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, 0, NULL, NULL, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Invalid dataset rank: > H5S_MAX_RANK */ + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, H5S_MAX_RANK+1, NULL, NULL, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* No boundary specified */ + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, 2, NULL, NULL, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Set up a valid boundary */ + boundary[0] = 1; + boundary[1] = 1; + + /* Undefined callback function but defined user data */ + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, 2, boundary, NULL, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Invalid boundary size: negative value */ + boundary[0] = (hsize_t)-1; + boundary[1] = 1; + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, 2, boundary, append_cb, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* Invalid boundary size: H5S_UNLIMITED */ + boundary[0] = 1; + boundary[1] = H5S_UNLIMITED; + H5E_BEGIN_TRY { + ret = H5Pset_append_flush(dapl, 2, boundary, append_cb, &count); + } H5E_END_TRY; + if(ret >= 0) + TEST_ERROR + + /* + * Case (3) + * To verify the append flush property values retrieved from a non-default + * access 
property list:
+ *    -- the set callback function, the set user data
+ *    -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+ */
+    boundary[0] = boundary[1] = 1;
+    boundary[2] = 0;
+    count = 1;
+    if(H5Pset_append_flush(dapl, 2, boundary, append_cb, &count) < 0)
+        FAIL_STACK_ERROR;
+    ++count;
+
+    /* Verify expected values: with boundary rank > set boundary rank */
+    if(H5Pget_append_flush(dapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 1 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb == NULL || ret_count == NULL || *ret_count != 2)
+        TEST_ERROR
+
+    /* Verify expected values: with boundary rank < set boundary rank */
+    HDmemset(ret_boundary, 0, sizeof(ret_boundary));
+    if(H5Pget_append_flush(dapl, 1, ret_boundary, NULL, NULL) < 0)
+        TEST_ERROR
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+        TEST_ERROR;
+
+    /* Closing */
+    if(H5Pclose(dapl) < 0)
+        FAIL_STACK_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dapl);
+    } H5E_END_TRY;
+
+    return -1;
+} /* test_append_flush_generic() */
+
+/*
+ * test_append_flush_dataset_chunked()
+ *
+ * Verify H5Pget/set_append_flush() work as specified for a chunked dataset's
+ * access property list:
+ *  1) To verify the append flush property values retrieved from a default
+ *     access property list:
+ *     -- zero boundary, null callback function, null user data
+ *  2) To verify failure in creating dataset when:
+ *     -- the rank set in append flush property is not the same as the dataset's rank
+ *     -- boundary (non-zero) is set for a non-extendible dimension
+ *  3) To verify the append flush property values retrieved from a non-default
+ *     access property list:
+ *     -- the set callback function, the set user data
+ *     -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+ */
+static int
+test_append_flush_dataset_chunked(hid_t in_fapl)
+{
+    hid_t fid = -1;             /* file ID */
+    hid_t fapl = -1;            /* A copy of file access property */
+    hid_t did1 = -1, did2 = -1; /* The dataset IDs */
+    hid_t sid = -1;             /* The dataspace ID */
+    hid_t dcpl = -1;            /* A copy of dataset creation property */
+    hid_t dapl = -1;            /* A copy of dataset access property */
+    hid_t ddapl = -1;           /* The dataset access property of the opened dataset */
+
+    hsize_t boundary[3];        /* Boundary size */
+    unsigned count = 0;         /* User data */
+
+    hsize_t ret_boundary[3];    /* Boundary size set in the append flush property */
+    H5D_append_cb_t ret_cb;     /* The callback function set in the append flush property */
+    unsigned *ret_count;        /* The user data set in the append flush property */
+
+    char filename[NAME_BUF_SIZE];   /* file name */
+
+    hsize_t dims[2] = {100, 0};                 /* The dataset dimension sizes */
+    hsize_t maxdims[2] = {100, H5S_UNLIMITED};  /* The dataset maximum dimension sizes */
+    hsize_t chunk_dims[2] = {5, 2};             /* The chunk dimension sizes */
+
+    TESTING("H5Fget/set_append_flush() for a chunked dataset's access property list");
+
+    /*
+     * Case (1)--
+     * For a chunked dataset's access property list:
+     * --to verify that the append flush property values retrieved from a default
+     *   access property list are:
+     *   zero rank, zero boundary, null callback function, null user data
+     */
+
+    /* Get a copy of the input parameter in_fapl */
+    if((fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set to use the latest library format */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        FAIL_STACK_ERROR
+
+
+
/* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* Create the test file to work on */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Create a chunked dataset with 1 extendible dimension */ + if((sid = H5Screate_simple(2, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) + FAIL_STACK_ERROR; + if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + TEST_ERROR + + /* Get the dataset's access property list */ + if((ddapl = H5Dget_access_plist(did1)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + + /* Close the dataset's access property list */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + + /* + * Case (2)-- + * For a chunked dataset's access property list: + * --to verify failure in creating the dataset when: + * --the rank set in append flush property is not the same as the dataset's rank + * -- boundary (non-zero) is set for a non-extendible dimension + * --to verify failure in opening the dataset + * -- boundary (non-zero) is set for a non-extendible dimension + */ + /* Create a copy of dataset access property list */ + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + + /* Set boundary dimension rank > the rank of dataset to be created */ + HDmemset(boundary, 0, sizeof(boundary)); + if(H5Pset_append_flush(dapl, 3, boundary, NULL, NULL) < 0) + FAIL_STACK_ERROR + + /* Should fail to Create the dataset */ + H5E_BEGIN_TRY { + did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl); + } H5E_END_TRY; + if(did2 >= 0) + TEST_ERROR + + /* Set boundary for a non-extendible dimension */ + boundary[0] = boundary[1] = 1; + if(H5Pset_append_flush(dapl, 2, boundary, NULL, NULL) < 0) + FAIL_STACK_ERROR + + /* Should fail to create the dataset */ + H5E_BEGIN_TRY { + did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl); + } H5E_END_TRY; + if(did2 >= 0) + TEST_ERROR + + /* Create and close the dataset */ + if((did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + + /* Should fail to open the dataset */ + H5E_BEGIN_TRY { + did2 = H5Dopen2(fid, "dataset2", dapl); + } H5E_END_TRY; + if(did2 >= 0) + TEST_ERROR + + /* + * Case (3)-- + * For a chunked dataset's access property list: + * --To verify the append flush property values retrieved from a non-default + * access property list: + * -- the set callback function, the set user data + * -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set) + */ + + boundary[0] = 0; + boundary[1] = 1; + if(H5Pset_append_flush(dapl, 2, boundary, append_cb, &count) < 0) + FAIL_STACK_ERROR + if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0) + FAIL_STACK_ERROR + + /* Get the dataset's access property list */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + HDmemset(ret_boundary, 0, sizeof(ret_boundary)); + ret_cb = NULL; + ret_count = NULL; + /* Retrieve the append flush property 
values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != append_cb || ret_count != &count) + TEST_ERROR + if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0) + TEST_ERROR + + HDmemset(ret_boundary, 0, sizeof(ret_boundary)); + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 1, ret_boundary, NULL, NULL) < 0) + TEST_ERROR + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR + + /* Closing */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dcpl); + H5Pclose(dapl); + H5Pclose(ddapl); + H5Dclose(did1); + H5Dclose(did2); + H5Pclose(fapl); + H5Sclose(sid); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_append_flush_dataset_chunked() */ + +/* + * test_append_flush_dataset_fixed(): + * + * Verify H5Pget/set_append_flush() work as specified for a + * non-chunked (fixed size) dataset's access property list: + * (1) To verify success in creating the dataset--whatever is set for the append flush property setting + * (2) To verify that default append flush property values are retrieved for both + * default or non-default access property list: + * -- zero boundary, null callback function, null user data + */ +static int +test_append_flush_dataset_fixed(hid_t in_fapl) +{ + hid_t fid = -1; /* file ID */ + hid_t fapl = -1; /* A copy of file access property */ + hid_t did1 = -1, did2 = -1; /* The datset ID */ + hid_t sid = -1; /* The dataspace ID */ + hid_t dapl = -1; /* A copy of dataset access property */ + hid_t ddapl = -1; /* The dataset access property of the opened dataset */ + + hsize_t boundary[3]; /* Boundary size */ + unsigned count = 0; /* User data */ + + hsize_t ret_boundary[3]; /* Boundary size set in the append flush property */ + H5D_append_cb_t ret_cb; /* The callback function set in the append flush property */ + unsigned *ret_count; /* The user data set in the append flush property */ + + char filename[NAME_BUF_SIZE]; /* file name */ + + hsize_t dims[1] = {100}; + + TESTING("H5Fget/set_append_flush() for a non-chunked dataset's access property list"); + + /* + * Case (1)-- + * For a non-chunked dataset's access property list: + * --to verify the append flush property values retrieved from + * a default access property list is: + * zero boundary, null callback function, null user data + */ + + /* Get a copy of the input parameter in_fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* Create the test file to work on */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Create a dataset */ + if((sid = H5Screate_simple(1, dims, dims)) < 0) + FAIL_STACK_ERROR; + if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + TEST_ERROR + + /* Get the 
dataset's access property list */ + if((ddapl = H5Dget_access_plist(did1)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + + /* Close the dataset's access property list */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + + /* + * Case (2)-- + * For a non-chunked dataset's access property list: + * --to verify success in creating and opening the dataset even when append flush property + * is setup with error conditions: + * --the rank set in append flush property is not the same as the dataset's rank + * --boundary is set + * --to verify the append flush property values are: + * zero boundary, null callback function, null user data + */ + /* Create a copy of dataset access property list */ + if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + + boundary[0] = 1; + boundary[1] = boundary[2] = 0; + if(H5Pset_append_flush(dapl, 3, boundary, append_cb, &count) < 0) + FAIL_STACK_ERROR + + /* Should succeed to create the dataset: append flush property has no effect */ + if((did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, dapl)) < 0) + TEST_ERROR + + /* Get the dataset's access property list */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR + + /* Closing */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + + /* Should succeed in opening the dataset: append flush property has no effect */ + if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0) + TEST_ERROR + + /* Get the dataset's access property list */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR + + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR + /* + * Case (3)-- + * For a non-chunked dataset's access property list: + * --To verify the append flush property values retrieved from a non-default + * access property list: + * zero boundary, null callback function, null user data + */ + + HDmemset(boundary, 0, sizeof(boundary)); + if(H5Pset_append_flush(dapl, 1, boundary, append_cb, &count) < 0) + FAIL_STACK_ERROR + if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0) + FAIL_STACK_ERROR + + /* Get the dataset's access property list */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 1, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values */ + if(ret_cb != NULL || ret_count != NULL) + TEST_ERROR + if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR + + /* Closing */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dapl) 
< 0) + FAIL_STACK_ERROR; + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dapl); + H5Pclose(ddapl); + H5Dclose(did1); + H5Dclose(did2); + H5Pclose(fapl); + H5Sclose(sid); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_append_flush_dataset_fixed() */ + +/* + * test_append_flush_multiple() + * + * Verify H5Pget/set_append_flush() work as specified for multiple opens + * of a dataset: + * (1) did1 = H5Dcreate(...dapl1...) + * did2 = H5Dopen2(...dapl2) + * H5Pget_append_flush(did1...) + * H5Pget_append_flush(did2...) + * -- should return append flush property values set in dapl1 + * (2) H5Dcreate(...H5P_DEFAULT...) + * H5Dclose() + * did1 = H5Dopen2(...dapl1) + * did2 = H5Dopen2(..dapl2) + * H5Pget_append_flush(did1, ...) + * H5Pget_append_flush(did2, ...) + * -- should return append flush property values set in dapl1 + * NOTE: + * FOR NOW: return the append flush property values of the create or the very first open + * LATER ON: should REJECT subsequent dataset open if append flush property values differ + */ +static int +test_append_flush_dataset_multiple(hid_t in_fapl) +{ + hid_t fid = -1; /* file ID */ + hid_t fapl = -1; /* A copy of file access property */ + hid_t did1 = -1, did2 = -1; /* The datset ID */ + hid_t sid = -1; /* The dataspace ID */ + hid_t dcpl = -1; /* A copy of dataset creation property */ + hid_t dapl1 = -1; /* A copy of dataset access property */ + hid_t dapl2 = -1; /* A copy of dataset access property */ + hid_t ddapl = -1; /* The dataset access property of the opened dataset */ + + hsize_t boundary1[3]; /* Boundary size */ + hsize_t boundary2[3]; /* Boundary size */ + unsigned count1 = 0; /* User data */ + unsigned count2 = 0; /* User data */ + + hsize_t ret_boundary[3]; /* Boundary size set in the append flush property */ + H5D_append_cb_t ret_cb; /* The callback function set in the append flush property */ + unsigned *ret_count; /* The user data set in the append flush property */ + + char filename[NAME_BUF_SIZE]; /* file name */ + + hsize_t dims[2] = {0, 0}; /* The dataset dimension sizes */ + hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* The dataset maximum dimension sizes */ + hsize_t chunk_dims[2] = {5,2}; /* The chunk dimesion sizes */ + + TESTING("H5Fget/set_append_flush() for multiple opens of a chunked dataset"); + + /* + * Case (1) + * For a chunked dataset's access property list: + * did1 = H5Dcreate(...dapl1...) + * did2 = H5Dopen2(...dapl2) + * H5Pget_append_flush(did1...) + * H5Pget_append_flush(did2...) 
+ * -- should return append flush property values set in dapl1 + */ + + /* Create a copy of dataset access property list */ + if((dapl1 = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + if((dapl2 = H5Pcreate(H5P_DATASET_ACCESS)) < 0) + FAIL_STACK_ERROR + + boundary1[0] = 0; + boundary1[1] = 1; + count1 = 0; + if(H5Pset_append_flush(dapl1, 2, boundary1, append_cb, &count1) < 0) + FAIL_STACK_ERROR + boundary2[0] = 1; + boundary2[1] = 0; + count2 = 0; + if(H5Pset_append_flush(dapl2, 2, boundary2, append_cb2, &count2) < 0) + FAIL_STACK_ERROR + + /* Get a copy of the input parameter in_fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + /* Create the test file to work on */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Create a chunked dataset with 2 extendible dimensions */ + if((sid = H5Screate_simple(2, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) + FAIL_STACK_ERROR; + if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl1)) < 0) + FAIL_STACK_ERROR; + + /* Open the dataset */ + if((did2 = H5Dopen2(fid, "dataset1", dapl2)) < 0) + FAIL_STACK_ERROR; + + /* Get the dataset's access property list for did1 */ + if((ddapl = H5Dget_access_plist(did1)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values: should be the setting in dapl1 */ + if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != append_cb || ret_count != &count1) + TEST_ERROR + + /* Close the dataset's access property list */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + + /* Get the dataset's access property list for did2 */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values: should be the setting in dapl1 */ + if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != append_cb || ret_count != &count1) + TEST_ERROR + + /* Close the dataset's access property list */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + H5Dclose(did1); + H5Dclose(did2); + + /* + * Case (2) + * For a chunked dataset's access property list: + * H5Dcreate(...H5P_DEFAULT...) + * H5Dclose() + * did1 = H5Dopen2(...dapl1) + * did2 = H5Dopen2(..dapl2) + * H5Pget_append_flush(did1, ...) + * H5Pget_append_flush(did2, ...) 
+ * -- should return append flush property values set in dapl1 + */ + if((did1 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + + /* Open the dataset with append flush setting in dapl2 */ + if((did1 = H5Dopen2(fid, "dataset2", dapl2)) < 0) + FAIL_STACK_ERROR; + + /* Open the dataset with append flush setting in dapl1 */ + if((did2 = H5Dopen2(fid, "dataset2", dapl1)) < 0) + FAIL_STACK_ERROR; + + /* Get the dataset's access property list for did1 */ + if((ddapl = H5Dget_access_plist(did1)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values: should be the setting in dapl2 */ + if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != append_cb2 || ret_count != &count2) + TEST_ERROR + + /* Close the access property list */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + + + /* Get the dataset's access property list for did2 */ + if((ddapl = H5Dget_access_plist(did2)) < 0) + FAIL_STACK_ERROR + + /* Retrieve the append flush property values */ + if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0) + TEST_ERROR + + /* Verify expected values: should be the setting in dapl2 */ + if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0) + TEST_ERROR; + if(ret_cb != append_cb2 || ret_count != &count2) + TEST_ERROR + + /* Closing */ + if(H5Pclose(ddapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dapl2) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dapl1) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dcpl); + H5Pclose(dapl1); + H5Pclose(dapl2); + H5Pclose(ddapl); + H5Dclose(did1); + H5Dclose(did2); + H5Pclose(fapl); + H5Sclose(sid); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_append_flush_dataset_multiple() */ + + + +/**************************************************************** +** +** test_file_lock_same(): +** With the implementation of file locking, this test checks file +** open with different combinations of flags. +** This is for single process access. 
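+** The combinations exercised below are:
+**   1) open RDWR,   then reopen RDWR   -- second open succeeds
+**   2) open RDWR,   then reopen RDONLY -- second open succeeds (intent reported is RDWR)
+**   3) open RDONLY, then reopen RDWR   -- second open fails
+**   4) open RDONLY, then reopen RDONLY -- second open succeeds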
+** +*****************************************************************/ +static int +test_file_lock_same(hid_t in_fapl) +{ + hid_t fid = -1, fid2 = -1; /* File IDs */ + hid_t fapl = -1; /* File access property list */ + unsigned intent; /* File access flags */ + char filename[NAME_BUF_SIZE]; /* file name */ + + /* Output message about test being performed */ + TESTING("File open with different combinations of flags--single process access"); + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[1], fapl, filename, sizeof(filename)); + + /* + * Case 1: 1) RDWR 2) RDWR : should succeed + */ + /* Create file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check file intent */ + if(H5Fget_intent(fid, &intent) < 0) + FAIL_STACK_ERROR + + if(intent != H5F_ACC_RDWR) + TEST_ERROR + + /* Open the same file with RDWR */ + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent */ + if(H5Fget_intent(fid2, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDWR) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 2: 1) RDWR 2) RDONLY : should succeed + */ + /* Open file with RDWR */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent */ + if(H5Fget_intent(fid, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDWR) + TEST_ERROR + + /* Open file with RDONLY */ + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent: should get intent from 1st open */ + if(H5Fget_intent(fid2, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDWR) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 3: 1) RDONLY 2) RDWR : should fail + */ + /* Open file with RDONLY */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent */ + if(H5Fget_intent(fid, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDONLY) + TEST_ERROR + + /* Open file with RDWR should fail */ + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl); + } H5E_END_TRY; + if(fid2 >= 0) + TEST_ERROR + + /* Close first file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 4: 1) RDONLY 2) RDONLY : should succeed + */ + /* Open file with RDONLY */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent */ + if(H5Fget_intent(fid, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDONLY) + TEST_ERROR + + /* Open file with RDONLY */ + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get and check the intent */ + if(H5Fget_intent(fid2, &intent) < 0) + FAIL_STACK_ERROR + if(intent != H5F_ACC_RDONLY) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + H5Fclose(fid2); + } H5E_END_TRY; + + return -1; +} /* end test_file_lock_same() */ + +/**************************************************************** +** +** 
test_file_lock_swmr_same(): +** With the implementation of file locking, this test checks file +** open with different combinations of flags + SWMR flags. +** This is for single process access. +** +*****************************************************************/ +static int +test_file_lock_swmr_same(hid_t in_fapl) +{ + hid_t fid, fid2; /* File IDs */ + hid_t fapl; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* file name */ + + /* Output message about test being performed */ + TESTING("File open with different combinations of flags + SWMR flags--single process access"); + + /* Get a copy of the parameter in_fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[1], fapl, filename, sizeof(filename)); + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create a file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Cases a, b, c, d: H5Fopen failure cases + */ + + /* + * Case a: RDWR|SWRM_READ : should fail + */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + if(fid >= 0) + TEST_ERROR + + /* + * Case b: RDWR|SWMM_WRTE|SWMR_READ : should fail + */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + if(fid >= 0) + TEST_ERROR + + /* + * Case c: RDONLY|SWMM_WRITE : should fail + */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(fid >= 0) + TEST_ERROR + + /* + * Case d: RDONLY|SWMM_WRITE|SWMR_READ : should fail + */ + H5E_BEGIN_TRY { + fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_WRITE|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + if(fid >= 0) + TEST_ERROR + + /* + * Cases 1 - 12: combinations of different flags for 1st and 2nd opens + */ + + /* + * Case 1: 1) RDWR 2) RDWR|SWMR_WRITE : should fail + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(fid2 >= 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 2: 1) RDWR 2) RDONLY|SWMR_READ : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + TEST_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 3: 1) RDWR|SWMR_WRITE 2)RDWR : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 4: 1) RDWR|SWMR_WRITE 2) RDWR|SWMR_WRITE : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 5: 
1) RDWR|SWMR_WRITE 2) RDONLY|SWMR_READ : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 6: 1) RDWR|SWMR_WRITE 2) RDONLY : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 7: 1) RDONLY|SWMR_READ 2)RDWR : should fail + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + } H5E_END_TRY; + if(fid2 >= 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 8: 1) RDONLY|SWMR_READ 2) RDWR|SWMR_WRITE : should fail + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(fid2 >= 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 9: 1) RDONLY|SWMR_READ 2) RDONLY|SWMR_READ : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 10: 1) RDONLY|SWMR_READ 2) RDONLY : should succeed + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + TEST_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close file */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* + * Case 11: 1) RDONLY 2) RDWR|SWMR_WRITE: should fail + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + if(fid2 >= 0) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 12: 1) RDONLY 2) RDONLY|SWMR_READ : should fail + */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + H5E_BEGIN_TRY { + fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + if(fid2 >=0 ) + TEST_ERROR + + /* Close file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + H5Fclose(fid2); + } H5E_END_TRY; + + return -1; +} /* end test_file_lock_swmr_same() */ + + +/**************************************************************** +** +** test_file_lock_concur(): +** With the implementation of file locking, this test checks file +** open with different combinations of flags. +** This is for concurrent access. 
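+** Each concurrent case below follows the same fork/pipe pattern: the parent
+** creates a pipe and forks; the child closes the unused write end and blocks
+** reading the pipe; the parent performs its own open first, writes a
+** notification to the pipe and then waitpid()s; once notified, the child
+** attempts its open and exits with success or failure according to the
+** expected result.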
+** +*****************************************************************/ +#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK)) + +static int +test_file_lock_concur(hid_t H5_ATTR_UNUSED in_fapl) +{ + /* Output message about test being performed */ + TESTING("File open with different combinations of flags--concurrent access"); + SKIPPED(); + HDputs(" Test skipped due to fork, waitpid, or flock not defined."); + return 0; + +} /* end test_file_lock_concur() */ + +#else + +static int +test_file_lock_concur(hid_t in_fapl) +{ + hid_t fid = -1; /* File ID */ + hid_t fapl = -1; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* file name */ + pid_t childpid=0; /* Child process ID */ + int child_status; /* Status passed to waitpid */ + int child_wait_option=0; /* Options passed to waitpid */ + int out_pdf[2]; + int notify = 0; + + /* Output message about test being performed */ + TESTING("File open with different combinations of flags--concurrent access"); + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[1], fapl, filename, sizeof(filename)); + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 1: 1) RDWR 2) RDWR : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 2: 1) RDWR 2) RDONLY : should fail + */ + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, 
sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Opens the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Opens the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 3: 1) RDONLY 2) RDWR : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Opens the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } /* end if */ + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Opens the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 4: 1) RDONLY 2) RDONLY : should succeed + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Opens the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl); + } H5E_END_TRY; + + /* Should succeed */ + if(child_fid >= 0) { + /* Close the file */ + 
if(H5Fclose(child_fid) < 0) + HDexit(EXIT_FAILURE); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } /* end if */ + + HDexit(EXIT_FAILURE); + } /* end if */ + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Create file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; + +} /* end test_file_lock_concur() */ + +#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK)) */ + +/**************************************************************** +** +** test_file_lock_swmr_concur(): low-level file test routine. +** With the implementation of file locking, this test checks file +** open with different combinations of flags + SWMR flags. +** This is for concurrent access. +** +*****************************************************************/ +#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) + +static int +test_file_lock_swmr_concur(hid_t H5_ATTR_UNUSED in_fapl) +{ + /* Output message about test being performed */ + TESTING("File open with different combintations of flags + SWMR flags--concurrent access"); + SKIPPED(); + HDputs(" Test skipped due to fork or waitpid not defined."); + return 0; + +} /* end test_file_lock_swmr_concur() */ + +#else + +static int +test_file_lock_swmr_concur(hid_t in_fapl) +{ + hid_t fid; /* File ID */ + hid_t fapl; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* file name */ + pid_t childpid=0; /* Child process ID */ + int child_status; /* Status passed to waitpid */ + int child_wait_option=0; /* Options passed to waitpid */ + int out_pdf[2]; + int notify = 0; + + /* Output message about test being performed */ + TESTING("File open with different combintations of flags + SWMR flags--concurrent access"); + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[2], fapl, filename, sizeof(filename)); + + /* Set to use latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 1: 1) RDWR 2) RDWR|SWMR_WRITE : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); 
+ + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 2: 1) RDWR 2) RDONLY|SWMR_READ: should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 3: 1) RDWR|SWMR_WRITE 2) RDWR : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + 
H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 4: 1) RDWR|SWMR_WRITE 2) RDWR|SWMR_WRITE : should fail + */ + + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 5: 1) RDWR|SWMR_WRITE 2) RDONLY|SWMR_READ : should succeed + */ + + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + + /* Should succeed */ + if(child_fid >= 0) { + if(H5Fclose(child_fid) < 0) + FAIL_STACK_ERROR + 
HDexit(EXIT_SUCCESS); + } + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 6: 1) RDWR|SWMR_WRITE 2) RDONLY : should fail + */ + + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 7: 1) RDONLY|SWMR_READ 2) RDWR : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + 
FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 8: 1) RDONLY|SWMR_READ 2) RDWR|SWMR_WRITE : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 9: 1) RDONLY|SWMR_READ 2) RDONLY|SWMR_READ : should succeed + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + + /* Should succeed */ + if(child_fid >= 0) { + if(H5Fclose(child_fid) < 0) + FAIL_STACK_ERROR + HDexit(EXIT_SUCCESS); + } + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, 
H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 10: 1) RDONLY|SWMR_READ 2) RDONLY : should succeed + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + if((child_fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Should succeed */ + if(child_fid >= 0) { + if(H5Fclose(child_fid) < 0) + FAIL_STACK_ERROR + HDexit(EXIT_SUCCESS); + } + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 11: 1) RDONLY 2) RDWR|SWMR_WRITE : should fail + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl); + } H5E_END_TRY; + + /* Should fail */ + if(child_fid == FAIL) + HDexit(EXIT_SUCCESS); + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* Close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], 
¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* + * Case 12: 1) RDONLY 2) RDONLY|SWMR_READ : should succeed + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the test file */ + H5E_BEGIN_TRY { + child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl); + } H5E_END_TRY; + + /* Should succeed */ + if(child_fid >= 0) { + if(H5Fclose(child_fid) < 0) + FAIL_STACK_ERROR + HDexit(EXIT_SUCCESS); + } + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_FAILURE); + } + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + FAIL_STACK_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } else + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; + +} /* end test_file_lock_swmr_concur() */ + + + +#endif /* !(defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID)) */ + +/**************************************************************** +** +** test_file_lock_swmr_concur(): low-level file test routine. +** With the implementation of file locking, this test checks file +** open with different combinations of flags + SWMR flags. +** This is for concurrent access. 
+** +*****************************************************************/ +static int +test_file_lock_env_var(hid_t in_fapl) +{ +#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) + SKIPPED(); + HDputs(" Test skipped due to fork or waitpid not defined."); + return 0; +#else + hid_t fid = -1; /* File ID */ + hid_t fapl = -1; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* file name */ + pid_t childpid=0; /* Child process ID */ + int child_status; /* Status passed to waitpid */ + int child_wait_option=0; /* Options passed to waitpid */ + int out_pdf[2]; + int notify = 0; + + + TESTING("File locking environment variable"); + + + /* Set the environment variable */ + if(HDsetenv("HDF5_USE_FILE_LOCKING", "FALSE", TRUE) < 0) + TEST_ERROR + + if((fapl = H5Pcopy(in_fapl)) < 0) + TEST_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[1], fapl, filename, sizeof(filename)); + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + TEST_ERROR + + /* Open a file for read-only and then read-write. This would + * normally fail due to the file locking scheme but should + * pass when the environment variable is set to disable file + * locking. + */ + + /* Create 1 pipe */ + if(HDpipe(out_pdf) < 0) + TEST_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + TEST_ERROR + + if(childpid == 0) { + + /* Child process */ + + hid_t child_fid; /* File ID */ + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } /* end while */ + + /* Open the test file */ + if((child_fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + TEST_ERROR + + /* Close the pipe */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } /* end if */ + + /* close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + TEST_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + TEST_ERROR + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + TEST_ERROR; + + /* Close the pipe */ + if(HDclose(out_pdf[1]) < 0) + TEST_ERROR; + + /* Wait for child process to complete */ + if(HDwaitpid(childpid, &child_status, child_wait_option) < 0) + TEST_ERROR + + /* Check if child terminated normally */ + if(WIFEXITED(child_status)) { + /* Check exit status of the child */ + if(WEXITSTATUS(child_status) != 0) + TEST_ERROR + } /* end if */ + else + TEST_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + TEST_ERROR + + /* Close the copied property list */ + if(H5Pclose(fapl) < 0) + TEST_ERROR + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; + + +#endif /* !(defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID)) */ + +} /* end test_file_lock_env_var() */ + + +static int +test_swmr_vfd_flag(void) +{ + hid_t fid = -1; /* file ID */ + hid_t sec2_fapl = -1; /* fapl ID of a VFD that supports SWMR writes (sec2) */ + hid_t bad_fapl = -1; /* fapl ID of a VFD that does not support SWMR writes (stdio) */ + char filename[NAME_BUF_SIZE]; /* file name */ + + TESTING("SWMR-enabled VFD flag functionality"); + + /* Attempt to open a file using a SWMR-compatible VFD. 
*/ + + if((sec2_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_fapl_sec2(sec2_fapl) < 0) + FAIL_STACK_ERROR; + if(H5Pset_libver_bounds(sec2_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + h5_fixname(FILENAME[0], sec2_fapl, filename, sizeof(filename)); + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, sec2_fapl)) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR; + + /* Attempt to open a file using a non-SWMR-compatible VFD. */ + + if((bad_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) + FAIL_STACK_ERROR; + if(H5Pset_fapl_stdio(bad_fapl) < 0) + FAIL_STACK_ERROR; + if(H5Pset_libver_bounds(bad_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + fid = -1; + h5_fixname(FILENAME[0], bad_fapl, filename, sizeof(filename)); + H5E_BEGIN_TRY { + fid = H5Fcreate(filename, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, bad_fapl); + } H5E_END_TRY; + if(fid >= 0) + TEST_ERROR; + + if(H5Pclose(sec2_fapl) < 0) + FAIL_STACK_ERROR; + if(H5Pclose(bad_fapl) < 0) + FAIL_STACK_ERROR; + + PASSED(); + + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(sec2_fapl); + H5Pclose(bad_fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_swmr_vfd_flag() */ + +#ifdef OUT +/* + * This exposes a bug for H5Orefresh while handling opened objects for H5Fstart_swmr_write(). + * The boolean to skip file truncation test when reading in superblock will fix the problem. + * Will work to move that to test/flushrefresh.c later. + */ +static int +test_bug_refresh(hid_t in_fapl) +{ + hid_t fid = -1; /* File ID */ + hid_t fapl; + H5F_t *f; + hid_t gid1, gid2, gid3, gid4, gid5, gid6, gid7, gid8, gid9; + char filename[NAME_BUF_SIZE]; /* File name */ + + /* Create a copy of the input parameter in_fapl */ + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + TESTING("H5Orefresh failure conditions"); + + /* Create a file with the latest format */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + + /* Get a pointer to the internal file object */ + if(NULL == (f = (H5F_t *)H5I_object(fid))) + FAIL_STACK_ERROR + + /* Create groups: compact to dense storage */ + if((gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid2 = H5Gcreate2(fid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid3 = H5Gcreate2(fid, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid4 = H5Gcreate2(fid, "group4", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid5 = H5Gcreate2(fid, "group5", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid6 = H5Gcreate2(fid, "group6", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid7 = H5Gcreate2(fid, "group7", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid8 = H5Gcreate2(fid, "group8", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + if((gid9 = H5Gcreate2(fid, "group9", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) + TEST_ERROR + + if(H5Grefresh(gid1) < 0) TEST_ERROR + if(H5Grefresh(gid2) < 0) 
TEST_ERROR + if(H5Grefresh(gid3) < 0) TEST_ERROR + if(H5Grefresh(gid4) < 0) TEST_ERROR + if(H5Grefresh(gid5) < 0) TEST_ERROR + if(H5Grefresh(gid6) < 0) TEST_ERROR + if(H5Grefresh(gid7) < 0) TEST_ERROR + if(H5Grefresh(gid8) < 0) TEST_ERROR + if(H5Grefresh(gid9) < 0) TEST_ERROR + + H5Gclose(gid1); + H5Gclose(gid2); + H5Gclose(gid3); + H5Gclose(gid4); + H5Gclose(gid5); + H5Gclose(gid6); + H5Gclose(gid7); + H5Gclose(gid8); + H5Gclose(gid9); + H5Pclose(fapl); + H5Fclose(fid); + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Gclose(gid1); + H5Gclose(gid2); + H5Gclose(gid3); + H5Gclose(gid4); + H5Gclose(gid5); + H5Gclose(gid6); + H5Gclose(gid7); + H5Gclose(gid8); + H5Gclose(gid9); + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; +} /* test_bug_refresh() */ +#endif /* OUT */ + +/* + * test_refresh_concur(): + * + * The "new_format" parameter indicates whether to create the file with latest format or not. + * To have SWMR support, can use either one of the following in creating a file: + * (a) Create the file with write + latest format: + * --result in v3 superblock with latest chunk indexing types + * (b) Create the file with SWMR write + non-latest-format: + * --result in v3 superblock with latest chunk indexing types + * + * Verify H5Drefresh() works correctly with concurrent access: + * Parent process: + * (1) Open the test file, write to the dataset + * (2) Notify child process #A + * (3) Wait for notification from child process #B + * (4) Extend the dataset, write to the dataset, flush the file + * (5) Notify child process #C + * Child process: + * (1) Wait for notification from parent process #A + * (2) Open the file 2 times + * (3) Open the dataset 2 times with the 2 files + * (4) Verify the dataset's dimension and data read are correct + * (5) Notify parent process #B + * (6) Wait for notification from parent process #C + * (7) Refresh the dataset + * (8) Verify the dataset's dimension and data are correct + */ +#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) + +static int +test_refresh_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t H5_ATTR_UNUSED new_format) +{ + SKIPPED(); + HDputs(" Test skipped due to fork or waitpid not defined."); + return 0; +} /* test_refresh_concur() */ + +#else /* defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID) */ + +static int +test_refresh_concur(hid_t in_fapl, hbool_t new_format) +{ + hid_t fid; /* File ID */ + hid_t fapl; /* File access property list */ + pid_t childpid=0; /* Child process ID */ + pid_t tmppid; /* Child process ID returned by waitpid */ + int child_status; /* Status passed to waitpid */ + int child_wait_option=0; /* Options passed to waitpid */ + int child_exit_val; /* Exit status of the child */ + char filename[NAME_BUF_SIZE]; /* File name */ + + hid_t did = -1; + hid_t sid = -1; + hid_t dcpl = -1; + hsize_t chunk_dims[1] = {1}; + hsize_t maxdims[1] = {H5S_UNLIMITED}; + hsize_t dims[1] = { 1 }; + hsize_t new_dims[1] = {2}; + + int out_pdf[2]; + int in_pdf[2]; + int notify = 0; + int wbuf[2]; + + /* Output message about test being performed */ + if(new_format) { + TESTING("H5Drefresh()--concurrent access for latest format"); + } else { + TESTING("H5Drefresh()--concurrent access for non-latest-format"); + } /* end if */ + + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if(new_format) { + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, 
H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } else { + /* Create the test file without latest format but with SWMR write */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } /* end if */ + + /* Create a chunked dataset with 1 extendible dimension */ + if((sid = H5Screate_simple(1, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 1, chunk_dims) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Closing */ + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Create 2 pipes */ + if(HDpipe(out_pdf) < 0) + FAIL_STACK_ERROR + if(HDpipe(in_pdf) < 0) + FAIL_STACK_ERROR + + /* Fork child process */ + if((childpid = HDfork()) < 0) + FAIL_STACK_ERROR + + if(childpid == 0) { /* Child process */ + hid_t child_fid1 = -1; /* File ID */ + hid_t child_fid2 = -1; /* File ID */ + hid_t child_did1 = -1, child_did2 = -1; + hid_t child_sid = -1; + hsize_t tdims[1]; + int rbuf[2] = {0, 0}; + int child_notify = 0; + + /* Close unused write end for out_pdf */ + if(HDclose(out_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + /* close unused read end for in_pdf */ + if(HDclose(in_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 1) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Open the file 2 times */ + if((child_fid1 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + HDexit(EXIT_FAILURE); + + if((child_fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0) + HDexit(EXIT_FAILURE); + + /* Open the dataset 2 times */ + if((child_did1 = H5Dopen2(child_fid1, "dataset", H5P_DEFAULT)) < 0) + HDexit(EXIT_FAILURE); + if((child_did2 = H5Dopen2(child_fid2, "dataset", H5P_DEFAULT)) < 0) + HDexit(EXIT_FAILURE); + + /* Get the dataset's dataspace via did1 */ + if((child_sid = H5Dget_space(child_did1)) < 0) + HDexit(EXIT_FAILURE); + if(H5Sget_simple_extent_dims(child_sid, tdims, NULL) < 0) + HDexit(EXIT_FAILURE); + if(tdims[0] != 1) + HDexit(EXIT_FAILURE); + + /* Read from the dataset via did2 */ + if(H5Dread(child_did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0) + HDexit(EXIT_FAILURE); + + /* Verify the data is correct */ + if(rbuf[0] != 99) + HDexit(EXIT_FAILURE); + + /* Notify parent process */ + child_notify = 2; + if(HDwrite(in_pdf[1], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + + /* Wait for notification from parent process */ + while(child_notify != 3) { + if(HDread(out_pdf[0], &child_notify, sizeof(int)) < 0) + HDexit(EXIT_FAILURE); + } + + /* Refresh dataset via did1 */ + if(H5Drefresh(child_did1) < 0) + HDexit(EXIT_FAILURE); + + /* Get the dataset's dataspace and verify */ + if((child_sid = H5Dget_space(child_did1)) < 0) + HDexit(EXIT_FAILURE); + if(H5Sget_simple_extent_dims(child_sid, tdims, NULL) < 0) + HDexit(EXIT_FAILURE); + + if(tdims[0] != 2) + HDexit(EXIT_FAILURE); + + /* Read from the dataset */ + if(H5Dread(child_did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0) + HDexit(EXIT_FAILURE); + + /* Verify the 
data is correct */ + if(rbuf[0] != 100 || rbuf[1] != 100) + HDexit(EXIT_FAILURE); + + /* Close the 2 datasets */ + if(H5Dclose(child_did1) < 0) + HDexit(EXIT_FAILURE); + if(H5Dclose(child_did2) < 0) + HDexit(EXIT_FAILURE); + + /* Close the 2 files */ + if(H5Fclose(child_fid1) < 0) + HDexit(EXIT_FAILURE); + if(H5Fclose(child_fid2) < 0) + HDexit(EXIT_FAILURE); + + /* Close the pipes */ + if(HDclose(out_pdf[0]) < 0) + HDexit(EXIT_FAILURE); + if(HDclose(in_pdf[1]) < 0) + HDexit(EXIT_FAILURE); + + HDexit(EXIT_SUCCESS); + } + + /* Close unused read end for out_pdf */ + if(HDclose(out_pdf[0]) < 0) + FAIL_STACK_ERROR + /* Close unused write end for in_pdf */ + if(HDclose(in_pdf[1]) < 0) + FAIL_STACK_ERROR + + /* Open the test file */ + if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset */ + if((did = H5Dopen2(fid, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Write to the dataset */ + wbuf[0] = wbuf[1] = 99; + if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Flush to disk */ + if(H5Fflush(fid, H5F_SCOPE_LOCAL) < 0) + FAIL_STACK_ERROR; + + /* Notify child process */ + notify = 1; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + + /* Wait for notification from child process */ + while(notify != 2) { + if(HDread(in_pdf[0], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + } + + /* Cork the metadata cache, to prevent the object header from being + * flushed before the data has been written */ + if(H5Odisable_mdc_flushes(did) < 0) + FAIL_STACK_ERROR; + + /* Extend the dataset */ + if(H5Dset_extent(did, new_dims) < 0) + FAIL_STACK_ERROR; + + /* Write to the dataset */ + wbuf[0] = wbuf[1] = 100; + if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0) + FAIL_STACK_ERROR; + + /* Uncork the metadata cache */ + if(H5Oenable_mdc_flushes(did) < 0) + FAIL_STACK_ERROR; + + /* Flush to disk */ + if(H5Fflush(fid, H5F_SCOPE_LOCAL) < 0) + FAIL_STACK_ERROR; + + /* Notify child process */ + notify = 3; + if(HDwrite(out_pdf[1], ¬ify, sizeof(int)) < 0) + FAIL_STACK_ERROR; + + /* Close the pipes */ + if(HDclose(out_pdf[1]) < 0) + FAIL_STACK_ERROR; + if(HDclose(in_pdf[0]) < 0) + FAIL_STACK_ERROR; + + /* Wait for child process to complete */ + if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0) + FAIL_STACK_ERROR + + /* Check exit status of child process */ + if(WIFEXITED(child_status)) { + if((child_exit_val = WEXITSTATUS(child_status)) != 0) + TEST_ERROR + } else /* Child process terminated abnormally */ + TEST_ERROR + + /* Close the dataset */ + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(did); + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(fapl); + H5Fclose(fid); + } H5E_END_TRY; + + return -1; + +} /* test_refresh_concur() */ +#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */ + +/* + * test_multiple_same(): + * + * The "new_format" parameter indicates whether to create the file with latest format or not. 
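The writer side of test_refresh_concur() above follows a fixed sequence: cork the dataset's object header so it cannot be flushed ahead of the raw data, extend and write, then uncork and flush before signalling the reader. A stand-alone sketch of that sequence is below; it is illustrative only, and the dataset name, buffer size, and helper name are assumptions rather than part of the patch.

#include <hdf5.h>

/* Sketch: a SWMR writer extends a 1-D chunked dataset and publishes the
 * new state so that a reader's H5Drefresh() sees it. */
static herr_t
extend_and_publish(hid_t fid, const char *dset_name, hsize_t new_size, int fill)
{
    hid_t   did = -1;
    hsize_t dims[1];
    int     buf[64];               /* sketch assumes new_size <= 64 */
    hsize_t u;

    if(new_size > 64)
        return -1;
    if((did = H5Dopen2(fid, dset_name, H5P_DEFAULT)) < 0)
        return -1;

    /* Cork the metadata cache for this object so its header is not
     * flushed before the raw data has been written. */
    if(H5Odisable_mdc_flushes(did) < 0)
        goto error;

    dims[0] = new_size;
    if(H5Dset_extent(did, dims) < 0)   /* grow the extendible dimension */
        goto error;

    for(u = 0; u < new_size; u++)
        buf[u] = fill;
    if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
        goto error;

    /* Uncork and flush so a concurrent reader's H5Drefresh() sees both
     * the new extent and the new data. */
    if(H5Oenable_mdc_flushes(did) < 0)
        goto error;
    if(H5Fflush(fid, H5F_SCOPE_LOCAL) < 0)
        goto error;

    return H5Dclose(did);

error:
    H5Dclose(did);
    return -1;
}

The reader side is then just H5Drefresh() on its own dataset handle, followed by re-checking H5Dget_space() and H5Dread(), as the child process in the routine above does.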
+ * To have SWMR support, can use either one of the following in creating a file: + * (a) Create the file with write + latest format: + * --result in v3 superblock with latest chunk indexing types + * (b) Create the file with SWMR write + non-latest-format: + * --result in v3 superblock with latest chunk indexing types + * + * Verify that H5Drefresh() and H5Fstart_swmr_write() work properly with multiple + * opens of files and datasets. + */ +static int +test_multiple_same(hid_t in_fapl, hbool_t new_format) +{ + hid_t fid = -1, fid1 = -1, fid2 = -1, fid3 = -1; /* File IDs */ + hid_t fapl; /* File access property list */ + char filename[NAME_BUF_SIZE]; /* File name */ + hid_t did = -1, did1 = -1, did2 = -1, did3 = -1; + hid_t sid = -1; + hid_t dcpl = -1; + hsize_t chunk_dims[2] = {1, 2}; + hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hsize_t dims[2] = {1, 1}; + int rbuf = 0; + int wbuf = 0; + + /* Output message about test being performed */ + if(new_format) { + TESTING("multiple--single process access for latest format"); + } else { + TESTING("multiple--single process access for non-latest-format"); + } /* end if */ + + + if((fapl = H5Pcopy(in_fapl)) < 0) + FAIL_STACK_ERROR + + /* Set the filename to use for this test (dependent on fapl) */ + h5_fixname(FILENAME[0], fapl, filename, sizeof(filename)); + + if(new_format) { + /* Set to use the latest library format */ + if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) + FAIL_STACK_ERROR + + /* Create the test file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } else { + /* Create the test file without latest format but with SWMR write */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) + FAIL_STACK_ERROR + } /* end if */ + + /* Create a chunked dataset with 1 extendible dimension */ + if((sid = H5Screate_simple(2, dims, maxdims)) < 0) + FAIL_STACK_ERROR; + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + FAIL_STACK_ERROR + if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) + FAIL_STACK_ERROR; + if((did = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR; + + /* Closing */ + if(H5Dclose(did) < 0) + FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) + FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) + FAIL_STACK_ERROR + + /* Close the file */ + if(H5Fclose(fid) < 0) + FAIL_STACK_ERROR + + /* Case 1 */ + + /* Open the file 3 times: SWMR-write, read-write, read-only */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + if((fid3 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset 3 times with fid1, fid2, fid3 */ + if((did1 = H5Dopen2(fid1, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did2 = H5Dopen2(fid2, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did3 = H5Dopen2(fid3, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + + /* Write to the dataset via did1 */ + wbuf = 88; + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Refresh via did2 */ + if(H5Drefresh(did2) < 0) + FAIL_STACK_ERROR; + + /* Read from the dataset via did2 */ + rbuf = 0; + if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + /* Verify the data is correct */ + if(rbuf != 88) + FAIL_STACK_ERROR; + + /* Write to the dataset via did3 */ + wbuf = 99; + 
if(H5Dwrite(did3, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Refresh via did1 */ + if(H5Drefresh(did1) < 0) + FAIL_STACK_ERROR; + + /* Read from the dataset via did1 */ + rbuf = 0; + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + /* Verify the data is correct */ + if(rbuf != 99) + FAIL_STACK_ERROR; + + /* Close datasets */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did3) < 0) + FAIL_STACK_ERROR; + + /* Close files */ + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid3) < 0) + FAIL_STACK_ERROR + + /* Case 2 */ + + /* Open the file 3 times: read-write, read-only, read-write */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + if((fid3 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset 3 times with fid1, fid2, fid3 */ + if((did1 = H5Dopen2(fid1, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did2 = H5Dopen2(fid2, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did3 = H5Dopen2(fid3, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Write to the dataset via did1 */ + wbuf = 88; + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Refresh via did2 */ + if(H5Drefresh(did2) < 0) + FAIL_STACK_ERROR; + + /* Read from dataset via did2 */ + rbuf = 0; + if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + if(rbuf != wbuf) + FAIL_STACK_ERROR; + + /* Write to dataset via did3 */ + wbuf = 99; + if(H5Dwrite(did3, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Enable SWMR write */ + if(H5Fstart_swmr_write(fid1) < 0) + FAIL_STACK_ERROR; + + /* Read from dataset via did1 and verify data is correct */ + rbuf = 0; + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + if(rbuf != wbuf) + FAIL_STACK_ERROR; + + /* Write to dataset via did2 */ + wbuf = 100; + if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Refresh dataset via did3 */ + if(H5Drefresh(did3) < 0) + FAIL_STACK_ERROR; + + /* Read from dataset via did3 and verify data is correct */ + rbuf = 0; + if(H5Dread(did3, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + if(rbuf != wbuf) + FAIL_STACK_ERROR; + + /* Close datasets */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + if(H5Dclose(did3) < 0) + FAIL_STACK_ERROR; + + /* Close files */ + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR; + if(H5Fclose(fid3) < 0) + FAIL_STACK_ERROR + + /* Case 3 */ + + /* Open the file 3 times: read-write, read-only, read-only */ + if((fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) + FAIL_STACK_ERROR + if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + if((fid3 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) + FAIL_STACK_ERROR + + /* Open the dataset 3 times with fid1, fid2, fid3 */ + if((did1 = H5Dopen2(fid1, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did2 = H5Dopen2(fid2, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + if((did3 = H5Dopen2(fid3, "dataset", H5P_DEFAULT)) < 0) + FAIL_STACK_ERROR + + /* Write to the 
dataset via did1 */ + wbuf = 88; + if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Refresh dataset via did2 */ + if(H5Drefresh(did2) < 0) + FAIL_STACK_ERROR; + + /* Read from dataset via did2 and verify data is correct */ + rbuf = 0; + if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + if(rbuf != wbuf) + FAIL_STACK_ERROR; + + /* Close dataset via did2 */ + if(H5Dclose(did2) < 0) + FAIL_STACK_ERROR; + + /* Close file via fid2 */ + if(H5Fclose(fid2) < 0) + FAIL_STACK_ERROR + + /* Write to dataset via did3 */ + wbuf = 99; + if(H5Dwrite(did3, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wbuf) < 0) + FAIL_STACK_ERROR; + + /* Close dataset via did3 */ + if(H5Dclose(did3) < 0) + FAIL_STACK_ERROR; + + /* Close file via fid3 */ + if(H5Fclose(fid3) < 0) + FAIL_STACK_ERROR + + /* Enable SWMR writing */ + if(H5Fstart_swmr_write(fid1) < 0) + FAIL_STACK_ERROR; + + /* Read from dataset via did1 and verify data is correct */ + rbuf = 0; + if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rbuf) < 0) + FAIL_STACK_ERROR; + if(rbuf != wbuf) + FAIL_STACK_ERROR; + + /* Close dataset via did1 */ + if(H5Dclose(did1) < 0) + FAIL_STACK_ERROR; + + /* Close file via fid1 */ + if(H5Fclose(fid1) < 0) + FAIL_STACK_ERROR + + /* Close the property list */ + if(H5Pclose(fapl) < 0) + FAIL_STACK_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(did); + H5Dclose(did1); + H5Dclose(did2); + H5Dclose(did3); + H5Sclose(sid); + H5Pclose(dcpl); + H5Pclose(fapl); + H5Fclose(fid); + H5Fclose(fid1); + H5Fclose(fid2); + H5Fclose(fid3); + } H5E_END_TRY; + + return -1; + +} /* test_multiple_same() */ + +/**************************************************************** +** +** Tests for new public routines introduced from the SWMR project. +** +****************************************************************/ +int +main(void) +{ + int nerrors = 0; /* The # of errors */ + hid_t fapl = -1; /* File access property list ID */ + char *driver = NULL; /* VFD string (from env variable) */ + char *lock_env_var = NULL; /* file locking env var pointer */ + hbool_t use_file_locking; /* read from env var */ + + /* Skip this test if SWMR I/O is not supported for the VFD specified + * by the environment variable. + */ + driver = HDgetenv("HDF5_DRIVER"); + if(!H5FD_supports_swmr_test(driver)) { + printf("This VFD does not support SWMR I/O\n"); + return EXIT_SUCCESS; + } /* end if */ + + /* Check the environment variable that determines if we care + * about file locking. File locking should be used unless explicitly + * disabled. 
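The environment check that follows also underlies test_file_lock_env_var() earlier in this file, which sets HDF5_USE_FILE_LOCKING to "FALSE" with HDsetenv(); because setenv() affects every later open in the same process, that test has to run last (see the comment further down). A minimal helper mirroring this logic might look like the following sketch; the function name is made up and it is not part of the patch.

#include <stdlib.h>
#include <string.h>

/* Returns 1 if HDF5 file locking should be exercised, 0 if it has been
 * disabled via the environment; optionally forces it off first.  Note
 * that setenv() changes the variable for the rest of the process. */
static int
file_locking_enabled(int force_off)
{
    const char *v;

    if(force_off)
        setenv("HDF5_USE_FILE_LOCKING", "FALSE", 1);

    v = getenv("HDF5_USE_FILE_LOCKING");
    return (v != NULL && strcmp(v, "FALSE") == 0) ? 0 : 1;
}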
+ */ + lock_env_var = HDgetenv("HDF5_USE_FILE_LOCKING"); + if(lock_env_var && !HDstrcmp(lock_env_var, "FALSE")) + use_file_locking = FALSE; + else + use_file_locking = TRUE; + + /* Set up */ + h5_reset(); + + /* Get file access property list */ + fapl = h5_fileaccess(); + +#ifdef OUT + nerrors += test_bug_refresh(fapl); +#endif + nerrors += test_refresh_concur(fapl, TRUE); + nerrors += test_refresh_concur(fapl, FALSE); + nerrors += test_multiple_same(fapl, TRUE); + nerrors += test_multiple_same(fapl, FALSE); + + /* Tests on H5Pget/set_metadata_read_attempts() and H5Fget_metadata_read_retry_info() */ + nerrors += test_metadata_read_attempts(fapl); + nerrors += test_metadata_read_retry_info(fapl); + + /* Tests on H5Fstart_swmr_write() */ + /* + * Modify the following routines to test for files: + * H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format) + * --both result in v3 superblock and latest version suppport + */ + nerrors += test_start_swmr_write(fapl, TRUE); + nerrors += test_start_swmr_write(fapl, FALSE); + nerrors += test_err_start_swmr_write(fapl, TRUE); + nerrors += test_err_start_swmr_write(fapl, FALSE); + nerrors += test_start_swmr_write_concur(fapl, TRUE); + nerrors += test_start_swmr_write_concur(fapl, FALSE); + nerrors += test_start_swmr_write_stress_ohdr(fapl); + + /* Tests for H5Pget/set_object_flush_cb() */ + nerrors += test_object_flush_cb(fapl); + + /* Tests on H5Pget/set_append_flush() */ + nerrors += test_append_flush_generic(); + nerrors += test_append_flush_dataset_chunked(fapl); + nerrors += test_append_flush_dataset_fixed(fapl); + nerrors += test_append_flush_dataset_multiple(fapl); + + if(use_file_locking) { + /* + * Tests for: + * file open flags--single process access + * file open flags--concurrent access + */ + nerrors += test_file_lock_same(fapl); + nerrors += test_file_lock_concur(fapl); + /* + * Tests for: + * file open flags+SWMR flags--single process access + * file open flags+SWMR flags--concurrent access + * + * Modify the following 2 routines to test for files: + * H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format) + * --both result in v3 superblock and latest version suppport + */ + nerrors += test_file_lock_swmr_same(fapl); + nerrors += test_file_lock_swmr_concur(fapl); + } /* end if */ + + /* Tests SWMR VFD compatibility flag. + * Only needs to run when the VFD is the default (sec2). + */ + if(NULL == driver || !HDstrcmp(driver, "") || !HDstrcmp(driver, "sec2")) + nerrors += test_swmr_vfd_flag(); + + /* This test changes the HDF5_USE_FILE_LOCKING environment variable + * so it should be run last. + */ + nerrors += test_file_lock_env_var(fapl); + + if(nerrors) + goto error; + + printf("All tests passed.\n"); + + h5_cleanup(FILENAME, fapl); + + return EXIT_SUCCESS; + +error: + nerrors = MAX(1, nerrors); + printf("***** %d SWMR TEST%s FAILED! *****\n", + nerrors, 1 == nerrors ? "" : "S"); + return EXIT_FAILURE; + +} /* end main() */ + diff --git a/test/tfile.c b/test/tfile.c index d6cb427..82905d5 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -146,6 +146,11 @@ test_obj_count_and_id(hid_t, hid_t, hid_t, hid_t, hid_t, hid_t); static void check_file_id(hid_t, hid_t); +/* Helper routine used by test_rw_noupdate() */ +static int cal_chksum(const char *file, uint32_t *chksum); + +static void test_rw_noupdate(void); + /**************************************************************** ** ** test_file_create(): Low-level file creation I/O test routine. 
@@ -2731,93 +2736,101 @@ test_cached_stab_info(void) CHECK(ret, FAIL, "H5Fclose"); } /* end test_cached_stab_info() */ +/* + * To calculate the checksum for a file. + * This is a helper routine for test_rw_noupdate(). + */ +static int +cal_chksum(const char *file, uint32_t *chksum) +{ + int curr_num_errs = GetTestNumErrs(); /* Retrieve the current # of errors */ + int fdes = -1; /* File descriptor */ + void *file_data = NULL; /* Copy of file data */ + ssize_t bytes_read; /* # of bytes read */ + h5_stat_t sb; /* Stat buffer for file */ + herr_t ret; /* Generic return value */ + + /* Open the file */ + fdes = HDopen(file, O_RDONLY, 0); + CHECK(fdes, FAIL, "HDopen"); + + /* Retrieve the file's size */ + ret = HDfstat(fdes, &sb); + CHECK(fdes, FAIL, "HDfstat"); + + /* Allocate space for the file data */ + file_data = HDmalloc((size_t)sb.st_size); + CHECK(file_data, NULL, "HDmalloc"); + + if(file_data) { + /* Read file's data into memory */ + bytes_read = HDread(fdes, file_data, (size_t)sb.st_size); + CHECK(bytes_read == sb.st_size, FALSE, "HDmalloc"); + + /* Calculate checksum */ + *chksum = H5_checksum_lookup3(file_data, sizeof(file_data), 0); + + /* Free memory */ + HDfree(file_data); + } + + /* Close the file */ + ret = HDclose(fdes); + CHECK(ret, FAIL, "HDclose"); + + return((GetTestNumErrs() == curr_num_errs) ? 0 : -1); +} /* cal_chksum() */ + /**************************************************************** ** ** test_rw_noupdate(): low-level file test routine. ** This test checks to ensure that opening and closing a file ** with read/write permissions does not write anything to the ** file if the file does not change. +** Due to the implementation of file locking (status_flags in +** the superblock is used), this test is changed to use checksum +** instead of timestamp to verify the file is not changed. ** -** Programmer: Mike McGreevy -** mamcgree@hdfgroup.org -** June 29, 2009 +** Programmer: Vailin Choi; July 2013 ** *****************************************************************/ static void test_rw_noupdate(void) { - int fd; /* File Descriptor */ - h5_stat_t sb1, sb2; /* Info from 'stat' call */ - double diff; /* Difference in modification times */ herr_t ret; /* Generic return value */ + hid_t fid; /* File ID */ + uint32_t chksum1, chksum2; /* Checksum value */ /* Output message about test being performed */ MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n")); - /* First make sure the stat function behaves as we expect - the modification time - * is the time that the file was modified last time. 
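cal_chksum() above relies on the library's internal H5_checksum_lookup3(); the same before/after comparison can be reproduced stand-alone with any stable hash over the file contents. The sketch below uses FNV-1a purely as a stand-in and is not part of the patch; the important detail is that the number of bytes hashed is the file size that was actually read, not the size of a pointer variable.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Stand-alone sketch of a "file did not change" check: read the whole
 * file and checksum every byte of it.  FNV-1a is a stand-in; the test
 * above uses HDF5's internal lookup3 checksum instead. */
static int
file_checksum(const char *name, uint32_t *chksum)
{
    FILE          *fp;
    long           size;
    unsigned char *data;
    uint32_t       hash = 2166136261u;   /* FNV-1a offset basis */
    long           i;

    if(NULL == (fp = fopen(name, "rb")))
        return -1;
    if(fseek(fp, 0, SEEK_END) != 0 || (size = ftell(fp)) < 0) {
        fclose(fp);
        return -1;
    }
    rewind(fp);

    if(NULL == (data = (unsigned char *)malloc((size_t)size))) {
        fclose(fp);
        return -1;
    }
    if(fread(data, 1, (size_t)size, fp) != (size_t)size) {
        free(data);
        fclose(fp);
        return -1;
    }
    fclose(fp);

    /* Hash every byte that was read, i.e. the full file size */
    for(i = 0; i < size; i++) {
        hash ^= data[i];
        hash *= 16777619u;
    }
    free(data);

    *chksum = hash;
    return 0;
}

test_rw_noupdate() then only has to compare the two values obtained before and after the read/write open and close.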
*/ - fd = HDopen(SFILE1, O_RDWR | O_CREAT | O_TRUNC, 0666); - CHECK(fd, FAIL, "HDopen"); - ret = HDclose(fd); - CHECK(ret, FAIL, "HDclose"); - - /* Determine File's Initial Timestamp */ - ret = HDstat(SFILE1, &sb1); - VERIFY(ret, 0, "HDstat"); - - /* Wait for 2 seconds */ - /* (This ensures a system time difference between the two file accesses) */ - HDsleep(2); - - fd = HDopen(SFILE1, O_RDWR, 0666); - CHECK(fd, FAIL, "HDopen"); - ret = HDclose(fd); - CHECK(ret, FAIL, "HDclose"); - - /* Determine File's New Timestamp */ - ret = HDstat(SFILE1, &sb2); - VERIFY(ret, 0, "HDstat"); - - /* Get difference between timestamps */ - diff = HDdifftime(sb2.st_mtime, sb1.st_mtime); + /* Create and Close a HDF5 File */ + fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fcreate"); - /* Check That Timestamps Are Equal */ - if(diff > (double)0.0F) { - /* Output message about test being performed */ - MESSAGE(1, ("Testing to verify that nothing is written if nothing is changed: This test is skipped on this system because the modification time from stat is the same as the last access time.\n")); - } /* end if */ - else { - hid_t file_id; /* HDF5 File ID */ + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); - /* Create and Close a HDF5 File */ - file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fcreate"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); + /* Calculate checksum for the file */ + ret = cal_chksum(FILE1, &chksum1); + CHECK(ret, FAIL, "HDopen"); - /* Determine File's Initial Timestamp */ - ret = HDstat(FILE1, &sb1); - VERIFY(ret, 0, "HDfstat"); + /* Open and close File With Read/Write Permission */ + fid = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); + CHECK(fid, FAIL, "H5Fopen"); - /* Wait for 2 seconds */ - /* (This ensures a system time difference between the two file accesses) */ - HDsleep(2); + /* Close the file */ + ret = H5Fclose(fid); + CHECK(ret, FAIL, "H5Fclose"); - /* Open and Close File With Read/Write Permission */ - file_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT); - CHECK(file_id, FAIL, "H5Fopen"); - ret = H5Fclose(file_id); - CHECK(ret, FAIL, "H5Fclose"); + /* Calculate checksum for the file */ + ret = cal_chksum(FILE1, &chksum2); + CHECK(ret, FAIL, "HDopen"); - /* Determine File's New Timestamp */ - ret = HDstat(FILE1, &sb2); - VERIFY(ret, 0, "HDstat"); + /* The two checksums are the same, i.e. 
the file is not changed */ + VERIFY(chksum1, chksum2, "Checksum"); - /* Ensure That Timestamps Are Equal */ - diff = HDdifftime(sb2.st_mtime, sb1.st_mtime); - ret = (diff > (double)0.0F); - VERIFY(ret, 0, "Timestamp"); - } /* end else */ } /* end test_rw_noupdate() */ /**************************************************************** @@ -3809,7 +3822,7 @@ test_libver_bounds(void) /* Run the tests */ test_libver_bounds_real(H5F_LIBVER_EARLIEST, 1, H5F_LIBVER_LATEST, 2); - test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 1); + test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 2); } /* end test_libver_bounds() */ /**************************************************************** diff --git a/testpar/t_cache.c b/testpar/t_cache.c index ef89c8d..94a6e7c 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -480,7 +480,7 @@ static hbool_t setup_cache_for_test(hid_t * fid_ptr, H5C_t ** cache_ptr_ptr, int metadata_write_strategy); static void setup_rand(void); -static hbool_t take_down_cache(hid_t fid); +static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr); static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads); static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes); static hbool_t verify_total_reads(int expected_total_reads); @@ -4491,35 +4491,77 @@ setup_rand(void) * *****************************************************************************/ static hbool_t -take_down_cache(hid_t fid) +take_down_cache(hid_t fid, H5C_t * cache_ptr) { - hbool_t success = FALSE; /* will set to TRUE if appropriate. */ + hbool_t success = TRUE; /* will set to FALSE if appropriate. */ - /* close the file and delete it */ - if ( H5Fclose(fid) < 0 ) { + /* flush the file -- this should write out any remaining test + * entries in the cache. + */ + if ( ( success ) && ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) ) { + success = FALSE; nerrors++; if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC); } + } - } else if ( world_mpi_rank == world_server_mpi_rank ) { + /* Now reset the sync point done callback. Must do this as with + * the SWMR mods, the cache will do additional I/O on file close + * un-related to the test entries, and thereby corrupt our counts + * of entry writes. + */ + if ( success ) { - if ( HDremove(filenames[0]) < 0 ) { + if ( H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED ) { + success = FALSE; nerrors++; if ( verbose ) { - HDfprintf(stdout, "%d:%s: HDremove() failed.\n", + HDfprintf(stdout, + "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank, FUNC); } - } else { + } + + + } - success = TRUE; + /* close the file */ + if ( ( success ) && ( H5Fclose(fid) < 0 ) ) { + + success = FALSE; + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", + world_mpi_rank, FUNC); } - } else { - success = TRUE; + } + + if ( success ) { + + if ( world_mpi_rank == world_server_mpi_rank ) { + + if ( HDremove(filenames[0]) < 0 ) { + + success = FALSE; + nerrors++; + if ( verbose ) { + HDfprintf(stdout, "%d:%s: HDremove() failed.\n", + world_mpi_rank, FUNC); + } + } + } else { + + /* verify that there have been no further writes of test + * entries during the close + */ + success = verify_total_writes(0); + + } } return(success); @@ -5576,7 +5618,7 @@ smoke_check_1(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! 
take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -5798,7 +5840,7 @@ smoke_check_2(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -6121,7 +6163,7 @@ smoke_check_3(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -6438,7 +6480,7 @@ smoke_check_4(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -6648,7 +6690,7 @@ smoke_check_5(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -6997,7 +7039,7 @@ trace_file_check(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { @@ -7325,7 +7367,7 @@ smoke_check_6(int metadata_write_strategy) if ( fid >= 0 ) { - if ( ! take_down_cache(fid) ) { + if ( ! take_down_cache(fid, cache_ptr) ) { nerrors++; if ( verbose ) { |