author     Dana Robinson <derobins@hdfgroup.org>    2016-11-28 18:01:10 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>    2016-11-28 18:01:10 (GMT)
commit     978e83fb5b5715de26ffb795ed5cabd63d729cd0 (patch)
tree       b5e47ce726741016bf6a04604973419a0f9a1063 /src
parent     2c3d02e6e5771c05e666d4ccf4a77915d0bf1e50 (diff)
parent     1921f7f4ad8cf3e7ef271183b6af897b3af87ab9 (diff)
Merge branch 'develop' into eoc_valgrind_bugfix
Diffstat (limited to 'src')
-rw-r--r--  src/H5C.c        | 112
-rw-r--r--  src/H5Cmpio.c    | 142
-rw-r--r--  src/H5Cpkg.h     |  25
-rw-r--r--  src/H5Cprivate.h |   2
-rw-r--r--  src/H5Ctag.c     |   2
5 files changed, 131 insertions, 152 deletions
diff --git a/src/H5C.c b/src/H5C.c
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -216,9 +216,6 @@ H5FL_DEFINE_STATIC(H5C_t);
 /* Declare a free list to manage flush dependency arrays */
 H5FL_BLK_DEFINE_STATIC(parent);
 
-/* Declare extern free list to manage the H5C_collective_write_t struct */
-H5FL_EXTERN(H5C_collective_write_t);
-
 
 /*-------------------------------------------------------------------------
@@ -369,6 +366,7 @@ H5C_create(size_t max_cache_size,
     cache_ptr->coll_list_size = (size_t)0;
     cache_ptr->coll_head_ptr = NULL;
     cache_ptr->coll_tail_ptr = NULL;
+    cache_ptr->coll_write_list = NULL;
 #endif /* H5_HAVE_PARALLEL */
 
     cache_ptr->cLRU_list_len = 0;
@@ -814,7 +812,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
     /* Delete the entry from the skip list on destroy */
     flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
 
-    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL) < 0)
+    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
 
 done:
@@ -3114,7 +3112,7 @@ H5C_unprotect(H5F_t * f,
             /* Delete the entry from the skip list on destroy */
             flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
 
-            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
         }
@@ -3128,7 +3126,7 @@ H5C_unprotect(H5F_t * f,
             else if(test_entry_ptr != entry_ptr)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
 
-            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
         }
 #endif /* H5_HAVE_PARALLEL */
@@ -4321,7 +4319,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                 cache_ptr->entries_removed_counter = 0;
                 cache_ptr->last_entry_removed_ptr  = NULL;
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 
                 if(cache_ptr->entries_removed_counter > 1 || cache_ptr->last_entry_removed_ptr == prev_ptr)
@@ -4332,7 +4330,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                 bytes_evicted += entry_ptr->size;
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0 )
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
             }
@@ -4415,7 +4413,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
             prev_ptr = entry_ptr->prev;
 
             if ( ! (entry_ptr->is_dirty) ) {
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
             }
             /* just skip the entry if it is dirty, as we can't do
@@ -5212,7 +5210,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                      */
                     protected_entries++;
                 } else if(entry_ptr->is_pinned) {
-                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG, NULL) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
 
                     if(cache_ptr->slist_changed) {
@@ -5229,8 +5227,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                     } /* end if */
                 } /* end if */
                 else {
-                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
-                            (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
 
                     if(cache_ptr->slist_changed) {
@@ -5327,8 +5324,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                  * or three entries.
                  */
                 cache_ptr->entry_watched_for_removal = next_entry_ptr;
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
-                        (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
 
                 /* Check for the next entry getting removed */
@@ -5616,7 +5612,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
                         protected_entries++;
                     } /* end if */
                     else {
-                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG), NULL) < 0)
+                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
                             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
                         if(cache_ptr->slist_changed) {
@@ -5724,11 +5720,7 @@ done:
  */
 herr_t
 H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
-    unsigned flags, H5SL_t
-#ifndef H5_HAVE_PARALLEL
-    H5_ATTR_UNUSED
-#endif /* NDEBUG */
-    *collective_write_list)
+    unsigned flags)
 {
     H5C_t *     cache_ptr;              /* Cache for file */
     hbool_t     destroy;                /* external flag */
@@ -5739,6 +5731,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
     hbool_t     during_flush;           /* external flag */
     hbool_t     write_entry;            /* internal flag */
     hbool_t     destroy_entry;          /* internal flag */
+    hbool_t     generate_image;         /* internal flag */
     hbool_t     was_dirty;
     haddr_t     entry_addr = HADDR_UNDEF;
     herr_t      ret_value = SUCCEED;    /* Return value */
@@ -5759,6 +5752,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
     take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
     del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
     during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
+    generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
 
     /* Set the flag for destroying the entry, based on the 'take ownership'
      * and 'destroy' flags
      */
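The flags word decoded above packs one behavior per bit. A minimal stand-alone sketch of the same encode/decode pattern, using only the three flag values that appear verbatim in the H5Cprivate.h hunk further down; the main() wrapper and the plain C99 bool in place of HDF5's hbool_t are illustrative, not HDF5 code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Values copied from the src/H5Cprivate.h hunk below */
    #define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x08000
    #define H5C__DURING_FLUSH_FLAG              0x10000
    #define H5C__GENERATE_IMAGE_FLAG            0x20000

    int main(void)
    {
        /* Callers OR together the behaviors they want... */
        unsigned flags = H5C__DURING_FLUSH_FLAG | H5C__GENERATE_IMAGE_FLAG;

        /* ...and the callee decodes each bit independently, exactly as
         * H5C__flush_single_entry() does at its top. */
        bool during_flush   = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
        bool generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);

        printf("during_flush=%d generate_image=%d\n", during_flush, generate_image);
        return 0;
    }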
@@ -5818,29 +5812,13 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
     entry_ptr->flush_in_progress = TRUE;
     entry_ptr->flush_marker = FALSE;
 
-    /* serialize the entry if necessary, and then write it to disk. */
-    if(write_entry) {
-
-        /* The entry is dirty, and we are doing either a flush,
-         * or a flush destroy.  In either case, serialize the
-         * entry and write it to disk.
-         *
-         * Note that this may cause the entry to be re-sized and/or
-         * moved in the cache.
-         *
-         * As we will not update the metadata cache's data structures
-         * until we we finish the write, we must touch up these
-         * data structures for size and location changes even if we
-         * are about to delete the entry from the cache (i.e. on a
-         * flush destroy).
-         */
+    /* The entry is dirty, and we are doing a flush, a flush destroy or have
+     * been requested to generate an image.  In those cases, serialize the
+     * entry.
+     */
+    if(write_entry || generate_image) {
         HDassert(entry_ptr->is_dirty);
 
-#if H5C_DO_SANITY_CHECKS
-        if(cache_ptr->check_write_permitted && !(cache_ptr->write_permitted))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
-#endif /* H5C_DO_SANITY_CHECKS */
-
         if(NULL == entry_ptr->image_ptr) {
             if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
@@ -5854,30 +5832,27 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
             if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
         } /* end if ( ! (entry_ptr->image_up_to_date) ) */
+    } /* end if */
 
-        /* Finally, write the image to disk.
-         *
-         * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
-         * in the entry's type, we silently skip the write.  This
-         * flag should only be used in test code.
-         */
-        if(((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0) {
-#ifdef H5_HAVE_PARALLEL
-            if(collective_write_list) {
-                H5C_collective_write_t *item;
-
-                if(NULL == (item = (H5C_collective_write_t *)H5FL_MALLOC(H5C_collective_write_t)))
-                    HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "unable to allocate skip list item")
+    /* Finally, write the image to disk.
+     *
+     * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
+     * in the entry's type, we silently skip the write.  This
+     * flag should only be used in test code.
+     */
+    if(write_entry) {
+        HDassert(entry_ptr->is_dirty);
 
-                item->length = entry_ptr->size;
-                item->free_buf = FALSE;
-                item->buf = entry_ptr->image_ptr;
-                item->offset = entry_ptr->addr;
+#if H5C_DO_SANITY_CHECKS
+        if(cache_ptr->check_write_permitted && !(cache_ptr->write_permitted))
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
+#endif /* H5C_DO_SANITY_CHECKS */
 
-                if(H5SL_insert(collective_write_list, item, &item->offset) < 0) {
-                    H5MM_free(item);
+        if(((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0) {
+#ifdef H5_HAVE_PARALLEL
+            if(cache_ptr->coll_write_list) {
+                if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
-                } /* end if */
             } /* end if */
             else
 #endif /* H5_HAVE_PARALLEL */
@@ -6699,7 +6674,7 @@ H5C_make_space_in_cache(H5F_t * f,
                 } /* end if */
 #endif
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 
                 if ( ( cache_ptr->entries_removed_counter > 1 ) ||
@@ -6716,7 +6691,7 @@ H5C_make_space_in_cache(H5F_t * f,
                 cache_ptr->entries_scanned_to_make_space++;
 #endif /* H5C_COLLECT_CACHE_STATS */
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
             } else {
                 /* We have enough space so don't flush clean entry. */
@@ -6857,7 +6832,7 @@ H5C_make_space_in_cache(H5F_t * f,
 #ifdef H5_HAVE_PARALLEL
             if(!(entry_ptr->coll_access)) {
 #endif /* H5_HAVE_PARALLEL */
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 #ifdef H5_HAVE_PARALLEL
             } /* end if */
@@ -7600,6 +7575,15 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
  *
  * Purpose:     Serialize an entry and generate its image.
  *
+ * Note:        This may cause the entry to be re-sized and/or moved in
+ *              the cache.
+ *
+ *              As we will not update the metadata cache's data structures
+ *              until we we finish the write, we must touch up these
+ *              data structures for size and location changes even if we
+ *              are about to delete the entry from the cache (i.e. on a
+ *              flush destroy).
+ *
  * Return:      Non-negative on success/Negative on failure
  *
  * Programmer:  Mohamad Chaarawi
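The H5C.c restructuring above splits serialization from writing. A sketch of just that control flow, with hypothetical stand-in types and functions in place of H5C_cache_entry_t, H5C__generate_image() and the VFD write; only the ordering of the two tests mirrors the diff:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins; not HDF5 types */
    struct entry { bool is_dirty; bool image_up_to_date; };

    static int serialize(struct entry *e)   { e->image_up_to_date = true; return 0; }
    static int write_image(struct entry *e) { (void)e; return 0; }

    static int flush_single_entry(struct entry *e, bool write_entry, bool generate_image)
    {
        /* Serialize when we will write OR were merely asked for an image:
         * the new H5C__GENERATE_IMAGE_FLAG path lets a clearing process
         * bring the entry's image up to date without touching the file. */
        if(write_entry || generate_image)
            if(!e->image_up_to_date && serialize(e) < 0)
                return -1;

        /* Only the write_entry path actually hits the file. */
        if(write_entry)
            return write_image(e);

        return 0;
    }

    int main(void)
    {
        struct entry e = { true, false };
        printf("clear+image: %d, up_to_date=%d\n",
               flush_single_entry(&e, false, true), e.image_up_to_date);
        return 0;
    }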
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 8a28f88..ab94879 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -64,9 +64,7 @@
 /********************/
 /* Local Prototypes */
 /********************/
-static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id,
-    H5SL_t *collective_write_list);
-static herr_t H5C__collective_write_free(void *_item, void *key, void *op_data);
+static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
 
 
 /*********************/
@@ -83,9 +81,6 @@ static herr_t H5C__collective_write_free(void *_item, void *key, void *op_data);
 /* Local Variables */
 /*******************/
 
-/* Declare a free list to manage the H5C_collective_write_t struct */
-H5FL_DEFINE(H5C_collective_write_t);
-
 
 /*-------------------------------------------------------------------------
@@ -230,7 +225,6 @@ H5C_apply_candidate_list(H5F_t * f,
     H5C_cache_entry_t * entry_ptr = NULL;
     H5C_cache_entry_t * flush_ptr = NULL;
     H5C_cache_entry_t * delayed_ptr = NULL;
-    H5SL_t            * collective_write_list = NULL;
 #if H5C_DO_SANITY_CHECKS
     haddr_t             last_addr;
 #endif /* H5C_DO_SANITY_CHECKS */
@@ -264,8 +258,11 @@ H5C_apply_candidate_list(H5F_t * f,
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
     if(f->coll_md_write) {
+        /* Sanity check */
+        HDassert(NULL == cache_ptr->coll_write_list);
+
         /* Create skip list of entries for collective write */
-        if(NULL == (collective_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
+        if(NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
             HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries")
     } /* end if */
@@ -454,13 +451,37 @@ H5C_apply_candidate_list(H5F_t * f,
                     (long long)clear_ptr->addr);
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
-            /* No need to check for the next entry in the scan being
-             * removed from the cache, as this call to H5C__flush_single_entry()
-             * will not call either the pre_serialize or serialize callbacks.
+            /* reset entries_removed_counter and
+             * last_entry_removed_ptr prior to the call to
+             * H5C__flush_single_entry() so that we can spot
+             * unexpected removals of entries from the cache,
+             * and set the restart_scan flag if proceeding
+             * would be likely to cause us to scan an entry
+             * that is no longer in the cache.
+             *
+             * Note that as of this writing (April 2015) this
+             * case cannot occur in the parallel case.  However
+             * Quincey is making noises about changing this, hence
+             * the insertion of this test.
+             *
+             * Note also that there is no test code to verify
+             * that this code actually works (although similar code
+             * in the serial version exists and is tested).
+             *
+             * Implementing a test will likely require implementing
+             * flush op like facilities in the parallel tests.  At
+             * a guess this will not be terribly painful, but it
+             * will take a bit of time.
              */
+            cache_ptr->entries_removed_counter = 0;
+            cache_ptr->last_entry_removed_ptr  = NULL;
 
-            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
+
+            if((cache_ptr->entries_removed_counter > 1) ||
+                    (cache_ptr->last_entry_removed_ptr == entry_ptr))
+                restart_scan = TRUE;
         } /* end if */
 
         /* Else, if this process needs to flush this entry. */
@@ -505,14 +526,12 @@ H5C_apply_candidate_list(H5F_t * f,
             cache_ptr->last_entry_removed_ptr  = NULL;
 
             /* Add this entry to the list of entries to collectively write */
-            if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
-            if ( ( cache_ptr->entries_removed_counter > 1 ) ||
-                 ( cache_ptr->last_entry_removed_ptr == entry_ptr ) )
-
+            if((cache_ptr->entries_removed_counter > 1) ||
+                    (cache_ptr->last_entry_removed_ptr == entry_ptr))
                 restart_scan = TRUE;
-
         } /* end else-if */
 
         /* Otherwise, no action to be taken on this entry.  Grab the next. */
@@ -663,7 +682,7 @@ H5C_apply_candidate_list(H5F_t * f,
                     (long long)clear_ptr->addr);
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
-                if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
             } /* end else-if */
@@ -680,7 +699,7 @@ H5C_apply_candidate_list(H5F_t * f,
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
                 /* Add this entry to the list of entries to collectively write */
-                if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
             } /* end else-if */
         } /* end if */
@@ -714,14 +733,14 @@ H5C_apply_candidate_list(H5F_t * f,
 
     if (delayed_ptr) {
         if (delayed_ptr->clear_on_unprotect) {
-            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
             entry_ptr->clear_on_unprotect = FALSE;
             entries_cleared++;
         } else if (delayed_ptr->flush_immediately) {
             /* Add this entry to the list of entries to collectively write */
-            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
 
             entry_ptr->flush_immediately = FALSE;
@@ -734,10 +753,11 @@ H5C_apply_candidate_list(H5F_t * f,
 
     /* If we've deferred writing to do it collectively, take care of that now */
     if(f->coll_md_write) {
-        HDassert(collective_write_list);
+        /* Sanity check */
+        HDassert(cache_ptr->coll_write_list);
 
         /* Write collective list */
-        if(H5C__collective_write(f, dxpl_id, collective_write_list) < 0)
+        if(H5C__collective_write(f, dxpl_id) < 0)
             HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
     } /* end if */
@@ -760,9 +780,11 @@ done:
     if(candidate_assignment_table != NULL)
         candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
 
-    if(collective_write_list)
-        if(H5SL_destroy(collective_write_list, H5C__collective_write_free, NULL) < 0)
+    if(cache_ptr->coll_write_list) {
+        if(H5SL_close(cache_ptr->coll_write_list) < 0)
             HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
+        cache_ptr->coll_write_list = NULL;
+    } /* end if */
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5C_apply_candidate_list() */
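H5C_apply_candidate_list() now keys the deferred-write list on the entry's file address (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) in the H5C.c hunk above), so walking the list yields entries in increasing file offset, which is the monotonically nondecreasing displacement order an MPI file view requires. A stand-alone sketch of that invariant, with a sorted array standing in for the H5SL_TYPE_HADDR skip list:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Minimal stand-in for the H5C_cache_entry_t fields the write path uses */
    struct entry { uint64_t addr; size_t size; };

    /* Order by file address, as the H5SL_TYPE_HADDR skip list does */
    static int cmp_addr(const void *a, const void *b)
    {
        const struct entry *ea = a, *eb = b;
        return (ea->addr > eb->addr) - (ea->addr < eb->addr);
    }

    int main(void)
    {
        struct entry list[] = { { 8192, 512 }, { 1024, 256 }, { 4096, 128 } };
        size_t n = sizeof(list) / sizeof(list[0]);

        qsort(list, n, sizeof(list[0]), cmp_addr);

        /* Traversal now visits monotonically increasing file offsets,
         * the order the hindexed file type below is built in. */
        for(size_t i = 0; i < n; i++)
            printf("addr=%llu size=%zu\n",
                   (unsigned long long)list[i].addr, list[i].size);
        return 0;
    }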
@@ -1001,7 +1023,7 @@ done:
  *
  *              Note that unlike H5C_apply_candidate_list(),
  *              H5C_mark_entries_as_clean() makes all its calls to
- *              H6C_flush_single_entry() with the
+ *              H5C__flush_single_entry() with the
  *              H5C__FLUSH_CLEAR_ONLY_FLAG set.  As a result,
  *              the pre_serialize() and serialize calls are not made.
  *
@@ -1160,7 +1182,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
      *
      * Note that unlike H5C_apply_candidate_list(),
      * H5C_mark_entries_as_clean() makes all its calls to
-     * H6C_flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG
+     * H5C__flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG
      * set.  As a result, the pre_serialize() and serialize calls are
      * not made.
      *
@@ -1195,7 +1217,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
             entry_ptr = entry_ptr->prev;
             entries_cleared++;
 
-            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
         } else {
@@ -1223,7 +1245,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
             entry_ptr = entry_ptr->next;
             entries_cleared++;
 
-            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0 )
+            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
         } else {
@@ -1331,8 +1353,9 @@ done:
 *-------------------------------------------------------------------------
 */
 static herr_t
-H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
+H5C__collective_write(H5F_t *f, hid_t dxpl_id)
 {
+    H5AC_t              *cache_ptr;
     H5P_genplist_t      *plist = NULL;
     H5FD_mpio_xfer_t    orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
     int                 count;
@@ -1348,6 +1371,12 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
 
     FUNC_ENTER_STATIC
 
+    /* Sanity checks */
+    HDassert(f != NULL);
+    cache_ptr = f->shared->cache;
+    HDassert(cache_ptr != NULL);
+    HDassert(cache_ptr->coll_write_list != NULL);
+
     /* Get original transfer mode */
     if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
         HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
@@ -1355,12 +1384,12 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
         HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
 
     /* Get number of entries in collective write list */
-    count = (int)H5SL_count(collective_write_list);
+    count = (int)H5SL_count(cache_ptr->coll_write_list);
 
     if(count > 0) {
         H5FD_mpio_xfer_t    xfer_mode = H5FD_MPIO_COLLECTIVE;
         H5SL_node_t         *node;
-        H5C_collective_write_t *item;
+        H5C_cache_entry_t   *entry_ptr;
         void                *base_buf;
         int                 i;
 
@@ -1376,27 +1405,27 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
             HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
 
         /* Fill arrays */
-        node = H5SL_first(collective_write_list);
+        node = H5SL_first(cache_ptr->coll_write_list);
         HDassert(node);
-        if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
+        if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
             HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
        /* Set up initial array position & buffer base address */
-        length_array[0] = (int)item->length;
-        base_buf = item->buf;
+        length_array[0] = (int)entry_ptr->size;
+        base_buf = entry_ptr->image_ptr;
         buf_array[0] = (MPI_Aint)0;
-        offset_array[0] = (MPI_Aint)item->offset;
+        offset_array[0] = (MPI_Aint)entry_ptr->addr;
 
         node = H5SL_next(node);
         i = 1;
         while(node) {
-            if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
+            if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
                 HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
 
             /* Set up array position */
-            length_array[i] = (int)item->length;
-            buf_array[i] = (MPI_Aint)item->buf - (MPI_Aint)base_buf;
-            offset_array[i] = (MPI_Aint)item->offset;
+            length_array[i] = (int)entry_ptr->size;
+            buf_array[i] = (MPI_Aint)entry_ptr->image_ptr - (MPI_Aint)base_buf;
+            offset_array[i] = (MPI_Aint)entry_ptr->addr;
 
             /* Advance to next node & array location */
             node = H5SL_next(node);
@@ -1469,36 +1498,5 @@ done:
     FUNC_LEAVE_NOAPI(ret_value);
 } /* end H5C__collective_write() */
 
-
-/*-------------------------------------------------------------------------
- *
- * Function:    H5C__collective_write_free
- *
- * Purpose:     Release node on collective write skiplist
- *
- * Return:      FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer:  Mohamad Chaarawi
- *              February, 2016
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__collective_write_free(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
-{
-    H5C_collective_write_t *item = (H5C_collective_write_t *)_item;
-
-    FUNC_ENTER_STATIC_NOERR
-
-    /* Sanity check */
-    HDassert(item);
-
-    if(item->free_buf)
-        item->buf = H5MM_xfree(item->buf);
-    H5FL_FREE(H5C_collective_write_t, item);
-
-    FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5C__collective_write_free() */
 #endif /* H5_HAVE_PARALLEL */
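H5C__collective_write() turns the filled arrays into two MPI hindexed types, one describing the entry images in memory relative to base_buf and one describing their destinations in the file, then issues a single collective write. A compressed sketch of that construction against raw MPI-IO; the real code goes through the HDF5 VFD layer and checks every return value, both omitted here, and the function is meant to be linked into an MPI program:

    #include <mpi.h>

    /* Sketch only: fh, count, base_buf and the three arrays are assumed to
     * be filled in as in the "Fill arrays" loop above. */
    static void collective_write_sketch(MPI_File fh, int count, void *base_buf,
        int *length_array, MPI_Aint *buf_array, MPI_Aint *offset_array)
    {
        MPI_Datatype btype, ftype;   /* memory type, file type */
        MPI_Status   status;

        /* Same block lengths, two sets of displacements: one into memory,
         * one into the file. */
        MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype);
        MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype);
        MPI_Type_commit(&btype);
        MPI_Type_commit(&ftype);

        /* Every rank must make the collective calls, even with count == 0. */
        MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
        MPI_File_write_at_all(fh, 0, base_buf, 1, btype, &status);

        MPI_Type_free(&btype);
        MPI_Type_free(&ftype);
    }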
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 78ae930..16efb5c 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -3430,7 +3430,7 @@ typedef struct H5C_tag_info_t {
  *              entry is removed from the cache by any means (eviction,
  *              expungement, or take ownership at this point in time).
  *              Functions that perform scans on lists may set this field
- *              to zero prior to calling H5C_flush_single_entry().
+ *              to zero prior to calling H5C__flush_single_entry().
  *              Unexpected changes to the counter indicate that an entry
  *              was removed from the cache as a side effect of the flush.
  *
@@ -3438,7 +3438,7 @@ typedef struct H5C_tag_info_t {
  *              which contained the last entry to be removed from the cache,
  *              or NULL if there either is no such entry, or if a function
  *              performing a scan of a list has set this field to NULL prior
- *              to calling H5C_flush_single_entry().
+ *              to calling H5C__flush_single_entry().
  *
  *              WARNING!!! This field must NEVER be dereferenced.  It is
  *              maintained to allow functions that perform scans of lists
@@ -4069,17 +4069,17 @@ typedef struct H5C_tag_info_t {
  *              obtain estimates of how frequently these restarts occur.
  *
  * slist_scan_restarts: Number of times a scan of the slist (that contains
- *              calls to H5C_flush_single_entry()) has been restarted to
+ *              calls to H5C__flush_single_entry()) has been restarted to
  *              avoid potential issues with change of status of the next
  *              entry in the scan.
  *
  * LRU_scan_restarts: Number of times a scan of the LRU list (that contains
- *              calls to H5C_flush_single_entry()) has been restarted to
+ *              calls to H5C__flush_single_entry()) has been restarted to
  *              avoid potential issues with change of status of the next
  *              entry in the scan.
  *
  * hash_bucket_scan_restarts: Number of times a scan of a hash bucket list
- *              (that contains calls to H5C_flush_single_entry()) has been
+ *              (that contains calls to H5C__flush_single_entry()) has been
  *              restarted to avoid potential issues with change of status
  *              of the next entry in the scan.
  *
@@ -4205,10 +4205,14 @@ struct H5C_t {
     H5C_cache_entry_t * dLRU_tail_ptr;
 
 #ifdef H5_HAVE_PARALLEL
+    /* Fields for collective metadata reads */
     int32_t             coll_list_len;
     size_t              coll_list_size;
     H5C_cache_entry_t * coll_head_ptr;
     H5C_cache_entry_t * coll_tail_ptr;
+
+    /* Fields for collective metadata writes */
+    H5SL_t *            coll_write_list;
 #endif /* H5_HAVE_PARALLEL */
 
     /* Fields for automatic cache size adjustment */
@@ -4310,15 +4314,6 @@ struct H5C_t {
     char                prefix[H5C__PREFIX_LEN];
 };
 
-#ifdef H5_HAVE_PARALLEL
-typedef struct H5C_collective_write_t {
-    size_t length;
-    hbool_t free_buf;
-    void *buf;
-    haddr_t offset;
-} H5C_collective_write_t;
-#endif /* H5_HAVE_PARALLEL */
-
 /* Define typedef for tagged cache entry iteration callbacks */
 typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
 
@@ -4337,7 +4332,7 @@ H5_DLLVAR const H5C_class_t H5C__epoch_marker_class;
 
 /* General routines */
 H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
-    H5C_cache_entry_t *entry_ptr, unsigned flags, H5SL_t *collective_write_list);
+    H5C_cache_entry_t *entry_ptr, unsigned flags);
 H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
 H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag,
     hbool_t match_global, H5C_tag_iter_cb_t cb, void *cb_ctx);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index c03e2e7..923083f 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -183,6 +183,7 @@
  *      H5C__FLUSH_MARKED_ENTRIES_FLAG
  *      H5C__TAKE_OWNERSHIP_FLAG
  *      H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
+ *      H5C__GENERATE_IMAGE_FLAG
  */
 #define H5C__NO_FLAGS_SET                       0x00000
 #define H5C__SET_FLUSH_MARKER_FLAG              0x00001
@@ -202,6 +203,7 @@
 #define H5C__EVICT_ALLOW_LAST_PINS_FLAG         0x04000
 #define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG     0x08000
 #define H5C__DURING_FLUSH_FLAG                  0x10000 /* Set when the entire cache is being flushed */
+#define H5C__GENERATE_IMAGE_FLAG                0x20000 /* Set during parallel I/O */
 
 /* Debugging/sanity checking/statistics settings */
 #ifndef NDEBUG
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index 33f7c22..6d5b454 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -467,7 +467,7 @@ H5C__evict_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx)
         ctx->pinned_entries_need_evicted = TRUE;
     else {
         /* Evict the Entry */
-        if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+        if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, H5_ITER_ERROR, "Entry eviction failed.")
 
         ctx->evicted_entries_last_pass = TRUE;
     } /* end else */
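A closing note on the new flag: H5Cprivate.h assigns each flag bit by hand, so a value such as H5C__GENERATE_IMAGE_FLAG must avoid every bit already taken. A C11 sketch of a compile-time guard for that; the static_asserts are an assumption of this note, not something the HDF5 headers contain:

    #include <assert.h>

    /* Values from the src/H5Cprivate.h hunk above */
    #define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x08000
    #define H5C__DURING_FLUSH_FLAG              0x10000
    #define H5C__GENERATE_IMAGE_FLAG            0x20000  /* new in this merge */

    /* A flag is a single bit iff (x & (x - 1)) == 0... */
    static_assert((H5C__GENERATE_IMAGE_FLAG & (H5C__GENERATE_IMAGE_FLAG - 1)) == 0,
                  "H5C__GENERATE_IMAGE_FLAG must be a single bit");
    /* ...and it must not overlap any neighbor in the flag space. */
    static_assert((H5C__GENERATE_IMAGE_FLAG &
                   (H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG | H5C__DURING_FLUSH_FLAG)) == 0,
                  "H5C__GENERATE_IMAGE_FLAG collides with an existing flag");

    int main(void) { return 0; }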