author    | Larry Knox <lrknox@hdfgroup.org> | 2018-07-18 22:06:59 (GMT)
committer | Larry Knox <lrknox@hdfgroup.org> | 2018-07-18 22:06:59 (GMT)
commit    | 3c6654921000a8659276f1fcb32dd0ada1353558 (patch)
tree      | 5255646f9764b9d04974f516b2134d3af6dcccab /src
parent    | 892252106a1517a4cd715fa6257201e89fb25564 (diff)
parent    | d95f36686b2a68bcd848d68bcb81be520d01f037 (diff)
Merge branch 'develop' of https://bitbucket.hdfgroup.org/scm/~lrknox/hdf5_lrk into develop
Diffstat (limited to 'src')
83 files changed, 481 insertions, 356 deletions
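Most of the src changes below are spelling corrections in comments and error strings, but the diff also carries a few functional fixes: zero-chunk-dimension checks before the divisions in H5Dbtree.c, H5Dchunk.c, and H5Dint.c, skipping the single-chunk info cache update for parallel filtered writes in H5Dchunk.c, and a rework of H5D__mpio_array_gatherv() in H5Dmpio.c that drops the nprocs parameter, derives the rank and communicator size from the communicator itself, and allocates receive-side buffers only on the ranks that actually receive data. The sketch below is a standalone, simplified illustration of that byte-wise gather pattern, not the library function: gather_entries() is a hypothetical name, only the gather-to-root path is shown, and error handling is reduced to a bare return code (a real implementation would report allocation failure collectively).

#include <mpi.h>
#include <stdlib.h>

/* Gather 'local_count' fixed-size entries from every rank onto 'root'.
 * On the root, *out receives the concatenated entries and *out_count the
 * total entry count; all other ranks get NULL / 0.
 */
static int
gather_entries(void *local, int local_count, size_t entry_size,
               int root, MPI_Comm comm, void **out, int *out_count)
{
    int   rank, size, total = 0;
    int  *counts = NULL, *displs = NULL;
    void *gathered = NULL;

    /* The communicator itself provides rank and size; no nprocs argument. */
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    /* Total number of entries contributed across all ranks. */
    MPI_Allreduce(&local_count, &total, 1, MPI_INT, MPI_SUM, comm);

    *out = NULL;
    *out_count = 0;
    if (total == 0)     /* no rank is contributing anything */
        return 0;

    /* Only the receiving rank needs the result buffer and the
     * per-rank receive counts / displacements. */
    if (rank == root) {
        gathered = malloc((size_t)total * entry_size);
        counts   = malloc((size_t)size * sizeof(int));
        displs   = malloc((size_t)size * sizeof(int));
        if (!gathered || !counts || !displs) {
            free(gathered); free(counts); free(displs);
            return -1;
        }
    }

    /* Tell the root how many entries each rank is contributing. */
    MPI_Gather(&local_count, 1, MPI_INT, counts, 1, MPI_INT, root, comm);

    if (rank == root) {
        /* The payload travels as raw bytes, so scale the counts and
         * build byte displacements. */
        for (int i = 0; i < size; i++)
            counts[i] *= (int)entry_size;
        displs[0] = 0;
        for (int i = 1; i < size; i++)
            displs[i] = displs[i - 1] + counts[i - 1];
    }

    MPI_Gatherv(local, local_count * (int)entry_size, MPI_BYTE,
                gathered, counts, displs, MPI_BYTE, root, comm);

    if (rank == root) {
        *out = gathered;
        *out_count = total;
    }
    free(counts);
    free(displs);
    return 0;
}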
@@ -470,7 +470,7 @@ H5dont_atexit(void) * library, which are supposed to free any unused memory they have * allocated. * - * These should probably be registered dynamicly in a linked list of + * These should probably be registered dynamically in a linked list of * functions to call, but there aren't that many right now, so we * hard-wire them... * @@ -785,7 +785,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum) HDfprintf (stderr, "%s", H5libhdf5_settings); break; default: - /* 2 or higer: continue silently */ + /* 2 or higher: continue silently */ break; } /* end switch */ @@ -318,7 +318,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size") if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t))) - HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure") + HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxiliary structure") aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC; aux_ptr->mpi_comm = mpi_comm; @@ -432,7 +432,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co done: #ifdef H5_HAVE_PARALLEL - /* if there is a failure, try to tidy up the auxilary structure */ + /* if there is a failure, try to tidy up the auxiliary structure */ if(ret_value < 0) { if(aux_ptr != NULL) { if(aux_ptr->d_slist_ptr != NULL) @@ -1878,7 +1878,7 @@ done: * from the cache, clear it, and free it without writing it to * disk. * - * This verion of the function is a complete re-write to + * This version of the function is a complete re-write to * use the new metadata cache. While there isn't all that * much difference between the old and new Purpose sections, * the original version is given below. diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c index 7671abc..b60b933 100644 --- a/src/H5ACmpio.c +++ b/src/H5ACmpio.c @@ -933,7 +933,7 @@ done: * * Purpose: Update the dirty_bytes count for a newly inserted entry. * - * If mpi_rank isnt 0, this simply means adding the size + * If mpi_rank isn't 0, this simply means adding the size * of the entry to the dirty_bytes count. * * If mpi_rank is 0, we must also add the entry to the @@ -1332,7 +1332,7 @@ done: * clean. * * This function is the main routine for handling this - * notification proceedure. It must be called + * notification procedure. It must be called * simultaniously on all processes that have the relevant * file open. To this end, it is called only during a * sync point, with a barrier prior to the call. @@ -1340,7 +1340,7 @@ done: * Note that any metadata entry writes by process 0 will * occur after the barrier and just before this call. * - * Typicaly, calls to this function will be triggered in + * Typically, calls to this function will be triggered in * one of two ways: * * 1) Dirty byte creation exceeds some user specified value. @@ -2128,7 +2128,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu /* clear collective access flag on half of the entries in the cache and mark them as independent in case they need to be - evicted later. All ranks are guranteed to mark the same entries + evicted later. All ranks are guaranteed to mark the same entries since we don't modify the order of the collectively accessed entries except through collective access. 
*/ if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0) diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h index 409d914..9bf84bf 100644 --- a/src/H5ACpkg.h +++ b/src/H5ACpkg.h @@ -189,7 +189,7 @@ H5FL_EXTERN(H5AC_aux_t); * is permitted to write to file. * * dirty_bytes_threshold: Integer field containing the dirty bytes - * generation threashold. Whenever dirty byte creation + * generation threshold. Whenever dirty byte creation * exceeds this value, the metadata cache on process 0 * broadcasts a list of the entries it has flushed since * the last broadcast (or since the beginning of execution) diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h index 654a877..a48aa69 100644 --- a/src/H5ACpublic.h +++ b/src/H5ACpublic.h @@ -39,7 +39,7 @@ extern "C" { * structure H5AC_cache_config_t * * H5AC_cache_config_t is a public structure intended for use in public APIs. - * At least in its initial incarnation, it is basicaly a copy of struct + * At least in its initial incarnation, it is basically a copy of struct * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the * dirty_bytes_threshold field. * @@ -212,7 +212,7 @@ extern "C" { * * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated * type whose value indicates whether and by which algorithm we should - * make flash increases in the size of the cache to accomodate insertion + * make flash increases in the size of the cache to accommodate insertion * of large entries and large increases in the size of a single entry. * * The addition of the flash increment mode was occasioned by performance @@ -379,7 +379,7 @@ extern "C" { * synchronize updates between caches. (See above for outline and * motivation.) * - * This value MUST be consistant across all processes accessing the + * This value MUST be consistent across all processes accessing the * file. This field is ignored unless HDF5 has been compiled for * parallel. * diff --git a/src/H5Aint.c b/src/H5Aint.c index e666e8c..55560c7 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -1152,7 +1152,7 @@ H5A__free(H5A_t *attr) HDassert(attr); - /* Free dynamicly allocated items */ + /* Free dynamically allocated items */ if(attr->shared->name) { H5MM_xfree(attr->shared->name); attr->shared->name = NULL; @@ -1235,7 +1235,7 @@ H5A__close(H5A_t *attr) /* Reference count can be 0. It only happens when H5A__create fails. */ if(attr->shared->nrefs <= 1) { - /* Free dynamicly allocated items */ + /* Free dynamically allocated items */ if(H5A__free(attr) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTRELEASE, FAIL, "can't release attribute info") diff --git a/src/H5Apkg.h b/src/H5Apkg.h index 3315cc1..af16b55 100644 --- a/src/H5Apkg.h +++ b/src/H5Apkg.h @@ -86,7 +86,7 @@ typedef struct H5A_shared_t { void *data; /* Attribute data (on a temporary basis) */ size_t data_size; /* Size of data on disk */ H5O_msg_crt_idx_t crt_idx; /* Attribute's creation index in the object header */ - unsigned nrefs; /* Ref count for times this object is refered */ + unsigned nrefs; /* Ref count for times this object is referred */ } H5A_shared_t; /* Define the main attribute structure */ @@ -769,7 +769,7 @@ H5B__insert_child(H5B_t *bt, unsigned *bt_flags, unsigned idx, * the specified type. * * On return, if LT_KEY_CHANGED is non-zero, then LT_KEY is - * the new native left key. Similarily for RT_KEY_CHANGED + * the new native left key. Similarly for RT_KEY_CHANGED * and RT_KEY. 
* * If the node splits, then MD_KEY contains the key that diff --git a/src/H5B2internal.c b/src/H5B2internal.c index 95ed769..7f6b80a 100644 --- a/src/H5B2internal.c +++ b/src/H5B2internal.c @@ -1192,7 +1192,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hbool_t *depth_decreased, if(swap_loc) idx = 0; else { - /* Count from the orginal index value again */ + /* Count from the original index value again */ n = orig_n; /* Reset "found" flag - the record may have shifted during the diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h index 270b4e6..e203b87 100644 --- a/src/H5Bprivate.h +++ b/src/H5Bprivate.h @@ -122,7 +122,7 @@ typedef struct H5B_class_t { H5B_ins_t (*insert)(H5F_t*, haddr_t, void*, hbool_t*, void*, void*, void*, hbool_t*, haddr_t*); - /* min insert uses min leaf, not new(), similarily for max insert */ + /* min insert uses min leaf, not new(), similarly for max insert */ hbool_t follow_min; hbool_t follow_max; @@ -793,8 +793,8 @@ H5C_prep_for_file_close(H5F_t *f) * close, and since the close warning is issued after all * non FSM related space allocations and just before the * first sync point on close, this call will leave the caches - * in a consistant state across the processes if they were - * consistant before. + * in a consistent state across the processes if they were + * consistent before. * * 2) Since the FSM settle routines are only invoked once during * file close, invoking them now will prevent their invocation @@ -2255,7 +2255,7 @@ H5C_protect(H5F_t * f, if(entry_ptr != NULL) { if(entry_ptr->ring != ring) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry") HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); @@ -3095,7 +3095,7 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags) * * All this is a bit awkward, but until the metadata cache entries * are contiguous, with only one dirty flag, we have to let the supplied - * functions deal with the reseting the is_dirty flag. + * functions deal with the resetting the is_dirty flag. */ if(entry_ptr->clear_on_unprotect) { /* Sanity check */ @@ -5029,7 +5029,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr, if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) { /* get the hit rate for the reporting function. Should still - * be good as we havent reset the hit rate statistics. + * be good as we haven't reset the hit rate statistics. */ if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate") @@ -5272,7 +5272,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG; evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG; - /* The flush proceedure here is a bit strange. + /* The flush procedure here is a bit strange. * * In the outer while loop we make at least one pass through the * cache, and then repeat until either all the pinned entries in @@ -5421,7 +5421,7 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags) * * While this optimization used to be easy, with the possibility * of new entries being added to the slist in the midst of the - * flush, we must keep the slist in cannonical form at all + * flush, we must keep the slist in canonical form at all * times. 
*/ if(((!entry_ptr->flush_me_last) || @@ -6280,7 +6280,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) entry_addr = entry_ptr->addr; /* Internal cache data structures should now be up to date, and - * consistant with the status of the entry. + * consistent with the status of the entry. * * Now discard the entry if appropriate. */ @@ -6301,10 +6301,10 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags) entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); /* If the entry is not a prefetched entry, verify that the flush - * dependency parents addresses array has been transfered. + * dependency parents addresses array has been transferred. * * If the entry is prefetched, the free_isr routine will dispose of - * the flush dependency parents adresses array if necessary. + * the flush dependency parents addresses array if necessary. */ if(!entry_ptr->prefetched) { HDassert(0 == entry_ptr->fd_parent_count); @@ -8587,7 +8587,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr) * tests will be necessary. */ if(cache_ptr->aux_ptr != NULL) - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case") + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case") #endif /* If required, resize the buffer and update the entry and the cache @@ -8787,7 +8787,7 @@ H5C_remove_entry(void *_entry) cache->entry_watched_for_removal = NULL; /* Internal cache data structures should now be up to date, and - * consistant with the status of the entry. + * consistent with the status of the entry. * * Now clean up internal cache fields if appropriate. */ diff --git a/src/H5Cimage.c b/src/H5Cimage.c index 51de5c4..26b6506 100644 --- a/src/H5Cimage.c +++ b/src/H5Cimage.c @@ -1494,7 +1494,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * if the underlying file alignment is greater than 1. * * Clean this up eventually by extending the size of the cache - * image block to the next alignement boundary, and then setting + * image block to the next alignment boundary, and then setting * the image_data_len to the actual size of the cache_image. * * On the off chance that there is some other way to get a @@ -1554,7 +1554,7 @@ H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated) * metadata cache image superblock extension message, set * cache_ptr->image_ctl.generate_image to FALSE. This will * allow the file close to continue normally without the - * unecessary generation of the metadata cache image. + * unnecessary generation of the metadata cache image. */ if(cache_ptr->num_entries_in_image > 0) { if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0) diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c index e342d69..ecaed62 100644 --- a/src/H5Cmpio.c +++ b/src/H5Cmpio.c @@ -380,7 +380,7 @@ H5C_apply_candidate_list(H5F_t * f, * Note that we are doing things in this round about manner so as * to preserve the order of the LRU list to the best of our ability. * If we don't do this, my experiments indicate that we will have a - * noticably poorer hit ratio as a result. + * noticeably poorer hit ratio as a result. 
*/ if(H5C__flush_candidate_entries(f, entries_to_flush, entries_to_clear) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed") diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index c5dc8f3..98d7a01 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -58,7 +58,7 @@ * We maintain doubly linked lists of instances of H5C_cache_entry_t for a * variety of reasons -- protected list, LRU list, and the clean and dirty * LRU lists at present. The following macros support linking and unlinking - * of instances of H5C_cache_entry_t by both their regular and auxilary next + * of instances of H5C_cache_entry_t by both their regular and auxiliary next * and previous pointers. * * The size and length fields are also maintained. @@ -2006,7 +2006,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * the pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -2117,7 +2117,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -2375,7 +2375,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -2513,7 +2513,7 @@ if ( ( (cache_ptr)->index_size != \ * a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -3057,7 +3057,7 @@ if ( ( (cache_ptr)->index_size != \ * squeeze a bit more performance out of the cache. * * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with + * white space in the macro. If they cause difficulties with * pre-processor, I'll have to remove them. * * JRM - 7/28/04 @@ -3892,7 +3892,7 @@ typedef struct H5C_tag_info_t { * 2) A pinned entry can be accessed or modified at any time. * This places an additional burden on the associated pre-serialize * and serialize callbacks, which must ensure the the entry is in - * a consistant state before creating an image of it. + * a consistent state before creating an image of it. * * 3) A pinned entry can be marked as dirty (and possibly * change size) while it is unprotected. @@ -4604,7 +4604,7 @@ typedef struct H5C_tag_info_t { * improper behavior if the next entry in the list is the target of one on * these operations. * - * The following fields are use to count such occurances. They are used + * The following fields are use to count such occurrences. They are used * both in tests (to verify that the scan has been restarted), and to * obtain estimates of how frequently these restarts occur. * @@ -4624,7 +4624,7 @@ typedef struct H5C_tag_info_t { * than the target entry as the result of call(s) to the * pre_serialize or serialize callbacks. 
* - * Note that at present, this condition can only be triggerd + * Note that at present, this condition can only be triggered * by a call to H5C_serialize_single_entry(). * * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 522b3cf..38a86ee 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -87,7 +87,7 @@ #define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024)) /* Default max cache size and min clean size are give here to make - * them generally accessable. + * them generally accessible. */ #define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024)) #define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024)) @@ -901,7 +901,7 @@ typedef struct H5C_class_t { H5C_get_fsf_size_t fsf_size; } H5C_class_t; -/* Type defintions of callback functions used by the cache as a whole */ +/* Type definitions of callback functions used by the cache as a whole */ typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f, hbool_t *write_permitted_ptr); typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, @@ -925,7 +925,7 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, * flush dependency -- with the implied setup and takedown overhead and * added complexity. Further, the flush ordering between rings need only * be enforced on flush operations, and thus the use of flush dependencies - * instead would apply unecessary constraints on flushes under normal + * instead would apply unnecessary constraints on flushes under normal * operating circumstances. * * As of this writing, all metadata entries pretaining to data sets and @@ -1092,7 +1092,7 @@ typedef int H5C_ring_t; * 2) A pinned entry can be accessed or modified at any time. * This places an extra burden on the pre-serialize and * serialize callbacks, which must ensure that a pinned - * entry is consistant and ready to write to disk before + * entry is consistent and ready to write to disk before * generating an image. * * 3) A pinned entry can be marked as dirty (and possibly @@ -1152,7 +1152,7 @@ typedef int H5C_ring_t; * flush_immediately: Boolean flag used only in Phdf5 -- and then only * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. * - * When a destributed metadata write is triggered at a + * When a distributed metadata write is triggered at a * sync point, this field is used to mark entries that * must be flushed before leaving the sync point. At all * other times, this field should be set to FALSE. @@ -1430,7 +1430,7 @@ typedef int H5C_ring_t; * The flush dependency height of any entry involved in a * flush dependency relationship is defined to be the * longest flush dependency path from that entry to an entry - * with no flush depenency children. + * with no flush dependency children. * * Since the image_fd_height is used to order entries in the * cache image so that fd parents preceed fd children, for @@ -1468,8 +1468,8 @@ typedef int H5C_ring_t; * * Further, if the prefetched entry is a flush dependency parent, * all its flush dependency children (which must also be - * prefetched entries), must be tranfered to the new cache - * entry returned by the deserailization callback. + * prefetched entries), must be transferred to the new cache + * entry returned by the deserialization callback. 
* * Finally, if the prefetched entry is a flush dependency child, * this flush dependency must be destroyed prior to the @@ -1737,7 +1737,7 @@ typedef struct H5C_cache_entry_t { * The flush dependency height of any entry involved in a * flush dependency relationship is defined to be the * longest flush dependency path from that entry to an entry - * with no flush depenency children. + * with no flush dependency children. * * Since the image_fd_height is used to order entries in the * cache image so that fd parents preceed fd children, for @@ -1946,7 +1946,7 @@ typedef struct H5C_image_entry_t { * * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated * type whose value indicates whether and by what algorithm we should - * make flash increases in the size of the cache to accomodate insertion + * make flash increases in the size of the cache to accommodate insertion * of large entries and large increases in the size of a single entry. * * The addition of the flash increment mode was occasioned by performance @@ -2182,9 +2182,9 @@ typedef struct H5C_auto_size_ctl_t { * current value, any value in excess of 255 will be the functional * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE. * - * flags: Unsigned integer containing flags controling which aspects of the + * flags: Unsigned integer containing flags controlling which aspects of the * cache image functinality is actually executed. The primary impetus - * behind this field is to allow developement of tests for partial + * behind this field is to allow development of tests for partial * implementations that will require little if any modification to run * with the full implementation. In normal operation, all flags should * be set. diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c index c23f089..8abfe27 100644 --- a/src/H5Dbtree.c +++ b/src/H5Dbtree.c @@ -666,12 +666,13 @@ done: static herr_t H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key) { - const H5O_layout_chunk_t *layout; /* Chunk layout description */ - H5D_btree_key_t *key = (H5D_btree_key_t *) _key; /* Pointer to decoded key */ - hsize_t tmp_offset; /* Temporary coordinate offset, from file */ - unsigned u; /* Local index variable */ + const H5O_layout_chunk_t *layout; /* Chunk layout description */ + H5D_btree_key_t *key = (H5D_btree_key_t *) _key; /* Pointer to decoded key */ + hsize_t tmp_offset; /* Temporary coordinate offset, from file */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC_NOERR + FUNC_ENTER_STATIC /* check args */ HDassert(shared); @@ -684,16 +685,21 @@ H5D__btree_decode_key(const H5B_shared_t *shared, const uint8_t *raw, void *_key /* decode */ UINT32DECODE(raw, key->nbytes); UINT32DECODE(raw, key->filter_mask); - for(u = 0; u < layout->ndims; u++) { + for(u = 0; u < layout->ndims; u++) + { + if (layout->dim[u] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) + /* Retrieve coordinate offset */ - UINT64DECODE(raw, tmp_offset); + UINT64DECODE(raw, tmp_offset); HDassert(0 == (tmp_offset % layout->dim[u])); /* Convert to a scaled offset */ key->scaled[u] = tmp_offset / layout->dim[u]; } /* end for */ - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__btree_decode_key() */ diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c index f9074be..d60e79e 100644 --- a/src/H5Dbtree2.c +++ b/src/H5Dbtree2.c @@ -378,7 +378,7 @@ H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx) { 
H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ - unsigned u; /* Local index varible */ + unsigned u; /* Local index variable */ FUNC_ENTER_STATIC_NOERR diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 7216c49..4906f3c 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -695,7 +695,7 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, /* Compute the # of chunks in dataset dimensions */ for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) { - /* Round up to the next integer # of chunks, to accomodate partial chunks */ + /* Round up to the next integer # of chunks, to accommodate partial chunks */ layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u]; if(H5S_UNLIMITED == max_dims[u]) layout->max_chunks[u] = H5S_UNLIMITED; @@ -947,6 +947,8 @@ H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id) hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ /* Initial scaled dimension sizes */ + if(dset->shared->layout.u.chunk.dim[u] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", u) rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u]; if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) ) @@ -2933,8 +2935,41 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5F_set_coll_md_read(idx_info.f, temp_cmr); #endif /* H5_HAVE_PARALLEL */ - /* Cache the information retrieved */ - H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata); + /* + * Cache the information retrieved. + * + * Note that if we are writing to the dataset in parallel and filters + * are involved, we skip caching this information as it is highly likely + * that the chunk information will be invalidated as a result of the + * filter operation (e.g. the chunk gets re-allocated to a different + * address in the file and/or gets re-allocated with a different size). + * If we were to cache this information, subsequent reads/writes would + * retrieve the invalid information and cause a variety of issues. + * + * It has been verified that in the serial library, when writing to chunks + * with the real chunk cache disabled and with filters involved, the + * functions within this file are correctly called in such a manner that + * this single chunk cache is always updated correctly. Therefore, this + * check is not needed for the serial library. + * + * This is an ugly and potentially frail check, but the + * H5D__chunk_cinfo_cache_reset() function is not currently available + * to functions outside of this file, so outside functions can not + * invalidate this single chunk cache. Even if the function were available, + * this check prevents us from doing the work of going through and caching + * each chunk in the write operation, when we're only going to invalidate + * the cache at the end of a parallel write anyway. 
+ * + * - JTH (7/13/2018) + */ +#ifdef H5_HAVE_PARALLEL + if ( !( (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) + && (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR) + && dset->shared->dcpl_cache.pline.nused + ) + ) +#endif + H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata); } /* end if */ } /* end else */ @@ -3016,7 +3051,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset) } /* end if */ else { /* - * If we are reseting and something goes wrong after this + * If we are resetting and something goes wrong after this * point then it's too late to recover because we may have * destroyed the original data by calling H5Z_pipeline(). * The only safe option is to continue with the reset @@ -4449,7 +4484,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]) /* Start off with this dimension marked as not needing to be modified */ new_full_dim[op_dim] = FALSE; - /* Calulate offset of first previously incomplete chunk in this + /* Calculate offset of first previously incomplete chunk in this * dimension */ old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]); @@ -4970,7 +5005,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim) HGOTO_DONE(SUCCEED) } /* end if */ - /* Round up to the next integer # of chunks, to accomodate partial chunks */ + /* Round up to the next integer # of chunks, to accommodate partial chunks */ /* Use current dims because the indices have already been updated! -NAF */ /* (also compute the number of elements per chunk) */ /* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */ diff --git a/src/H5Dint.c b/src/H5Dint.c index b9d9cce..e8874a2 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -604,7 +604,7 @@ H5D__cache_dataspace_info(const H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions") dset->shared->ndims = (unsigned)sndims; - /* Compute the inital 'power2up' values */ + /* Compute the initial 'power2up' values */ for(u = 0; u < dset->shared->ndims; u++) { hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ @@ -782,7 +782,7 @@ H5D__update_oh_info(H5F_t *file, H5D_t *dset, hid_t dapl_id) H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */ /* Shallow copy the fill value property */ - /* (we only want to make certain that the shared component isnt' modified) */ + /* (we only want to make certain that the shared component isn't modified) */ HDmemcpy(&old_fill_prop, fill_prop, sizeof(old_fill_prop)); /* Reset shared component info */ @@ -2580,10 +2580,11 @@ done: herr_t H5D__set_extent(H5D_t *dset, const hsize_t *size) { - hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */ - htri_t changed; /* Whether the dataspace changed size */ - size_t u, v; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + hsize_t curr_dims[H5S_MAX_RANK]; /* Current dimension sizes */ + htri_t changed; /* Whether the dataspace changed size */ + size_t u, v; /* Local index variable */ + unsigned dim_idx; /* Dimension index */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_VOL_TAG(dset->oloc.addr) @@ -2620,11 +2621,11 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) hbool_t update_chunks = FALSE; /* Flag to indicate chunk cache update is needed */ /* Determine if we are shrinking and/or expanding any dimensions */ - for(u = 0; u < (size_t)dset->shared->ndims; u++) { + for(dim_idx = 0; dim_idx < dset->shared->ndims; dim_idx++) { 
/* Check for various status changes */ - if(size[u] < curr_dims[u]) + if(size[dim_idx] < curr_dims[dim_idx]) shrink = TRUE; - if(size[u] > curr_dims[u]) + if(size[dim_idx] > curr_dims[dim_idx]) expand = TRUE; /* Chunked storage specific checks */ @@ -2632,30 +2633,33 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) hsize_t scaled; /* Scaled value */ /* Compute the scaled dimension size value */ - scaled = size[u] / dset->shared->layout.u.chunk.dim[u]; + if(dset->shared->layout.u.chunk.dim[dim_idx] == 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk size must be > 0, dim = %u ", dim_idx) + + scaled = size[dim_idx] / dset->shared->layout.u.chunk.dim[dim_idx]; /* Check if scaled dimension size changed */ - if(scaled != dset->shared->cache.chunk.scaled_dims[u]) { + if(scaled != dset->shared->cache.chunk.scaled_dims[dim_idx]) { hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */ /* Update the scaled dimension size value for the current dimension */ - dset->shared->cache.chunk.scaled_dims[u] = scaled; + dset->shared->cache.chunk.scaled_dims[dim_idx] = scaled; /* Check if algorithm for computing hash values will change */ if((scaled > dset->shared->cache.chunk.nslots && - dset->shared->cache.chunk.scaled_dims[u] <= dset->shared->cache.chunk.nslots) + dset->shared->cache.chunk.scaled_dims[dim_idx] <= dset->shared->cache.chunk.nslots) || (scaled <= dset->shared->cache.chunk.nslots && - dset->shared->cache.chunk.scaled_dims[u] > dset->shared->cache.chunk.nslots)) + dset->shared->cache.chunk.scaled_dims[dim_idx] > dset->shared->cache.chunk.nslots)) update_chunks = TRUE; if(!(scaled_power2up = H5VM_power2up(scaled))) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2") /* Check if the number of bits required to encode the scaled size value changed */ - if(dset->shared->cache.chunk.scaled_power2up[u] != scaled_power2up) { + if(dset->shared->cache.chunk.scaled_power2up[dim_idx] != scaled_power2up) { /* Update the 'power2up' & 'encode_bits' values for the current dimension */ - dset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up; - dset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up); + dset->shared->cache.chunk.scaled_power2up[dim_idx] = scaled_power2up; + dset->shared->cache.chunk.scaled_encode_bits[dim_idx] = H5VM_log2_gen(scaled_power2up); /* Indicate that the cached chunk indices need to be updated */ update_chunks = TRUE; @@ -2664,7 +2668,7 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size) } /* end if */ /* Update the cached copy of the dataset's dimensions */ - dset->shared->curr_dims[u] = size[u]; + dset->shared->curr_dims[dim_idx] = size[dim_idx]; } /* end for */ /*------------------------------------------------------------------------- diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 107b751..d721ae6 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -234,7 +234,7 @@ static herr_t H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info H5D_filtered_collective_io_info_t *local_chunk_array, size_t *local_chunk_array_num_entries); static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, size_t array_entry_size, void **gathered_array, size_t *gathered_array_num_entries, - int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)); + hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)); static herr_t H5D__mpio_filtered_collective_write_type( H5D_filtered_collective_io_info_t 
*chunk_list, size_t num_entries, MPI_Datatype *new_mem_type, hbool_t *mem_type_derived, @@ -418,19 +418,16 @@ done: * Function: H5D__mpio_array_gatherv * * Purpose: Given an array, specified in local_array, by each processor - * calling this function, gathers each array into a single + * calling this function, collects each array into a single * array which is then either gathered to the processor * specified by root, when allgather is false, or is * distributed back to all processors when allgather is true. * - * The size of each entry and number of entries in the array - * contributed by an individual processor should be specified - * in array_entry_size and local_array_num_entries, + * The number of entries in the array contributed by an + * individual processor and the size of each entry should be + * specified in local_array_num_entries and array_entry_size, * respectively. * - * The number of processors participating in the gather - * operation should be specified for nprocs. - * * The MPI communicator to use should be specified for comm. * * If the sort_func argument is supplied, the array is sorted @@ -448,14 +445,13 @@ done: static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, size_t array_entry_size, void **_gathered_array, size_t *_gathered_array_num_entries, - int nprocs, hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)) + hbool_t allgather, int root, MPI_Comm comm, int (*sort_func)(const void *, const void *)) { size_t gathered_array_num_entries = 0; /* The size of the newly-constructed array */ - size_t i; void *gathered_array = NULL; /* The newly-constructed array returned to the caller */ - int *receive_counts_array = NULL; /* Array containing number of entries each process is contributing */ - int *displacements_array = NULL; /* Array of displacements where each process places its data in the final array */ - int mpi_code; + int *receive_counts_array = NULL; /* Array containing number of entries each processor is contributing */ + int *displacements_array = NULL; /* Array of displacements where each processor places its data in the final array */ + int mpi_code, mpi_rank, mpi_size; int sendcount; herr_t ret_value = SUCCEED; @@ -464,34 +460,62 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, HDassert(_gathered_array); HDassert(_gathered_array_num_entries); - /* Determine the size of the end result array */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* + * Determine the size of the end result array by collecting the number + * of entries contributed by each processor into a single total. + */ if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_array_num_entries, &gathered_array_num_entries, 1, MPI_INT, MPI_SUM, comm))) HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) - /* If 0 entries resulted from the collective operation, no one is writing anything */ + /* If 0 entries resulted from the collective operation, no processor is contributing anything and there is nothing to do */ if (gathered_array_num_entries > 0) { - if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array") + /* + * If gathering to all processors, all processors need to allocate space for the resulting array, as well as + * the receive counts and displacements arrays for the collective MPI_Allgatherv call. 
Otherwise, only the + * root processor needs to allocate the space for an MPI_Gatherv call. + */ + if (allgather || (mpi_rank == root)) { + if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array") - if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array") + if (NULL == (receive_counts_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array") - if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) nprocs * sizeof(int)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array") + if (NULL == (displacements_array = (int *) H5MM_malloc((size_t) mpi_size * sizeof(int)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array") + } /* end if */ - /* Inform each process of how many entries each other process is contributing to the resulting array */ - if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code) + /* + * If gathering to all processors, inform each processor of how many entries each other processor is + * contributing to the resulting array by collecting the counts into each processor's "receive counts" + * array. Otherwise, inform only the root processor of how many entries each other processor is contributing. + */ + if (allgather) { + if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code) + } /* end if */ + else { + if (MPI_SUCCESS != (mpi_code = MPI_Gather(&local_array_num_entries, 1, MPI_INT, receive_counts_array, 1, MPI_INT, root, comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code) + } /* end else */ - /* Multiply each receive count by the size of the array entry, since the data is sent as bytes */ - for (i = 0; i < (size_t) nprocs; i++) - H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t); + if (allgather || (mpi_rank == root)) { + size_t i; - /* Set receive buffer offsets for MPI_Allgatherv */ - displacements_array[0] = 0; - for (i = 1; i < (size_t) nprocs; i++) - displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1]; + /* Multiply each receive count by the size of the array entry, since the data is sent as bytes. */ + for (i = 0; i < (size_t) mpi_size; i++) + H5_CHECKED_ASSIGN(receive_counts_array[i], int, (size_t) receive_counts_array[i] * array_entry_size, size_t); + /* Set receive buffer offsets for the collective MPI_Allgatherv/MPI_Gatherv call. */ + displacements_array[0] = 0; + for (i = 1; i < (size_t) mpi_size; i++) + displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1]; + } /* end if */ + + /* As the data is sent as bytes, calculate the true sendcount for the data. 
*/ H5_CHECKED_ASSIGN(sendcount, int, local_array_num_entries * array_entry_size, size_t); if (allgather) { @@ -502,10 +526,11 @@ H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, else { if (MPI_SUCCESS != (mpi_code = MPI_Gatherv(local_array, sendcount, MPI_BYTE, gathered_array, receive_counts_array, displacements_array, MPI_BYTE, root, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code) + HMPI_GOTO_ERROR(FAIL, "MPI_Gatherv failed", mpi_code) } /* end else */ - if (sort_func) HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func); + if (sort_func && (allgather || (mpi_rank == root))) + HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func); } /* end if */ *_gathered_array = gathered_array; @@ -706,7 +731,7 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf io_option = H5D_MULTI_CHUNK_IO; /* via default path. branch by num threshold */ else { - unsigned one_link_chunk_io_threshold; /* Threshhold to use single collective I/O for all chunks */ + unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */ int mpi_size; /* Number of processes in MPI job */ if(H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0) @@ -1295,8 +1320,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in * of the chunks in the file. */ if (H5D__mpio_array_gatherv(chunk_list, chunk_list_num_entries, sizeof(H5D_filtered_collective_io_info_t), - (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size, - true, 0, io_info->comm, NULL) < 0) + (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes") /* Collectively re-allocate the modified chunks (from each process) in the file */ @@ -1768,8 +1792,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i * of the chunks in the file */ if (H5D__mpio_array_gatherv(&chunk_list[i], have_chunk_to_process ? 1 : 0, sizeof(H5D_filtered_collective_io_info_t), - (void **) &collective_chunk_list, &collective_chunk_list_num_entries, mpi_size, - true, 0, io_info->comm, NULL) < 0) + (void **) &collective_chunk_list, &collective_chunk_list_num_entries, true, 0, io_info->comm, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes") /* Participate in the collective re-allocation of all chunks modified @@ -2655,8 +2678,8 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty * call, the gathered list will initially be sorted in increasing order of chunk offset in the file. 
*/ if (H5D__mpio_array_gatherv(local_chunk_array, *local_chunk_array_num_entries, sizeof(H5D_filtered_collective_io_info_t), - (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, mpi_size, - false, 0, io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0) + (void **) &shared_chunks_info_array, &shared_chunks_info_array_num_entries, false, 0, + io_info->comm, H5D__cmp_filtered_collective_io_info_entry) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather array") /* Rank 0 redistributes any shared chunks to new owners as necessary */ @@ -2765,7 +2788,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace") - /* Intialize iterator for memory selection */ + /* Initialize iterator for memory selection */ if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init = TRUE; @@ -2981,7 +3004,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk { H5D_chunk_info_t *chunk_info = NULL; H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */ - unsigned char *mod_data = NULL; /* Chunk modification data sent by a process to a chunk's owner */ + H5S_sel_iter_t *file_iter = NULL; H5Z_EDC_t err_detect; /* Error detection info */ H5Z_cb_t filter_cb; /* I/O filter callback function */ unsigned filter_mask = 0; @@ -2989,11 +3012,13 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk hssize_t extent_npoints; hsize_t true_chunk_size; hbool_t mem_iter_init = FALSE; + hbool_t file_iter_init = FALSE; size_t buf_size; size_t i; H5S_t *dataspace = NULL; /* Other process' dataspace for the chunk */ - void *tmp_gath_buf = NULL; /* Temporary gather buffer for owner of the chunk to gather into from - application write buffer before scattering out to the chunk data buffer */ + void *tmp_gath_buf = NULL; /* Temporary gather buffer to gather into from application buffer + before scattering out to the chunk data buffer (when writing data), + or vice versa (when reading data) */ int mpi_code; herr_t ret_value = SUCCEED; @@ -3073,9 +3098,6 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init = TRUE; - if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid") - /* If this is a read operation, scatter the read chunk data to the user's buffer. 
* * If this is a write operation, update the chunk data buffer with the modifications @@ -3084,16 +3106,39 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk */ switch (io_info->op_type) { case H5D_IO_OP_READ: - if (H5D__scatter_mem(chunk_entry->buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer") + if (NULL == (file_iter = (H5S_sel_iter_t *) H5MM_malloc(sizeof(H5S_sel_iter_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file iterator") + + if (H5S_select_iter_init(file_iter, chunk_info->fspace, type_info->src_type_size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") + file_iter_init = TRUE; + + if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid") + + if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer") + + if (!H5D__gather_mem(chunk_entry->buf, chunk_info->fspace, file_iter, (size_t) iter_nelmts, tmp_gath_buf)) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't gather from chunk buffer") + + if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid") + + if (H5D__scatter_mem(tmp_gath_buf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, io_info->u.rbuf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer") + break; case H5D_IO_OP_WRITE: + if ((iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid") + if (NULL == (tmp_gath_buf = H5MM_malloc((hsize_t) iter_nelmts * type_info->src_type_size))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer") /* Gather modification data from the application write buffer into a temporary buffer */ - if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, tmp_gath_buf)) + if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t) iter_nelmts, tmp_gath_buf)) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't gather from write buffer") if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) @@ -3111,7 +3156,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk /* Scatter the owner's modification data into the chunk data buffer according to * the file space. 
*/ - if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0) + if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t) iter_nelmts, chunk_entry->buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to chunk data buffer") if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) @@ -3177,10 +3222,12 @@ done: H5MM_free(chunk_entry->async_info.receive_buffer_array); if (chunk_entry->async_info.receive_requests_array) H5MM_free(chunk_entry->async_info.receive_requests_array); - if (mod_data) - H5MM_free(mod_data); if (tmp_gath_buf) H5MM_free(tmp_gath_buf); + if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") + if (file_iter) + H5MM_free(file_iter); if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator") if (mem_iter) diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index c7c0775..7f1ac86 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -796,7 +796,7 @@ H5D__virtual_copy(H5F_t *f_dst, H5O_layout_t *layout_dst) if(f_dst == f_src) { /* Increase reference count on global heap object */ if((heap_rc = H5HG_link(f_dst, (H5HG_t *)&(layout_dst->u.virt.serial_list_hobjid), 1)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count") + HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count") } /* end if */ else #endif /* NOT_YET */ @@ -847,7 +847,7 @@ H5D__virtual_delete(H5F_t *f, H5O_storage_t *storage) #ifdef NOT_YET /* Unlink the global heap block */ if((heap_rc = H5HG_link(f, (H5HG_t *)&(storage->u.virt.serial_list_hobjid), -1)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap refence count") + HGOTO_ERROR(H5E_DATASET, H5E_CANTMODIFY, FAIL, "unable to adjust global heap reference count") if(heap_rc == 0) #endif /* NOT_YET */ /* Delete the global heap block */ diff --git a/src/H5Edeprec.c b/src/H5Edeprec.c index 9ff9f1f..fc3eb9c 100644 --- a/src/H5Edeprec.c +++ b/src/H5Edeprec.c @@ -227,7 +227,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eclear1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Clears the error stack for the specified error stack. * * Return: Non-negative on success/Negative on failure @@ -258,7 +258,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eprint1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Prints the error stack in some default way. This is just a * convenience function for H5Ewalk() with a function that * prints error messages. Users are encouraged to write there @@ -296,7 +296,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Ewalk1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Walks the error stack for the current thread and calls some * function for each error along the way. * @@ -335,7 +335,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eget_auto1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. 
* Returns the current settings for the automatic error stack * traversal function and its data for specific error stack. * Either (or both) arguments may be null in which case the @@ -386,7 +386,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Eset_auto1 * - * Purpose: This function is for backward compatbility. + * Purpose: This function is for backward compatibility. * Turns on or off automatic printing of errors for certain * error stack. When turned on (non-null FUNC pointer) any * API function which returns an error indication will first diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c index 811ea8e..906ec28 100644 --- a/src/H5FDdirect.c +++ b/src/H5FDdirect.c @@ -951,7 +951,7 @@ H5FD_direct_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UN do { /* Read the aligned data in file first. Not able to handle interrupted * system calls and partial results like sec2 driver does because the - * data may no longer be aligned. It's expecially true when the data in + * data may no longer be aligned. It's especially true when the data in * file is smaller than ALLOC_SIZE. */ HDmemset(copy_buf, 0, alloc_size); @@ -1138,9 +1138,9 @@ H5FD_direct_write(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_U /* * Read the aligned data first if the aligned region doesn't fall - * entirely in the range to be writen. Not able to handle interrupted + * entirely in the range to be written. Not able to handle interrupted * system calls and partial results like sec2 driver does because the - * data may no longer be aligned. It's expecially true when the data in + * data may no longer be aligned. It's especially true when the data in * file is smaller than ALLOC_SIZE. Only read the entire section if * both ends are misaligned, otherwise only read the block on the * misaligned end. diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 3c1c7bb..e52a71a 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -548,7 +548,7 @@ H5FD_family_sb_encode(H5FD_t *_file, char *name/*out*/, unsigned char *buf/*out* /*------------------------------------------------------------------------- * Function: H5FD_family_sb_decode * - * Purpose: This function has 2 seperate purpose. One is to decodes the + * Purpose: This function has 2 separate purpose. One is to decodes the * superblock information for this driver. The NAME argument is * the eight-character (plus null termination) name stored in i * the file. 
The FILE argument is updated according to the diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index ee3277d..87f8b6a 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -1462,7 +1462,7 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, /* Only look for MPI views for raw data transfers */ if(type == H5FD_MEM_DRAW) { - H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ /* Get the transfer mode from the API context */ if(H5CX_get_io_xfer_mode(&xfer_mode) < 0) @@ -1717,7 +1717,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, #endif int size_i; hbool_t use_view_this_time = FALSE; - H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NOINIT diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c index 8ae23d2..aa1b118 100644 --- a/src/H5FDmulti.c +++ b/src/H5FDmulti.c @@ -263,7 +263,7 @@ H5FD_multi_term(void) /*------------------------------------------------------------------------- * Function: H5Pset_fapl_split * - * Purpose: Compatability function. Makes the multi driver act like the + * Purpose: Compatibility function. Makes the multi driver act like the * old split driver which stored meta data in one file and raw * data in another file. * @@ -18,7 +18,7 @@ * Purpose: Manage priority queues of free-lists (of blocks of bytes). * These are used in various places in the library which allocate and * free differently blocks of bytes repeatedly. Usually the same size - * of block is allocated and freed repeatly in a loop, while writing out + * of block is allocated and freed repeatedly in a loop, while writing out * chunked data for example, but the blocks may also be of different sizes * from different datasets and an attempt is made to optimize access to * the proper free list of blocks by using these priority queues to @@ -499,7 +499,7 @@ H5FL_reg_calloc(H5FL_reg_head_t *head H5FL_TRACK_PARAMS) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Clear to zeros */ - /* (Accomodate tracking information, if present) */ + /* (Accommodate tracking information, if present) */ HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE); done: @@ -2220,7 +2220,7 @@ H5FL_fac_calloc(H5FL_fac_head_t *head H5FL_TRACK_PARAMS) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Clear to zeros */ - /* (Accomodate tracking information, if present) */ + /* (Accommodate tracking information, if present) */ HDmemset(ret_value,0,head->size - H5FL_TRACK_SIZE); done: @@ -135,7 +135,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl fspace->alignment = alignment; fspace->align_thres = threshold; - /* Check if the free space tracker is supposed to be persistant */ + /* Check if the free space tracker is supposed to be persistent */ if(fs_addr) { /* Allocate space for the free space header */ if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, (hsize_t)fspace->hdr_size))) @@ -423,7 +423,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu, fspace->serial_sect_count = HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", FUNC, fspace->alloc_sect_size, fspace->sect_size); #endif /* H5FS_DEBUG */ /* If there are sections to serialize, update them */ - /* (if the free space manager is persistant) */ + /* (if the free space manager is persistent) */ if(fspace->serial_sect_count > 0 && H5F_addr_defined(fspace->addr)) { 
#ifdef H5FS_DEBUG HDfprintf(stderr, "%s: Real sections to store in file\n", FUNC); @@ -794,7 +794,7 @@ HDfprintf(stderr, "%s: Marking free space header as dirty\n", FUNC); /* Sanity check */ HDassert(fspace); - /* Check if the free space manager is persistant */ + /* Check if the free space manager is persistent */ if(H5F_addr_defined(fspace->addr)) /* Mark header as dirty in cache */ if(H5AC_mark_entry_dirty(fspace) < 0) @@ -883,7 +883,7 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace) HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache") /* Since space has been allocated for the section info and the sinfo - * has been inserted into the cache, relinquish owership (i.e. float) + * has been inserted into the cache, relinquish ownership (i.e. float) * the section info. */ fspace->sinfo = NULL; diff --git a/src/H5FScache.c b/src/H5FScache.c index 875a383..fa04ba1 100644 --- a/src/H5FScache.c +++ b/src/H5FScache.c @@ -457,7 +457,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, * H5F_addr_defined(fspace->addr) * * will both be TRUE. If this contition does not hold, then - * either the free space info is not persistant + * either the free space info is not persistent * (!H5F_addr_defined(fspace->addr)???) or the section info * contains no free space data that must be written to file * ( fspace->serial_sect_count == 0 ). @@ -487,7 +487,7 @@ H5FS__cache_hdr_pre_serialize(H5F_t *f, void *_thing, * * Case 1) If either fspace->serial_sect_count == 0 or * ! H5F_addr_defined(fspace->addr) do nothing as either - * the free space manager data is not persistant, or the + * the free space manager data is not persistent, or the * section info is empty. * * Otherwise, allocate space for the section info in real @@ -1454,7 +1454,7 @@ H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udat /* Call 'serialize' callback for this section */ if(sect_cls->serialize) { if((*sect_cls->serialize)(sect_cls, sect, *udata->image) < 0) - HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't syncronize section") + HGOTO_ERROR(H5E_FSPACE, H5E_CANTSERIALIZE, FAIL, "can't synchronize section") /* Update offset in serialization buffer */ (*udata->image) += sect_cls->serial_size; diff --git a/src/H5FSsection.c b/src/H5FSsection.c index 11cd722..a58347f 100644 --- a/src/H5FSsection.c +++ b/src/H5FSsection.c @@ -2432,7 +2432,7 @@ done: * returns to 1) above. * * Similarly, if it allocates space for its own header, it - * can go into an infinte loop as it: + * can go into an infinite loop as it: * * 1) allocates space for the header * @@ -2479,7 +2479,7 @@ done: * enabled when the free space managers are read. To allow * for this, we must ensure that space allocated for the * free space manager header and section info is either larger - * than a page, or resides completely withing a page. + * than a page, or resides completely within a page. * * Do this by allocating space for the free space header and * section info starting at page boundaries, and extending @@ -2532,7 +2532,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, HDassert(fspace->sect_addr == HADDR_UNDEF); HDassert(fspace->alloc_sect_size == 0); - /* persistant free space managers must be enabled */ + /* persistent free space managers must be enabled */ HDassert(f->shared->fs_persist); /* At present, all free space strategies enable the free space managers. 
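For context on the H5Pset_fapl_split compatibility function touched a few hunks back: from application code it is a single property-list call. A minimal sketch, assuming default access properties for both channels and an illustrative file name (the ".meta"/".raw" extensions are conventional, not required):

    #include <hdf5.h>

    int main(void)
    {
        hid_t fapl, file;

        fapl = H5Pcreate(H5P_FILE_ACCESS);

        /* Metadata typically lands in "example.h5.meta", raw data in
         * "example.h5.raw"; both channels use default FAPLs here. */
        H5Pset_fapl_split(fapl, ".meta", H5P_DEFAULT, ".raw", H5P_DEFAULT);

        file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        H5Fclose(file);
        H5Pclose(fapl);
        return 0;
    }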
@@ -2576,7 +2576,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, f, hdr_alloc_size, &eoa_frag_addr, &eoa_frag_size))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space for hdr") - /* if the file alignement is 1, there should be no + /* if the file alignment is 1, there should be no * eoa fragment. Otherwise, drop any fragment on the floor. */ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1)); @@ -2616,7 +2616,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, f, sinfo_alloc_size, &eoa_frag_addr, &eoa_frag_size))) HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space") - /* if the file alignement is 1, there should be no + /* if the file alignment is 1, there should be no * eoa fragment. Otherwise, drop the fragment on the floor. */ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1)); @@ -2637,7 +2637,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, * On reflection, no. * * On a regular file close, any eviction will not change the - * the contents of the free space manger(s), as all entries + * the contents of the free space manager(s), as all entries * should have correct file space allocated by the time this * function is called. * @@ -2658,7 +2658,7 @@ H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, H5FS_t *fspace, HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty") /* since space has been allocated for the section info and the sinfo - * has been inserted into the cache, relinquish owership (i.e. float) + * has been inserted into the cache, relinquish ownership (i.e. float) * the section info. */ fspace->sinfo = NULL; diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index f815005..2ab41de 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -321,7 +321,7 @@ struct H5F_file_t { haddr_t fs_addr[H5F_MEM_PAGE_NTYPES]; /* Address of free space manager info for each type */ H5FS_t *fs_man[H5F_MEM_PAGE_NTYPES]; /* Free space manager for each file space type */ hbool_t first_alloc_dealloc; /* TRUE iff free space managers */ - /* are persistant and have not */ + /* are persistent and have not */ /* been used accessed for either */ /* allocation or deallocation */ /* since file open. */ diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index f06d4d0..979ba7d 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -557,7 +557,7 @@ typedef struct H5F_t H5F_t; #define H5F_FILE_SPACE_PAGE_SIZE_DEF 4096 /* For paged aggregation: minimum value for file space page size */ #define H5F_FILE_SPACE_PAGE_SIZE_MIN 512 -/* For paged aggregation: maxiumum value for file space page size: 1 gigabyte */ +/* For paged aggregation: maximum value for file space page size: 1 gigabyte */ #define H5F_FILE_SPACE_PAGE_SIZE_MAX 1024*1024*1024 /* For paged aggregation: drop free-space with size <= this threshold for small meta section */ diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 97a6d6f..9339b3d 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -823,13 +823,13 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read) f->shared->eoa_pre_fsm_fsalloc = fsinfo.eoa_pre_fsm_fsalloc; /* f->shared->eoa_pre_fsm_fsalloc must always be HADDR_UNDEF - * in the absence of persistant free space managers. + * in the absence of persistent free space managers. 
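The page-size bounds quoted in the H5Fprivate.h hunk above (512 bytes minimum, 1 GiB maximum) and the persistent free space managers discussed throughout these hunks are both driven from the file creation property list. A hedged sketch, with an arbitrary 1 MiB page size:

    #include <hdf5.h>

    int main(void)
    {
        hid_t fcpl, file;

        fcpl = H5Pcreate(H5P_FILE_CREATE);

        /* Paged aggregation with persistent free space managers, and a
         * page size chosen between the documented 512-byte minimum and
         * 1 GiB maximum. */
        H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1);
        H5Pset_file_space_page_size(fcpl, (hsize_t)(1024 * 1024));

        file = H5Fcreate("paged.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

        H5Fclose(file);
        H5Pclose(fcpl);
        return 0;
    }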
*/ /* If the following two conditions are true: * (1) skipping EOF check (skip_eof_check) * (2) dropping free-space to the floor (null_fsm_addr) * skip the asserts as "eoa_pre_fsm_fsalloc" may be undefined - * for a crashed file with persistant free space managers. + * for a crashed file with persistent free space managers. * #1 and #2 are enabled when the tool h5clear --increment * option is used. */ @@ -839,7 +839,7 @@ H5F__super_read(H5F_t *f, H5P_genplist_t *fa_plist, hbool_t initial_read) } /* end if */ /* As "eoa_pre_fsm_fsalloc" may be undefined for a crashed file - * with persistant free space managers, therefore, set + * with persistent free space managers, therefore, set * "first_alloc_dealloc" when the condition * "dropping free-space to the floor is true. * This will ensure that no action is done to settle things on file diff --git a/src/H5Gname.c b/src/H5Gname.c index 1ca06b9..c1156ca 100644 --- a/src/H5Gname.c +++ b/src/H5Gname.c @@ -1058,7 +1058,7 @@ done: * Function: H5G_name_replace * * Purpose: Search the list of open IDs and replace names according to a - * particular operation. The operation occured on the + * particular operation. The operation occurred on the * SRC_FILE/SRC_FULL_PATH_R object. The new name (if there is * one) is NEW_NAME_R. Additional entry location information * (currently only needed for the 'move' operation) is passed in diff --git a/src/H5Gobj.c b/src/H5Gobj.c index af0e72c..e93896c 100644 --- a/src/H5Gobj.c +++ b/src/H5Gobj.c @@ -222,7 +222,7 @@ H5G__obj_create_real(H5F_t *f, const H5O_ginfo_t *ginfo, size_t pline_size = 0; /* Size of the pipeline message */ size_t link_size; /* Size of a link message */ - /* Calculate message size infomation, for creating group's object header */ + /* Calculate message size information, for creating group's object header */ linfo_size = H5O_msg_size_f(f, gcpl_id, H5O_LINFO_ID, linfo, (size_t)0); HDassert(linfo_size); diff --git a/src/H5HFcache.c b/src/H5HFcache.c index 84d5c5f..0c5d3aa 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -702,12 +702,12 @@ H5HF__cache_hdr_pre_serialize(H5F_t *f, void *_thing, haddr_t addr, size_t len, * * Do this with a call to H5HF__cache_verify_hdr_descendants_clean(). * - * Note that decendants need not be clean if the pre_serialize call + * Note that descendants need not be clean if the pre_serialize call * is made during a cache serialization instead of an entry or cache * flush. * * Note also that with the recent change in the definition of flush - * dependency, not all decendants need be clean -- only direct flush + * dependency, not all descendants need be clean -- only direct flush * dependency children. * * Finally, observe that the H5HF__cache_verify_hdr_descendants_clean() @@ -2643,11 +2643,11 @@ H5HF__cache_dblock_fsf_size(const void *_thing, hsize_t *fsf_size) * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. 
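The revised flush-dependency rule described in the H5HFcache hunks above (only immediate flush-dependency children must be clean, not the whole descendant tree) can be pictured with a small standalone sketch. The struct and field names below are invented for illustration and are not the cache's real data structures:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical cache entry: a flush-dependency parent is flushable when
     * its *immediate* children are clean, regardless of dirty entries deeper
     * in the dependency tree. */
    struct entry {
        bool          is_dirty;
        size_t        nchildren;
        struct entry **children;    /* immediate flush-dependency children */
    };

    static bool parent_is_flushable(const struct entry *parent)
    {
        size_t u;

        for(u = 0; u < parent->nchildren; u++)
            if(parent->children[u]->is_dirty)
                return false;       /* a direct child is dirty: not flushable */

        return true;                /* grandchildren are never consulted */
    }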
@@ -2815,7 +2815,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, H5_BEGIN_TAG(hdr->heap_addr) if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -2889,7 +2889,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, H5_BEGIN_TAG(hdr->heap_addr) if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -2912,7 +2912,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr, if(unprotect_root_iblock) { HDassert(root_iblock); if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.") } /* end if */ } /* end else */ } /* end if */ @@ -2982,7 +2982,7 @@ done: * Function: H5HF__cache_verify_iblock_descendants_clean * * Purpose: Sanity checking routine that verifies that all indirect - * and direct blocks that are decendents of the supplied + * and direct blocks that are descendants of the supplied * instance of H5HF_indirect_t are clean. Set *clean * to TRUE if this is the case, and to FALSE otherwise. * @@ -3009,11 +3009,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. @@ -3126,11 +3126,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. @@ -3290,11 +3290,11 @@ done: * * The implementation of flush dependencies has been changed. * Prior to this change, a flush dependency parent could be - * flushed if and only if all its flush dependency decendants + * flushed if and only if all its flush dependency descendants * were clean. In the new definition, a flush dependency * parent can be flushed if all its immediate flush dependency * children are clean, regardless of any other dirty - * decendants. + * descendants. * * Further, metadata cache entries are now allowed to have * multiple flush dependency parents. 
@@ -3468,7 +3468,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, H5_BEGIN_TAG(iblock->hdr->heap_addr) if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC__READ_ONLY_FLAG))) - HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() faild.") + HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.") H5_END_TAG @@ -3480,7 +3480,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, /* pointer to the entry. This is very slimy -- */ /* come up with a better solution. */ if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.") HDassert(child_iblock); } /* end else */ } /* end if */ @@ -3519,7 +3519,7 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, haddr_t fd_parent_addr, /* if we protected the child iblock, unprotect it now */ if(unprotect_child_iblock) { if(H5AC_unprotect(f, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0) - HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() faild.") + HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.") } /* end if */ } /* end if */ } /* end if */ diff --git a/src/H5HFdtable.c b/src/H5HFdtable.c index 563e8c5..6e9429a 100644 --- a/src/H5HFdtable.c +++ b/src/H5HFdtable.c @@ -338,7 +338,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect /* Check for multi-row span */ if(start_row != end_row) { - /* Accomodate partial starting row */ + /* Accommodate partial starting row */ if(start_col > 0) { acc_span_size = dtable->row_block_size[start_row] * (dtable->cparam.width - start_col); @@ -352,7 +352,7 @@ HDfprintf(stderr, "%s: end_row = %u, end_col = %u, end_entry = %u\n", "H5HF_sect start_row++; } /* end while */ - /* Accomodate partial ending row */ + /* Accommodate partial ending row */ acc_span_size += dtable->row_block_size[start_row] * (end_col + 1); } /* end if */ diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c index b1b8574..8166d88 100644 --- a/src/H5HFhdr.c +++ b/src/H5HFhdr.c @@ -46,13 +46,13 @@ /* Limit on the size of the max. 
direct block size */ /* (This is limited to 32-bits currently, because I think it's unlikely to * need to be larger, the 32-bit limit for H5VM_log2_of2(n), and - * some offsets/sizes are encoded with a maxiumum of 32-bits - QAK) + * some offsets/sizes are encoded with a maximum of 32-bits - QAK) */ #define H5HF_MAX_DIRECT_SIZE_LIMIT ((hsize_t)2 * 1024 * 1024 * 1024) /* Limit on the width of the doubling table */ /* (This is limited to 16-bits currently, because I think it's unlikely to - * need to be larger, and its encoded with a maxiumum of 16-bits - QAK) + * need to be larger, and its encoded with a maximum of 16-bits - QAK) */ #define H5HF_WIDTH_LIMIT (64 * 1024) #endif /* NDEBUG */ diff --git a/src/H5HFspace.c b/src/H5HFspace.c index 5d659a6..5260ae2 100644 --- a/src/H5HFspace.c +++ b/src/H5HFspace.c @@ -311,7 +311,7 @@ H5HF__space_revert_root(const H5HF_hdr_t *hdr) /* Only need to scan the sections if the free space has been initialized */ if(hdr->fspace) - /* Iterate over all sections, reseting the parent pointers in 'single' sections */ + /* Iterate over all sections, resetting the parent pointers in 'single' sections */ if(H5FS_sect_iterate(hdr->f, hdr->fspace, H5HF_space_revert_root_cb, NULL) < 0) HGOTO_ERROR(H5E_FSPACE, H5E_BADITER, FAIL, "can't iterate over sections to reset parent pointers") diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c index 79462e9..5cf1c08 100644 --- a/src/H5HFtiny.c +++ b/src/H5HFtiny.c @@ -109,7 +109,7 @@ H5HF_tiny_init(H5HF_hdr_t *hdr) /* Check if tiny objects need an extra byte for their length */ /* (account for boundary condition when length of an object would need an * extra byte, but using that byte means that the extra length byte is - * unneccessary) + * unnecessary) */ if((hdr->id_len - 1) <= H5HF_TINY_LEN_SHORT) { hdr->tiny_max_len = hdr->id_len - 1; diff --git a/src/H5HGcache.c b/src/H5HGcache.c index 50b2669..3a770ae 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -300,7 +300,7 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata, image += heap->obj[0].size; } /* end if */ else { - size_t need; + size_t need = 0; unsigned idx; uint8_t *begin = image; @@ -420,7 +420,7 @@ H5HG__cache_heap_image_len(const void *_thing, size_t *image_len) * * Purpose: Given an appropriately sized buffer and an instance of * H5HG_heap_t, serialize the global heap for writing to file, - * and copy the serialized verion into the buffer. + * and copy the serialized version into the buffer. * * * Return: Success: SUCCEED @@ -1692,7 +1692,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); } /* end if */ /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -1788,7 +1788,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); HDassert(f->shared->sblock); /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -1956,7 +1956,7 @@ HDfprintf(stderr, "%s: Entering\n", FUNC); HDassert(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2); /* Set the ring type in the API context. 
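The H5HFdtable hunks above accumulate a multi-row span in three pieces: the tail of the starting row, the full rows in between, and the head of the ending row. A standalone sketch of that arithmetic with invented names; the single-row branch is an assumption added here for completeness:

    /* Sum the bytes covered by a span from (start_row, start_col) to
     * (end_row, end_col) in a doubling table whose rows each hold `width`
     * blocks of row_block_size[row] bytes. */
    static unsigned long long
    span_size(const unsigned long long *row_block_size, unsigned width,
              unsigned start_row, unsigned start_col,
              unsigned end_row, unsigned end_col)
    {
        unsigned long long acc = 0;

        if(start_row != end_row) {
            if(start_col > 0) {              /* partial starting row */
                acc += row_block_size[start_row] * (width - start_col);
                start_row++;
            }
            while(start_row < end_row) {     /* whole rows in between */
                acc += row_block_size[start_row] * width;
                start_row++;
            }
            acc += row_block_size[start_row] * (end_col + 1);  /* partial ending row */
        }
        else                                 /* span within a single row */
            acc = row_block_size[start_row] * ((end_col - start_col) + 1);

        return acc;
    }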
In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2244,7 +2244,7 @@ H5MF_get_freespace(H5F_t *f, hsize_t *tot_space, hsize_t *meta_size) HDassert(f->shared->lf); /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2419,7 +2419,7 @@ H5MF_get_free_sections(H5F_t *f, H5FD_mem_t type, size_t nsects, H5F_sect_info_t sect_udata.sect_idx = 0; /* Set the ring type in the API context. In most cases, we will - * need H5AC_RING_RDFSM, so initialy set the ring in + * need H5AC_RING_RDFSM, so initially set the ring in * the context to that value. We will alter this later if needed. */ H5AC_set_ring(H5AC_RING_RDFSM, &orig_ring); @@ -2712,7 +2712,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled) HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators") /* Set the ring type in the DXPL. In most cases, we will - * need H5AC_RING_MDFSM first, so initialy set the ring in + * need H5AC_RING_MDFSM first, so initially set the ring in * the DXPL to that value. We will alter this later if * needed. */ @@ -2829,7 +2829,7 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled) * extension messages will choke if the target message is * unexpectedly either absent or present. * - * Update: This is probably unecessary, as I gather that the + * Update: This is probably unnecessary, as I gather that the * file space manager info message is guaranteed to exist. * Leave it in for now, but consider removing it. */ @@ -3083,7 +3083,7 @@ done: * of these free space manager(s) only to have the allocation * result in the free space manager being empty and thus * obliging us to free the space again. Thus there is the - * potential for an infinte loop if we want to avoid saving + * potential for an infinite loop if we want to avoid saving * empty free space managers. * * Similarly, it is possible that we could allocate space @@ -3280,7 +3280,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled) * extension message. * * As of this writing, we are solving this problem by - * simply not supporting persistant FSMs with the split + * simply not supporting persistent FSMs with the split * and multi file drivers. * * Current plans are to do away with the multi file @@ -3296,7 +3296,7 @@ H5MF_settle_meta_data_fsm(H5F_t *f, hbool_t *fsm_settled) /* ******************* PROBLEM: ******************** * - * If the file has an alignement other than 1, and if + * If the file has an alignment other than 1, and if * the EOA is not a multiple of this alignment, allocating space * for the section via the VFD info has the potential of generating * a fragment that will be added to the free space manager. This @@ -3433,7 +3433,7 @@ H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type) /* In principle, fsm_type should always be less than * H5F_MEM_PAGE_LARGE_SUPER whenever paged aggregation * is not enabled. However, since there is code that does - * not observe this prinicple, force the result to FALSE if + * not observe this principle, force the result to FALSE if * fsm_type is greater than or equal to H5F_MEM_PAGE_LARGE_SUPER. 
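The alignment problem flagged in the H5MF settle hunks (an EOA that is not a multiple of the file alignment produces a fragment when space is allocated through the VFD) comes down to a small computation. A sketch with invented names:

    #include <stdio.h>

    /* Bytes wasted between the current EOA and the next aligned address.
     * With an alignment of 1 this is always 0, which is why the asserts in
     * the hunks above expect no EOA fragment in that case. */
    static unsigned long long
    eoa_fragment(unsigned long long eoa, unsigned long long alignment)
    {
        unsigned long long rem = eoa % alignment;

        return rem ? (alignment - rem) : 0;
    }

    int main(void)
    {
        printf("%llu\n", eoa_fragment(1000, 512));  /* 24 bytes of fragment */
        printf("%llu\n", eoa_fragment(1000, 1));    /* 0: alignment of 1    */
        return 0;
    }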
*/ if(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER) diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c index 5ab1834..3db7f73 100644 --- a/src/H5MFaggr.c +++ b/src/H5MFaggr.c @@ -407,7 +407,7 @@ H5MF__aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, if(f->shared->feature_flags & aggr->feature_flag) { /* * If the block being tested adjoins the beginning of the aggregator - * block, check if the aggregator can accomodate the extension. + * block, check if the aggregator can accommodate the extension. */ if(H5F_addr_eq(blk_end, aggr->addr)) { haddr_t eoa; /* EOA for the file */ @@ -946,7 +946,7 @@ done: * Function: H5Odisable_mdc_flushes * * Purpose: To "cork" an object: - * --keep dirty entries assoicated with the object in the metadata cache + * --keep dirty entries associated with the object in the metadata cache * * Return: Success: Non-negative * Failure: Negative diff --git a/src/H5Oattr.c b/src/H5Oattr.c index f2c31d7..a62a3a3 100644 --- a/src/H5Oattr.c +++ b/src/H5Oattr.c @@ -249,7 +249,7 @@ done: if(NULL == ret_value) if(attr) { if(attr->shared) { - /* Free any dynamicly allocated items */ + /* Free any dynamically allocated items */ if(H5A__free(attr) < 0) HDONE_ERROR(H5E_ATTR, H5E_CANTRELEASE, NULL, "can't release attribute info") diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c index 9d125e8..e8a8433 100644 --- a/src/H5Oattribute.c +++ b/src/H5Oattribute.c @@ -1093,7 +1093,7 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/, mesg->native = NULL; /* Delete old attribute */ - /* (doesn't decrement the link count on shared components becuase + /* (doesn't decrement the link count on shared components because * the "native" pointer has been reset) */ if(H5O_release_mesg(udata->f, oh, mesg, FALSE) < 0) diff --git a/src/H5Ocache.c b/src/H5Ocache.c index 3607839..59e1705 100644 --- a/src/H5Ocache.c +++ b/src/H5Ocache.c @@ -1430,9 +1430,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image /* Check for combining two adjacent 'null' messages */ if((udata->file_intent & H5F_ACC_RDWR) && - H5O_NULL_ID == id && oh->nmesgs > 0 && - H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id && - oh->mesg[oh->nmesgs - 1].chunkno == chunkno) { + H5O_NULL_ID == id && oh->nmesgs > 0 && + H5O_NULL_ID == oh->mesg[oh->nmesgs - 1].type->id && + oh->mesg[oh->nmesgs - 1].chunkno == chunkno) { + size_t mesgno; /* Current message to operate on */ /* Combine adjacent null messages */ @@ -1467,13 +1468,13 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image /* Point unknown messages at 'unknown' message class */ /* (Usually from future versions of the library) */ - if(id >= H5O_UNKNOWN_ID || + if(id >= H5O_UNKNOWN_ID || #ifdef H5O_ENABLE_BOGUS - id == H5O_BOGUS_VALID_ID || + id == H5O_BOGUS_VALID_ID || #endif - NULL == H5O_msg_class_g[id]) { + NULL == H5O_msg_class_g[id]) { - H5O_unknown_t *unknown; /* Pointer to "unknown" message info */ + H5O_unknown_t *unknown; /* Pointer to "unknown" message info */ /* Allocate "unknown" message info */ if(NULL == (unknown = H5FL_MALLOC(H5O_unknown_t))) @@ -1490,9 +1491,9 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image /* Check for "fail if unknown" message flags */ if(((udata->file_intent & H5F_ACC_RDWR) && - (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE)) - || (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS)) - HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found") + (flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_AND_OPEN_FOR_WRITE)) + || 
(flags & H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS)) + HGOTO_ERROR(H5E_OHDR, H5E_BADMESG, FAIL, "unknown message with 'fail if unknown' flag found") /* Check for "mark if unknown" message flag, etc. */ else if((flags & H5O_MSG_FLAG_MARK_IF_UNKNOWN) && !(flags & H5O_MSG_FLAG_WAS_UNKNOWN) && @@ -1543,7 +1544,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image H5O_refcount_t *refcount; /* Decode ref. count message */ - HDassert(oh->version > H5O_VERSION_1); + if(oh->version <= H5O_VERSION_1) + HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "object header version does not support reference count message") refcount = (H5O_refcount_t *)(H5O_MSG_REFCOUNT->decode)(udata->f, NULL, 0, &ioflags, mesg->raw_size, mesg->raw); /* Save 'native' form of ref. count message */ @@ -1551,6 +1553,8 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image /* Set object header values */ oh->has_refcount_msg = TRUE; + if(!refcount) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't decode refcount") oh->nlink = *refcount; } /* end if */ /* Check if message is a link message */ @@ -1614,6 +1618,10 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image } /* end if */ done: + if(ret_value < 0 && udata->cont_msg_info->msgs) { + udata->cont_msg_info->msgs = (H5O_chunk_t *)H5FL_SEQ_FREE(H5O_cont_t, udata->cont_msg_info->msgs); + udata->cont_msg_info->alloc_nmsgs = 0; + } FUNC_LEAVE_NOAPI(ret_value) } /* H5O__chunk_deserialize() */ diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c index ad64297..591ac4a 100644 --- a/src/H5Ocache_image.c +++ b/src/H5Ocache_image.c @@ -313,7 +313,7 @@ H5O__mdci_delete(H5F_t *f, H5O_t *open_oh, void *_mesg) * space allocations / deallocations prior to the free of the * cache image. Verify this to the extent possible. * - * If the hack to work around the persistant self referential + * If the hack to work around the persistent self referential * free space manager issue is NOT in use, just call H5MF_xfree() * to release the cache iamge. 
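The object "corking" described in the H5Odisable_mdc_flushes hunk above is exposed as a public pair of calls. A minimal usage sketch; the dataset name is illustrative:

    #include <hdf5.h>

    /* Keep dirty metadata entries for one object pinned in the metadata
     * cache during a burst of modifications, then allow flushing again. */
    static void burst_update(hid_t file)
    {
        hid_t dset = H5Dopen2(file, "/data", H5P_DEFAULT);

        H5Odisable_mdc_flushes(dset);   /* cork: hold this object's dirty entries */

        /* ... many small metadata/attribute updates on dset ... */

        H5Oenable_mdc_flushes(dset);    /* uncork: entries may be flushed again */
        H5Dclose(dset);
    }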
In principle, we should be able * to just reduce the EOA to the base address of the cache diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index e6865f1..2e628f4 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -1106,7 +1106,7 @@ H5O__copy_header(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */, HDassert(H5F_addr_defined(oloc_src->addr)); HDassert(oloc_dst->file); - /* Intialize copy info before errors can be thrown */ + /* Initialize copy info before errors can be thrown */ HDmemset(&cpy_info, 0, sizeof(H5O_copy_t)); /* Get the copy property list */ diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 932241f..8a6004d 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -194,7 +194,7 @@ H5FL_BLK_EXTERN(type_conv); static void * H5O_fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + size_t p_size, const uint8_t *p) { H5O_fill_t *fill = NULL; void *ret_value = NULL; /* Return value */ @@ -228,6 +228,8 @@ H5O_fill_new_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, INT32DECODE(p, fill->size); if(fill->size > 0) { H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); + if((size_t)fill->size > p_size) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "destination buffer too small") if(NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") HDmemcpy(fill->buf, p, (size_t)fill->size); @@ -307,11 +309,13 @@ done: *------------------------------------------------------------------------- */ static void * -H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, +H5O_fill_old_decode(H5F_t *f, H5O_t *open_oh, unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, - size_t H5_ATTR_UNUSED p_size, const uint8_t *p) + size_t p_size, const uint8_t *p) { H5O_fill_t *fill = NULL; /* Decoded fill value message */ + htri_t exists = FALSE; + H5T_t *dt = NULL; void *ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI_NOINIT @@ -332,6 +336,21 @@ H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, /* Only decode the fill value itself if there is one */ if(fill->size > 0) { + H5_CHECK_OVERFLOW(fill->size, ssize_t, size_t); + if((size_t)fill->size > p_size) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "destination buffer too small") + + /* Get the datatype message */ + if((exists = H5O_msg_exists_oh(open_oh, H5O_DTYPE_ID)) < 0) + HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read object header") + if(exists) { + if((dt = H5O_msg_read_oh(f, open_oh, H5O_DTYPE_ID, NULL)) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "can't read DTYPE message") + /* Verify size */ + if(fill->size != H5T_GET_SIZE(dt)) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, NULL, "inconsistent fill value size") + } + if(NULL == (fill->buf = H5MM_malloc((size_t)fill->size))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value") HDmemcpy(fill->buf, p, (size_t)fill->size); @@ -344,6 +363,9 @@ H5O_fill_old_decode(H5F_t H5_ATTR_UNUSED *f, H5O_t H5_ATTR_UNUSED *open_oh, ret_value = (void*)fill; done: + if(dt) + H5O_msg_free(H5O_DTYPE_ID, dt); + if(!ret_value && fill) { if(fill->buf) H5MM_xfree(fill->buf); diff --git a/src/H5Olayout.c b/src/H5Olayout.c index d8f05f0..5f16837 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -125,8 +125,8 @@ H5O__layout_decode(H5F_t *f, H5O_t H5_ATTR_UNUSED *open_oh, /* Dimensionality */ ndims = 
*p++; - if(ndims > H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large") + if(!ndims || ndims > H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is out of range") /* Layout class */ mesg->type = (H5D_layout_t)*p++; diff --git a/src/H5Omessage.c b/src/H5Omessage.c index 930be9b..e156dcd 100644 --- a/src/H5Omessage.c +++ b/src/H5Omessage.c @@ -239,7 +239,7 @@ done: * Purpose: Modifies an existing message or creates a new message. * * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * @@ -296,7 +296,7 @@ done: * Purpose: Modifies an existing message or creates a new message. * * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * @@ -343,7 +343,7 @@ done: * Purpose: Modifies an existing message or creates a new message. * * The UPDATE_FLAGS argument are flags that allow the caller - * to skip updating the modification time or reseting the message + * to skip updating the modification time or resetting the message * data. This is useful when several calls to H5O_msg_write will be * made in a sequence. * diff --git a/src/H5Oshared.c b/src/H5Oshared.c index 047f90e..328ba4b 100644 --- a/src/H5Oshared.c +++ b/src/H5Oshared.c @@ -640,7 +640,7 @@ done: /*------------------------------------------------------------------------- * Function: H5O__shared_post_copy_file * - * Purpose: Delate a shared message and replace with a new one. + * Purpose: Delete a shared message and replace with a new one. * The function is needed at cases such as coping a shared reg_ref attribute. * When a shared reg_ref attribute is copied from one file to * another, the values in file need to be replaced. The only way @@ -399,7 +399,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -581,7 +581,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. 
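The hardening in the last few hunks (the fill-value decoders now check the declared size against p_size, and the layout decoder now rejects a zero or oversized dimensionality) follows one pattern: validate on-disk values before trusting them. A generic, hedged sketch of that pattern, not the library's own decoder:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Copy a length-prefixed payload out of `buf` (holding `buf_len` bytes).
     * Returns NULL if the declared size would run past the end of the
     * buffer, mirroring the "destination buffer too small" checks above. */
    static void *decode_sized_field(const uint8_t *buf, size_t buf_len, size_t *out_len)
    {
        uint32_t declared;
        void    *payload;

        if(buf_len < sizeof(declared))
            return NULL;
        memcpy(&declared, buf, sizeof(declared));
        buf     += sizeof(declared);
        buf_len -= sizeof(declared);

        if(declared > buf_len)          /* declared size exceeds available bytes */
            return NULL;

        if(NULL == (payload = malloc(declared)))
            return NULL;
        memcpy(payload, buf, declared);

        *out_len = declared;
        return payload;
    }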
A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c index d69b063..cb13ff3 100644 --- a/src/H5Pdcpl.c +++ b/src/H5Pdcpl.c @@ -1332,7 +1332,7 @@ done: /*------------------------------------------------------------------------- * Function: H5P__dcrt_ext_file_list_get * - * Purpose: Copies an external file lsit property when it's retrieved from a property list + * Purpose: Copies an external file list property when it's retrieved from a property list * * Return: Success: Non-negative * Failure: Negative @@ -2245,7 +2245,7 @@ H5Pset_virtual(hid_t dcpl_id, hid_t vspace_id, const char *src_file_name, done: /* Set VDS layout information in property list */ - /* (Even on faliure, so there's not a mangled layout struct in the list) */ + /* (Even on failure, so there's not a mangled layout struct in the list) */ if(retrieved_layout) { if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &virtual_layout) < 0) { HDONE_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set layout") diff --git a/src/H5Pdeprec.c b/src/H5Pdeprec.c index 7f96333..226d206 100644 --- a/src/H5Pdeprec.c +++ b/src/H5Pdeprec.c @@ -202,7 +202,7 @@ GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -383,7 +383,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index b56c137..e6eed32 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -1035,7 +1035,7 @@ done: * for the type conversion buffer and background buffer and * optionally supply pointers to application-allocated buffers. * If the buffer size is smaller than the entire amount of data - * being transfered between application and file, and a type + * being transferred between application and file, and a type * conversion buffer or background buffer is required then * strip mining will be used. 
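The strip-mining behaviour mentioned in the H5Pdxpl hunk above is driven by the transfer-property buffer size. A minimal sketch of raising it so a type-converting transfer avoids strip mining for moderately sized selections; the 16 MiB figure is arbitrary:

    #include <hdf5.h>

    /* Give the data transfer a larger type-conversion buffer.  If the
     * selection still exceeds it (and conversion or a background buffer is
     * needed), the library falls back to strip mining as described above. */
    static hid_t make_dxpl(void)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        /* NULL, NULL lets the library allocate the conversion and
         * background buffers itself. */
        H5Pset_buffer(dxpl, (size_t)(16 * 1024 * 1024), NULL, NULL);

        return dxpl;
    }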
* diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c index eded286..7ba9c11 100644 --- a/src/H5Pfapl.c +++ b/src/H5Pfapl.c @@ -396,7 +396,7 @@ static const hbool_t H5F_def_coll_md_write_flag_g = H5F_ACS_COLL_MD_WRITE_FLAG_D static const H5AC_cache_image_config_t H5F_def_mdc_initCacheImageCfg_g = H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF; /* Default metadata cache image settings */ static const size_t H5F_def_page_buf_size_g = H5F_ACS_PAGE_BUFFER_SIZE_DEF; /* Default page buffer size */ static const unsigned H5F_def_page_buf_min_meta_perc_g = H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEF; /* Default page buffer minimum metadata size */ -static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minumum raw data size */ +static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer mininum raw data size */ /*------------------------------------------------------------------------- @@ -1713,7 +1713,7 @@ H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown image config version.") /* If we ever support multiple versions of H5AC_cache_config_t, we - * will have to get the cannonical version here, and then translate + * will have to get the canonical version here, and then translate * to the version of the structure supplied. */ @@ -1808,7 +1808,7 @@ H5Pget_mdc_config(hid_t plist_id, H5AC_cache_config_t *config_ptr) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown config version.") /* If we ever support multiple versions of H5AC_cache_config_t, we - * will have to get the cannonical version here, and then translate + * will have to get the canonical version here, and then translate * to the version of the structure supplied. */ @@ -2322,7 +2322,7 @@ done: * necessary to represent features used. 
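The "Unknown config version" checks above are why callers must initialize the version field of the cache configuration struct before querying it. A minimal sketch:

    #include <hdf5.h>
    #include <stdio.h>

    static void print_mdc_config(hid_t fapl)
    {
        H5AC_cache_config_t config;

        /* The version field must be set before the call so the library
         * knows which layout of the struct the caller expects. */
        config.version = H5AC__CURR_CACHE_CONFIG_VERSION;

        if(H5Pget_mdc_config(fapl, &config) < 0)
            fprintf(stderr, "H5Pget_mdc_config failed\n");
        else
            printf("initial cache size: %zu bytes\n", config.initial_size);
    }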
* (This is the "make certain to take advantage of <new feature> * in the file format" use case (maybe <new feature> is smaller - * or scales better than an ealier version, which would otherwise + * or scales better than an earlier version, which would otherwise * be used)) * * LOW = H5F_FORMAT_1_2_0, HIGH = H5F_FORMAT_1_6_0 => creates objects @@ -2549,7 +2549,7 @@ H5Pset_file_image(hid_t fapl_id, void *buf_ptr, size_t buf_len) /* validate parameters */ if(!(((buf_ptr == NULL) && (buf_len == 0)) || ((buf_ptr != NULL) && (buf_len > 0)))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistant buf_ptr and buf_len") + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "inconsistent buf_ptr and buf_len") /* Get the plist structure */ if(NULL == (fapl = H5P_object_verify(fapl_id, H5P_FILE_ACCESS))) @@ -2652,7 +2652,7 @@ H5Pget_file_image(hid_t fapl_id, void **buf_ptr_ptr, size_t *buf_len_ptr) if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &image_info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((image_info.buffer != NULL) && (image_info.size > 0)) || ((image_info.buffer == NULL) && (image_info.size == 0))); @@ -2728,7 +2728,7 @@ H5Pset_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get old file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info.buffer != NULL) && (info.size > 0)) || ((info.buffer == NULL) && (info.size == 0))); @@ -2804,7 +2804,7 @@ H5Pget_file_image_callbacks(hid_t fapl_id, H5FD_file_image_callbacks_t *callback if(H5P_peek(fapl, H5F_ACS_FILE_IMAGE_INFO_NAME, &info) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file image info") - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info.buffer != NULL) && (info.size > 0)) || ((info.buffer == NULL) && (info.size == 0))); @@ -2858,7 +2858,7 @@ H5P__file_image_info_copy(void *value) info = (H5FD_file_image_info_t *)value; - /* verify file image field consistancy */ + /* verify file image field consistency */ HDassert(((info->buffer != NULL) && (info->size > 0)) || ((info->buffer == NULL) && (info->size == 0))); @@ -2932,7 +2932,7 @@ H5P__file_image_info_free(void *value) info = (H5FD_file_image_info_t *)value; - /* Verify file image field consistancy */ + /* Verify file image field consistency */ HDassert(((info->buffer != NULL) && (info->size > 0)) || ((info->buffer == NULL) && (info->size == 0))); diff --git a/src/H5Pfcpl.c b/src/H5Pfcpl.c index 6b0d2c0..f90dae5 100644 --- a/src/H5Pfcpl.c +++ b/src/H5Pfcpl.c @@ -545,7 +545,7 @@ H5Pset_sym_k(hid_t plist_id, unsigned ik, unsigned lk) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "istore IK value exceeds maximum B-tree entries"); if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); btree_k[H5B_SNODE_ID] = ik; if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree nodes"); @@ -650,10 +650,10 @@ H5Pset_istore_k(hid_t plist_id, unsigned ik) /* Set value */ if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get 
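The consistency rule enforced in the H5Pset_file_image hunk above (buf_ptr and buf_len must be NULL/0 together or non-NULL/non-zero together) is visible at the API level. A sketch of the two valid call forms; the image buffer is a placeholder supplied by the caller:

    #include <hdf5.h>

    static void set_image_example(hid_t fapl, void *image, size_t image_size)
    {
        if(image != NULL && image_size > 0)
            /* Valid: supply an in-memory file image (the library copies it). */
            H5Pset_file_image(fapl, image, image_size);
        else
            /* Also valid: NULL/0 clears any previously set image.
             * NULL with a non-zero length (or vice versa) is rejected as
             * "inconsistent buf_ptr and buf_len", per the check above. */
            H5Pset_file_image(fapl, NULL, 0);
    }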
rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); btree_k[H5B_CHUNK_ID] = ik; if(H5P_set(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set rank for btree internal nodes"); done: FUNC_LEAVE_API(ret_value) @@ -698,7 +698,7 @@ H5Pget_istore_k(hid_t plist_id, unsigned *ik /*out */ ) /* Get value */ if(ik) { if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, btree_k) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree interanl nodes"); + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get rank for btree internal nodes"); *ik = btree_k[H5B_CHUNK_ID]; } /* end if */ diff --git a/src/H5Pint.c b/src/H5Pint.c index 0e78463..6a0cc14 100644 --- a/src/H5Pint.c +++ b/src/H5Pint.c @@ -1398,7 +1398,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * NAME H5P_access_class PURPOSE - Internal routine to increment or decrement list & class dependancies on a + Internal routine to increment or decrement list & class dependencies on a property list class USAGE herr_t H5P_access_class(pclass,mod) @@ -1407,7 +1407,7 @@ H5P_free_del_name_cb(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED * RETURNS Returns non-negative on success, negative on failure. DESCRIPTION - Increment/Decrement the class or list dependancies for a given class. + Increment/Decrement the class or list dependencies for a given class. This routine is the final arbiter on decisions about actually releasing a class in memory, such action is only taken when the reference counts for both dependent classes & lists reach zero. @@ -1425,19 +1425,19 @@ H5P_access_class(H5P_genclass_t *pclass, H5P_class_mod_t mod) HDassert(mod > H5P_MOD_ERR && mod < H5P_MOD_MAX); switch(mod) { - case H5P_MOD_INC_CLS: /* Increment the dependant class count*/ + case H5P_MOD_INC_CLS: /* Increment the dependent class count*/ pclass->classes++; break; - case H5P_MOD_DEC_CLS: /* Decrement the dependant class count*/ + case H5P_MOD_DEC_CLS: /* Decrement the dependent class count*/ pclass->classes--; break; - case H5P_MOD_INC_LST: /* Increment the dependant list count*/ + case H5P_MOD_INC_LST: /* Increment the dependent list count*/ pclass->plists++; break; - case H5P_MOD_DEC_LST: /* Decrement the dependant list count*/ + case H5P_MOD_DEC_LST: /* Decrement the dependent list count*/ pclass->plists--; break; @@ -1586,12 +1586,12 @@ H5P_create_class(H5P_genclass_t *par_class, const char *name, H5P_plist_type_t t /* Allocate room for the class */ if(NULL == (pclass = H5FL_CALLOC(H5P_genclass_t))) - HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class allocation failed") + HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class allocation failed") /* Set class state */ pclass->parent = par_class; if(NULL == (pclass->name = H5MM_xstrdup(name))) - HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "propery list class name allocation failed") + HGOTO_ERROR(H5E_PLIST, H5E_CANTALLOC, NULL, "property list class name allocation failed") pclass->type = type; pclass->nprops = 0; /* Classes are created without properties initially */ pclass->plists = 0; /* No properties lists of this class yet */ @@ -2026,7 +2026,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + 
set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2258,7 +2258,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2499,7 +2499,7 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The 'set' callback function may be useful to range check the value being - set for the property or may perform some tranformation/translation of the + set for the property or may perform some transformation/translation of the value set. The 'get' callback would then [probably] reverse the transformation, etc. A single 'get' or 'set' callback could handle multiple properties by performing different actions based on the property @@ -2636,7 +2636,7 @@ H5P__do_prop(H5P_genplist_t *plist, const char *name, H5P_do_plist_op_t plist_op /* Find property in changed list */ if(NULL != (prop = (H5P_genprop_t *)H5SL_search(plist->props, name))) { - /* Call the 'found in propery list' callback */ + /* Call the 'found in property list' callback */ if((*plist_op)(plist, name, prop, udata) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTOPERATE, FAIL, "can't operate on property") } /* end if */ @@ -4020,7 +4020,7 @@ H5P_iterate_plist(const H5P_genplist_t *plist, hbool_t iter_all_prop, int *idx, /* Check for iterating over all properties, or just non-default ones */ if(iter_all_prop) { - /* Walk up the class hiearchy */ + /* Walk up the class hierarchy */ tclass = plist->pclass; while(tclass != NULL) { /* Iterate over properties in property list class */ @@ -4737,7 +4737,7 @@ H5P_copy_prop_pclass(hid_t dst_id, hid_t src_id, const char *name) /* Sanity check */ HDassert(name); - /* Get propery list classes */ + /* Get property list classes */ if(NULL == (src_pclass = (H5P_genclass_t *)H5I_object(src_id))) HGOTO_ERROR(H5E_PLIST, H5E_NOTFOUND, FAIL, "source property class object doesn't exist") if(NULL == (dst_pclass = (H5P_genclass_t *)H5I_object(dst_id))) @@ -4854,8 +4854,8 @@ done: GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The property list class 'close' callback routine is not called from - here, it must have been check for and called properly prior to this routine - being called + here, it must have been checked for and called properly prior to this routine + being called. EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ @@ -4981,7 +4981,7 @@ H5P_close(void *_plist) tclass=tclass->parent; } /* end while */ - /* Decrement class's dependant property list value! */ + /* Decrement class's dependent property list value! 
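The 'set' callback behaviour described repeatedly in these H5P hunks (range checking or transforming a value as it is stored) looks roughly like the following when one is written. The prototype follows H5P's set-callback convention (H5P_prp_set_func_t); the bounds and clamping policy are invented for illustration:

    #include <hdf5.h>

    /* A 'set' callback that range-checks an int-valued property before it
     * is stored in the list: out-of-range values are clamped. */
    static herr_t clamp_on_set(hid_t prop_id, const char *name, size_t size, void *value)
    {
        int *val = (int *)value;

        (void)prop_id; (void)name;

        if(size != sizeof(int))
            return -1;              /* unexpected property size */

        if(*val < 0)
            *val = 0;               /* transform the value as it is set */
        else if(*val > 100)
            *val = 100;

        return 0;
    }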
*/ if(H5P_access_class(plist->pclass,H5P_MOD_DEC_LST) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "Can't decrement class ref count") diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h index 13463ae..60b2363 100644 --- a/src/H5Ppkg.h +++ b/src/H5Ppkg.h @@ -51,10 +51,10 @@ typedef enum { /* Define enum for modifications to class */ typedef enum { H5P_MOD_ERR=(-1), /* Indicate an error */ - H5P_MOD_INC_CLS, /* Increment the dependant class count*/ - H5P_MOD_DEC_CLS, /* Decrement the dependant class count*/ - H5P_MOD_INC_LST, /* Increment the dependant list count*/ - H5P_MOD_DEC_LST, /* Decrement the dependant list count*/ + H5P_MOD_INC_CLS, /* Increment the dependent class count*/ + H5P_MOD_DEC_CLS, /* Decrement the dependent class count*/ + H5P_MOD_INC_LST, /* Increment the dependent list count*/ + H5P_MOD_DEC_LST, /* Decrement the dependent list count*/ H5P_MOD_INC_REF, /* Increment the ID reference count*/ H5P_MOD_DEC_REF, /* Decrement the ID reference count*/ H5P_MOD_MAX /* Upper limit on class modifications */ @@ -89,7 +89,7 @@ struct H5P_genclass_t { size_t nprops; /* Number of properties in class */ unsigned plists; /* Number of property lists that have been created since the last modification to the class */ unsigned classes; /* Number of classes that have been derived since the last modification to the class */ - unsigned ref_count; /* Number of oustanding ID's open on this class object */ + unsigned ref_count; /* Number of outstanding ID's open on this class object */ hbool_t deleted; /* Whether this class has been deleted and is waiting for dependent classes & proplists to close */ unsigned revision; /* Revision number of a particular class (global) */ H5SL_t *props; /* Skip list containing properties */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index a2c0418..58f3d18 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -139,7 +139,7 @@ typedef enum H5D_mpio_actual_chunk_opt_mode_t { typedef enum H5D_mpio_actual_io_mode_t { /* The following four values are conveniently defined as a bit field so that - * we can switch from the default to indpendent or collective and then to + * we can switch from the default to independent or collective and then to * mixed without having to check the original value. * * NO_COLLECTIVE means that either collective I/O wasn't requested or that diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h index a9a4fd9..bb458a7 100644 --- a/src/H5SMpkg.h +++ b/src/H5SMpkg.h @@ -96,7 +96,7 @@ #define H5SM_B2_SPLIT_PERCENT 100 #define H5SM_B2_MERGE_PERCENT 40 -#define H5SM_LIST_VERSION 0 /* Verion of Shared Object Header Message List Indexes */ +#define H5SM_LIST_VERSION 0 /* Version of Shared Object Header Message List Indexes */ /****************************/ /* Package Typedefs */ diff --git a/src/H5Sall.c b/src/H5Sall.c index 11dd8e1..28395b1 100644 --- a/src/H5Sall.c +++ b/src/H5Sall.c @@ -432,7 +432,7 @@ H5S__all_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. Offset is irrelevant for this type of selection. 
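The H5Ppublic.h comment above notes that the actual-I/O-mode values are defined as a bit field so a chunk loop can switch to "mixed" without checking the previous value. A sketch of why that is convenient, assuming (as that comment states) that the enumerators really are combinable bits:

    #include <hdf5.h>
    #include <assert.h>

    /* OR in whichever mode each chunk used; two different modes combine
     * into the "mixed" value without inspecting the prior state. */
    static H5D_mpio_actual_io_mode_t accumulate_modes(void)
    {
        H5D_mpio_actual_io_mode_t mode = H5D_MPIO_NO_COLLECTIVE;

        mode |= H5D_MPIO_CHUNK_INDEPENDENT;   /* one chunk did independent I/O */
        mode |= H5D_MPIO_CHUNK_COLLECTIVE;    /* another chunk went collective */

        assert(mode == H5D_MPIO_CHUNK_MIXED); /* both bits set == mixed */
        return mode;
    }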
GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Shyper.c b/src/H5Shyper.c index 47eb573..7a6d1be 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -853,7 +853,7 @@ H5S__hyper_iter_next(H5S_sel_iter_t *iter, size_t nelem) /* Check if we are finished with the spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while(curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -1037,7 +1037,7 @@ H5S__hyper_iter_next_block(H5S_sel_iter_t *iter) /* Check if we are finished with the spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while(curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -1704,7 +1704,7 @@ done: RETURNS TRUE if the selection fits within the extent, FALSE if it does not DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -1762,7 +1762,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -2052,7 +2052,7 @@ H5S__hyper_serialize_helper(const H5S_hyper_span_info_t *spans, HDassert(rank < H5O_LAYOUT_NDIMS); HDassert(p && pp); - /* Walk through the list of spans, recursing or outputing them */ + /* Walk through the list of spans, recursing or outputting them */ curr = spans->head; while(curr != NULL) { /* Recurse if this node has down spans */ @@ -2458,7 +2458,7 @@ H5S__hyper_span_blocklist(const H5S_hyper_span_info_t *spans, hsize_t start[], HDassert(numblocks && *numblocks > 0); HDassert(buf && *buf); - /* Walk through the list of spans, recursing or outputing them */ + /* Walk through the list of spans, recursing or outputting them */ curr = spans->head; while(curr != NULL && *numblocks > 0) { /* Recurse if this node has down spans */ @@ -3509,7 +3509,7 @@ done: NAME H5S__hyper_add_span_element_helper PURPOSE - Add a single elment to a span tree + Add a single element to a span tree USAGE herr_t H5S_hyper_add_span_element_helper(prev_span, span_tree, rank, coords) H5S_hyper_span_info_t *span_tree; IN/OUT: Pointer to span tree to append to @@ -3713,7 +3713,7 @@ done: NAME H5S_hyper_add_span_element PURPOSE - Add a single elment to a span tree + Add a single element to a span tree USAGE herr_t H5S_hyper_add_span_element(space, span_tree, rank, coords) H5S_t *space; IN/OUT: Pointer to dataspace to add coordinate to @@ -5903,7 +5903,8 @@ H5S__hyper_make_spans(unsigned rank, const hsize_t *start, const hsize_t *stride last_span = NULL; /* Generate all the span segments for this dimension */ - for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) { + for(u = 0, stride_iter = 0; u < count[i]; u++, stride_iter += stride[i]) + { H5S_hyper_span_t *span; /* New hyperslab span */ /* Allocate a span node */ @@ -5945,7 +5946,8 @@ H5S__hyper_make_spans(unsigned rank, const hsize_t *start, const hsize_t *stride } /* end for */ /* Indicate that there is a pointer to this tree */ - down->count = 1; + if(down) + down->count = 1; /* Success! 
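The hyperslab span machinery in the H5Shyper hunks above is built from the start/stride/count/block parameters an application passes when selecting a hyperslab. A minimal sketch of making such a selection; the 2-D sizes are arbitrary:

    #include <hdf5.h>

    /* Select every other row of a 100x200 dataspace, 10 rows in total,
     * starting at row 5: the start/stride/count/block that feed the span
     * tree code. */
    static hid_t make_selection(void)
    {
        hsize_t dims[2]   = {100, 200};
        hsize_t start[2]  = {5, 0};
        hsize_t stride[2] = {2, 1};
        hsize_t count[2]  = {10, 1};
        hsize_t block[2]  = {1, 200};
        hid_t   space     = H5Screate_simple(2, dims, NULL);

        H5Sselect_hyperslab(space, H5S_SELECT_SET, start, stride, count, block);

        return space;
    }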
Return the head of the list in the slowest changing dimension */ ret_value = down; @@ -8101,7 +8103,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter, /* Check if we have more spans in the tree */ if(curr_dim >= 0) { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while((unsigned)curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); @@ -8280,7 +8282,7 @@ H5S__hyper_get_seq_list_gen(const H5S_t *space, H5S_sel_iter_t *iter, break; } /* end if */ else { - /* Walk back down the iterator positions, reseting them */ + /* Walk back down the iterator positions, resetting them */ while((unsigned)curr_dim < fast_dim) { HDassert(curr_span); HDassert(curr_span->down); diff --git a/src/H5Smpio.c b/src/H5Smpio.c index 5c65119..db81ffc 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -909,10 +909,9 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, else inner_type = outer_type; } /* end for */ -/*************************** -* End of loop, walking -* thru dimensions. -***************************/ +/****************************************** +* End of loop, walking through dimensions. +*******************************************/ /* At this point inner_type is actually the outermost type, even for 0-trip loop */ *new_type = inner_type; diff --git a/src/H5Snone.c b/src/H5Snone.c index 19d0b5f..a46a71a 100644 --- a/src/H5Snone.c +++ b/src/H5Snone.c @@ -408,7 +408,7 @@ H5S_none_copy(H5S_t *dst, const H5S_t H5_ATTR_UNUSED *src, hbool_t H5_ATTR_UNUSE TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. Offset is irrelevant for this type of selection. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 442bb2a..fb8e311 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -676,7 +676,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 4462295..ec74523 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -160,7 +160,7 @@ H5S_select_release(H5S_t *ds) HDassert(ds); /* Call the selection type's release function */ - if((ret_value = (*ds->select.type->release)(ds)) < 0) + if((ds->select.type) && ((ret_value = (*ds->select.type->release)(ds)) < 0)) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection") done: @@ -361,7 +361,7 @@ H5S_get_select_npoints(const H5S_t *space) TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -403,7 +403,7 @@ done: TRUE if the selection fits within the extent, FALSE if it does not and Negative on an error. 
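The DESCRIPTION text repeated in these hunks ("does the current selection at the current offset fit within the extent") corresponds to a public validity check. A sketch of shifting a selection and re-validating it:

    #include <hdf5.h>
    #include <stdio.h>

    static void shift_and_check(hid_t space)
    {
        hssize_t offset[2] = {50, 0};   /* shift the whole selection by 50 rows */
        htri_t   ok;

        H5Soffset_simple(space, offset);

        /* Positive if the offset selection still fits inside the extent,
         * zero if it now pokes outside, negative on error. */
        ok = H5Sselect_valid(space);
        printf("selection %s within the extent\n", ok > 0 ? "fits" : "does not fit");
    }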
DESCRIPTION - Determines if the current selection at the current offet fits within the + Determines if the current selection at the current offset fits within the extent for the dataspace. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS @@ -309,7 +309,7 @@ static H5T_path_t *H5T__path_find_real(const H5T_t *src, const H5T_t *dst, /* Library Private Variables */ /*****************************/ -/* The native endianess of the platform */ +/* The native endianness of the platform */ H5T_order_t H5T_native_order_g = H5T_ORDER_ERROR; diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 4e4a551..1719f8f 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -385,7 +385,7 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id) if(H5T_set_version(file, type) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set version of datatype") - /* Calculate message size infomation, for creating object header */ + /* Calculate message size information, for creating object header */ dtype_size = H5O_msg_size_f(file, tcpl_id, H5O_DTYPE_ID, type, (size_t)0); HDassert(dtype_size); diff --git a/src/H5Tconv.c b/src/H5Tconv.c index f3af9e1..803a6da 100644 --- a/src/H5Tconv.c +++ b/src/H5Tconv.c @@ -109,7 +109,7 @@ * least as large as the destination. Overflows can occur when * the destination is narrower than the source. * - * xF: Integers to float-point(float or double) values where the desination + * xF: Integers to float-point(float or double) values where the destination * is at least as wide as the source. This case cannot generate * overflows. * @@ -2102,7 +2102,7 @@ H5T__conv_struct_subset(const H5T_cdata_t *cdata) * * For each element do * For I=1..NELMTS do - * If sizeof detination type <= sizeof source type then + * If sizeof destination type <= sizeof source type then * Convert member to destination type; * Move member as far left as possible; * @@ -2314,7 +2314,7 @@ done: * function. The algorithm is basically: * * For each member of the struct - * If sizeof detination type <= sizeof source type then + * If sizeof destination type <= sizeof source type then * Convert member to destination type for all elements * Move memb to BKG buffer for all elements * Else @@ -2421,7 +2421,7 @@ H5T__conv_struct_opt(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, offset -= src_memb->size; if(dst_memb->size > src->shared->size-offset) { cdata->priv = H5T_conv_struct_free(priv); - HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "convertion is unsupported by this function") + HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "conversion is unsupported by this function") } /* end if */ } /* end if */ } /* end for */ @@ -4250,7 +4250,7 @@ H5T__conv_f_f(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts, /* * The exponent is too small to fit in the exponent field, * but by shifting the mantissa to the right we can - * accomodate that value. The mantissa of course is no + * accommodate that value. The mantissa of course is no * longer normalized. */ mrsh += (size_t)(1 - expo); diff --git a/src/H5Toffset.c b/src/H5Toffset.c index 668e730..c0e94fc 100644 --- a/src/H5Toffset.c +++ b/src/H5Toffset.c @@ -33,12 +33,12 @@ static herr_t H5T_set_offset(const H5T_t *dt, size_t offset); * Function: H5Tget_offset * * Purpose: Retrieves the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. 
The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -84,12 +84,12 @@ done: * Function: H5T_get_offset * * Purpose: Retrieves the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -134,12 +134,12 @@ done: * Function: H5Tset_offset * * Purpose: Sets the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little @@ -203,12 +203,12 @@ done: * Function: H5T_set_offset * * Purpose: Sets the bit offset of the first significant bit. The - * signficant bits of an atomic datum can be offset from the + * significant bits of an atomic datum can be offset from the * beginning of the memory for that datum by an amount of * padding. The `offset' property specifies the number of bits * of padding that appear to the "right of" the value. 
That is, * if we have a 32-bit datum with 16-bits of precision having - * the value 0x1122 then it will be layed out in memory as (from + * the value 0x1122 then it will be laid out in memory as (from * small byte address toward larger byte addresses): * * Big Big Little Little diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h index 96132b0..e1b996c 100644 --- a/src/H5Tpkg.h +++ b/src/H5Tpkg.h @@ -295,7 +295,7 @@ typedef enum H5T_state_t { H5T_STATE_OPEN /*named constant, open object header */ } H5T_state_t; - /* This struct is shared between all occurances of an open named type */ + /* This struct is shared between all occurrences of an open named type */ typedef struct H5T_shared_t { hsize_t fo_count; /* number of references to this file object */ H5T_state_t state; /*current state of the type */ diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h index 8051b6d..89cdcfd 100644 --- a/src/H5Tprivate.h +++ b/src/H5Tprivate.h @@ -100,7 +100,7 @@ typedef struct H5T_subset_info_t { /* Forward declarations for prototype arguments */ struct H5O_t; -/* The native endianess of the platform */ +/* The native endianness of the platform */ H5_DLLVAR H5T_order_t H5T_native_order_g; /* Private functions */ diff --git a/src/H5Znbit.c b/src/H5Znbit.c index c0afc38..fe0041c 100644 --- a/src/H5Znbit.c +++ b/src/H5Znbit.c @@ -558,7 +558,7 @@ H5Z_set_parms_array(const H5T_t *type, unsigned *cd_values_index, H5T_t *dtype_base = NULL; /* Array datatype's base datatype */ H5T_class_t dtype_base_class; /* Array datatype's base datatype's class */ size_t dtype_size; /* Array datatype's size (in bytes) */ - htri_t is_vlstring; /* flag indicating if datatype is varible-length string */ + htri_t is_vlstring; /* flag indicating if datatype is variable-length string */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT @@ -667,7 +667,7 @@ H5Z_set_parms_compound(const H5T_t *type, unsigned *cd_values_index, size_t dtype_member_offset; /* Compound datatype's current member datatype's offset (in bytes) */ size_t dtype_next_member_offset;/* Compound datatype's next member datatype's offset (in bytes) */ size_t dtype_size; /* Compound datatype's size (in bytes) */ - htri_t is_vlstring; /* flag indicating if datatype is varible-length string */ + htri_t is_vlstring; /* flag indicating if datatype is variable-length string */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h index fcb2d37..0df05f0 100644 --- a/src/H5Zpublic.h +++ b/src/H5Zpublic.h @@ -50,7 +50,7 @@ typedef int H5Z_filter_t; * unlimited amount, but currently each * filter uses a bit in a 32-bit field, * so the format would have to be - * changed to accomodate that) + * changed to accommodate that) */ /* Flags for filter definition (stored) */ diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c index d139696..cdf31a4 100644 --- a/src/H5Zscaleoffset.c +++ b/src/H5Zscaleoffset.c @@ -988,7 +988,7 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id) if(status == H5D_FILL_VALUE_UNDEFINED) cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_UNDEFINED; else { - int need_convert = FALSE; /* Flag indicating convertion of byte order */ + int need_convert = FALSE; /* Flag indicating conversion of byte order */ cd_values[H5Z_SCALEOFFSET_PARM_FILAVAIL] = H5Z_SCALEOFFSET_FILL_DEFINED; @@ -1047,7 +1047,7 @@ H5Z_filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_value uint32_t minbits = 0; /* minimum number of bits to store values 
*/ unsigned long long minval= 0; /* minimum value of input buffer */ enum H5Z_scaleoffset_t type; /* memory type corresponding to dataset datatype */ - int need_convert = FALSE; /* flag indicating convertion of byte order */ + int need_convert = FALSE; /* flag indicating conversion of byte order */ unsigned char *outbuf = NULL; /* pointer to new output buffer */ unsigned buf_offset = 21; /* buffer offset because of parameters stored in file */ unsigned i; /* index */ diff --git a/src/H5Ztrans.c b/src/H5Ztrans.c index d4b59a6..67646a0 100644 --- a/src/H5Ztrans.c +++ b/src/H5Ztrans.c @@ -329,7 +329,7 @@ static void H5Z_print(H5Z_node *tree, FILE *stream); } /* The difference of this macro from H5Z_XFORM_DO_OP3 is that it handles the operations when the left operand is empty, like -x or +x. - * The reason that it's seperated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op, + * The reason that it's separated from H5Z_XFORM_DO_OP3 is because compilers don't accept operations like *x or /x. So in H5Z_do_op, * these two macros are called in different ways. (SLU 2012/3/20) */ #define H5Z_XFORM_DO_OP6(OP) \ @@ -967,7 +967,7 @@ done: /*------------------------------------------------------------------------- * Function: H5Z_new_node - * Purpose: Create and initilize a new H5Z_node structure. + * Purpose: Create and initialize a new H5Z_node structure. * Return: Success: Valid H5Z_node ptr * NULLure: NULL * Programmer: Bill Wendling @@ -999,7 +999,7 @@ done: * Purpose: If the transform is trivial, this function applies it. * Otherwise, it calls H5Z_xform_eval_full to do the full * transform. - * Return: SUCCEED if transform applied succesfully, FAIL otherwise + * Return: SUCCEED if transform applied successfully, FAIL otherwise * Programmer: Leon Arber * 5/1/04 * Modifications: @@ -1485,7 +1485,7 @@ H5Z_xform_reduce_tree(H5Z_node* tree) * Purpose: If the root of the tree passed in points to a simple * arithmetic operation and the left and right subtrees are both * integer or floating point values, this function does that - * operation, free the left and rigt subtrees, and replaces + * operation, free the left and right subtrees, and replaces * the root with the result of the operation. * Return: None. * Programmer: Leon Arber diff --git a/src/H5detect.c b/src/H5detect.c index 1c5554e..2d33a3d 100644 --- a/src/H5detect.c +++ b/src/H5detect.c @@ -43,7 +43,7 @@ static const char *FileHeader = "\n\ * system or configure has detected those Unix * features which aren't available. We're not * running on a Vax or other machine with mixed - * endianess. + * endianness. * * Modifications: * @@ -55,13 +55,13 @@ static const char *FileHeader = "\n\ #include "H5Rpublic.h" #if defined(__has_attribute) -#if __has_attribute(no_sanitize) -#define HDF_NO_UBSAN __attribute__((no_sanitize("undefined"))) +# if __has_attribute(no_sanitize_address) +# define HDF_NO_UBSAN __attribute__((no_sanitize_address)) +# else +# define HDF_NO_UBSAN +# endif #else -#define HDF_NO_UBSAN -#endif -#else -#define HDF_NO_UBSAN +# define HDF_NO_UBSAN #endif #define MAXDETECT 64 @@ -695,7 +695,7 @@ H5T__init_native(void)\n\ FUNC_ENTER_PACKAGE\n"); for(i = 0; i < nd; i++) { - /* The native endianess of this machine */ + /* The native endianness of this machine */ /* The INFO.perm now contains `-1' for bytes that aren't used and * are always zero. This happens on the Cray for `short' where * sizeof(short) is 8, but only the low-order 4 bytes are ever used. 
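The H5detect.c hunk above is about how the build-time tool records the machine's native byte order (its perm arrays, including the Cray case where unused high-order bytes are marked with -1). As a point of reference only, here is a minimal, self-contained sketch of the underlying idea of probing byte order at run time; it is not the H5detect implementation, and the helper name probe_byte_order is made up for illustration.

#include <stdio.h>
#include <string.h>

/* Illustrative helper (not HDF5 code): report the native byte order by
 * inspecting how a multi-byte integer is laid out in memory, the same
 * basic trick H5detect uses when it builds its permutation tables. */
static const char *probe_byte_order(void)
{
    unsigned int  value = 0x01020304U;
    unsigned char bytes[sizeof value];

    memcpy(bytes, &value, sizeof value);

    if (bytes[0] == 0x01)
        return "big-endian";
    else if (bytes[0] == 0x04)
        return "little-endian";
    else
        return "mixed/other ordering";   /* e.g. historical VAX-style layouts */
}

int main(void)
{
    printf("native byte order: %s\n", probe_byte_order());
    return 0;
}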
@@ -1079,8 +1079,8 @@ fix_order(int n, int last, int *perm, const char **mesg) } else { /* * Bi-endian machines like VAX. - * (NOTE: This is not an actual determination of the VAX-endianess. - * It could have some other endianess and fall into this + * (NOTE: This is not an actual determination of the VAX-endianness. + * It could have some other endianness and fall into this * case - JKM & QAK) */ HDassert(0 == n % 2); @@ -1111,7 +1111,7 @@ fix_order(int n, int last, int *perm, const char **mesg) * * This function assumes that the exponent occupies higher * order bits than the mantissa and that the most significant - * bit of the mantissa is next to the least signficant bit + * bit of the mantissa is next to the least significant bit * of the exponent. * * @@ -1271,7 +1271,7 @@ mark. Bits of integer types are printed as\n\ If the most significant bit of the normalized\n\ mantissa (always a `1' except for `0.0') is\n\ not stored then an `implicit=yes' appears\n\ -under the field description. In thie case,\n\ +under the field description. In this case,\n\ the radix point is still assumed to be\n\ before the first `M' but after the implicit\n\ bit.\n"; @@ -1675,11 +1675,13 @@ detect_alignments(void) */ static int verify_signal_handlers(int signum, void (*handler)(int)) { -#if defined(__has_feature) +#if defined(__has_feature) /* Clang */ #if __has_feature(address_sanitizer) || __has_feature(thread_sanitizer) /* Under the address and thread sanitizers, don't raise any signals. */ return 0; #endif +#elif defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__) /* GCC */ + return 0; #endif void (*save_handler)(int) = HDsignal(signum, handler); volatile int i, val; diff --git a/src/H5private.h b/src/H5private.h index 6334f39..8974e46 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -116,7 +116,7 @@ #endif /* - * flock() in sys/file.h is used for the implemention of file locking. + * flock() in sys/file.h is used for the implementation of file locking. */ #if defined(H5_HAVE_FLOCK) && defined(H5_HAVE_SYS_FILE_H) # include <sys/file.h> @@ -132,7 +132,7 @@ /* * Unix ioctls. These are used by h5ls (and perhaps others) to determine a - * resonable output width. + * reasonable output width. */ #ifdef H5_HAVE_SYS_IOCTL_H # include <sys/ioctl.h> diff --git a/src/H5public.h b/src/H5public.h index 9157366..a4c80cd 100644 --- a/src/H5public.h +++ b/src/H5public.h @@ -115,7 +115,7 @@ extern "C" { /* * Status return values. Failed integer functions in HDF5 result almost * always in a negative value (unsigned failing functions sometimes return - * zero for failure) while successfull return is non-negative (often zero). + * zero for failure) while successful return is non-negative (often zero). * The negative failure value is most commonly -1, but don't bet on it. The * proper way to detect failure is something like: * @@ -298,7 +298,7 @@ typedef enum { } H5_iter_order_t; /* Iteration callback values */ -/* (Actually, any postive value will cause the iterator to stop and pass back +/* (Actually, any positive value will cause the iterator to stop and pass back * that positive value to the function that called the iterator) */ #define H5_ITER_ERROR (-1) diff --git a/src/H5system.c b/src/H5system.c index 719b7e0..186d8fa 100644 --- a/src/H5system.c +++ b/src/H5system.c @@ -294,13 +294,13 @@ HDfprintf(FILE *stream, const char *fmt, ...) 
unsigned short x = (unsigned short)va_arg(ap, unsigned int); n = fprintf(stream, format_templ, x); } else if(!*modifier) { - unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occuring */ + unsigned int x = va_arg(ap, unsigned int); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } else if(!HDstrcmp(modifier, "l")) { - unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occuring */ + unsigned long x = va_arg(ap, unsigned long); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } else { - uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occuring */ + uint64_t x = va_arg(ap, uint64_t); /*lint !e732 Loss of sign not really occurring */ n = fprintf(stream, format_templ, x); } break; @@ -333,7 +333,7 @@ HDfprintf(FILE *stream, const char *fmt, ...) case 'a': { - haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occuring */ + haddr_t x = va_arg(ap, haddr_t); /*lint !e732 Loss of sign not really occurring */ if(H5F_addr_defined(x)) { len = 0; @@ -387,7 +387,7 @@ HDfprintf(FILE *stream, const char *fmt, ...) case 's': case 'p': { - char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occuring */ + char *x = va_arg(ap, char*); /*lint !e64 Type mismatch not really occurring */ n = fprintf(stream, format_templ, x); } break;
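The H5detect.c change to verify_signal_handlers() earlier in this diff skips raising test signals when a sanitizer is active: Clang is probed with __has_feature, and the new #elif adds the __SANITIZE_ADDRESS__ / __SANITIZE_THREAD__ macros that GCC predefines under -fsanitize=address/thread. A minimal standalone sketch of that detection pattern follows; the helper name running_under_sanitizer is hypothetical, and this only illustrates the preprocessor logic, not the HDF5 function.

#include <stdio.h>

/* Illustrative helper (not HDF5 code): returns nonzero when built with
 * AddressSanitizer or ThreadSanitizer, checking the Clang feature test
 * first and otherwise the macros GCC predefines for those sanitizers. */
static int running_under_sanitizer(void)
{
#if defined(__has_feature)                                            /* Clang */
#  if __has_feature(address_sanitizer) || __has_feature(thread_sanitizer)
    return 1;
#  endif
#elif defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__)  /* GCC */
    return 1;
#endif
    return 0;
}

int main(void)
{
    printf("sanitizer active: %d\n", running_under_sanitizer());
    return 0;
}

Note the ordering mirrors the diff: when the compiler provides __has_feature, that check takes priority, and the GCC-specific macros are only consulted as the fallback branch.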