author     Quincey Koziol <koziol@hdfgroup.org>    2010-04-15 19:57:02 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2010-04-15 19:57:02 (GMT)
commit     ebd22f7643d53b234b43393d8ede23e0549caa5f (patch)
tree       62b13c6ef0f705822fd925687073df7988bd8288 /src/H5Oalloc.c
parent     227a351e4758ffb0af9882b00831ad6cf1acff3b (diff)
[svn-r18571] Description:
Bring r18542 from metadata journaling "merging" branch to trunk:
Bring new object header pin/unpin & protect/unprotect routines and
split-out object header chunk proxy changes from metadata_journaling branch to
"merging" branch, along with some other minor tweaks to clean up compiler
warnings, etc.
Also: clean up chunk protect/unprotect calls when allocating or freeing
space in a chunk, optimize metadata accumulator code to avoid some re-reading
of information from the file, refactor H5O_pin/H5O_unpin from the way they are
done on the merging branch back to the way they were previously done on trunk, and
other minor code cleanups, etc.
Tested on
FreeBSD/32 6.3 (duty) in debug mode
FreeBSD/64 6.3 (liberty) w/C++ & FORTRAN, in debug mode
Linux/64-amd64 2.6 (amani) w/Intel compilers, w/default API=1.6.x,
w/C++ & FORTRAN, in production mode
Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN,
w/szip filter, in production mode
Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN,
in production mode
Linux/64-ia64 2.4 (tg-login3) w/parallel, w/FORTRAN, in debug mode
Linux/64-amd64 2.6 (abe) w/parallel, w/FORTRAN, in production mode
Mac OS X/32 10.6.3 (amazon) in debug mode
Mac OS X/32 10.6.3 (amazon) w/C++ & FORTRAN, w/threadsafe,
in production mode
Diffstat (limited to 'src/H5Oalloc.c')
-rw-r--r--   src/H5Oalloc.c   649
1 file changed, 451 insertions, 198 deletions
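The recurring change throughout the diff below is that every routine which modifies a chunk's image now protects the chunk's cache proxy first, accumulates cache flags while it works, and releases the proxy in its done: block, passing H5AC__DIRTIED_FLAG instead of setting the old chunk->dirty field directly. The following condensed sketch illustrates that pattern; it reuses internal identifiers visible in the diff (H5O_chunk_protect, H5O_chunk_unprotect, H5AC__NO_FLAGS_SET, H5AC__DIRTIED_FLAG and the FUNC_ENTER/HGOTO_ERROR macros), the function name is hypothetical, and it is meant as an illustration of the shape of the change rather than stand-alone compilable code.

/* Condensed sketch of the protect/modify/unprotect pattern applied throughout
 * this patch.  The function name is hypothetical; the calls and flags are the
 * ones that appear in the diff below. */
static herr_t
H5O_modify_chunk_sketch(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno)
{
    H5O_chunk_proxy_t *chk_proxy = NULL;        /* Proxy for chunk being modified */
    unsigned chk_flags = H5AC__NO_FLAGS_SET;    /* Flags for unprotecting chunk */
    herr_t ret_value = SUCCEED;                 /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5O_modify_chunk_sketch)

    /* Protect the chunk's proxy in the metadata cache before touching its image */
    if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno)))
        HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk")

    /* ... modify messages in oh->chunk[chunkno].image here ... */

    /* Record that the chunk image changed, instead of setting chunk->dirty directly */
    chk_flags |= H5AC__DIRTIED_FLAG;

done:
    /* Release the chunk, handing the accumulated flags to the cache */
    if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, chk_flags) < 0)
        HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")

    FUNC_LEAVE_NOAPI(ret_value)
} /* H5O_modify_chunk_sketch() */

Helper routines such as H5O_add_gap() and H5O_eliminate_gap() now take the proxy and a pointer to the caller's flag word, so dirtying decisions made inside them propagate to the single unprotect call in the caller.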
diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c index 31e6de7..66630ca 100644 --- a/src/H5Oalloc.c +++ b/src/H5Oalloc.c @@ -58,21 +58,24 @@ /* Local Prototypes */ /********************/ -static herr_t H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, - uint8_t *new_gap_loc, size_t new_gap_size); -static herr_t H5O_eliminate_gap(H5O_t *oh, H5O_mesg_t *mesg, - uint8_t *new_gap_loc, size_t new_gap_size); -static herr_t H5O_alloc_null(H5O_t *oh, unsigned null_idx, +static herr_t H5O_add_gap(H5F_t *f, hid_t dxpl_id, H5O_t *oh, + H5O_chunk_proxy_t *chk_proxy, unsigned *chk_flags, + unsigned idx, uint8_t *new_gap_loc, size_t new_gap_size); +static herr_t H5O_eliminate_gap(H5F_t *f, hid_t dxpl_id, H5O_t *oh, + H5O_chunk_proxy_t *chk_proxy, unsigned *chk_flags, + H5O_mesg_t *mesg, uint8_t *new_gap_loc, size_t new_gap_size); +static herr_t H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx, const H5O_msg_class_t *new_type, void *new_native, size_t new_size); static htri_t H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, size_t size, unsigned * msg_idx); static unsigned H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size); -static htri_t H5O_move_cont(H5F_t *f, H5O_t *oh, unsigned cont_u, hid_t dxpl_id); -static htri_t H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id); -static htri_t H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id); +static htri_t H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u); +static htri_t H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh); +static htri_t H5O_merge_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh); static htri_t H5O_remove_empty_chunks(H5F_t *f, hid_t dxpl_id, H5O_t *oh); -static herr_t H5O_alloc_shrink_chunk(H5F_t *f, H5O_t *oh, hid_t dxpl_id, unsigned chunkno); +static herr_t H5O_alloc_shrink_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, + unsigned chunkno); /*********************/ @@ -108,8 +111,8 @@ H5FL_EXTERN(H5O_cont_t); *------------------------------------------------------------------------- */ static herr_t -H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, - uint8_t *new_gap_loc, size_t new_gap_size) +H5O_add_gap(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_chunk_proxy_t *chk_proxy, + unsigned *chk_flags, unsigned idx, uint8_t *new_gap_loc, size_t new_gap_size) { hbool_t merged_with_null; /* Whether the gap was merged with a null message */ unsigned u; /* Local index variable */ @@ -120,6 +123,8 @@ H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, /* check args */ HDassert(oh); HDassert(oh->version > H5O_VERSION_1); + HDassert(chk_proxy); + HDassert(chk_flags); HDassert(new_gap_loc); HDassert(new_gap_size); @@ -128,13 +133,13 @@ H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, for(u = 0; u < oh->nmesgs && !merged_with_null; u++) { /* Find a null message in the chunk with the new gap */ /* (a null message that's not the one we are eliminating) */ - if(H5O_NULL_ID == oh->mesg[u].type->id && oh->mesg[u].chunkno == chunkno + if(H5O_NULL_ID == oh->mesg[u].type->id && oh->mesg[u].chunkno == chk_proxy->chunkno && u != idx) { /* Sanity check - chunks with null messages shouldn't have a gap */ - HDassert(oh->chunk[chunkno].gap == 0); + HDassert(oh->chunk[chk_proxy->chunkno].gap == 0); /* Eliminate the gap in the chunk */ - if(H5O_eliminate_gap(oh, &oh->mesg[u], new_gap_loc, new_gap_size) < 0) + if(H5O_eliminate_gap(f, dxpl_id, oh, chk_proxy, chk_flags, &oh->mesg[u], new_gap_loc, new_gap_size) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't eliminate gap in 
chunk") /* Set flag to indicate that the gap was handled */ @@ -146,15 +151,15 @@ H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, if(!merged_with_null) { /* Adjust message offsets after new gap forward in chunk */ for(u = 0; u < oh->nmesgs; u++) - if(oh->mesg[u].chunkno == chunkno && oh->mesg[u].raw > new_gap_loc) + if(oh->mesg[u].chunkno == chk_proxy->chunkno && oh->mesg[u].raw > new_gap_loc) oh->mesg[u].raw -= new_gap_size; /* Slide raw message info forward in chunk image */ HDmemmove(new_gap_loc, new_gap_loc + new_gap_size, - (size_t)((oh->chunk[chunkno].image + (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM_OH(oh))) - (new_gap_loc + new_gap_size))); + (size_t)((oh->chunk[chk_proxy->chunkno].image + (oh->chunk[chk_proxy->chunkno].size - H5O_SIZEOF_CHKSUM_OH(oh))) - (new_gap_loc + new_gap_size))); /* Add existing gap size to new gap size */ - new_gap_size += oh->chunk[chunkno].gap; + new_gap_size += oh->chunk[chk_proxy->chunkno].gap; /* Merging with existing gap will allow for a new null message */ if(new_gap_size >= (size_t)H5O_SIZEOF_MSGHDR_OH(oh)) { @@ -166,16 +171,16 @@ H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate more space for messages") /* Increment new gap size */ - oh->chunk[chunkno].gap += new_gap_size; + oh->chunk[chk_proxy->chunkno].gap += new_gap_size; /* Create new null message, with the tail of the previous null message */ null_msg = &(oh->mesg[oh->nmesgs++]); null_msg->type = H5O_MSG_NULL; null_msg->native = NULL; null_msg->raw_size = new_gap_size - H5O_SIZEOF_MSGHDR_OH(oh); - null_msg->raw = (oh->chunk[chunkno].image + oh->chunk[chunkno].size) + null_msg->raw = (oh->chunk[chk_proxy->chunkno].image + oh->chunk[chk_proxy->chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + null_msg->raw_size); - null_msg->chunkno = chunkno; + null_msg->chunkno = chk_proxy->chunkno; /* Zero out new null message's raw data */ if(null_msg->raw_size) @@ -185,10 +190,13 @@ H5O_add_gap(H5O_t *oh, unsigned chunkno, unsigned idx, null_msg->dirty = TRUE; /* Reset size of gap in chunk */ - oh->chunk[chunkno].gap = 0; + oh->chunk[chk_proxy->chunkno].gap = 0; } /* end if */ else - oh->chunk[chunkno].gap = new_gap_size; + oh->chunk[chk_proxy->chunkno].gap = new_gap_size; + + /* Mark the chunk as modified */ + *chk_flags |= H5AC__DIRTIED_FLAG; } /* end if */ done: @@ -199,7 +207,14 @@ done: /*------------------------------------------------------------------------- * Function: H5O_eliminate_gap * - * Purpose: Eliminate a gap in a chunk with a null message + * Purpose: Eliminate a gap in a chunk with a null message. + * + * Note: Sometimes this happens as a result of converting an existing + * non-null message to a null message, so we zero out the gap + * here, even though it might already be zero (when we're adding + * a gap to a chunk with an existing null message). (Mostly, + * this just simplifies the code, esp. 
with the necessary chunk + * locking -QAK) * * Return: Non-negative on success/Negative on failure * @@ -210,16 +225,20 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5O_eliminate_gap(H5O_t *oh, H5O_mesg_t *mesg, uint8_t *gap_loc, size_t gap_size) +H5O_eliminate_gap(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_chunk_proxy_t *chk_proxy, + unsigned *chk_flags, H5O_mesg_t *mesg, uint8_t *gap_loc, size_t gap_size) { uint8_t *move_start, *move_end; /* Pointers to area of messages to move */ hbool_t null_before_gap; /* Flag whether the null message is before the gap or not */ + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5O_eliminate_gap) + FUNC_ENTER_NOAPI_NOINIT(H5O_eliminate_gap) /* check args */ HDassert(oh); HDassert(oh->version > H5O_VERSION_1); + HDassert(chk_proxy); + HDassert(chk_flags); HDassert(mesg); HDassert(gap_loc); HDassert(gap_size); @@ -242,6 +261,7 @@ H5O_eliminate_gap(H5O_t *oh, H5O_mesg_t *mesg, uint8_t *gap_loc, size_t gap_size unsigned u; /* Local index variable */ /* Look for messages that need to move, to adjust raw pointers in chunk */ + /* (this doesn't change the moved messages 'dirty' state) */ for(u = 0; u < oh->nmesgs; u++) { uint8_t *msg_start; /* Start of encoded message in chunk */ @@ -282,10 +302,15 @@ H5O_eliminate_gap(H5O_t *oh, H5O_mesg_t *mesg, uint8_t *gap_loc, size_t gap_size /* Adjust size of null message */ mesg->raw_size += gap_size; + /* Set the gap size to zero for the chunk */ + oh->chunk[mesg->chunkno].gap = 0; + /* Mark null message as dirty */ mesg->dirty = TRUE; + *chk_flags |= H5AC__DIRTIED_FLAG; - FUNC_LEAVE_NOAPI(SUCCEED) +done: + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_eliminate_gap() */ @@ -304,9 +329,11 @@ H5O_eliminate_gap(H5O_t *oh, H5O_mesg_t *mesg, uint8_t *gap_loc, size_t gap_size *------------------------------------------------------------------------- */ static herr_t -H5O_alloc_null(H5O_t *oh, unsigned null_idx, const H5O_msg_class_t *new_type, - void *new_native, size_t new_size) +H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx, + const H5O_msg_class_t *new_type, void *new_native, size_t new_size) { + H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */ + unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */ H5O_mesg_t *alloc_msg; /* Pointer to null message to allocate out of */ herr_t ret_value = SUCCEED; /* Return value */ @@ -320,6 +347,10 @@ H5O_alloc_null(H5O_t *oh, unsigned null_idx, const H5O_msg_class_t *new_type, /* Point to null message to allocate out of */ alloc_msg = &oh->mesg[null_idx]; + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, alloc_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* Check if there's a need to split the null message */ if(alloc_msg->raw_size > new_size) { /* Check for producing a gap in the chunk */ @@ -330,7 +361,7 @@ H5O_alloc_null(H5O_t *oh, unsigned null_idx, const H5O_msg_class_t *new_type, alloc_msg->raw_size = new_size; /* Add the gap to the chunk */ - if(H5O_add_gap(oh, alloc_msg->chunkno, null_idx, alloc_msg->raw + alloc_msg->raw_size, gap_size) < 0) + if(H5O_add_gap(f, dxpl_id, oh, chk_proxy, &chk_flags, null_idx, alloc_msg->raw + alloc_msg->raw_size, gap_size) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk") } /* end if */ else { @@ -356,19 +387,17 @@ H5O_alloc_null(H5O_t *oh, unsigned null_idx, const H5O_msg_class_t 
*new_type, /* Mark the message as dirty */ null_msg->dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; /* Check for gap in new null message's chunk */ if(oh->chunk[null_msg->chunkno].gap > 0) { unsigned null_chunkno = null_msg->chunkno; /* Chunk w/gap */ /* Eliminate the gap in the chunk */ - if(H5O_eliminate_gap(oh, null_msg, + if(H5O_eliminate_gap(f, dxpl_id, oh, chk_proxy, &chk_flags, null_msg, ((oh->chunk[null_chunkno].image + oh->chunk[null_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[null_chunkno].gap)), oh->chunk[null_chunkno].gap) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk") - - /* Set the gap size to zero for the chunk */ - oh->chunk[null_chunkno].gap = 0; } /* end if */ /* Set the size of the new "real" message */ @@ -382,8 +411,13 @@ H5O_alloc_null(H5O_t *oh, unsigned null_idx, const H5O_msg_class_t *new_type, /* Mark the new message as dirty */ alloc_msg->dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; done: + /* Release chunk */ + if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, chk_flags) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_alloc_null() */ @@ -467,6 +501,8 @@ static htri_t H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, size_t size, unsigned * msg_idx) { + H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */ + unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */ size_t delta; /* Change in chunk's size */ size_t aligned_size = H5O_ALIGN_OH(oh, size); uint8_t *old_image; /* Old address of chunk's image in memory */ @@ -500,7 +536,7 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, == ((oh->chunk[chunkno].image + oh->chunk[chunkno].size) - (oh->chunk[chunkno].gap + H5O_SIZEOF_CHKSUM_OH(oh))))) { - extend_msg = u; + extend_msg = (int)u; break; } /* end if */ } /* end for */ @@ -516,7 +552,10 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, /* Check for changing the chunk #0 data size enough to need adjusting the flags */ if(oh->version > H5O_VERSION_1 && chunkno == 0) { - uint64_t chunk0_size = oh->chunk[0].size - H5O_SIZEOF_HDR(oh); /* Size of chunk 0's data */ + uint64_t chunk0_size; /* Size of chunk 0's data */ + + HDassert(oh->chunk[0].size >= (size_t)H5O_SIZEOF_HDR(oh)); + chunk0_size = oh->chunk[0].size - (size_t)H5O_SIZEOF_HDR(oh); /* Check for moving from a 1-byte to a 2-byte size encoding */ if(chunk0_size <= 255 && (chunk0_size + delta) > 255) { @@ -546,16 +585,23 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, else if(extended == FALSE) /* can't extend -- we are done */ HGOTO_DONE(FALSE) + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* Adjust object header prefix flags */ if(adjust_size_flags) { - oh->flags &= ~H5O_HDR_CHUNK0_SIZE; + oh->flags &= (uint8_t)~H5O_HDR_CHUNK0_SIZE; oh->flags |= new_size_flags; + + /* Mark object header as dirty in cache */ + if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty") } /* end if */ /* If we can extend an existing null message, take care of that */ if(extend_msg >= 0) { /* Adjust message size of existing null message */ - oh->mesg[extend_msg].dirty = TRUE; oh->mesg[extend_msg].raw_size += delta; } 
/* end if */ /* Create new null message for end of chunk */ @@ -566,11 +612,10 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate more space for messages") /* Set extension message */ - extend_msg = oh->nmesgs++; + extend_msg = (int)oh->nmesgs++; /* Initialize new null message */ oh->mesg[extend_msg].type = H5O_MSG_NULL; - oh->mesg[extend_msg].dirty = TRUE; oh->mesg[extend_msg].native = NULL; oh->mesg[extend_msg].raw = ((oh->chunk[chunkno].image + oh->chunk[chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[chunkno].gap)) @@ -579,13 +624,16 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, oh->mesg[extend_msg].chunkno = chunkno; } /* end else */ + /* Mark the extended message as dirty */ + oh->mesg[extend_msg].dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; + /* Allocate more memory space for chunk's image */ old_image = oh->chunk[chunkno].image; old_size = oh->chunk[chunkno].size; oh->chunk[chunkno].size += delta + extra_prfx_size; oh->chunk[chunkno].image = H5FL_BLK_REALLOC(chunk_image, old_image, oh->chunk[chunkno].size); oh->chunk[chunkno].gap = 0; - oh->chunk[chunkno].dirty = TRUE; if(NULL == oh->chunk[chunkno].image) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") @@ -599,6 +647,7 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, oh->mesg[u].raw = oh->chunk[chunkno].image + extra_prfx_size + (oh->mesg[u].raw - old_image); /* Flag message as dirty directly */ + /* (we mark the entire chunk dirty when we update its size) */ oh->mesg[u].dirty = TRUE; } /* endif */ @@ -607,19 +656,39 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno, * it's size is directly encoded in the object header) */ if(chunkno > 0 && (H5O_CONT_ID == oh->mesg[u].type->id) && (((H5O_cont_t *)(oh->mesg[u].native))->chunkno == chunkno)) { + H5O_chunk_proxy_t *chk_proxy2 = NULL; /* Chunk that continuation message is in */ + unsigned chk_flags2 = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */ + unsigned cont_chunkno = oh->mesg[u].chunkno; /* Chunk # for continuation message */ + + /* Protect chunk containing continuation message */ + if(NULL == (chk_proxy2 = H5O_chunk_protect(f, dxpl_id, oh, cont_chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* Adjust size in continuation message */ HDassert(((H5O_cont_t *)(oh->mesg[u].native))->size == old_size); ((H5O_cont_t *)(oh->mesg[u].native))->size = oh->chunk[chunkno].size; /* Flag continuation message as dirty */ oh->mesg[u].dirty = TRUE; + chk_flags2 |= H5AC__DIRTIED_FLAG; + + /* Release chunk containing continuation message */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy2, chk_flags2) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") } /* end if */ } /* end for */ + /* Mark the chunk size in the cache as changed */ + chk_flags |= H5AC__SIZE_CHANGED_FLAG; + /* Set return value */ - *msg_idx = extend_msg; + *msg_idx = (unsigned)extend_msg; done: + /* Release chunk */ + if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, chk_flags) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_alloc_extend_chunk() */ @@ -672,6 +741,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) } alloc_info; H5O_mesg_t *curr_msg; /* Pointer to current message to operate 
on */ + H5O_chunk_proxy_t *chk_proxy; /* Chunk that message is in */ size_t cont_size; /*continuation message size */ size_t multi_size = 0; /* Size of all the messages in the last chunk */ int found_null = (-1); /* Best fit null message */ @@ -707,12 +777,12 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) for(u = 0, curr_msg = &oh->mesg[0]; u < oh->nmesgs; u++, curr_msg++) { if(curr_msg->type->id == H5O_NULL_ID) { if(cont_size == curr_msg->raw_size) { - found_null = u; + found_null = (int)u; break; } /* end if */ else if(curr_msg->raw_size > cont_size && (found_null < 0 || curr_msg->raw_size < oh->mesg[found_null].raw_size)) - found_null = u; + found_null = (int)u; } /* end if */ else if(curr_msg->type->id == H5O_CONT_ID) { /* Don't consider continuation messages (for now) */ @@ -753,7 +823,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) if(total_size >= cont_size) { if(curr_msg->type->id == H5O_ATTR_ID) { if(found_attr.msgno < 0 || total_size < found_attr.total_size) { - found_attr.msgno = u; + found_attr.msgno = (int)u; found_attr.gap_size = gap_size; found_attr.null_size = null_size; found_attr.total_size = total_size; @@ -762,7 +832,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) } /* end if */ else { if(found_other.msgno < 0 || total_size < found_other.total_size) { - found_other.msgno = u; + found_other.msgno = (int)u; found_other.gap_size = gap_size; found_other.null_size = null_size; found_other.total_size = total_size; @@ -835,7 +905,6 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) } /* end if */ chunkno = oh->nchunks++; - oh->chunk[chunkno].dirty = TRUE; oh->chunk[chunkno].addr = new_chunk_addr; oh->chunk[chunkno].size = size; oh->chunk[chunkno].gap = 0; @@ -861,9 +930,14 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) if(multi_size > 0) { /* Move all non-null messages in the last chunk to the new chunk. This * should be extremely rare so we don't care too much about minimizing - * the space used */ + * the space used. + */ H5O_mesg_t *null_msg; /* Pointer to new null message */ + /* Protect last chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno - 1))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, UFAIL, "unable to load object header chunk") + /* Copy each message to the new location */ for(u = 0, curr_msg = &oh->mesg[0]; u < oh->nmesgs; u++, curr_msg++) if(curr_msg->chunkno == chunkno - 1) { @@ -888,15 +962,15 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) } /* end if */ /* Create a null message spanning the entire last chunk */ - found_null = oh->nmesgs++; + found_null = (int)oh->nmesgs++; null_msg = &(oh->mesg[found_null]); null_msg->type = H5O_MSG_NULL; null_msg->dirty = TRUE; null_msg->native = NULL; - null_msg->raw = oh->chunk[chunkno-1].image + null_msg->raw = oh->chunk[chunkno - 1].image + ((chunkno == 1) ? H5O_SIZEOF_HDR(oh) : H5O_SIZEOF_CHKHDR_OH(oh)) - H5O_SIZEOF_CHKSUM_OH(oh) + H5O_SIZEOF_MSGHDR_OH(oh); - null_msg->raw_size = oh->chunk[chunkno-1].size + null_msg->raw_size = oh->chunk[chunkno - 1].size - ((chunkno == 1) ? 
H5O_SIZEOF_HDR(oh) : H5O_SIZEOF_CHKHDR_OH(oh)) - H5O_SIZEOF_MSGHDR_OH(oh); null_msg->chunkno = chunkno - 1; @@ -904,16 +978,23 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) HDassert(null_msg->raw_size >= cont_size); /* Remove any gap in the chunk */ - oh->chunk[chunkno-1].gap = 0; + oh->chunk[chunkno - 1].gap = 0; + /* Release chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, UFAIL, "unable to unprotect object header chunk") } else if(found_null < 0) { /* Move message (that will be replaced with continuation message) - * to new chunk, if necessary. - */ + * to new chunk, if necessary. + */ H5O_mesg_t *null_msg; /* Pointer to new null message */ + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, oh->mesg[found_other.msgno].chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, UFAIL, "unable to load object header chunk") + /* Create null message for space that message to copy currently occupies */ - found_null = oh->nmesgs++; + found_null = (int)oh->nmesgs++; null_msg = &(oh->mesg[found_null]); null_msg->type = H5O_MSG_NULL; null_msg->native = NULL; @@ -922,7 +1003,6 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) null_msg->chunkno = oh->mesg[found_other.msgno].chunkno; /* Copy the message to move (& its prefix) to its new location */ - /* (Chunk is already dirty, no need to mark it) */ HDmemcpy(p, oh->mesg[found_other.msgno].raw - H5O_SIZEOF_MSGHDR_OH(oh), oh->mesg[found_other.msgno].raw_size + H5O_SIZEOF_MSGHDR_OH(oh)); @@ -965,6 +1045,10 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) /* Mark the new null message as dirty */ null_msg->dirty = TRUE; + + /* Release chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, UFAIL, "unable to unprotect object header chunk") } /* end if */ HDassert(found_null >= 0); @@ -975,9 +1059,13 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) oh->mesg[idx].dirty = TRUE; oh->mesg[idx].native = NULL; oh->mesg[idx].raw = p + H5O_SIZEOF_MSGHDR_OH(oh); - oh->mesg[idx].raw_size = size - (H5O_SIZEOF_CHKHDR_OH(oh) + H5O_SIZEOF_MSGHDR_OH(oh)); + oh->mesg[idx].raw_size = size - (size_t)(H5O_SIZEOF_CHKHDR_OH(oh) + H5O_SIZEOF_MSGHDR_OH(oh)); oh->mesg[idx].chunkno = chunkno; + /* Insert the new chunk into the cache */ + if(H5O_chunk_add(f, dxpl_id, oh, chunkno) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, UFAIL, "can't add new chunk to cache") + /* Initialize the continuation information */ if(NULL == (cont = H5FL_MALLOC(H5O_cont_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, UFAIL, "memory allocation failed") @@ -986,7 +1074,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size) cont->chunkno = chunkno; /* Split the null message and point at continuation message */ - if(H5O_alloc_null(oh, (unsigned)found_null, H5O_MSG_CONT, cont, cont_size) < 0) + if(H5O_alloc_null(f, dxpl_id, oh, (unsigned)found_null, H5O_MSG_CONT, cont, cont_size) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, UFAIL, "can't split null message") /* Set return value */ @@ -1071,7 +1159,7 @@ H5O_alloc(H5F_t *f, hid_t dxpl_id, H5O_t *oh, const H5O_msg_class_t *type, } /* end if */ /* Split the null message and point at continuation message */ - if(H5O_alloc_null(oh, idx, type, NULL, aligned_size) < 0) + if(H5O_alloc_null(f, dxpl_id, oh, idx, type, NULL, aligned_size) < 0) 
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, UFAIL, "can't split null message") /* Mark object header as dirty in cache */ @@ -1104,6 +1192,8 @@ herr_t H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg, hbool_t adj_link) { + H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */ + unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5O_release_mesg, FAIL) @@ -1120,6 +1210,10 @@ H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg, HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "unable to delete file space for object header message") } /* end if */ + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, mesg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk") + /* Free any native information */ H5O_msg_free_mesg(mesg); @@ -1133,20 +1227,22 @@ H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg, /* Mark the message as modified */ mesg->dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; /* Check if chunk has a gap currently */ if(oh->chunk[mesg->chunkno].gap) { /* Eliminate the gap in the chunk */ - if(H5O_eliminate_gap(oh, mesg, + if(H5O_eliminate_gap(f, dxpl_id, oh, chk_proxy, &chk_flags, mesg, ((oh->chunk[mesg->chunkno].image + oh->chunk[mesg->chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[mesg->chunkno].gap)), oh->chunk[mesg->chunkno].gap) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk") - - /* Set the gap size to zero for the chunk */ - oh->chunk[mesg->chunkno].gap = 0; } /* end if */ done: + /* Release chunk, if not already done */ + if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, chk_flags) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_release_mesg() */ @@ -1165,18 +1261,13 @@ done: *------------------------------------------------------------------------- */ static htri_t -H5O_move_cont(H5F_t *f, H5O_t *oh, unsigned cont_u, hid_t dxpl_id) +H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u) { - unsigned v; /* local index variable */ - H5O_mesg_t *cont_msg; /* pointer to the continuation message */ - H5O_mesg_t *nonnull_msg; /* pointer to the current message to operate on */ - H5O_mesg_t *null_msg; /* pointer to the current message to operate on */ - size_t total_size=0; /* total size of nonnull messages in the chunk pointed to by cont message */ - size_t move_size=0; /* size of the message to be moved */ - uint8_t *move_start, *move_end; /* pointers to area of messages to move */ - size_t gap_size; /* size of gap produced */ + H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that continuation message is in */ + H5O_mesg_t *cont_msg; /* Pointer to the continuation message */ unsigned deleted_chunkno; /* Chunk # to delete */ - htri_t ret_value = FALSE; /* Return value */ + unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */ + htri_t ret_value = TRUE; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5O_move_cont) @@ -1184,81 +1275,133 @@ H5O_move_cont(H5F_t *f, H5O_t *oh, unsigned cont_u, hid_t dxpl_id) HDassert(f); HDassert(oh); + /* Get initial information */ cont_msg = &oh->mesg[cont_u]; H5O_LOAD_NATIVE(f, dxpl_id, 0, oh, cont_msg, FAIL) deleted_chunkno = ((H5O_cont_t *)(cont_msg->native))->chunkno; - /* proceed further only if continuation message is pointing to the last chunk */ - 
if(deleted_chunkno != (oh->nchunks - 1)) - HGOTO_DONE(FALSE) - - /* find size of all nonnull messages in the chunk pointed to by the continuation message */ - for(v = 0, nonnull_msg = &oh->mesg[0]; v < oh->nmesgs; v++, nonnull_msg++) - if(nonnull_msg->chunkno == deleted_chunkno && nonnull_msg->type->id != H5O_NULL_ID) { - HDassert(nonnull_msg->type->id != H5O_CONT_ID); - total_size += nonnull_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh); - } - - /* check if messages can fit into the continuation message */ - if(total_size && total_size <= (cont_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh))) { - - /* convert continuation message into a null message */ - if(H5O_release_mesg(f, dxpl_id, oh, cont_msg, TRUE) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "unable to convert into null message") - - move_start = cont_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh); - move_end = cont_msg->raw + cont_msg->raw_size; - - /* move message(s) forward into continuation message */ - for(v = 0, nonnull_msg = &oh->mesg[0]; v < oh->nmesgs; v++, nonnull_msg++) - if(nonnull_msg->chunkno == deleted_chunkno && nonnull_msg->type->id != H5O_NULL_ID) { - move_size = nonnull_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh); - HDmemcpy(move_start, nonnull_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), move_size); - nonnull_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh); - nonnull_msg->chunkno = cont_msg->chunkno; - nonnull_msg->dirty = TRUE; - move_start += move_size; - } - - HDassert(move_start <= move_end); - - /* check if there is space remaining in the continuation message */ - /* the remaining space can be gap or a null message */ - gap_size = move_end - move_start; - if(gap_size >= (size_t)H5O_SIZEOF_MSGHDR_OH(oh)) { - cont_msg->raw_size = gap_size - H5O_SIZEOF_MSGHDR_OH(oh); - cont_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh); - cont_msg->dirty = TRUE; - } else { - if(gap_size && (H5O_add_gap(oh, cont_msg->chunkno, cont_u, move_start, gap_size) < 0)) - HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk") - /* Release any information/memory for continuation message */ - H5O_msg_free_mesg(cont_msg); - if(cont_u < (oh->nmesgs - 1)) - HDmemmove(&oh->mesg[cont_u], &oh->mesg[cont_u + 1], ((oh->nmesgs - 1) - cont_u) * sizeof(H5O_mesg_t)); - oh->nmesgs--; - } - - /* remove all null messages in deleted chunk from list of messages */ - /* Note: unsigned v wrapping around at the end */ - for (v = oh->nmesgs - 1, null_msg = &oh->mesg[v]; v < oh->nmesgs; v--, null_msg--) - if(null_msg->type->id == H5O_NULL_ID && null_msg->chunkno == deleted_chunkno) { + /* Check if continuation message is pointing to the last chunk */ + if(deleted_chunkno == (oh->nchunks - 1)) { + size_t nonnull_size; /* Total size of nonnull messages in the chunk pointed to by cont message */ + H5O_mesg_t *curr_msg; /* Pointer to the current message to operate on */ + size_t gap_size; /* Size of gap produced */ + unsigned v; /* Local index variable */ + + /* Find size of all nonnull messages in the chunk pointed to by the continuation message */ + nonnull_size = 0; + for(v = 0, curr_msg = &oh->mesg[0]; v < oh->nmesgs; v++, curr_msg++) + if(curr_msg->chunkno == deleted_chunkno && curr_msg->type->id != H5O_NULL_ID) { + HDassert(curr_msg->type->id != H5O_CONT_ID); + nonnull_size += curr_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh); + } /* end if */ - /* Release any information/memory for message */ - H5O_msg_free_mesg(null_msg); + /* Size of gap in chunk w/continuation message */ + gap_size = oh->chunk[cont_msg->chunkno].gap; + + /* Check if messages can fit into the 
continuation message + gap size */ + /* (Could count any null messages in the chunk w/the continuation + * message also, but that is pretty complex. -QAK) + */ + if(nonnull_size && nonnull_size <= (gap_size + cont_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh))) { + uint8_t *move_start, *move_end; /* Pointers to area of messages to move */ + unsigned cont_chunkno; /* Chunk number for continuation message */ + + /* Get continuation info */ + move_start = cont_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh); + move_end = cont_msg->raw + cont_msg->raw_size; + cont_chunkno = cont_msg->chunkno; + + /* Convert continuation message into a null message */ + if(H5O_release_mesg(f, dxpl_id, oh, cont_msg, TRUE) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "unable to convert into null message") + + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, cont_chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk") + + /* Move message(s) forward into continuation message */ + for(v = 0, curr_msg = &oh->mesg[0]; v < oh->nmesgs; v++, curr_msg++) + /* Look for messages in chunk to delete */ + if(curr_msg->chunkno == deleted_chunkno) { + /* Move messages out of chunk to delete */ + if(curr_msg->type->id != H5O_NULL_ID) { + size_t move_size; /* Size of the message to be moved */ + + /* Compute size of message to move */ + move_size = curr_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh); + + /* Move message out of deleted chunk */ + HDmemcpy(move_start, curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), move_size); + curr_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh); + curr_msg->chunkno = cont_chunkno; + curr_msg->dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; + + /* Adjust location to move messages to */ + move_start += move_size; + } /* end else */ + } /* end if */ + + HDassert(move_start <= (move_end + gap_size)); + + /* Check if there is space remaining in the continuation message */ + /* (The remaining space can be gap or a null message) */ + gap_size += (size_t)(move_end - move_start); + if(gap_size >= (size_t)H5O_SIZEOF_MSGHDR_OH(oh)) { + /* Adjust size of null (was continuation) message */ + cont_msg->raw_size = gap_size - H5O_SIZEOF_MSGHDR_OH(oh); + cont_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh); + cont_msg->dirty = TRUE; + chk_flags |= H5AC__DIRTIED_FLAG; + } /* end if */ + else { + /* Check if there is space that should be a gap */ + if(gap_size > 0) { + /* Convert remnant into gap in chunk */ + if(H5O_add_gap(f, dxpl_id, oh, chk_proxy, &chk_flags, cont_u, move_start, gap_size) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk") + } /* end if */ - if(v < (oh->nmesgs - 1)) - HDmemmove(&oh->mesg[v], &oh->mesg[v + 1], ((oh->nmesgs - 1) - v) * sizeof(H5O_mesg_t)); - oh->nmesgs--; - } /* end if */ + /* Release any information/memory for continuation message */ + H5O_msg_free_mesg(cont_msg); + if(cont_u < (oh->nmesgs - 1)) + HDmemmove(&oh->mesg[cont_u], &oh->mesg[cont_u + 1], ((oh->nmesgs - 1) - cont_u) * sizeof(H5O_mesg_t)); + oh->nmesgs--; + } /* end else */ - (void)H5FL_BLK_FREE(chunk_image, oh->chunk[deleted_chunkno].image); + /* Move message(s) forward into continuation message */ + /* Note: unsigned v wrapping around at the end */ + for(v = oh->nmesgs - 1, curr_msg = &oh->mesg[v]; v < oh->nmesgs; v--, curr_msg--) + /* Look for messages in chunk to delete */ + if(curr_msg->chunkno == deleted_chunkno) { + /* Remove all null messages in deleted chunk from list of messages */ + if(curr_msg->type->id == H5O_NULL_ID) { + /* 
Release any information/memory for message */ + H5O_msg_free_mesg(curr_msg); + chk_flags |= H5AC__DIRTIED_FLAG; + + /* Remove from message list */ + if(v < (oh->nmesgs - 1)) + HDmemmove(&oh->mesg[v], &oh->mesg[v + 1], ((oh->nmesgs - 1) - v) * sizeof(H5O_mesg_t)); + oh->nmesgs--; + } /* end if */ + } /* end if */ - oh->nchunks--; - ret_value = TRUE; + /* Remove chunk from list of chunks */ + oh->chunk[deleted_chunkno].image = H5FL_BLK_FREE(chunk_image, oh->chunk[deleted_chunkno].image); + oh->nchunks--; + } /* end if */ + else + ret_value = FALSE; } /* end if */ + else + ret_value = FALSE; done: + /* Release chunk, if not already done */ + if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, chk_flags) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_move_cont() */ @@ -1274,14 +1417,11 @@ done: * Programmer: Quincey Koziol * koziol@ncsa.uiuc.edu * Oct 17 2005 - * Modifications: - * Feb. 2009: Vailin Choi - * Add changes to move messages forward into "continuation" message * *------------------------------------------------------------------------- */ static htri_t -H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) +H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh) { hbool_t packed_msg; /* Flag to indicate that messages were packed */ hbool_t did_packing = FALSE; /* Whether any messages were packed */ @@ -1323,6 +1463,12 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Don't swap messages if the second message is also a null message */ /* (We'll merge them together later, in another routine) */ if(H5O_NULL_ID != nonnull_msg->type->id) { + H5O_chunk_proxy_t *null_chk_proxy; /* Chunk that message is in */ + + /* Protect chunk */ + if(NULL == (null_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* Copy raw data for non-null message to new location */ HDmemmove(curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), nonnull_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), nonnull_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh)); @@ -1336,11 +1482,12 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Mark null message dirty */ /* (since we need to re-encode its message header) */ - /* (also, marking this message dirty means we - * don't have to mark chunk as dirty) - */ curr_msg->dirty = TRUE; + /* Release chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, null_chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + /* Set the flag to indicate that the null message * was packed - if its not at the end its chunk, * we'll move it again on the next pass. 
@@ -1357,23 +1504,29 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) } /* end if */ } /* end if */ else { - H5O_mesg_t *null_msg; /* Pointer to current message to operate on */ - unsigned v; /* Local index variable */ - htri_t status; + H5O_mesg_t *null_msg; /* Pointer to current message to operate on */ + unsigned v; /* Local index variable */ + /* Check if messages in chunk pointed to can replace continuation message */ if(H5O_CONT_ID == curr_msg->type->id) { - if((status = H5O_move_cont(f, oh, u, dxpl_id)) < 0) + htri_t status; /* Status from moving messages */ + + if((status = H5O_move_cont(f, dxpl_id, oh, u)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "Error in moving messages into cont message") - else if(status > 0) { /* message(s) got moved into "continuation" message */ + else if(status > 0) { /* Message(s) got moved into "continuation" message */ packed_msg = TRUE; break; - } - } + } /* end else-if */ + } /* end if */ /* Loop over messages again, looking for large enough null message in earlier chunk */ for(v = 0, null_msg = &oh->mesg[0]; v < oh->nmesgs; v++, null_msg++) { if(H5O_NULL_ID == null_msg->type->id && curr_msg->chunkno > null_msg->chunkno && curr_msg->raw_size <= null_msg->raw_size) { + H5O_chunk_proxy_t *null_chk_proxy; /* Chunk that null message is in */ + H5O_chunk_proxy_t *curr_chk_proxy; /* Chunk that message is in */ + unsigned null_chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting null chunk */ + unsigned curr_chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting curr chunk */ unsigned old_chunkno; /* Old message information */ uint8_t *old_raw; @@ -1381,15 +1534,19 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) old_chunkno = curr_msg->chunkno; old_raw = curr_msg->raw; + /* Protect chunks */ + if(NULL == (null_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, null_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + if(NULL == (curr_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + /* Copy raw data for non-null message to new chunk */ HDmemcpy(null_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw_size + H5O_SIZEOF_MSGHDR_OH(oh)); - /* Mark null message's chunk as dirty, since the raw data image changed */ - oh->chunk[null_msg->chunkno].dirty = TRUE; - /* Point non-null message at null message's space */ curr_msg->chunkno = null_msg->chunkno; curr_msg->raw = null_msg->raw; + curr_chk_flags |= H5AC__DIRTIED_FLAG; /* Change information for null message */ if(curr_msg->raw_size == null_msg->raw_size) { @@ -1400,18 +1557,24 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Mark null message dirty */ null_msg->dirty = TRUE; + null_chk_flags |= H5AC__DIRTIED_FLAG; + + /* Release current chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, curr_chk_proxy, curr_chk_flags) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") /* Check for gap in null message's chunk */ if(oh->chunk[old_chunkno].gap > 0) { /* Eliminate the gap in the chunk */ - if(H5O_eliminate_gap(oh, null_msg, + if(H5O_eliminate_gap(f, dxpl_id, oh, null_chk_proxy, &null_chk_flags, null_msg, ((oh->chunk[old_chunkno].image + oh->chunk[old_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[old_chunkno].gap)), oh->chunk[old_chunkno].gap) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't 
eliminate gap in chunk") - - /* Set the gap size to zero for the chunk */ - oh->chunk[old_chunkno].gap = 0; } /* end if */ + + /* Release null chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, null_chk_proxy, null_chk_flags) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") } /* end if */ else { unsigned new_null_msg; /* Message index for new null message */ @@ -1423,8 +1586,12 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Adjust the size of the null message being eliminated */ null_msg->raw_size = curr_msg->raw_size; + /* Mark null message dirty */ + null_msg->dirty = TRUE; + null_chk_flags |= H5AC__DIRTIED_FLAG; + /* Add the gap to the chunk */ - if(H5O_add_gap(oh, null_msg->chunkno, v, null_msg->raw + null_msg->raw_size, gap_size) < 0) + if(H5O_add_gap(f, dxpl_id, oh, null_chk_proxy, &null_chk_flags, v, null_msg->raw + null_msg->raw_size, gap_size) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk") /* Re-use message # for new null message taking place of non-null message */ @@ -1437,6 +1604,7 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Mark null message dirty */ null_msg->dirty = TRUE; + null_chk_flags |= H5AC__DIRTIED_FLAG; /* Create new null message for previous location of non-null message */ if(oh->nmesgs >= oh->alloc_nmesgs) { @@ -1451,6 +1619,10 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) new_null_msg = oh->nmesgs++; } /* end else */ + /* Release null message's chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, null_chk_proxy, null_chk_flags) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + /* Initialize new null message to take over non-null message's location */ oh->mesg[new_null_msg].type = H5O_MSG_NULL; oh->mesg[new_null_msg].native = NULL; @@ -1460,18 +1632,20 @@ H5O_move_msgs_forward(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Mark new null message dirty */ oh->mesg[new_null_msg].dirty = TRUE; + curr_chk_flags |= H5AC__DIRTIED_FLAG; /* Check for gap in new null message's chunk */ if(oh->chunk[old_chunkno].gap > 0) { /* Eliminate the gap in the chunk */ - if(H5O_eliminate_gap(oh, &oh->mesg[new_null_msg], + if(H5O_eliminate_gap(f, dxpl_id, oh, curr_chk_proxy, &curr_chk_flags, &oh->mesg[new_null_msg], ((oh->chunk[old_chunkno].image + oh->chunk[old_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[old_chunkno].gap)), oh->chunk[old_chunkno].gap) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk") - - /* Set the gap size to zero for the chunk */ - oh->chunk[old_chunkno].gap = 0; } /* end if */ + + /* Release new null message's chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, curr_chk_proxy, curr_chk_flags) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") } /* end else */ /* Indicate that we packed messages */ @@ -1520,7 +1694,7 @@ done: *------------------------------------------------------------------------- */ static htri_t -H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id) +H5O_merge_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh) { hbool_t merged_msg; /* Flag to indicate that messages were merged */ hbool_t did_merging = FALSE; /* Whether any messages were merged */ @@ -1554,11 +1728,14 @@ H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Loop over messages again, looking for null message in same chunk */ for(v = 0, curr_msg2 = &oh->mesg[0]; v < oh->nmesgs; v++, 
curr_msg2++) { if(u != v && H5O_NULL_ID == curr_msg2->type->id && curr_msg->chunkno == curr_msg2->chunkno) { + ssize_t adj_raw; /* Amount to adjust raw message pointer */ + size_t adj_raw_size; /* Amount to adjust raw message size */ /* Check for second message after first message */ if((curr_msg->raw + curr_msg->raw_size) == (curr_msg2->raw - H5O_SIZEOF_MSGHDR_OH(oh))) { /* Extend first null message length to cover second null message */ - curr_msg->raw_size += (H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size); + adj_raw = 0; + adj_raw_size = H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size; /* Message has been merged */ merged_msg = TRUE; @@ -1566,8 +1743,8 @@ H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Check for second message before first message */ else if((curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh)) == (curr_msg2->raw + curr_msg2->raw_size)) { /* Adjust first message address and extend length to cover second message */ - curr_msg->raw -= (H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size); - curr_msg->raw_size += (H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size); + adj_raw = -((ssize_t)(H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size)); + adj_raw_size = H5O_SIZEOF_MSGHDR_OH(oh) + curr_msg2->raw_size; /* Message has been merged */ merged_msg = TRUE; @@ -1575,12 +1752,26 @@ H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* Second message has been merged, delete it */ if(merged_msg) { + H5O_chunk_proxy_t *curr_chk_proxy; /* Chunk that message is in */ + /* Release any information/memory for second message */ H5O_msg_free_mesg(curr_msg2); + /* Protect chunk */ + if(NULL == (curr_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk") + + /* Adjust first message address and extend length to cover second message */ + curr_msg->raw += adj_raw; + curr_msg->raw_size += adj_raw_size; + /* Mark first message as dirty */ curr_msg->dirty = TRUE; + /* Release new null message's chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, curr_chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + /* Remove second message from list of messages */ if(v < (oh->nmesgs - 1)) HDmemmove(&oh->mesg[v], &oh->mesg[v + 1], ((oh->nmesgs - 1) - v) * sizeof(H5O_mesg_t)); @@ -1591,7 +1782,7 @@ H5O_merge_null(H5F_t *f, H5O_t *oh, hid_t dxpl_id) /* If the merged message is too large, shrink the chunk */ if(curr_msg->raw_size >= H5O_MESG_MAX_SIZE) - if(H5O_alloc_shrink_chunk(f, oh, dxpl_id, curr_msg->chunkno) < 0) + if(H5O_alloc_shrink_chunk(f, dxpl_id, oh, curr_msg->chunkno) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTPACK, FAIL, "unable to shrink chunk") /* Get out of loop */ @@ -1676,6 +1867,20 @@ H5O_remove_empty_chunks(H5F_t *f, hid_t dxpl_id, H5O_t *oh) /* Decode current continuation message if necessary */ H5O_LOAD_NATIVE(f, dxpl_id, 0, oh, cont_msg, FAIL) + /* Check if the chunkno needs to be set */ + /* (should only occur when the continuation message is first decoded) */ + if(0 == ((H5O_cont_t *)(cont_msg->native))->chunkno) { + unsigned w; /* Local index variable */ + + /* Find chunk that this continuation message points to */ + for(w = 0; w < oh->nchunks; w++) + if(oh->chunk[w].addr == ((H5O_cont_t *)(cont_msg->native))->addr) { + ((H5O_cont_t *)(cont_msg->native))->chunkno = w; + break; + } /* end if */ + HDassert(((H5O_cont_t *)(cont_msg->native))->chunkno > 0); + } /* end if */ + /* Check for correct chunk to delete */ 
if(oh->chunk[null_msg->chunkno].addr == ((H5O_cont_t *)(cont_msg->native))->addr) break; @@ -1684,6 +1889,7 @@ H5O_remove_empty_chunks(H5F_t *f, hid_t dxpl_id, H5O_t *oh) /* Must be a continuation message that points to chunk containing null message */ HDassert(v < oh->nmesgs); HDassert(cont_msg); + HDassert(((H5O_cont_t *)(cont_msg->native))->chunkno == null_msg->chunkno); /* Initialize information about null message */ null_msg_no = u; @@ -1698,12 +1904,28 @@ H5O_remove_empty_chunks(H5F_t *f, hid_t dxpl_id, H5O_t *oh) */ /* Free memory for chunk image */ - (void)H5FL_BLK_FREE(chunk_image, oh->chunk[null_msg->chunkno].image); + oh->chunk[null_msg->chunkno].image = H5FL_BLK_FREE(chunk_image, oh->chunk[null_msg->chunkno].image); /* Remove chunk from list of chunks */ - if(null_msg->chunkno < (oh->nchunks - 1)) + if(null_msg->chunkno < (oh->nchunks - 1)) { HDmemmove(&oh->chunk[null_msg->chunkno], &oh->chunk[null_msg->chunkno + 1], ((oh->nchunks - 1) - null_msg->chunkno) * sizeof(H5O_chunk_t)); + /* Adjust chunk number for any chunk proxies that are in the cache */ + for(u = null_msg->chunkno; u < (oh->nchunks - 1); u++) { + unsigned chk_proxy_status = 0; /* Metadata cache status of chunk proxy for chunk */ + + /* Check the chunk proxy's status in the metadata cache */ + if(H5AC_get_entry_status(f, oh->chunk[u].addr, &chk_proxy_status) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to check metadata cache status for chunk proxy") + + /* If the entry is in the cache, update its chunk index */ + if(chk_proxy_status & H5AC_ES__IN_CACHE) { + if(H5O_chunk_update_idx(f, dxpl_id, oh, u) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to update index for chunk proxy") + } /* end if */ + } /* end for */ + } /* end if */ + /* Decrement # of chunks */ /* (Don't bother reducing size of chunk array for now -QAK) */ oh->nchunks--; @@ -1737,9 +1959,24 @@ H5O_remove_empty_chunks(H5F_t *f, hid_t dxpl_id, H5O_t *oh) /* Decode current continuation message if necessary */ H5O_LOAD_NATIVE(f, dxpl_id, 0, oh, curr_msg, FAIL) - /* Check for pointer to chunk after deleted chunk */ - if(((H5O_cont_t *)(curr_msg->native))->chunkno > deleted_chunkno) - ((H5O_cont_t *)(curr_msg->native))->chunkno--; + /* Check if the chunkno needs to be set */ + /* (should only occur when the continuation message is first decoded) */ + if(0 == ((H5O_cont_t *)(curr_msg->native))->chunkno) { + unsigned w; /* Local index variable */ + + /* Find chunk that this continuation message points to */ + for(w = 0; w < oh->nchunks; w++) + if(oh->chunk[w].addr == ((H5O_cont_t *)(curr_msg->native))->addr) { + ((H5O_cont_t *)(curr_msg->native))->chunkno = w; + break; + } /* end if */ + HDassert(((H5O_cont_t *)(curr_msg->native))->chunkno > 0); + } /* end if */ + else { + /* Check for pointer to chunk after deleted chunk */ + if(((H5O_cont_t *)(curr_msg->native))->chunkno > deleted_chunkno) + ((H5O_cont_t *)(curr_msg->native))->chunkno--; + } /* end else */ } /* end if */ } /* end for */ @@ -1799,14 +2036,14 @@ H5O_condense_header(H5F_t *f, H5O_t *oh, hid_t dxpl_id) rescan_header = FALSE; /* Scan for messages that can be moved earlier in chunks */ - result = H5O_move_msgs_forward(f, oh, dxpl_id); + result = H5O_move_msgs_forward(f, dxpl_id, oh); if(result < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTPACK, FAIL, "can't move header messages forward") if(result > 0) rescan_header = TRUE; /* Scan for adjacent null messages & merge them */ - result = H5O_merge_null(f, oh, dxpl_id); + result = H5O_merge_null(f, dxpl_id, oh); if(result < 0) 
HGOTO_ERROR(H5E_OHDR, H5E_CANTPACK, FAIL, "can't pack null header messages") if(result > 0) @@ -1843,13 +2080,11 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5O_alloc_shrink_chunk(H5F_t *f, - H5O_t *oh, - hid_t dxpl_id, - unsigned chunkno) +H5O_alloc_shrink_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno) { H5O_chunk_t *chunk = &oh->chunk[chunkno]; /* Chunk to shrink */ - H5O_mesg_t *curr_msg; + H5O_chunk_proxy_t *chk_proxy = NULL; /* Metadata cache proxy for chunk to shrink */ + H5O_mesg_t *curr_msg; /* Current message to examine */ uint8_t *old_image = chunk->image; /* Old address of chunk's image in memory */ size_t old_size = chunk->size; /* Old size of chunk */ size_t new_size = chunk->size - chunk->gap; /* Size of shrunk chunk */ @@ -1866,20 +2101,25 @@ H5O_alloc_shrink_chunk(H5F_t *f, FUNC_ENTER_NOAPI_NOINIT(H5O_alloc_shrink_chunk) /* check args */ - HDassert(f != NULL); + HDassert(f); + HDassert(oh); + + /* Protect chunk */ + if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk") /* Loop backwards to increase the chance of seeing more null messages at the * end of the chunk. Note that we rely on unsigned u wrapping around at the * end. */ - for (u = oh->nmesgs - 1, curr_msg = &oh->mesg[u]; u < oh->nmesgs; u--, curr_msg--) { - if ((H5O_NULL_ID == curr_msg->type->id) && (chunkno == curr_msg->chunkno)) { + for(u = oh->nmesgs - 1, curr_msg = &oh->mesg[u]; u < oh->nmesgs; u--, curr_msg--) { + if((H5O_NULL_ID == curr_msg->type->id) && (chunkno == curr_msg->chunkno)) { size_t shrink_size = curr_msg->raw_size + sizeof_msghdr; /* Amount to shrink the chunk by */ /* If the current message is not at the end of the chunk, copy the * data after it (except the checksum). 
*/ - if (curr_msg->raw + curr_msg->raw_size + if(curr_msg->raw + curr_msg->raw_size < old_image + new_size - sizeof_chksum) { unsigned v; /* Index */ H5O_mesg_t *curr_msg2; @@ -1890,8 +2130,8 @@ H5O_alloc_shrink_chunk(H5F_t *f, (size_t)(old_image + new_size - sizeof_chksum - src)); /* Update the raw data pointers for messages after this one */ - for (v = 0, curr_msg2 = &oh->mesg[0]; v < oh->nmesgs; v++, curr_msg2++) - if ((chunkno == curr_msg2->chunkno) && (curr_msg2->raw > curr_msg->raw)) + for(v = 0, curr_msg2 = &oh->mesg[0]; v < oh->nmesgs; v++, curr_msg2++) + if((chunkno == curr_msg2->chunkno) && (curr_msg2->raw > curr_msg->raw)) curr_msg2->raw -= shrink_size; } /* end if */ @@ -1902,7 +2142,7 @@ H5O_alloc_shrink_chunk(H5F_t *f, H5O_msg_free_mesg(curr_msg); /* Remove the deleted null message from list of messages */ - if (u < (oh->nmesgs - 1)) + if(u < (oh->nmesgs - 1)) HDmemmove(&oh->mesg[u], &oh->mesg[u+1], ((oh->nmesgs - 1) - u) * sizeof(H5O_mesg_t)); /* Decrement # of messages */ @@ -1928,7 +2168,7 @@ H5O_alloc_shrink_chunk(H5F_t *f, /* update the new chunk size */ new_size += oh->mesg[oh->nmesgs].raw_size + sizeof_msghdr; - } + } /* end if */ /* Check for changing the chunk #0 data size enough to need adjusting the flags */ if(oh->version > H5O_VERSION_1 && chunkno == 0) { @@ -1936,19 +2176,19 @@ H5O_alloc_shrink_chunk(H5F_t *f, size_t orig_prfx_size = 1 << (oh->flags & H5O_HDR_CHUNK0_SIZE); /* Original prefix size */ /* Check for moving to a 1-byte size encoding */ - if (orig_prfx_size > 1 && chunk0_newsize <= 255) { + if(orig_prfx_size > 1 && chunk0_newsize <= 255) { less_prfx_size = orig_prfx_size - 1; new_size_flags = H5O_HDR_CHUNK0_1; adjust_size_flags = TRUE; } /* end if */ /* Check for moving to a 2-byte size encoding */ - else if (orig_prfx_size > 2 && chunk0_newsize <= 65535) { + else if(orig_prfx_size > 2 && chunk0_newsize <= 65535) { less_prfx_size = orig_prfx_size - 2; new_size_flags = H5O_HDR_CHUNK0_2; adjust_size_flags = TRUE; } /* end if */ /* Check for moving to a 4-byte size encoding */ - else if (orig_prfx_size > 4 && chunk0_newsize <= 4294967295) { + else if(orig_prfx_size > 4 && chunk0_newsize <= 4294967295) { less_prfx_size = orig_prfx_size - 4; new_size_flags = H5O_HDR_CHUNK0_4; adjust_size_flags = TRUE; @@ -1973,38 +2213,51 @@ H5O_alloc_shrink_chunk(H5F_t *f, chunk->size = new_size; chunk->image = H5FL_BLK_REALLOC(chunk_image, old_image, chunk->size); chunk->gap = 0; - chunk->dirty = TRUE; - if (NULL == oh->chunk[chunkno].image) + if(NULL == oh->chunk[chunkno].image) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") /* Spin through existing messages, adjusting them */ - for (u = 0, curr_msg = &oh->mesg[0]; u < oh->nmesgs; u++, curr_msg++) { - if (adjust_size_flags || (chunk->image != old_image)) + for(u = 0, curr_msg = &oh->mesg[0]; u < oh->nmesgs; u++, curr_msg++) { + if(adjust_size_flags || (chunk->image != old_image)) /* Adjust raw addresses for messages in this chunk to reflect new 'image' address */ - if (curr_msg->chunkno == chunkno) + if(curr_msg->chunkno == chunkno) curr_msg->raw = chunk->image - less_prfx_size + (curr_msg->raw - old_image); /* Find continuation message which points to this chunk and adjust chunk's size */ /* (Chunk 0 doesn't have a continuation message that points to it and * its size is directly encoded in the object header) */ - if (chunkno > 0 && (H5O_CONT_ID == curr_msg->type->id) && - (((H5O_cont_t *)(curr_msg->native))->chunkno == chunkno)) { + if(chunkno > 0 && (H5O_CONT_ID == curr_msg->type->id) && + 
(((H5O_cont_t *)(curr_msg->native))->chunkno == chunkno)) { + H5O_chunk_proxy_t *cont_chk_proxy; /* Chunk that message is in */ + + /* Protect chunk */ + if(NULL == (cont_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno))) + HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header chunk") + /* Adjust size of continuation message */ HDassert(((H5O_cont_t *)(curr_msg->native))->size == old_size); ((H5O_cont_t *)(curr_msg->native))->size = chunk->size; /* Flag continuation message as dirty */ curr_msg->dirty = TRUE; + + /* Release chunk, marking it dirty */ + if(H5O_chunk_unprotect(f, dxpl_id, oh, cont_chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") } /* end if */ } /* end for */ HDassert(new_size <= old_size); /* Free the unused space in the file */ - if (H5MF_xfree(f, H5FD_MEM_OHDR, dxpl_id, chunk->addr + new_size, (hsize_t)(old_size - new_size)) < 0) + if(H5MF_xfree(f, H5FD_MEM_OHDR, dxpl_id, chunk->addr + new_size, (hsize_t)(old_size - new_size)) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to shrink object header chunk") done: + /* Release chunk, marking it dirty */ + if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, oh, chk_proxy, H5AC__DIRTIED_FLAG) < 0) + HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk") + FUNC_LEAVE_NOAPI(ret_value) } /* H5O_alloc_shrink_chunk() */ |
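One further pattern in the diff above is worth calling out: because chunk proxies are now separate cache entries keyed by chunk address, removing a chunk from oh->chunk[] (in H5O_remove_empty_chunks()) has to renumber the proxies of the chunks that slide down. The condensed sketch below reuses H5AC_get_entry_status(), H5AC_ES__IN_CACHE and H5O_chunk_update_idx() from the diff; the starting index name deleted_chunkno is a stand-in for the chunk number of the removed chunk, so treat this as an illustration rather than a verbatim excerpt.

/* Condensed sketch: after removing the chunk at index `deleted_chunkno` from
 * oh->chunk[], update the chunk index of any proxy for a following chunk that
 * is already resident in the metadata cache.
 * (`deleted_chunkno` is a stand-in name; other identifiers are from the diff above.) */
unsigned u;

for(u = deleted_chunkno; u < (oh->nchunks - 1); u++) {
    unsigned chk_proxy_status = 0;  /* Metadata cache status of chunk proxy for chunk */

    /* Check the chunk proxy's status in the metadata cache */
    if(H5AC_get_entry_status(f, oh->chunk[u].addr, &chk_proxy_status) < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to check metadata cache status for chunk proxy")

    /* If the proxy is in the cache, update its chunk index */
    if(chk_proxy_status & H5AC_ES__IN_CACHE)
        if(H5O_chunk_update_idx(f, dxpl_id, oh, u) < 0)
            HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to update index for chunk proxy")
} /* end for */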