author    | Quincey Koziol <koziol@hdfgroup.org> | 2004-09-27 18:06:11 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2004-09-27 18:06:11 (GMT)
commit    | 3f2fb8bed1bc01382e01f8788f3e44799fa54b61 (patch)
tree      | b791383bfa41e9152e734097f9d0e860317b7919
parent    | 643811be029c5a8d483c7fa28bf3cf063192117b (diff)
[svn-r9322] Purpose:
Bug fix
Description:
Fix a situation where deleting a chunked dataset with B-tree nodes that
weren't in the metadata cache would die with a core dump.
Platforms tested:
FreeBSD 4.10 (sleipnir)
Linux 2.4 (heping)
Solaris 2.7 (arabica)
-rw-r--r-- | src/H5Distore.c  | 170
-rw-r--r-- | test/Makefile.in |   3
-rw-r--r-- | test/unlink.c    |  97

3 files changed, 203 insertions, 67 deletions
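The failure mode described above can be sketched as a short standalone program. This is an illustration only, not code from the patch: it mirrors the regression test added in test/unlink.c below, with placeholder file/dataset names and dimensions, error checking omitted, and the mechanism noted in the comments inferred from the fix in H5D_istore_delete().

/* Hypothetical reproduction sketch for the bug fixed by this commit.
 * File name, dataset name, and sizes are placeholders.
 */
#include "hdf5.h"

int
main(void)
{
    hsize_t dims[3] = {100, 50, 25};      /* placeholder dataset dimensions */
    hsize_t chunk_dims[3] = {50, 25, 10}; /* placeholder chunk dimensions */
    hid_t file_id, space_id, dcpl_id, dset_id;

    /* Create a file containing a chunked dataset with early space allocation,
     * so the chunk B-tree exists on disk even though no data is written. */
    file_id = H5Fcreate("unlink_chunked.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space_id = H5Screate_simple(3, dims, NULL);
    dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl_id, 3, chunk_dims);
    H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY);
    dset_id = H5Dcreate(file_id, "Dataset", H5T_NATIVE_INT, space_id, dcpl_id);

    /* Close everything; once the file is reopened, the dataset's chunk
     * B-tree nodes are no longer in the metadata cache. */
    H5Dclose(dset_id);
    H5Pclose(dcpl_id);
    H5Sclose(space_id);
    H5Fclose(file_id);

    /* Deleting the dataset now walks the chunk B-tree; before this fix,
     * H5Gunlink() could core dump because the shared B-tree info had not
     * been set up on the delete path. */
    file_id = H5Fopen("unlink_chunked.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    H5Gunlink(file_id, "Dataset");
    H5Fclose(file_id);

    return 0;
}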
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 6135e83..a1bc302 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -144,6 +144,7 @@ typedef struct H5D_istore_ud1_t {
 /* Private prototypes */
 static void *H5D_istore_chunk_alloc(size_t size, const H5O_pline_t *pline);
 static void *H5D_istore_chunk_xfree(void *chk, const H5O_pline_t *pline);
+static herr_t H5D_istore_shared_create (H5F_t *f, H5O_layout_t *layout);
 static herr_t H5D_istore_shared_free (void *page);
 
 /* B-tree iterator callbacks */
@@ -927,9 +928,6 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd
 herr_t
 H5D_istore_init (H5F_t *f, H5D_t *dset)
 {
-    H5D_istore_ud1_t udata;
-    H5B_shared_t *shared;               /* Shared B-tree node info */
-    size_t u;                           /* Local index variable */
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);
     herr_t ret_value=SUCCEED;           /* Return value */
 
@@ -943,36 +941,9 @@ H5D_istore_init (H5F_t *f, H5D_t *dset)
             HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
     } /* end if */
 
-    /* Initialize "user" data for B-tree callbacks, etc. */
-    udata.mesg = &dset->layout;
-
-    /* Allocate space for the shared structure */
-    if(NULL==(shared=H5FL_MALLOC(H5B_shared_t)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
-
-    /* Set up the "global" information for this file's groups */
-    shared->type= H5B_ISTORE;
-    shared->two_k=2*H5F_KVALUE(f,H5B_ISTORE);
-    shared->sizeof_rkey = H5D_istore_sizeof_rkey(f, &udata);
-    assert(shared->sizeof_rkey);
-    shared->sizeof_rnode = H5B_nodesize(f, shared, &shared->sizeof_keys);
-    assert(shared->sizeof_rnode);
-    if(NULL==(shared->page=H5FL_BLK_MALLOC(chunk_page,shared->sizeof_rnode)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
-#ifdef H5_USING_PURIFY
-HDmemset(shared->page,0,shared->sizeof_rnode);
-#endif /* H5_USING_PURIFY */
-    if(NULL==(shared->nkey=H5FL_SEQ_MALLOC(size_t,(size_t)(2*H5F_KVALUE(f,H5B_ISTORE)+1))))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
-
-    /* Initialize the offsets into the native key buffer */
-    for(u=0; u<(2*H5F_KVALUE(f,H5B_ISTORE)+1); u++)
-        shared->nkey[u]=u*H5B_ISTORE->sizeof_nkey;
-
-    /* Make shared B-tree info reference counted */
-    if(NULL==(dset->layout.u.chunk.btree_shared=H5RC_create(shared,H5D_istore_shared_free)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
-
+    /* Allocate the shared structure */
+    if(H5D_istore_shared_create(f, &dset->layout)<0)
+        HGOTO_ERROR (H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
 done:
     FUNC_LEAVE_NOAPI(ret_value);
 } /* end H5D_istore_init() */
@@ -1277,6 +1248,65 @@ done:
 /*-------------------------------------------------------------------------
+ * Function:	H5D_istore_shared_create
+ *
+ * Purpose:	Create & initialize B-tree shared info
+ *
+ * Return:	Non-negative on success/Negative on failure
+ *
+ * Programmer:	Quincey Koziol
+ *		Monday, September 27, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D_istore_shared_create (H5F_t *f, H5O_layout_t *layout)
+{
+    H5D_istore_ud1_t udata;
+    H5B_shared_t *shared;               /* Shared B-tree node info */
+    size_t u;                           /* Local index variable */
+    herr_t ret_value=SUCCEED;           /* Return value */
+
+    FUNC_ENTER_NOAPI_NOINIT(H5D_istore_shared_create)
+
+    /* Initialize "user" data for B-tree callbacks, etc. */
+    udata.mesg = layout;
+
+    /* Allocate space for the shared structure */
+    if(NULL==(shared=H5FL_MALLOC(H5B_shared_t)))
+        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
+
+    /* Set up the "global" information for this file's groups */
+    shared->type= H5B_ISTORE;
+    shared->two_k=2*H5F_KVALUE(f,H5B_ISTORE);
+    shared->sizeof_rkey = H5D_istore_sizeof_rkey(f, &udata);
+    assert(shared->sizeof_rkey);
+    shared->sizeof_rnode = H5B_nodesize(f, shared, &shared->sizeof_keys);
+    assert(shared->sizeof_rnode);
+    if(NULL==(shared->page=H5FL_BLK_MALLOC(chunk_page,shared->sizeof_rnode)))
+        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
+#ifdef H5_USING_PURIFY
+HDmemset(shared->page,0,shared->sizeof_rnode);
+#endif /* H5_USING_PURIFY */
+    if(NULL==(shared->nkey=H5FL_SEQ_MALLOC(size_t,(size_t)(2*H5F_KVALUE(f,H5B_ISTORE)+1))))
+        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
+
+    /* Initialize the offsets into the native key buffer */
+    for(u=0; u<(2*H5F_KVALUE(f,H5B_ISTORE)+1); u++)
+        shared->nkey[u]=u*H5B_ISTORE->sizeof_nkey;
+
+    /* Make shared B-tree info reference counted */
+    if(NULL==(layout->u.chunk.btree_shared=H5RC_create(shared,H5D_istore_shared_free)))
+        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5D_istore_shared_create() */
+
+
+/*-------------------------------------------------------------------------
 * Function:	H5D_istore_shared_free
 *
 * Purpose:	Free B-tree shared info
@@ -1601,7 +1631,7 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
         } /* end if */
 #ifdef H5_USING_PURIFY
         else
-            HDmemset(ret_value,0,size);
+            HDmemset(chunk,0,chunk_size);
 #endif /* H5_USING_PURIFY */
         rdcc->ninits++;
     } /* end else */
@@ -2014,9 +2044,7 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
 {
     H5D_istore_ud1_t udata;             /*B-tree pass-through */
     haddr_t chunk_addr;                 /* Chunk address on disk */
-#ifndef NDEBUG
     size_t u;                           /* Local index variables */
-#endif
     ssize_t ret_value;                  /* Return value */
 
     FUNC_ENTER_NOAPI(H5D_istore_writevv, FAIL);
@@ -2078,16 +2106,38 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
             uint8_t *chunk;             /* Pointer to cached chunk in memory */
             unsigned idx_hint=0;        /* Cache index hint */
             ssize_t naccessed;          /* Number of bytes accessed in chunk */
+            size_t total_bytes;         /* Total # of bytes accessed on disk & memory */
             hbool_t relax;              /* Whether whole chunk is selected */
 
             /*
              * Lock the chunk, copy from application to chunk, then unlock the
              * chunk.
              */
+#ifdef OLD_WAY
+/* Note that this is technically OK, since eventually all the data in the chunk
+ * will be overwritten.  However, it seems risky and a better approach would
+ * be to lock the chunk in the dataset I/O routine (setting the relax flag
+ * appropriately) and then unlock it after all the I/O the chunk was finished. -QAK
+ */
             if(chunk_max_nseq==1 && chunk_len_arr[0] == dset->layout.u.chunk.size)
                 relax = TRUE;
            else
                 relax = FALSE;
+#else /* OLD_WAY */
+            relax=TRUE;
+            total_bytes=0;
+            for(u=*chunk_curr_seq; u<chunk_max_nseq; u++)
+                total_bytes+=chunk_len_arr[u];
+            if(total_bytes!=dset->layout.u.chunk.size)
+                relax=FALSE;
+            if(relax) {
+                total_bytes=0;
+                for(u=*mem_curr_seq; u<mem_max_nseq; u++)
+                    total_bytes+=mem_len_arr[u];
+                if(total_bytes!=dset->layout.u.chunk.size)
+                    relax=FALSE;
+            } /* end if */
+#endif /* OLD_WAY */
 
             if (NULL==(chunk=H5D_istore_lock(f, dxpl_cache, dxpl_id, dset, store, &udata, relax, &idx_hint)))
@@ -3117,22 +3167,32 @@ done:
 *-------------------------------------------------------------------------
 */
 herr_t
-H5D_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
+H5D_istore_delete(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout)
 {
-    H5D_istore_ud1_t udata;             /* User data for B-tree iterator call */
     herr_t ret_value=SUCCEED;           /* Return value */
 
     FUNC_ENTER_NOAPI(H5D_istore_delete, FAIL);
 
     /* Check if the B-tree has been created in the file */
     if(H5F_addr_defined(layout->u.chunk.addr)) {
+        H5O_layout_t tmp_layout=*layout;/* Local copy of layout info */
+        H5D_istore_ud1_t udata;         /* User data for B-tree iterator call */
+
         /* Set up user data for B-tree deletion */
         HDmemset(&udata, 0, sizeof udata);
-        udata.mesg = layout;
+        udata.mesg = &tmp_layout;
+
+        /* Allocate the shared structure */
+        if(H5D_istore_shared_create(f, &tmp_layout)<0)
+            HGOTO_ERROR (H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
 
         /* Delete entire B-tree */
-        if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0)
+        if(H5B_delete(f, dxpl_id, H5B_ISTORE, tmp_layout.u.chunk.addr, &udata)<0)
            HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree");
+
+        /* Free the raw B-tree node buffer */
+        if(H5RC_DEC(tmp_layout.u.chunk.btree_shared)<0)
+            HGOTO_ERROR (H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page");
     } /* end if */
 
 done:
@@ -3366,39 +3426,17 @@ H5D_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden
 {
     H5O_layout_t layout;
     H5D_istore_ud1_t udata;
-    H5B_shared_t *shared;               /* Shared B-tree node info */
-    size_t u;                           /* Local index variable */
     herr_t ret_value=SUCCEED;           /* Return value */
 
     FUNC_ENTER_NOAPI(H5D_istore_debug,FAIL);
 
-    HDmemset (&udata, 0, sizeof udata);
     layout.u.chunk.ndims = ndims;
+    HDmemset (&udata, 0, sizeof udata);
     udata.mesg = &layout;
 
-    /* Allocate space for the shared structure */
-    if(NULL==(shared=H5FL_MALLOC(H5B_shared_t)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for shared B-tree info")
-
-    /* Set up the "global" information for this file's groups */
-    shared->type= H5B_ISTORE;
-    shared->two_k=2*H5F_KVALUE(f,H5B_ISTORE);
-    shared->sizeof_rkey = H5D_istore_sizeof_rkey(f, &udata);
-    assert(shared->sizeof_rkey);
-    shared->sizeof_rnode = H5B_nodesize(f, shared, &shared->sizeof_keys);
-    assert(shared->sizeof_rnode);
-    if(NULL==(shared->page=H5FL_BLK_MALLOC(chunk_page,shared->sizeof_rnode)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
-    if(NULL==(shared->nkey=H5FL_SEQ_MALLOC(size_t,(size_t)(2*H5F_KVALUE(f,H5B_ISTORE)+1))))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for B-tree page")
-
-    /* Initialize the offsets into the native key buffer */
-    for(u=0; u<(2*H5F_KVALUE(f,H5B_ISTORE)+1); u++)
-        shared->nkey[u]=u*H5B_ISTORE->sizeof_nkey;
-
-    /* Make shared B-tree info reference counted */
-    if(NULL==(layout.u.chunk.btree_shared=H5RC_create(shared,H5D_istore_shared_free)))
-        HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't create ref-count wrapper for shared B-tree info")
+    /* Allocate the shared structure */
+    if(H5D_istore_shared_create(f, &layout)<0)
+        HGOTO_ERROR (H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
 
     H5B_debug (f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata);
diff --git a/test/Makefile.in b/test/Makefile.in
index ddc3ea7..5a33f2d 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -69,7 +69,8 @@ MOSTLYCLEAN=cmpd_dset.h5 compact_dataset.h5 dataset.h5 extend.h5 istore.h5 \
            getname.h5 getname[1-3].h5 sec2_file.h5 \
            family_file000[0-3][0-9].h5 multi_file-[rs].h5 core_file \
            new_move_[ab].h5 ntypes.h5 dangle.h5 error_test.h5 err_compat.h5 \
-           dtransform.h5 test_filters.h5 get_file_name.h5 tstint[1-2].h5
+           dtransform.h5 test_filters.h5 get_file_name.h5 tstint[1-2].h5 \
+           unlink_chunked.h5
 
 CLEAN=$(TIMINGS)
diff --git a/test/unlink.c b/test/unlink.c
index 59e0633..8e6a9ac 100644
--- a/test/unlink.c
+++ b/test/unlink.c
@@ -30,6 +30,7 @@ const char *FILENAME[] = {
     "filespace",
     "slashes",
     "resurrect",
+    "unlink_chunked",
     NULL
 };
@@ -1852,6 +1853,7 @@ test_resurrect_dataset(void)
     /* Close things */
     if(H5Dclose(d)<0) TEST_ERROR;
     if(H5Fclose(f)<0) TEST_ERROR;
+    if(H5Pclose(fapl)<0) TEST_ERROR;
 
     PASSED();
     return 0;
@@ -1861,12 +1863,104 @@ error:
     H5Sclose(s);
     H5Dclose(d);
     H5Fclose(f);
+    H5Pclose(fapl);
     } H5E_END_TRY;
     return 1;
 } /* end test_resurrect_dataset() */
 
 
 /*-------------------------------------------------------------------------
+ * Function:	test_unlink_chunked_dataset
+ *
+ * Purpose:	Tests deleting a chunked dataset
+ *
+ * Return:	Success:	0
+ *		Failure:	number of errors
+ *
+ * Programmer:	Quincey Koziol
+ *		Monday, September 27, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_unlink_chunked_dataset(void)
+{
+    hid_t fapl_id=-1;
+    hid_t file_id=-1;
+    hid_t dset_id=-1;
+    hid_t space_id=-1;
+    hid_t dcpl_id=-1;
+    hsize_t dims[FILESPACE_NDIMS]={FILESPACE_DIM0,FILESPACE_DIM1,FILESPACE_DIM2};
+    hsize_t max_dims[FILESPACE_NDIMS]={H5S_UNLIMITED,H5S_UNLIMITED,H5S_UNLIMITED};
+    hsize_t chunk_dims[FILESPACE_NDIMS]={FILESPACE_CHUNK0,FILESPACE_CHUNK1,FILESPACE_CHUNK2};
+    char filename[1024];
+
+    TESTING("Unlinking chunked dataset");
+
+    /* Create file */
+    fapl_id = h5_fileaccess();
+    h5_fixname(FILENAME[7], fapl_id, filename, sizeof filename);
+
+    /* Create the file */
+    if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id))<0) TEST_ERROR;
+
+    /* Create the dataspace */
+    if((space_id=H5Screate_simple(FILESPACE_NDIMS,dims,max_dims))<0) TEST_ERROR;
+
+    /* Create the dataset creation filter */
+    if((dcpl_id=H5Pcreate(H5P_DATASET_CREATE))<0) TEST_ERROR;
+
+    /* Set to chunked storage */
+    if(H5Pset_chunk(dcpl_id,FILESPACE_NDIMS,chunk_dims)<0) TEST_ERROR;
+
+    /* Set to early space allocation */
+    if(H5Pset_alloc_time(dcpl_id,H5D_ALLOC_TIME_EARLY)<0) TEST_ERROR;
+
+    /* Create the dataset */
+    if((dset_id = H5Dcreate(file_id,DATASETNAME,H5T_NATIVE_INT,space_id,dcpl_id))<0) TEST_ERROR;
+
+    /* Close the dataspace */
+    if(H5Sclose(space_id)<0) TEST_ERROR;
+
+    /* Close the dataset creation property list */
+    if(H5Pclose(dcpl_id)<0) TEST_ERROR;
+
+    /* Close the dataset */
+    if(H5Dclose(dset_id)<0) TEST_ERROR;
+
+    /* Close the file */
+    if(H5Fclose(file_id)<0) TEST_ERROR;
+
+    /* Re-open the file */
+    if((file_id = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT))<0) TEST_ERROR;
+
+    /* Delete the dataset */
+    if(H5Gunlink(file_id, DATASETNAME)<0) TEST_ERROR;
+
+    /* Close the file */
+    if(H5Fclose(file_id)<0) TEST_ERROR;
+
+    /* Close the file access property list */
+    if(H5Pclose(fapl_id)<0) TEST_ERROR;
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl_id);
+        H5Sclose(space_id);
+        H5Dclose(dset_id);
+        H5Fclose(file_id);
+        H5Pclose(fapl_id);
+    } H5E_END_TRY;
+    return 1;
+} /* end test_unlink_chunked_dataset() */
+
+
+/*-------------------------------------------------------------------------
 * Function:	main
 *
 * Purpose:	Test H5Gunlink()
@@ -1942,6 +2036,9 @@ main(void)
     /* Test "resurrecting" objects */
     nerrors += test_resurrect_dataset();
 
+    /* Test unlinking chunked datasets */
+    nerrors += test_unlink_chunked_dataset();
+
     /* Close */
     if (H5Fclose(file)<0) TEST_ERROR;
     if (nerrors) {
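As an aside, the new whole-chunk check that replaces the single-sequence test in H5D_istore_writevv() can be read in isolation as follows: the chunk may be locked without first reading it from disk ("relax") only when the remaining file-side and memory-side sequence lengths each sum to exactly one full chunk. The helper below is a paraphrase for illustration; its name and signature are not part of the library.

#include <stddef.h>

/* Illustrative restatement of the relax-flag computation added above:
 * return 1 only if both sequence lists cover exactly one whole chunk. */
static int
covers_whole_chunk(const size_t *chunk_len_arr, size_t chunk_curr_seq, size_t chunk_max_nseq,
                   const size_t *mem_len_arr, size_t mem_curr_seq, size_t mem_max_nseq,
                   size_t chunk_size)
{
    size_t total_bytes = 0;
    size_t u;

    /* Bytes remaining in the file-side (chunk) sequences */
    for(u = chunk_curr_seq; u < chunk_max_nseq; u++)
        total_bytes += chunk_len_arr[u];
    if(total_bytes != chunk_size)
        return 0;

    /* Bytes remaining in the memory-side sequences */
    total_bytes = 0;
    for(u = mem_curr_seq; u < mem_max_nseq; u++)
        total_bytes += mem_len_arr[u];
    if(total_bytes != chunk_size)
        return 0;

    return 1;
}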