Diffstat (limited to 'src')
-rw-r--r-- | src/H5D.c        |  34
-rw-r--r-- | src/H5Dio.c      |  62
-rw-r--r-- | src/H5Distore.c  | 234
-rw-r--r-- | src/H5Dprivate.h |   5
-rw-r--r-- | src/H5Dseq.c     |   4
-rw-r--r-- | src/H5F.c        |   6
-rw-r--r-- | src/H5Fistore.c  | 234
-rw-r--r-- | src/H5Fpkg.h     |   5
-rw-r--r-- | src/H5Fprivate.h |   4
-rw-r--r-- | src/H5Fseq.c     |   4
-rw-r--r-- | src/H5Oprivate.h |   8
-rw-r--r-- | src/H5Pdcpl.c    |   4
-rw-r--r-- | src/H5Z.c        |   1
13 files changed, 426 insertions, 179 deletions
@@ -199,7 +199,7 @@ H5D_init_interface(void) H5P_genplist_t *def_dcpl; /* Default Dataset Creation Property list */ size_t nprops; /* Number of properties */ herr_t ret_value = SUCCEED; /* Return value */ - + FUNC_ENTER_NOAPI_NOINIT(H5D_init_interface) /* Initialize the atom group for the dataset IDs */ @@ -2048,7 +2048,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space assert (H5I_GENPROP_LST==H5I_get_type(dxpl_id)); /* Check if the filters in the DCPL can be applied to this dataset */ - if (H5Z_can_apply(dcpl_id,type_id)<0) + if(H5Z_can_apply(dcpl_id,type_id)<0) HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, NULL, "I/O filters can't operate on this dataset") /* Get the dataset's datatype */ @@ -2056,15 +2056,15 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a datatype") /* Check if the datatype is "sensible" for use in a dataset */ - if (H5T_is_sensible(type)!=TRUE) + if(H5T_is_sensible(type)!=TRUE) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "datatype is not sensible") /* Check if the datatype is/contains a VL-type */ - if (H5T_detect_class(type, H5T_VLEN)) + if(H5T_detect_class(type, H5T_VLEN)) has_vl_type=TRUE; /* Initialize the dataset object */ - if (NULL == (new_dset = H5D_new(dcpl_id,TRUE,has_vl_type))) + if(NULL == (new_dset = H5D_new(dcpl_id,TRUE,has_vl_type))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed") /* Make the "set local" filter callbacks for this dataset */ @@ -2072,7 +2072,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to set local filter parameters") /* What file is the dataset being added to? */ - if (NULL==(file=H5G_insertion_file(loc, name, dxpl_id))) + if(NULL==(file=H5G_insertion_file(loc, name, dxpl_id))) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to locate insertion point") /* Copy datatype for dataset */ @@ -2080,7 +2080,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "can't copy datatype") /* Mark any datatypes as being on disk now */ - if (H5T_set_loc(new_dset->type, file, H5T_LOC_DISK)<0) + if(H5T_set_loc(new_dset->type, file, H5T_LOC_DISK)<0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "invalid datatype location") /* Copy dataspace for dataset */ @@ -2216,7 +2216,7 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space * Chunked storage allows any type of data space extension, so we * don't even bother checking. 
*/ - if(chunk_ndims != ndims) + if(chunk_ndims != (unsigned)ndims) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space") if (new_dset->efl.nused>0) HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout") @@ -2400,7 +2400,7 @@ H5D_open(H5G_entry_t *ent, hid_t dxpl_id) /* Clear any errors from H5FO_opened() */ H5E_clear(NULL); - + /* Open the dataset object */ if ((dataset=H5D_open_oid(ent, dxpl_id)) ==NULL) HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, FAIL, "not found") @@ -2802,6 +2802,11 @@ H5D_extend (H5D_t *dataset, const hsize_t *size, hid_t dxpl_id) if (H5S_modify (&(dataset->ent), space, TRUE, dxpl_id)<0) HGOTO_ERROR (H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace") + /* Update the index values for the cached chunks for this dataset */ + if(H5D_CHUNKED == dataset->layout.type) + if(H5F_istore_update_cache(dataset->ent.file, dxpl_id, &dataset->layout, space) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") + /* Allocate space for the new parts of the dataset, if appropriate */ if(dataset->alloc_time==H5D_ALLOC_TIME_EARLY) if (H5D_alloc_storage(dataset->ent.file, dxpl_id, dataset, H5D_ALLOC_EXTEND, TRUE, FALSE)<0) @@ -3208,7 +3213,6 @@ done: static hsize_t H5D_get_storage_size(const H5D_t *dset, hid_t dxpl_id) { - unsigned u; /* Index variable */ hsize_t ret_value; FUNC_ENTER_NOAPI(H5D_get_storage_size, 0) @@ -3218,8 +3222,7 @@ H5D_get_storage_size(const H5D_t *dset, hid_t dxpl_id) if(dset->layout.u.chunk.addr == HADDR_UNDEF) ret_value=0; else - ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.u.chunk.ndims, - dset->layout.u.chunk.addr); + ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, &dset->layout); break; case H5D_CONTIGUOUS: @@ -3419,7 +3422,7 @@ H5Diterate(void *buf, hid_t type_id, hid_t space_id, H5D_operator_t op, HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid datatype") if (NULL == (space = H5I_object_verify(space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid dataspace") - + ret_value=H5S_select_iterate(buf,type_id,space,op,operator_data); done: @@ -3803,6 +3806,11 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) if(H5S_modify(&(dset->ent), space, TRUE, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update file with new dataspace") + /* Update the index values for the cached chunks for this dataset */ + if(H5D_CHUNKED == dset->layout.type) + if(H5F_istore_update_cache(dset->ent.file, dxpl_id, &dset->layout, space) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update cached chunk indices") + /* Allocate space for the new parts of the dataset, if appropriate */ if(expand && dset->alloc_time==H5D_ALLOC_TIME_EARLY) if(H5D_alloc_storage(dset->ent.file, dxpl_id, dset, H5D_ALLOC_EXTEND, TRUE, FALSE) < 0) diff --git a/src/H5Dio.c b/src/H5Dio.c index a465413..2d6d016 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -64,7 +64,7 @@ typedef struct fm_map { unsigned m_ndims; /* Number of dimensions for memory dataspace */ hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */ hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ - hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ H5O_layout_t *layout; /* Dataset layout information*/ 
H5S_sel_type msel_type; /* Selection type in memory */ } fm_map; @@ -130,7 +130,6 @@ H5FL_BLK_DEFINE(type_conv); /* Declare a free list to manage the H5D_chunk_info_t struct */ H5FL_DEFINE_STATIC(H5D_chunk_info_t); - /*-------------------------------------------------------------------------- NAME @@ -175,7 +174,7 @@ H5Dfill(const void *fill, hid_t fill_type_id, void *buf, hid_t buf_type_id, hid_ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype") if (NULL == (buf_type=H5I_object_verify(buf_type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype") - + /* Fill the selection in the memory buffer */ if(H5D_fill(fill,fill_type,buf,buf_type,space, H5AC_dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed") @@ -308,7 +307,7 @@ H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache) /* Get the dataset transfer property list */ if (NULL == (dx_plist = H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") - + /* Get maximum temporary buffer size */ if(H5P_get(dx_plist, H5D_XFER_MAX_TEMP_BUF_NAME, &cache->max_temp_buf)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve maximum temporary buffer size") @@ -478,7 +477,7 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, if(H5S_SELECT_VALID(file_space)!=TRUE) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } - + /* Get the default dataset transfer property list if the user didn't provide one */ if (H5P_DEFAULT == plist_id) plist_id= H5P_DATASET_XFER_DEFAULT; @@ -1071,14 +1070,13 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, hsize_t smine_start; /*strip mine start loc */ hsize_t n, smine_nelmts; /*elements per strip */ herr_t ret_value = SUCCEED; /*return value */ - + FUNC_ENTER_NOAPI_NOINIT(H5D_contig_read) - + /* * If there is no type conversion then read directly into the * application's buffer. This saves at least one mem-to-mem copy. */ - if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); @@ -1219,7 +1217,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, */ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed") - /* Do the data transform after the conversion (since we're using type mem_type) */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) @@ -1229,7 +1226,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, } /* end of LA additions */ - /* * Scatter the data into memory. */ @@ -1238,8 +1234,6 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, #endif status = H5S_select_mscat(tconv_buf, mem_space, &mem_iter, smine_nelmts, dxpl_cache, buf/*out*/); - - #ifdef H5S_DEBUG H5_timer_end(&(sconv->stats[1].scat_timer), &timer); sconv->stats[1].scat_nbytes += smine_nelmts * dst_type_size; @@ -1321,12 +1315,11 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5 herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_contig_write) - + /* * If there is no type conversion then write directly from the * application's buffer. This saves at least one mem-to-mem copy. 
*/ - if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); @@ -1335,7 +1328,6 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5 status = (sconv->write)(dataset->ent.file, &(dataset->layout), &dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), (size_t)nelmts, H5T_get_size(dataset->type), file_space, mem_space, dxpl_cache, dxpl_id, buf); - #ifdef H5S_DEBUG H5_timer_end(&(sconv->stats[0].write_timer), &timer); sconv->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type); @@ -1456,8 +1448,6 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5 HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed") } /* end if */ - - /* * Perform data type conversion. */ @@ -1472,19 +1462,15 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5 } /* End of LA modifications */ - /* * Scatter the data out to the file. */ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif - status = H5S_select_fscat(dataset->ent.file, &(dataset->layout), &dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), file_space, &file_iter, smine_nelmts, dxpl_cache, dxpl_id, tconv_buf); - - #ifdef H5S_DEBUG H5_timer_end(&(sconv->stats[0].scat_timer), &timer); sconv->stats[0].scat_nbytes += smine_nelmts * dst_type_size; @@ -1534,7 +1520,6 @@ done: * Leon Arber: 4/20/04 * Added support for data transforms. * - *------------------------------------------------------------------------- */ /* ARGSUSED */ @@ -1571,7 +1556,7 @@ UNUSED uint8_t *bkg_buf = NULL; /*background buffer */ H5D_storage_t store; /*union of EFL and chunk pointer in file space */ herr_t ret_value = SUCCEED; /*return value */ - + FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read) /* Map elements between file and memory for each chunk*/ @@ -1602,7 +1587,8 @@ UNUSED chunk_info=chunk_node->data; /* Pass in chunk's coordinates in a union. */ - store.chunk_coords = chunk_info->coords; + store.chunk.offset = chunk_info->coords; + store.chunk.index = chunk_info->index; /* Perform the actual read operation */ status = (sconv->read)(dataset->ent.file, &(dataset->layout), @@ -1701,7 +1687,8 @@ UNUSED bkg_iter_init=1; /*file selection iteration info has been initialized */ /* Pass in chunk's coordinates in a union*/ - store.chunk_coords = chunk_info->coords; + store.chunk.offset = chunk_info->coords; + store.chunk.index = chunk_info->index; for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) { /* Go figure out how many elements to read from the file */ @@ -1756,10 +1743,8 @@ UNUSED /* Do the data transform after the conversion (since we're using type mem_type) */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) - { if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, mem_type) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform") - } /* * Scatter the data into memory. @@ -1804,7 +1789,7 @@ done: if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") - } /* end if */ + } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") @@ -1932,7 +1917,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, chunk_info=chunk_node->data; /* Pass in chunk's coordinates in a union. 
*/ - store.chunk_coords = chunk_info->coords; + store.chunk.offset = chunk_info->coords; + store.chunk.index = chunk_info->index; /* Perform the actual write operation */ status = (sconv->write)(dataset->ent.file, &(dataset->layout), @@ -2051,7 +2037,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, bkg_iter_init=1; /*file selection iteration info has been initialized */ /*pass in chunk's coordinates in a union*/ - store.chunk_coords = chunk_info->coords; + store.chunk.offset = chunk_info->coords; + store.chunk.index = chunk_info->index; for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) { /* Go figure out how many elements to read from the file */ @@ -2093,8 +2080,6 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, if (n!=smine_nelmts) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed") } /* end if */ - - /* * Perform data type conversion. @@ -2105,10 +2090,8 @@ nelmts, H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space, /* Do the data transform after the type conversion (since we're using dataset->type) */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) - { if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->type) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform") - } /* * Scatter the data out to the file. @@ -2302,7 +2285,6 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp hbool_t iter_init=0; /* Selection iteration info has been initialized */ unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ - hsize_t nchunks, last_nchunks; /* Number of chunks in dataset */ H5TB_NODE *curr_node; /* Current node in TBBT */ H5S_sel_type fsel_type; /* Selection type on disk */ char bogus; /* "bogus" buffer to pass to selection iterator */ @@ -2358,22 +2340,12 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality") /* Decide the number of chunks in each dimension*/ - last_nchunks=0; - nchunks=1; for(u=0; u<f_ndims; u++) { /* Keep the size of the chunk dimensions as hsize_t for various routines */ fm->chunk_dim[u]=fm->layout->u.chunk.dim[u]; /* Round up to the next integer # of chunks, to accomodate partial chunks */ fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.u.chunk.dim[u])-1) / dataset->layout.u.chunk.dim[u]; - - /* Track total number of chunks in dataset */ - nchunks *= fm->chunks[u]; - - /* Check if the chunk indices will overflow */ - if(nchunks<last_nchunks) - HGOTO_ERROR (H5E_DATASET, H5E_OVERFLOW, FAIL, "too many chunks") - last_nchunks=nchunks; } /* end for */ /* Compute the "down" size of 'chunks' information */ diff --git a/src/H5Distore.c b/src/H5Distore.c index 2434cc2..a5aeb70 100644 --- a/src/H5Distore.c +++ b/src/H5Distore.c @@ -1346,7 +1346,7 @@ static void * H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, const H5O_pline_t *pline, const H5O_fill_t *fill, H5D_fill_time_t fill_time, H5F_istore_ud1_t *udata, - const hssize_t offset[], hbool_t relax, + const H5D_storage_t *store, hbool_t relax, unsigned *idx_hint/*in,out*/) { int idx=0; /*hash index number */ @@ -1369,18 +1369,14 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con /* Search for the chunk in the cache */ if 
(rdcc->nslots>0) { - for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) { - temp_idx += offset[u]; - temp_idx *= layout->u.chunk.dim[u]; - } - temp_idx += (hsize_t)(layout->u.chunk.addr); + temp_idx = store->chunk.index + (hsize_t)(layout->u.chunk.addr); idx=H5F_HASH(f,temp_idx); ent = rdcc->slot[idx]; if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims && H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) { - if (offset[u]!=ent->offset[u]) { + if (store->chunk.offset[u]!=ent->offset[u]) { found = FALSE; break; } @@ -1423,7 +1419,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con * Not in the cache. Read it from the file and count this as a miss * if it's in the file or an init if it isn't. */ - chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, offset, udata); + chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata); } /* end else */ if (H5F_addr_defined(chunk_addr)) { @@ -1508,7 +1504,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout); H5O_copy(H5O_PLINE_ID, pline, &ent->pline); for (u=0; u<layout->u.chunk.ndims; u++) - ent->offset[u] = offset[u]; + ent->offset[u] = store->chunk.offset[u]; ent->rd_count = chunk_size; ent->wr_count = chunk_size; ent->chunk = chunk; @@ -1612,7 +1608,7 @@ done: static herr_t H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, const H5O_pline_t *pline, hbool_t dirty, - const hssize_t offset[], unsigned *idx_hint, uint8_t *chunk, size_t naccessed) + const H5D_storage_t *store, unsigned idx_hint, uint8_t *chunk, size_t naccessed) { H5F_rdcc_t *rdcc = &(f->shared->rdcc); H5F_rdcc_ent_t *ent = NULL; @@ -1621,13 +1617,13 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_unlock); - if (UINT_MAX==*idx_hint) { + if (UINT_MAX==idx_hint) { /*not in cache*/ } else { - assert(*idx_hint<rdcc->nslots); - assert(rdcc->slot[*idx_hint]); - assert(rdcc->slot[*idx_hint]->chunk==chunk); - found = *idx_hint; + assert(idx_hint<rdcc->nslots); + assert(rdcc->slot[idx_hint]); + assert(rdcc->slot[idx_hint]->chunk==chunk); + found = idx_hint; } if (found<0) { @@ -1645,7 +1641,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5O_copy (H5O_LAYOUT_ID, layout, &x.layout); H5O_copy (H5O_PLINE_ID, pline, &x.pline); for (u=0; u<layout->u.chunk.ndims; u++) - x.offset[u] = offset[u]; + x.offset[u] = store->chunk.offset[u]; assert(layout->u.chunk.size>0); H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t); x.alloc_size = x.chunk_size; @@ -1692,7 +1688,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, */ ssize_t H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[], + const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf) @@ -1718,7 +1714,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp #ifndef NDEBUG for (u=0; u<layout->u.chunk.ndims; u++) - 
assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ + assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ @@ -1727,7 +1723,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC); for(u=0; u<layout->u.chunk.ndims; u++) HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ - chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); + chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); @@ -1757,7 +1753,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * chunk. */ if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time, - &udata, chunk_coords, FALSE, &idx_hint))) + &udata, store, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -1766,7 +1762,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, FALSE, - chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0) + store, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk"); /* Set return value */ @@ -1796,7 +1792,7 @@ done: ssize_t H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, - const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[], + const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf) @@ -1822,7 +1818,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, #ifndef NDEBUG for (u=0; u<layout->u.chunk.ndims; u++) - assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ + assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ @@ -1831,7 +1827,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC); for(u=0; u<layout->u.chunk.ndims; u++) HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? 
", " : "}\n")); #endif /* QAK */ - chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); + chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); @@ -1877,7 +1873,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a relax = FALSE; if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time, - &udata, chunk_coords, relax, &idx_hint))) + &udata, store, relax, &idx_hint))) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -1886,7 +1882,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, TRUE, - chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0) + store, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk"); /* Set return value */ @@ -1967,16 +1963,33 @@ done: *------------------------------------------------------------------------- */ hsize_t -H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) +H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout) { + H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ + H5F_rdcc_ent_t *ent; /*cache entry */ + H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */ H5F_istore_ud1_t udata; hsize_t ret_value; /* Return value */ FUNC_ENTER_NOAPI(H5F_istore_allocated, 0); + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache") + + /* Search for cached chunks that haven't been written out */ + for(ent = rdcc->head; ent; ent = ent->next) { + /* Make certain we are dealing with the correct B-tree, etc */ + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + /* Flush the chunk out to disk, to make certain the size is correct later */ + if (H5F_istore_flush_entry(f, &dxpl_cache, dxpl_id, ent, FALSE)<0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer"); + } /* end if */ + } /* end for */ + HDmemset(&udata, 0, sizeof udata); - udata.mesg.u.chunk.ndims = ndims; - if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0) + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree"); /* Set return value */ @@ -1984,7 +1997,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) done: FUNC_LEAVE_NOAPI(ret_value); -} +} /* end H5F_istore_allocated() */ /*------------------------------------------------------------------------- @@ -2487,7 +2500,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ H5F_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */ unsigned u; /*counters */ - int found = 0; /*remove this entry */ + int found; /*remove this entry */ H5F_istore_ud1_t udata; /*B-tree 
pass-through */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ herr_t ret_value=SUCCEED; /* Return value */ @@ -2510,13 +2523,12 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, * and release them from the linked list raw data cache *------------------------------------------------------------------------- */ + found = 0; for(ent = rdcc->head; ent; ent = next) { next = ent->next; /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && - H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { - found = 0; + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { if((hsize_t)ent->offset[u] > curr_dims[u]) { found = 1; @@ -2526,17 +2538,18 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, } /* end if */ if(found) { -#if defined (H5F_ISTORE_DEBUG) - HDfputs("cache:remove:[", stdout); - for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { - HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]); - } - HDfputs("]\n", stdout); +#ifdef H5F_ISTORE_DEBUG + HDfputs("cache:remove:[", stderr); + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) + HDfprintf(stderr, "%s%Hd", u ? ", " : "", ent->offset[u]); + HDfputs("]\n", stderr); #endif /* Preempt the entry from the cache, but do not flush it to disk */ if(H5F_istore_preempt(f, dxpl_cache, dxpl_id, ent, FALSE) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk"); + + found=0; } } @@ -2597,12 +2610,10 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a /* Figure out what chunks are no longer in use for the specified extent and release them */ for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) { -#if defined (H5F_ISTORE_DEBUG) +#ifdef H5F_ISTORE_DEBUG HDfputs("b-tree:remove:[", bt_udata->stream); - for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) { - HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "", - lt_key->offset[u]); - } + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) + HDfprintf(bt_udata->stream, "%s%Hd", u ? 
", " : "", lt_key->offset[u]); HDfputs("]\n", bt_udata->stream); #endif @@ -2710,6 +2721,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5S_t *space_chunk = NULL; /*dataspace for a chunk */ hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ int srank; /*current # of dimensions (signed) */ unsigned rank; /*current # of dimensions */ int i, carry; /*counters */ @@ -2718,6 +2731,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5O_pline_t pline; /* I/O pipeline information */ H5O_fill_t fill; /* Fill value information */ H5D_fill_time_t fill_time; /* Fill time information */ + H5D_storage_t store; /* Dataset storage information */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_istore_initialize_by_extent, FAIL); @@ -2747,10 +2761,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned); /* Copy current dimensions */ - for(u = 0; u < rank; u++) + for(u = 0; u < rank; u++) { size[u] = curr_dims[u]; + + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u]; + } /* end for */ size[u] = layout->u.chunk.dim[u]; + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank,chunks,down_chunks)<0) + HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + /* Create a data space for a chunk & set the extent */ for(u = 0; u < rank; u++) chunk_dims[u] = layout->u.chunk.dim[u]; @@ -2792,8 +2814,13 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca if(found) { + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") + + store.chunk.offset=chunk_offset; if(NULL == (chunk = H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &pline, &fill, fill_time, - NULL, chunk_offset, FALSE, &idx_hint))) + NULL, &store, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); if(H5S_select_all(space_chunk,1) < 0) @@ -2802,7 +2829,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca for(u = 0; u < rank; u++) count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]); -#if defined (H5F_ISTORE_DEBUG) +#ifdef H5F_ISTORE_DEBUG HDfputs("cache:initialize:offset:[", stdout); for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); @@ -2826,7 +2853,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed"); if(H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &pline, TRUE, - chunk_offset, &idx_hint, chunk, (size_t)naccessed) < 0) + &store, idx_hint, chunk, (size_t)naccessed) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk"); } /*found */ @@ -2887,8 +2914,8 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) /* Is the chunk to be deleted this cache entry? 
*/ if(layout->u.chunk.addr==ent->layout.u.chunk.addr) /* Remove entry without flushing */ - if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) - HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); + if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) + HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); } /* end for */ /* Set up user data for B-tree deletion */ @@ -2906,6 +2933,111 @@ done: /*------------------------------------------------------------------------- + * Function: H5F_istore_update_cache + * + * Purpose: Update any cached chunks index values after the dataspace + * size has changed + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, May 29, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +herr_t +H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, const H5S_t * space) +{ + H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ + H5F_rdcc_ent_t *ent, *next; /*cache entry */ + H5F_rdcc_ent_t *old_ent; /* Old cache entry */ + H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */ + int srank; /*current # of dimensions (signed) */ + unsigned rank; /*current # of dimensions */ + hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ + hsize_t idx; /* Chunk index */ + hsize_t temp_idx; /* temporary index number */ + unsigned old_idx; /* Previous index number */ + unsigned u; /*counters */ + herr_t ret_value=SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5F_istore_update_cache, FAIL); + + /* Check args */ + assert(f); + assert(layout && H5D_CHUNKED == layout->type); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + assert(space); + + /* Go get the rank & dimensions */ + if((srank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions"); + H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned); + + /* Copy current dimensions */ + for(u = 0; u < rank; u++) { + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u]; + } /* end for */ + + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank,chunks,down_chunks)<0) + HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Recompute the index for each cached chunk that is in a dataset */ + for(ent = rdcc->head; ent; ent = next) { + next=ent->next; + + /* Make certain we are dealing with the correct B-tree, etc */ + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank,ent->offset,layout->u.chunk.dim,down_chunks,&idx)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") + + /* Compute the index for the chunk entry */ + temp_idx = idx + (hsize_t)(layout->u.chunk.addr); + old_idx=ent->idx; /* Save for later */ + ent->idx=H5F_HASH(f,temp_idx); + + 
if(old_idx!=ent->idx) { + /* Check if there is already a chunk at this chunk's new location */ + old_ent = rdcc->slot[ent->idx]; + if(old_ent!=NULL) { + assert(old_ent->locked==0); + + /* Check if we are removing the entry we would walk to next */ + if(old_ent==next) + next=old_ent->next; + + /* Remove the old entry from the cache */ + if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, old_ent, TRUE )<0) + HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); + } /* end if */ + + /* Insert this chunk into correct location in hash table */ + rdcc->slot[ent->idx]=ent; + + /* Null out previous location */ + rdcc->slot[old_idx]=NULL; + } /* end if */ + } /* end if */ + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5F_istore_update_cache() */ + + +/*------------------------------------------------------------------------- * Function: H5F_istore_dump_btree * * Purpose: Prints information about the storage B-tree to the specified diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index 985d935..bdbe126 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -164,7 +164,10 @@ typedef struct H5D_t H5D_t; /* Typedef for dataset storage information */ typedef union H5D_storage_t { H5O_efl_t efl; /* External file list information for dataset */ - hssize_t *chunk_coords; /* chunk's coordinates in file chunks */ + struct { + hsize_t index; /* "Index" of chunk in dataset (must be first for TBBT routines) */ + hssize_t *offset; /* Chunk's coordinates in elements */ + } chunk; } H5D_storage_t; /* Typedef for cached dataset transfer property list information */ diff --git a/src/H5Dseq.c b/src/H5Dseq.c index 9bf10e7..f8ecf00 100644 --- a/src/H5Dseq.c +++ b/src/H5Dseq.c @@ -237,7 +237,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i case H5D_CHUNKED: assert(store); - if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords, + if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) @@ -346,7 +346,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, case H5D_CHUNKED: assert(store); - if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords, + if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) @@ -2148,7 +2148,7 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id) /* Keep this ID in file object structure */ new_file->file_id = ret_value; - + done: if (ret_value<0 && new_file) if(H5F_close(new_file)<0) @@ -2225,7 +2225,7 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id) /* Get an atom for the file */ if ((ret_value = H5I_register(H5I_FILE, new_file))<0) HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file handle") - + /* Keep this ID in file object structure */ new_file->file_id = ret_value; @@ -3333,7 +3333,7 @@ H5F_close(H5F_t *f) /* Invalidate file ID */ f->file_id = -1; - + /* Only flush at this point if the file will be closed */ assert(closing); /* Dump debugging info */ diff --git a/src/H5Fistore.c b/src/H5Fistore.c index 2434cc2..a5aeb70 100644 --- a/src/H5Fistore.c +++ b/src/H5Fistore.c @@ -1346,7 +1346,7 @@ static void * H5F_istore_lock(H5F_t *f, const 
H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, const H5O_pline_t *pline, const H5O_fill_t *fill, H5D_fill_time_t fill_time, H5F_istore_ud1_t *udata, - const hssize_t offset[], hbool_t relax, + const H5D_storage_t *store, hbool_t relax, unsigned *idx_hint/*in,out*/) { int idx=0; /*hash index number */ @@ -1369,18 +1369,14 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con /* Search for the chunk in the cache */ if (rdcc->nslots>0) { - for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) { - temp_idx += offset[u]; - temp_idx *= layout->u.chunk.dim[u]; - } - temp_idx += (hsize_t)(layout->u.chunk.addr); + temp_idx = store->chunk.index + (hsize_t)(layout->u.chunk.addr); idx=H5F_HASH(f,temp_idx); ent = rdcc->slot[idx]; if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims && H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) { - if (offset[u]!=ent->offset[u]) { + if (store->chunk.offset[u]!=ent->offset[u]) { found = FALSE; break; } @@ -1423,7 +1419,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con * Not in the cache. Read it from the file and count this as a miss * if it's in the file or an init if it isn't. */ - chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, offset, udata); + chunk_addr = H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, udata); } /* end else */ if (H5F_addr_defined(chunk_addr)) { @@ -1508,7 +1504,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout); H5O_copy(H5O_PLINE_ID, pline, &ent->pline); for (u=0; u<layout->u.chunk.ndims; u++) - ent->offset[u] = offset[u]; + ent->offset[u] = store->chunk.offset[u]; ent->rd_count = chunk_size; ent->wr_count = chunk_size; ent->chunk = chunk; @@ -1612,7 +1608,7 @@ done: static herr_t H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, const H5O_pline_t *pline, hbool_t dirty, - const hssize_t offset[], unsigned *idx_hint, uint8_t *chunk, size_t naccessed) + const H5D_storage_t *store, unsigned idx_hint, uint8_t *chunk, size_t naccessed) { H5F_rdcc_t *rdcc = &(f->shared->rdcc); H5F_rdcc_ent_t *ent = NULL; @@ -1621,13 +1617,13 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_unlock); - if (UINT_MAX==*idx_hint) { + if (UINT_MAX==idx_hint) { /*not in cache*/ } else { - assert(*idx_hint<rdcc->nslots); - assert(rdcc->slot[*idx_hint]); - assert(rdcc->slot[*idx_hint]->chunk==chunk); - found = *idx_hint; + assert(idx_hint<rdcc->nslots); + assert(rdcc->slot[idx_hint]); + assert(rdcc->slot[idx_hint]->chunk==chunk); + found = idx_hint; } if (found<0) { @@ -1645,7 +1641,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5O_copy (H5O_LAYOUT_ID, layout, &x.layout); H5O_copy (H5O_PLINE_ID, pline, &x.pline); for (u=0; u<layout->u.chunk.ndims; u++) - x.offset[u] = offset[u]; + x.offset[u] = store->chunk.offset[u]; assert(layout->u.chunk.size>0); H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t); x.alloc_size = x.chunk_size; @@ -1692,7 +1688,7 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, */ ssize_t H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - const H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[], + const 
H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf) @@ -1718,7 +1714,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp #ifndef NDEBUG for (u=0; u<layout->u.chunk.ndims; u++) - assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ + assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ @@ -1727,7 +1723,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC); for(u=0; u<layout->u.chunk.ndims; u++) HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n")); #endif /* QAK */ - chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); + chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); @@ -1757,7 +1753,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a * chunk. */ if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time, - &udata, chunk_coords, FALSE, &idx_hint))) + &udata, store, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -1766,7 +1762,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, FALSE, - chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0) + store, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk"); /* Set return value */ @@ -1796,7 +1792,7 @@ done: ssize_t H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const H5O_layout_t *layout, - const struct H5D_dcpl_cache_t *dcpl_cache, hssize_t chunk_coords[], + const struct H5D_dcpl_cache_t *dcpl_cache, const H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf) @@ -1822,7 +1818,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, #ifndef NDEBUG for (u=0; u<layout->u.chunk.ndims; u++) - assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */ + assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */ #endif /* Get the address of this chunk on disk */ @@ -1831,7 +1827,7 @@ HDfprintf(stderr,"%s: chunk_coords={",FUNC); for(u=0; u<layout->u.chunk.ndims; u++) HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? 
", " : "}\n")); #endif /* QAK */ - chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata); + chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, store->chunk.offset, &udata); #ifdef QAK HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size); HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]); @@ -1877,7 +1873,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a relax = FALSE; if (NULL==(chunk=H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, &dcpl_cache->fill, dcpl_cache->fill_time, - &udata, chunk_coords, relax, &idx_hint))) + &udata, store, relax, &idx_hint))) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); /* Use the vectorized memory copy routine to do actual work */ @@ -1886,7 +1882,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t); if (H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &dcpl_cache->pline, TRUE, - chunk_coords, &idx_hint, chunk, (size_t)naccessed)<0) + store, idx_hint, chunk, (size_t)naccessed)<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk"); /* Set return value */ @@ -1967,16 +1963,33 @@ done: *------------------------------------------------------------------------- */ hsize_t -H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) +H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout) { + H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ + H5F_rdcc_ent_t *ent; /*cache entry */ + H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */ H5F_istore_ud1_t udata; hsize_t ret_value; /* Return value */ FUNC_ENTER_NOAPI(H5F_istore_allocated, 0); + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 0, "can't fill dxpl cache") + + /* Search for cached chunks that haven't been written out */ + for(ent = rdcc->head; ent; ent = ent->next) { + /* Make certain we are dealing with the correct B-tree, etc */ + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + /* Flush the chunk out to disk, to make certain the size is correct later */ + if (H5F_istore_flush_entry(f, &dxpl_cache, dxpl_id, ent, FALSE)<0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer"); + } /* end if */ + } /* end for */ + HDmemset(&udata, 0, sizeof udata); - udata.mesg.u.chunk.ndims = ndims; - if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0) + udata.mesg.u.chunk.ndims = layout->u.chunk.ndims; + if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, layout->u.chunk.addr, &udata)<0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree"); /* Set return value */ @@ -1984,7 +1997,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr) done: FUNC_LEAVE_NOAPI(ret_value); -} +} /* end H5F_istore_allocated() */ /*------------------------------------------------------------------------- @@ -2487,7 +2500,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ H5F_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */ unsigned u; /*counters */ - int found = 0; /*remove this entry */ + int found; /*remove this entry */ H5F_istore_ud1_t udata; /*B-tree 
pass-through */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ herr_t ret_value=SUCCEED; /* Return value */ @@ -2510,13 +2523,12 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, * and release them from the linked list raw data cache *------------------------------------------------------------------------- */ + found = 0; for(ent = rdcc->head; ent; ent = next) { next = ent->next; /* Make certain we are dealing with the correct B-tree, etc */ - if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims && - H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { - found = 0; + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { if((hsize_t)ent->offset[u] > curr_dims[u]) { found = 1; @@ -2526,17 +2538,18 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, } /* end if */ if(found) { -#if defined (H5F_ISTORE_DEBUG) - HDfputs("cache:remove:[", stdout); - for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) { - HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]); - } - HDfputs("]\n", stdout); +#ifdef H5F_ISTORE_DEBUG + HDfputs("cache:remove:[", stderr); + for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) + HDfprintf(stderr, "%s%Hd", u ? ", " : "", ent->offset[u]); + HDfputs("]\n", stderr); #endif /* Preempt the entry from the cache, but do not flush it to disk */ if(H5F_istore_preempt(f, dxpl_cache, dxpl_id, ent, FALSE) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to preempt chunk"); + + found=0; } } @@ -2597,12 +2610,10 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a /* Figure out what chunks are no longer in use for the specified extent and release them */ for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) { -#if defined (H5F_ISTORE_DEBUG) +#ifdef H5F_ISTORE_DEBUG HDfputs("b-tree:remove:[", bt_udata->stream); - for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) { - HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "", - lt_key->offset[u]); - } + for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) + HDfprintf(bt_udata->stream, "%s%Hd", u ? 
", " : "", lt_key->offset[u]); HDfputs("]\n", bt_udata->stream); #endif @@ -2710,6 +2721,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5S_t *space_chunk = NULL; /*dataspace for a chunk */ hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ int srank; /*current # of dimensions (signed) */ unsigned rank; /*current # of dimensions */ int i, carry; /*counters */ @@ -2718,6 +2731,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5O_pline_t pline; /* I/O pipeline information */ H5O_fill_t fill; /* Fill value information */ H5D_fill_time_t fill_time; /* Fill time information */ + H5D_storage_t store; /* Dataset storage information */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5F_istore_initialize_by_extent, FAIL); @@ -2747,10 +2761,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned); /* Copy current dimensions */ - for(u = 0; u < rank; u++) + for(u = 0; u < rank; u++) { size[u] = curr_dims[u]; + + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u]; + } /* end for */ size[u] = layout->u.chunk.dim[u]; + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank,chunks,down_chunks)<0) + HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + /* Create a data space for a chunk & set the extent */ for(u = 0; u < rank; u++) chunk_dims[u] = layout->u.chunk.dim[u]; @@ -2792,8 +2814,13 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca if(found) { + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank,chunk_offset,layout->u.chunk.dim,down_chunks,&store.chunk.index)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") + + store.chunk.offset=chunk_offset; if(NULL == (chunk = H5F_istore_lock(f, dxpl_cache, dxpl_id, layout, &pline, &fill, fill_time, - NULL, chunk_offset, FALSE, &idx_hint))) + NULL, &store, FALSE, &idx_hint))) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk"); if(H5S_select_all(space_chunk,1) < 0) @@ -2802,7 +2829,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca for(u = 0; u < rank; u++) count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]); -#if defined (H5F_ISTORE_DEBUG) +#ifdef H5F_ISTORE_DEBUG HDfputs("cache:initialize:offset:[", stdout); for(u = 0; u < layout->u.chunk.ndims - 1; u++) HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]); @@ -2826,7 +2853,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed"); if(H5F_istore_unlock(f, dxpl_cache, dxpl_id, layout, &pline, TRUE, - chunk_offset, &idx_hint, chunk, (size_t)naccessed) < 0) + &store, idx_hint, chunk, (size_t)naccessed) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk"); } /*found */ @@ -2887,8 +2914,8 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout) /* Is the chunk to be deleted this cache entry? 
*/ if(layout->u.chunk.addr==ent->layout.u.chunk.addr) /* Remove entry without flushing */ - if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) - HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); + if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0) + HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); } /* end for */ /* Set up user data for B-tree deletion */ @@ -2906,6 +2933,111 @@ done: /*------------------------------------------------------------------------- + * Function: H5F_istore_update_cache + * + * Purpose: Update any cached chunks index values after the dataspace + * size has changed + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, May 29, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +herr_t +H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout, const H5S_t * space) +{ + H5F_rdcc_t *rdcc = &(f->shared->rdcc); /*raw data chunk cache */ + H5F_rdcc_ent_t *ent, *next; /*cache entry */ + H5F_rdcc_ent_t *old_ent; /* Old cache entry */ + H5D_dxpl_cache_t dxpl_cache; /* Cached data transfer properties */ + int srank; /*current # of dimensions (signed) */ + unsigned rank; /*current # of dimensions */ + hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */ + hsize_t chunks[H5O_LAYOUT_NDIMS]; /*current number of chunks in each dimension */ + hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of elements in each dimension */ + hsize_t idx; /* Chunk index */ + hsize_t temp_idx; /* temporary index number */ + unsigned old_idx; /* Previous index number */ + unsigned u; /*counters */ + herr_t ret_value=SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5F_istore_update_cache, FAIL); + + /* Check args */ + assert(f); + assert(layout && H5D_CHUNKED == layout->type); + assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + assert(space); + + /* Go get the rank & dimensions */ + if((srank = H5S_get_simple_extent_dims(space, curr_dims, NULL)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions"); + H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned); + + /* Copy current dimensions */ + for(u = 0; u < rank; u++) { + /* Round up to the next integer # of chunks, to accomodate partial chunks */ + chunks[u] = ((curr_dims[u]+layout->u.chunk.dim[u])-1) / layout->u.chunk.dim[u]; + } /* end for */ + + /* Get the "down" sizes for each dimension */ + if(H5V_array_down(rank,chunks,down_chunks)<0) + HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") + + /* Fill the DXPL cache values for later use */ + if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Recompute the index for each cached chunk that is in a dataset */ + for(ent = rdcc->head; ent; ent = next) { + next=ent->next; + + /* Make certain we are dealing with the correct B-tree, etc */ + if (H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) { + /* Calculate the index of this chunk */ + if(H5V_chunk_index(rank,ent->offset,layout->u.chunk.dim,down_chunks,&idx)<0) + HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") + + /* Compute the index for the chunk entry */ + temp_idx = idx + (hsize_t)(layout->u.chunk.addr); + old_idx=ent->idx; /* Save for later */ + ent->idx=H5F_HASH(f,temp_idx); + + 
if(old_idx!=ent->idx) { + /* Check if there is already a chunk at this chunk's new location */ + old_ent = rdcc->slot[ent->idx]; + if(old_ent!=NULL) { + assert(old_ent->locked==0); + + /* Check if we are removing the entry we would walk to next */ + if(old_ent==next) + next=old_ent->next; + + /* Remove the old entry from the cache */ + if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, old_ent, TRUE )<0) + HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks"); + } /* end if */ + + /* Insert this chunk into correct location in hash table */ + rdcc->slot[ent->idx]=ent; + + /* Null out previous location */ + rdcc->slot[old_idx]=NULL; + } /* end if */ + } /* end if */ + } /* end for */ + +done: + FUNC_LEAVE_NOAPI(ret_value); +} /* end H5F_istore_update_cache() */ + + +/*------------------------------------------------------------------------- * Function: H5F_istore_dump_btree * * Purpose: Prints information about the storage B-tree to the specified diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index 8160a98..94655f9 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -189,6 +189,7 @@ struct H5F_t { /* Forward declarations for prototype arguments */ struct H5D_dxpl_cache_t; struct H5D_dcpl_cache_t; +union H5D_storage_t; /* Private functions, not part of the publicly documented API */ #ifdef NOT_YET @@ -204,14 +205,14 @@ H5_DLL herr_t H5F_istore_dest (H5F_t *f, hid_t dxpl_id); H5_DLL ssize_t H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const struct H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, - hssize_t chunk_coords[], + const union H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf); H5_DLL ssize_t H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, const struct H5O_layout_t *layout, const struct H5D_dcpl_cache_t *dcpl_cache, - hssize_t chunk_coords[], + const union H5D_storage_t *store, size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf); diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index addb247..c16c9f1 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -447,7 +447,7 @@ H5_DLL herr_t H5F_istore_create(H5F_t *f, hid_t dxpl_id, H5_DLL herr_t H5F_istore_allocate (H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout, const hsize_t *space_dim, struct H5P_genplist_t *dc_plist, hbool_t full_overwrite); -H5_DLL hsize_t H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr); +H5_DLL hsize_t H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout); H5_DLL herr_t H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, haddr_t addr); H5_DLL herr_t H5F_istore_prune_by_extent( H5F_t *f, @@ -459,6 +459,8 @@ H5_DLL herr_t H5F_istore_initialize_by_extent( H5F_t *f, const struct H5S_t *space ); H5_DLL herr_t H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout); +H5_DLL herr_t H5F_istore_update_cache(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout, + const struct H5S_t * space); /* Address-related functions */ H5_DLL void H5F_addr_encode(const H5F_t *, uint8_t** /*in,out*/, haddr_t); diff --git a/src/H5Fseq.c b/src/H5Fseq.c index 9bf10e7..f8ecf00 100644 --- 
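When an entry's slot changes, the code above first evicts whatever already occupies the destination slot (patching the loop's next pointer if the occupant is the entry that would be visited next), then rehomes the entry and clears its old slot. A compilable toy of that relocation step, using a simplified cache struct and a stub preempt() in place of H5F_istore_preempt() (all names and the tiny slot count are illustrative):

    #include <stdio.h>

    #define NSLOTS 8   /* tiny slot count, just for the demonstration */

    /* Simplified model of the raw-data chunk cache: entries live on a linked
     * list and are also indexed by a slot array keyed on a hash of the chunk
     * index. Field names are illustrative, not the real H5F_rdcc_ent_t. */
    struct ent {
        unsigned    idx;      /* current hash slot */
        int         locked;   /* nonzero while a caller holds the chunk */
        struct ent *next;     /* next entry in the cache's list */
    };

    struct cache {
        struct ent *slot[NSLOTS];
    };

    /* Stand-in for H5F_istore_preempt(): the real routine flushes a dirty
     * chunk to the file and unlinks it; here we only clear its slot. */
    static void preempt(struct cache *c, struct ent *victim)
    {
        c->slot[victim->idx] = NULL;
    }

    /* Move 'e' from 'old_idx' to 'new_idx', evicting any unlocked occupant.
     * '*nextp' is the caller's iteration cursor over the entry list and must
     * be advanced past an occupant that is about to be evicted. */
    static void relocate(struct cache *c, struct ent *e,
                         unsigned old_idx, unsigned new_idx, struct ent **nextp)
    {
        if (old_idx == new_idx)
            return;                            /* same slot: nothing to move */

        struct ent *occupant = c->slot[new_idx];
        if (occupant != NULL && !occupant->locked) {
            if (occupant == *nextp)            /* don't walk into an evicted entry */
                *nextp = occupant->next;
            preempt(c, occupant);
        }

        c->slot[new_idx] = e;                  /* claim the new slot */
        c->slot[old_idx] = NULL;               /* and vacate the old one */
        e->idx = new_idx;
    }

    int main(void)
    {
        struct cache c = {{0}};
        struct ent a = {2, 0, NULL};           /* entry whose index changed: slot 2 -> 5 */
        struct ent b = {5, 0, NULL};           /* unrelated entry already in slot 5 */
        a.next = &b;
        c.slot[2] = &a;
        c.slot[5] = &b;

        struct ent *next = &b;                 /* pretend the walk would visit 'b' next */
        relocate(&c, &a, 2, 5, &next);

        printf("slot 5 holds the moved entry: %s\n", c.slot[5] == &a ? "yes" : "no");
        printf("iteration cursor advanced:   %s\n", next == NULL ? "yes" : "no");
        return 0;
    }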
a/src/H5Fseq.c +++ b/src/H5Fseq.c @@ -237,7 +237,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i case H5D_CHUNKED: assert(store); - if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords, + if((ret_value=H5F_istore_readvv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) @@ -346,7 +346,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, case H5D_CHUNKED: assert(store); - if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store->chunk_coords, + if((ret_value=H5F_istore_writevv(f, dxpl_cache, dxpl_id, layout, dcpl_cache, store, dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0) diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 6520877..63cac3c 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -35,10 +35,10 @@ #include "H5Spublic.h" /* Dataspace functions */ /* Private headers needed by this file */ -#include "H5private.h" /* Generic functions */ -#include "H5HGprivate.h" /* Global heap functions */ -#include "H5Tprivate.h" /* Datatype functions */ -#include "H5Zprivate.h" /* I/O pipeline filters */ +#include "H5private.h" /* Generic functions */ +#include "H5HGprivate.h" /* Global heap functions */ +#include "H5Tprivate.h" /* Datatype functions */ +#include "H5Zprivate.h" /* I/O pipeline filters */ /* Object header macros */ #define H5O_MIN_SIZE H5O_ALIGN(32) /*min obj header data size */ diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c index d2b57df..ce2e227 100644 --- a/src/H5Pdcpl.c +++ b/src/H5Pdcpl.c @@ -747,8 +747,7 @@ H5Pget_filter(hid_t plist_id, unsigned idx, unsigned int *flags/*out*/, #endif /* H5_WANT_H5_V1_6_COMPAT */ /* Check args */ - if (cd_nelmts || cd_values) -{ + if (cd_nelmts || cd_values) { if (cd_nelmts && *cd_nelmts>256) /* * It's likely that users forget to initialize this on input, so @@ -1641,4 +1640,3 @@ H5Premove_filter(hid_t plist_id, H5Z_filter_t filter) done: FUNC_LEAVE_API(ret_value); } - @@ -1159,4 +1159,3 @@ H5Z_delete(H5O_pline_t *pline, H5Z_filter_t filter) done: FUNC_LEAVE_NOAPI(ret_value) } - |
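The H5Fseq.c hunks switch the chunked branch from extracting store->chunk_coords to forwarding the whole storage union, so the chunk layer sees both the chunk offset and the precomputed index without recomputing it. A hedged sketch of that pattern with made-up types (the real H5D_storage_t and H5F_seq_readvv signatures carry more state):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative storage descriptor: the caller fills in the member that
     * matches the dataset layout; these are not the real H5D_storage_t fields. */
    typedef struct {
        uint64_t        index;      /* precomputed linear chunk index */
        const uint64_t *offset;     /* chunk offset within the dataset */
    } chunk_store_t;

    typedef union {
        chunk_store_t chunk;
        /* contiguous/compact layouts would carry their own members */
    } storage_t;

    typedef enum { LAYOUT_CONTIGUOUS, LAYOUT_CHUNKED } layout_t;

    /* In the spirit of the patched H5F_seq_readvv(): forward the whole union
     * to the chunked-storage layer instead of plucking out a coordinate array. */
    static long seq_readvv(layout_t layout, const storage_t *store)
    {
        switch (layout) {
        case LAYOUT_CHUNKED:
            /* the chunk layer can use store->chunk.index directly */
            return (long)store->chunk.index;
        default:
            return 0;
        }
    }

    int main(void)
    {
        const uint64_t off[2] = {30, 50};
        storage_t store;
        store.chunk.index  = 26;
        store.chunk.offset = off;
        printf("chunk index seen by the I/O layer: %ld\n",
               seq_readvv(LAYOUT_CHUNKED, &store));
        return 0;
    }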