author    | Quincey Koziol <koziol@hdfgroup.org> | 2004-06-14 20:39:08 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2004-06-14 20:39:08 (GMT)
commit    | fabb5167ba3afb1f32784d274df67f8b6195e5ea (patch)
tree      | a8d00af71a6f9fd04127fbb42b930284765e1867
parent    | ef01629bb29800c8837a261b85897570e4c092c1 (diff)
download  | hdf5-fabb5167ba3afb1f32784d274df67f8b6195e5ea.zip
          | hdf5-fabb5167ba3afb1f32784d274df67f8b6195e5ea.tar.gz
          | hdf5-fabb5167ba3afb1f32784d274df67f8b6195e5ea.tar.bz2
[svn-r8686] Purpose:
Code optimization
Description:
Eliminate the memcpy() when using the default DXPL by pointing at the
existing default object instead of copying it.
Platforms tested:
Solaris 2.7 (arabica)
FreeBSD 4.10 (sleipnir) w/parallel
Too minor to require h5committest
-rw-r--r-- | src/H5D.c        |  7
-rw-r--r-- | src/H5Dio.c      | 35
-rw-r--r-- | src/H5Distore.c  | 24
-rw-r--r-- | src/H5Dprivate.h |  2

4 files changed, 39 insertions, 29 deletions
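The sketch below is a minimal, hypothetical illustration of the optimization described in the commit message, not the actual HDF5 code: a simplified `dxpl_cache_t` and `get_dxpl_cache()` stand in for `H5D_dxpl_cache_t` and `H5D_get_dxpl_cache()`. The getter takes a pointer-to-pointer; for the default DXPL it redirects the caller's pointer at a single shared default object instead of memcpy()'ing that object into the caller's buffer, and only fills the buffer when a non-default property list is passed.

```c
#include <stddef.h>

/* Illustrative stand-ins: the real code uses hid_t, H5D_dxpl_cache_t,
 * H5P_DATASET_XFER_DEFAULT, H5D_def_dxpl_cache and H5D_get_dxpl_cache_real(). */
typedef struct {
    int    xfer_mode;       /* e.g. independent vs. collective transfer */
    size_t vlen_alloc;      /* e.g. variable-length allocation block size */
} dxpl_cache_t;

#define DXPL_DEFAULT 0

static dxpl_cache_t def_cache = { 0, 4096 };    /* shared default, set up once */

/* Stand-in for the "real" getter that queries each property individually */
static int fill_cache_from_plist(int dxpl_id, dxpl_cache_t *cache)
{
    (void)dxpl_id;          /* a real implementation would look the plist up */
    cache->xfer_mode  = 1;
    cache->vlen_alloc = 8192;
    return 0;
}

/* New-style getter: on entry *cache points at caller-owned storage.  For the
 * default DXPL the pointer is redirected to the shared default object, so the
 * common fast path pays no memcpy(); otherwise the caller's buffer is filled. */
static int get_dxpl_cache(int dxpl_id, dxpl_cache_t **cache)
{
    if (dxpl_id == DXPL_DEFAULT) {
        *cache = &def_cache;                    /* alias, don't copy */
        return 0;
    }
    return fill_cache_from_plist(dxpl_id, *cache);
}
```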
@@ -3852,18 +3852,19 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
  *-------------------------------------------------------------------------
  */
     if(shrink && H5D_CHUNKED == dset->layout.type) {
-        H5D_dxpl_cache_t dxpl_cache;    /* Cached data transfer properties */
+        H5D_dxpl_cache_t _dxpl_cache;   /* Data transfer property cache buffer */
+        H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */

         /* Fill the DXPL cache values for later use */
         if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")

         /* Remove excess chunks */
-        if(H5D_istore_prune_by_extent(dset->ent.file, &dxpl_cache, dxpl_id, dset) < 0)
+        if(H5D_istore_prune_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks ")

         /* Reset the elements outsize the new dimensions, but in existing chunks */
-        if(H5D_istore_initialize_by_extent(dset->ent.file, &dxpl_cache, dxpl_id, dset) < 0)
+        if(H5D_istore_initialize_by_extent(dset->ent.file, dxpl_cache, dxpl_id, dset) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to initialize chunks ")
     } /* end if */
 } /* end if */
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 389cfef..243b5a9 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -375,11 +375,14 @@ done:
         within the library.
  GLOBAL VARIABLES
  COMMENTS, BUGS, ASSUMPTIONS
+    The CACHE pointer should point at already allocated memory to place
+    non-default property list info.  If a default property list is used, the
+    CACHE pointer will be changed to point at the default information.
  EXAMPLES
  REVISION LOG
 --------------------------------------------------------------------------*/
 herr_t
-H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t *cache)
+H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache)
 {
     herr_t ret_value=SUCCEED;   /* Return value */
@@ -390,9 +393,9 @@ H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t *cache)

     /* Check for the default DXPL */
     if(dxpl_id==H5P_DATASET_XFER_DEFAULT)
-        HDmemcpy(cache,&H5D_def_dxpl_cache,sizeof(H5D_dxpl_cache_t));
+        *cache=&H5D_def_dxpl_cache;
     else
-        if(H5D_get_dxpl_cache_real(dxpl_id,cache)<0)
+        if(H5D_get_dxpl_cache_real(dxpl_id,*cache)<0)
             HGOTO_ERROR (H5E_DATASET, H5E_CANTGET, FAIL, "Can't retrieve DXPL values")

 done:
@@ -647,7 +650,8 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
 #ifdef H5_HAVE_PARALLEL
     hbool_t xfer_mode_changed=FALSE;    /* Whether the transfer mode was changed */
 #endif /*H5_HAVE_PARALLEL*/
-    H5D_dxpl_cache_t dxpl_cache;        /* Data transfer property cache */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     unsigned sconv_flags=0;             /* Flags for the space conversion */
     herr_t ret_value = SUCCEED;         /* Return value */
@@ -672,12 +676,12 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,

 #ifdef H5_HAVE_PARALLEL
     /* Collective access is not permissible without a MPI based VFD */
-    if (dxpl_cache.xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file))
+    if (dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file))
         HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based drivers only")

     /* Set the "parallel I/O possible" flag, for H5S_find(), if we are doing collective I/O */
     /* (Don't set the parallel I/O possible flag for the MPI-posix driver, since it doesn't do real collective I/O) */
-    if (H5S_mpi_opt_types_g && dxpl_cache.xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file))
+    if (H5S_mpi_opt_types_g && dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file))
         sconv_flags |= H5S_CONV_PAR_IO_POSSIBLE;
 #endif /*H5_HAVE_PARALLEL*/
@@ -760,18 +764,18 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
 #ifdef H5_HAVE_PARALLEL
     /* Don't reset the transfer mode if we can't or won't use it */
     if(!use_par_opt_io || !H5T_path_noop(tpath))
-        H5D_io_assist_mpio(dxpl_id, &dxpl_cache, &xfer_mode_changed);
+        H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed);
 #endif /*H5_HAVE_PARALLEL*/

     /* Determine correct I/O routine to invoke */
     if(dataset->layout.type!=H5D_CHUNKED) {
         if(H5D_contig_read(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
-                &dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
+                dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
             HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
     } /* end if */
     else {
         if(H5D_chunk_read(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
-                &dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
+                dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
             HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
     } /* end else */
@@ -851,7 +855,8 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
 #ifdef H5_HAVE_PARALLEL
     hbool_t xfer_mode_changed=FALSE;    /* Whether the transfer mode was changed */
 #endif /*H5_HAVE_PARALLEL*/
-    H5D_dxpl_cache_t dxpl_cache;        /* Data transfer property cache */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     unsigned sconv_flags=0;             /* Flags for the space conversion */
     herr_t ret_value = SUCCEED;         /* Return value */
@@ -896,12 +901,12 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,

 #ifdef H5_HAVE_PARALLEL
     /* Collective access is not permissible without a MPI based VFD */
-    if (dxpl_cache.xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file))
+    if (dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file))
         HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based driver only")

     /* Set the "parallel I/O possible" flag, for H5S_find(), if we are doing collective I/O */
     /* (Don't set the parallel I/O possible flag for the MPI-posix driver, since it doesn't do real collective I/O) */
-    if (H5S_mpi_opt_types_g && dxpl_cache.xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file))
+    if (H5S_mpi_opt_types_g && dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file))
         sconv_flags |= H5S_CONV_PAR_IO_POSSIBLE;
 #endif /*H5_HAVE_PARALLEL*/
@@ -975,18 +980,18 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
 #ifdef H5_HAVE_PARALLEL
     /* Don't reset the transfer mode if we can't or won't use it */
     if(!use_par_opt_io || !H5T_path_noop(tpath))
-        H5D_io_assist_mpio(dxpl_id, &dxpl_cache, &xfer_mode_changed);
+        H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed);
 #endif /*H5_HAVE_PARALLEL*/

     /* Determine correct I/O routine to invoke */
     if(dataset->layout.type!=H5D_CHUNKED) {
         if(H5D_contig_write(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
-                &dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
+                dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
     } /* end if */
     else {
         if(H5D_chunk_write(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
-                &dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
+                dxpl_cache, dxpl_id, src_id, dst_id, buf)<0)
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
     } /* end else */
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 94a9242..ac9283d 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -1094,7 +1094,8 @@ done:
 herr_t
 H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
 {
-    H5D_dxpl_cache_t dxpl_cache;        /* Cached data transfer properties */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);
     int nerrors=0;
     H5D_rdcc_ent_t *ent=NULL, *next=NULL;
@@ -1113,10 +1114,10 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
             ent->dirty = FALSE;
         } /* end if */
         else if ((flags&H5F_FLUSH_INVALIDATE)) {
-            if (H5D_istore_preempt(f, &dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
+            if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
                 nerrors++;
         } else {
-            if (H5D_istore_flush_entry(f, &dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
+            if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
                 nerrors++;
         }
     } /* end for */
@@ -1149,7 +1150,8 @@ done:
 herr_t
 H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
 {
-    H5D_dxpl_cache_t dxpl_cache;        /* Cached data transfer properties */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);
     int nerrors=0;
     H5D_rdcc_ent_t *ent=NULL, *next=NULL;
@@ -1167,7 +1169,7 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
         HDfflush(stderr);
 #endif
         next = ent->next;
-        if (H5D_istore_preempt(f, &dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
+        if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, ent, TRUE )<0)
             nerrors++;
     }
     if (nerrors)
@@ -1959,7 +1961,8 @@ H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
 {
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);    /*raw data chunk cache */
     H5D_rdcc_ent_t *ent;                /*cache entry */
-    H5D_dxpl_cache_t dxpl_cache;        /* Cached data transfer properties */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     H5D_istore_ud1_t udata;
     hsize_t ret_value;                  /* Return value */
@@ -1972,7 +1975,7 @@ H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
     /* Search for cached chunks that haven't been written out */
     for(ent = rdcc->head; ent; ent = ent->next) {
         /* Flush the chunk out to disk, to make certain the size is correct later */
-        if (H5D_istore_flush_entry(f, &dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
+        if (H5D_istore_flush_entry(f, dxpl_cache, dxpl_id, dset, ent, FALSE)<0)
             HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, 0, "cannot flush indexed storage buffer");
     } /* end for */
@@ -2499,7 +2502,7 @@ done:
  *-------------------------------------------------------------------------
  */
 herr_t
-H5D_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
+H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
     hid_t dxpl_id, H5D_t *dset)
 {
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);    /*raw data chunk cache */
@@ -2937,7 +2940,8 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
     H5D_rdcc_t *rdcc = &(dset->cache.chunk);    /*raw data chunk cache */
     H5D_rdcc_ent_t *ent, *next;         /*cache entry */
     H5D_rdcc_ent_t *old_ent;            /* Old cache entry */
-    H5D_dxpl_cache_t dxpl_cache;        /* Cached data transfer properties */
+    H5D_dxpl_cache_t _dxpl_cache;       /* Data transfer property cache buffer */
+    H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache;   /* Data transfer property cache */
     int srank;                          /*current # of dimensions (signed) */
     unsigned rank;                      /*current # of dimensions */
     hsize_t curr_dims[H5O_LAYOUT_NDIMS];    /*current dataspace dimensions */
@@ -2995,7 +2999,7 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
                 next=old_ent->next;

                 /* Remove the old entry from the cache */
-                if (H5D_istore_preempt(f, &dxpl_cache, dxpl_id, dset, old_ent, TRUE )<0)
+                if (H5D_istore_preempt(f, dxpl_cache, dxpl_id, dset, old_ent, TRUE )<0)
                     HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
             } /* end if */
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index fc6ea39..42f5eb5 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -207,7 +207,7 @@ H5_DLL herr_t H5D_xfer_copy(hid_t new_plist_id, hid_t old_plist_id,
                             void *copy_data);
 H5_DLL herr_t H5D_xfer_close(hid_t dxpl_id, void *close_data);
 H5_DLL herr_t H5D_flush(H5F_t *f, hid_t dxpl_id, unsigned flags);
-H5_DLL herr_t H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t *cache);
+H5_DLL herr_t H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache);
 H5_DLL herr_t H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache);

 /* Functions that operate on byte sequences in memory and on disk */
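For reference, the call-site pattern this diff introduces in H5D_read(), H5D_write() and the H5Distore.c routines looks roughly like the sketch below. It is a hedged illustration reusing the simplified `dxpl_cache_t`/`get_dxpl_cache()` stand-ins from the earlier example, with `do_io()` as a made-up caller: a local buffer plus a pointer initialized to it, with every later access going through the pointer, so the code is identical whether the pointer still targets the buffer or was redirected to the shared default.

```c
/* Hypothetical caller mirroring the `_dxpl_cache` / `dxpl_cache` pattern
 * added throughout H5Dio.c and H5Distore.c in this commit. */
static int do_io(int dxpl_id)
{
    dxpl_cache_t  _dxpl_cache;                  /* caller-owned cache buffer */
    dxpl_cache_t *dxpl_cache = &_dxpl_cache;    /* may be redirected to the default */

    if (get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
        return -1;                              /* real code: HGOTO_ERROR(...) */

    /* All later uses go through the pointer, just as the diff rewrites
     * dxpl_cache.xfer_mode to dxpl_cache->xfer_mode. */
    if (dxpl_cache->xfer_mode == 1) {
        /* collective-style branch in the real code */
    }

    return 0;
}
```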