From cd6a20f0c828332cfe01896a43422868ebb1591d Mon Sep 17 00:00:00 2001
From: Quincey Koziol
Date: Mon, 6 Oct 2003 10:17:55 -0500
Subject: [svn-r7552] Purpose:

Bug/feature fix.

Description:
Relax restriction on parallel writing to compact datasets to allow partial
I/O.

Updated reference manual mentioning the issues involved.

Platforms tested:
FreeBSD 4.9 (sleipnir)
too minor to require h5committest
---
 doc/html/RM_H5D.html     |  6 ++++++
 doc/html/RM_H5P.html     | 10 ++++++++++
 release_docs/RELEASE.txt |  1 +
 src/H5Dio.c              |  8 --------
 testpar/t_mdset.c        | 27 ++-------------------------
 5 files changed, 19 insertions(+), 33 deletions(-)

diff --git a/doc/html/RM_H5D.html b/doc/html/RM_H5D.html
index 0d30cc7..17505db 100644
--- a/doc/html/RM_H5D.html
+++ b/doc/html/RM_H5D.html
@@ -1197,6 +1197,12 @@ facilitate moving easily between them.
 H5Pset_fill_time and H5Pset_alloc_time.)
+

+ If a dataset's storage layout is 'compact', care must be taken when
+ writing data to the dataset in parallel. A compact dataset's raw data
+ is cached in memory and may be flushed to the file from any of the
+ parallel processes, so parallel applications should always attempt to
+ write identical data to the dataset from all processes.

Parameters:
diff --git a/doc/html/RM_H5P.html b/doc/html/RM_H5P.html
index e7e377b..cc3cdcb 100644
--- a/doc/html/RM_H5P.html
+++ b/doc/html/RM_H5P.html
@@ -2889,6 +2889,11 @@ facilitate moving easily between them.
 H5Pget_layout returns the layout of the raw data for a dataset.
 This function is only valid for dataset creation property lists.
+

+ Note that a compact storage layout may affect writing data to
+ the dataset with parallel applications. See note in
+ H5Dwrite
+ documentation for details.

Parameters:
@@ -6651,6 +6656,11 @@ fid=H5Fcreate("PointA",H5F_ACC_TRUNC,H5P_DEFAULT,fapl);
Store raw data separately from the object header as chunks of data in separate locations in the file.
+

+ Note that a compact storage layout may affect writing data to
+ the dataset with parallel applications. See note in
+ H5Dwrite
+ documentation for details.

Parameters:
 hid_t plist
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 7a76661..bc7830d 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -56,6 +56,7 @@ Bug Fixes since HDF5-1.6.0 release
 
     Library
     -------
+    - Allow partial parallel writing to compact datasets. QAK - 2003/10/06
     - Correctly create reference to shared datatype in attribute, instead
       of making a copy of the shared datatype in the attribute.
       QAK - 2003/10/01
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 944825f..e03fc7f 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -792,14 +792,6 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
             !(IS_H5FD_MPIO(dataset->ent.file) || IS_H5FD_MPIPOSIX(dataset->ent.file) ||
             IS_H5FD_FPHDF5(dataset->ent.file)))
         HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPIO driver only");
 
-    /* If dataset is compact, collective access is only allowed when file space
-     * selection is H5S_ALL */
-    if(doing_mpio && xfer_mode==H5FD_MPIO_COLLECTIVE
-            && dataset->layout.type==H5D_COMPACT) {
-        if(H5S_get_select_type(file_space) != H5S_SEL_ALL)
-            HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access to compact dataset doesn't support partial access");
-    }
-
     /* Set the "parallel I/O possible" flag, for H5S_find() */
     if (H5S_mpi_opt_types_g && IS_H5FD_MPIO(dataset->ent.file)) {
 	/* Only collective write should call this since it eventually
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 3d40307..5dfeb3b 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -105,16 +105,13 @@ void multiple_dset_write(char *filename, int ndatasets)
 }
 
 /* Example of using PHDF5 to create, write, and read compact dataset.
- * Hyperslab is prohibited for write.
  */
 void compact_dataset(char *filename)
 {
     int i, j, mpi_size, mpi_rank, err_num=0;
     hbool_t use_gpfs = FALSE;
-    hid_t iof, plist, dcpl, dxpl, dataset, memspace, filespace;
-    hssize_t chunk_origin [DIM];
-    hsize_t chunk_dims [DIM], file_dims [DIM];
-    hsize_t count[DIM]={1,1};
+    hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+    hsize_t file_dims [DIM]={SIZE,SIZE};
     double outme [SIZE][SIZE], inme[SIZE][SIZE];
     char dname[]="dataset";
     herr_t ret;
@@ -127,11 +124,7 @@ void compact_dataset(char *filename)
     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
     iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
 
-    /* decide the hyperslab according to process number. */
-    get_slab(chunk_origin, chunk_dims, count, file_dims);
-
     /* Define data space */
-    memspace = H5Screate_simple (DIM, chunk_dims, NULL);
     filespace = H5Screate_simple (DIM, file_dims, NULL);
 
     /* Create a compact dataset */
@@ -145,27 +138,12 @@ void compact_dataset(char *filename)
     dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl);
     VRFY((dataset >= 0), "H5Dcreate succeeded");
 
-    /* Define hyperslab */
-    ret = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
-    VRFY((ret>=0), "mdata hyperslab selection");
-
     /* set up the collective transfer properties list */
     dxpl = H5Pcreate (H5P_DATASET_XFER);
     VRFY((dxpl >= 0), "");
     ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
 
-    /* calculate data to write */
-    for (i = 0; i < SIZE; i++)
-        for (j = 0; j < SIZE; j++)
-            outme [i][j] = (i+j)*1000 + mpi_rank;
-
-    /* Test hyperslab writing. Supposed to fail */
-    H5E_BEGIN_TRY {
-        ret=H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl, outme);
-    } H5E_END_TRY;
-    VRFY((ret < 0), "H5Dwrite hyperslab write failed as expected");
-
     /* Recalculate data to write. Each process writes the same data. */
     for (i = 0; i < SIZE; i++)
         for (j = 0; j < SIZE; j++)
@@ -178,7 +156,6 @@ void compact_dataset(char *filename)
     H5Pclose (plist);
     H5Dclose (dataset);
     H5Sclose (filespace);
-    H5Sclose (memspace);
     H5Fclose (iof);
 
     /* Open the file and dataset, read and compare the data. */
-- cgit v0.12
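
For readers of this patch, the short PHDF5 program below sketches the usage pattern the
updated H5Dwrite note describes: each process opens the file with the MPI-IO file driver,
creates a dataset with a compact storage layout, and collectively writes identical data.
It is an illustrative sketch, not part of the commit; the file name "compact_example.h5",
the SIZE constant, and the data values are assumptions, while the HDF5 calls mirror the
1.6-era API used in testpar/t_mdset.c (H5Pset_fapl_mpio, H5Pset_layout, H5Pset_dxpl_mpio,
the five-argument H5Dcreate).

#include <mpi.h>
#include <hdf5.h>

#define SIZE 32          /* 32x32 doubles = 8 KB, well under the compact-storage limit */

int
main(int argc, char *argv[])
{
    hid_t   fapl, file, space, dcpl, dxpl, dset;
    hsize_t dims[2] = {SIZE, SIZE};
    double  data[SIZE][SIZE];
    int     i, j;

    MPI_Init(&argc, &argv);

    /* Access the file in parallel through the MPI-IO driver */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fcreate("compact_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* Create a dataset whose raw data uses the compact layout */
    space = H5Screate_simple(2, dims, NULL);
    dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_layout(dcpl, H5D_COMPACT);
    dset  = H5Dcreate(file, "dataset", H5T_NATIVE_DOUBLE, space, dcpl);

    /* Collective transfer property list */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* Every process prepares the SAME values: the compact raw data is cached
     * in memory and may be flushed to the file from any process, so writing
     * rank-dependent values would leave the file contents undefined. */
    for (i = 0; i < SIZE; i++)
        for (j = 0; j < SIZE; j++)
            data[i][j] = (i + j) * 1000;

    /* Collective write of the whole dataset from every process */
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, data);

    H5Dclose(dset);
    H5Sclose(space);
    H5Pclose(dcpl);
    H5Pclose(dxpl);
    H5Pclose(fapl);
    H5Fclose(file);
    MPI_Finalize();
    return 0;
}

With the restriction removed from src/H5Dio.c, a partial (hyperslab) selection would also be
accepted in the H5Dwrite call above, but because the compact raw data may be flushed from any
process's cache, the documentation added in this commit still advises that all processes
present identical data.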