diff options
author | Raymond Lu <songyulu@hdfgroup.org> | 2004-05-17 19:59:04 (GMT) |
---|---|---|
committer | Raymond Lu <songyulu@hdfgroup.org> | 2004-05-17 19:59:04 (GMT) |
commit | 6123fcd9475ce2812da52541402f4d98bf972864 (patch) | |
tree | 69d04504f60baf7b61f31885c856cefa3b858778 /testpar | |
parent | c56eb7f4a4e0ae5b41995850a4bcdb09adf73484 (diff) | |
download | hdf5-6123fcd9475ce2812da52541402f4d98bf972864.zip hdf5-6123fcd9475ce2812da52541402f4d98bf972864.tar.gz hdf5-6123fcd9475ce2812da52541402f4d98bf972864.tar.bz2 |
[svn-r8533] Purpose: New test.
Description: Test dataset and attribute of null dataspace for parallel.
Platforms tested: copper and verbena (only parallel is concerned)
Diffstat (limited to 'testpar')
-rw-r--r-- | testpar/t_mdset.c | 98 | ||||
-rw-r--r-- | testpar/testphdf5.c | 31 | ||||
-rw-r--r-- | testpar/testphdf5.h | 1 |
3 files changed, 121 insertions, 9 deletions
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index 953dab1..f04be6d 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -111,7 +111,8 @@ void multiple_dset_write(void) H5Fclose (iof); } -/* Example of using PHDF5 to create, write, and read compact dataset. +/* + * Example of using PHDF5 to create, write, and read compact dataset. */ void compact_dataset(void) { @@ -197,6 +198,101 @@ void compact_dataset(void) H5Fclose(iof); } +/* + * Example of using PHDF5 to create, write, and read dataset and attribute of Null dataspace. + */ +void null_dataset(void) +{ + int mpi_size, mpi_rank; + hbool_t use_gpfs = FALSE; + hid_t iof, plist, dxpl, dataset, attr, sid; + unsigned uval=2; /* Buffer for writing to dataset */ + int val=1; /* Buffer for writing to attribute */ + int nelem; + char dname[]="dataset"; + char attr_name[]="attribute"; + herr_t ret; + char *filename; + + MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size (MPI_COMM_WORLD, &mpi_size); + + filename = (char *) GetTestParameters(); + VRFY((mpi_size <= SIZE), "mpi_size <= SIZE"); + + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); + iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + + /* Define data space */ + sid = H5Screate(H5S_NULL); + + /* Check that the null dataspace actually has 0 elements */ + nelem = H5Sget_simple_extent_npoints(sid); + VRFY((nelem== 0), "H5Sget_simple_extent_npoints"); + + /* Create a compact dataset */ + dataset = H5Dcreate (iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate (H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + + /* Write "nothing" to the dataset (with type conversion) */ + ret=H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* 
Create an attribute for the group */ + attr=H5Acreate(dataset,attr_name,H5T_NATIVE_UINT,sid,H5P_DEFAULT); + VRFY((attr>=0), "H5Acreate"); + + /* Write "nothing" to the attribute (with type conversion) */ + ret = H5Awrite(attr, H5T_NATIVE_INT, &val); + VRFY((ret>=0), "H5Awrite"); + + H5Aclose (attr); + H5Dclose (dataset); + H5Pclose (plist); + H5Sclose (sid); + H5Fclose (iof); + + /* Open the file and dataset, read and compare the data. */ + plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs); + iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((iof >= 0), "H5Fopen succeeded"); + + /* set up the collective transfer properties list */ + dxpl = H5Pcreate (H5P_DATASET_XFER); + VRFY((dxpl >= 0), ""); + ret=H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + + dataset = H5Dopen(iof, dname); + VRFY((dataset >= 0), "H5Dcreate succeeded"); + + /* Try reading from the dataset (make certain our buffer is unmodified) */ + ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval); + VRFY((ret>=0), "H5Dread"); + VRFY((uval==2), "H5Dread"); + + /* Open the attribute for the dataset */ + attr=H5Aopen_name(dataset,attr_name); + VRFY((attr>=0), "H5Aopen_name"); + + /* Try reading from the attribute (make certain our buffer is unmodified) */ + ret = H5Aread(attr, H5T_NATIVE_INT, &val); + VRFY((ret>=0), "H5Aread"); + VRFY((val==1), "H5Aread"); + + H5Pclose(plist); + H5Pclose(dxpl); + H5Aclose (attr); + H5Dclose(dataset); + H5Fclose(iof); +} + /* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB) * Actual data is _not_ written to these datasets. 
Dataspaces are exact * sizes (2GB, 4GB, etc.), but the metadata for the file pushes the file over diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 45b1ca7..990095e 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -42,22 +42,24 @@ void *old_client_data; /* previous error handler arg.*/ int doread=1; /* read test */ int dowrite=1; /* write test */ int docompact=1; /* compact dataset test */ +int donull=1; /* null dataset test */ int doindependent=1; /* independent test */ unsigned dobig=0; /* "big" dataset tests */ /* FILENAME and filenames must have the same number of names */ -const char *FILENAME[10]={ +const char *FILENAME[11]={ "ParaEg1", "ParaEg2", "ParaEg3", "ParaMdset", "ParaMgroup", "ParaCompact", + "ParaNull", "ParaIndividual", "ParaBig", "ParaFill", NULL}; -char filenames[10][PATH_MAX]; +char filenames[11][PATH_MAX]; hid_t fapl; /* file access property list */ #ifdef USE_PAUSE @@ -126,6 +128,7 @@ usage(void) printf("\t-n<n_groups>" "\tset number of groups for the multiple group test\n"); printf("\t-o\t\tno compact dataset test\n"); + printf("\t-e\t\tno null dataset test\n"); printf("\t-i\t\tno independent read test\n"); printf("\t-b\t\trun big dataset test\n"); printf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n"); @@ -178,6 +181,8 @@ parse_options(int argc, char **argv) break; case 'o': docompact = 0; break; + case 'e': donull = 0; + break; case 'i': doindependent = 0; break; case 'b': dobig = 1; @@ -443,6 +448,8 @@ int main(int argc, char **argv) AddTest("compact", compact_dataset, NULL, "compact dataset test", filenames[5]); + AddTest("null", null_dataset, NULL, + "null dataset test", filenames[6]); collngroups_params.name = filenames[6]; collngroups_params.count = ngroups; @@ -550,13 +557,21 @@ int main(int argc, char **argv) else { MPI_BANNER("compact dataset test skipped"); } - + + if (donull){ + MPI_BANNER("null dataset test..."); + null_dataset(filenames[6]); + } + else { + MPI_BANNER("null dataset test skipped"); + } 
+ if (doindependent){ MPI_BANNER("collective group and dataset write ..."); - collective_group_write(filenames[6], ngroups); + collective_group_write(filenames[7], ngroups); if (doread) { MPI_BANNER("independent group and dataset read ..."); - independent_group_read(filenames[6], ngroups); + independent_group_read(filenames[7], ngroups); } } else{ @@ -565,17 +580,17 @@ int main(int argc, char **argv) if (dobig && sizeof(MPI_Offset)>4){ MPI_BANNER("big dataset test..."); - big_dataset(filenames[7]); + big_dataset(filenames[8]); } else { MPI_BANNER("big dataset test skipped"); } MPI_BANNER("dataset fill value test..."); - dataset_fillvalue(filenames[8]); + dataset_fillvalue(filenames[9]); #endif - if (!(dowrite || doread || ndatasets || ngroups || docompact || doindependent || dobig )){ + if (!(dowrite || doread || ndatasets || ngroups || docompact || donull || doindependent || dobig )){ usage(); nerrors++; } diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index f757e7c..968ee28 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -159,6 +159,7 @@ void dataset_readAll(void); void extend_readInd(void); void extend_readAll(void); void compact_dataset(void); +void null_dataset(void); void big_dataset(void); void dataset_fillvalue(void); |