author     Quincey Koziol <koziol@hdfgroup.org>   2003-09-16 17:33:00 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>   2003-09-16 17:33:00 (GMT)
commit     d7bde16f45fac765f45172d88a1a9cd44a1f95fa (patch)
tree       be73c954ea0220b752cfbb49597e35954aa47cd9 /testpar/t_mdset.c
parent     bf1c2f0e8bf9788c2e47a1b1ac963cc321afab0e (diff)
[svn-r7480] Purpose:
Bug fix
Description:
The MPI_File_set_size() routine on ASCI Red cannot extend files beyond 2GB.
Solution:
Add an extra macro that controls whether MPI_File_set_size() can handle
>2GB offsets, or whether our "older" method of reading a byte and then writing
a byte at the appropriate offset should be used instead.
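
For illustration only (this is not part of the patch), a minimal sketch of the
macro-controlled logic the Solution describes. The macro name
H5_MPI_FILE_SET_SIZE_BIG and the helper extend_file() are placeholders, not
necessarily the names the library uses; the real change lives in the MPI-IO
virtual file driver.

#include <mpi.h>

/* Sketch: extend a parallel file to new_size bytes.
 * The macro (assumed name) selects between the two strategies. */
static int extend_file(MPI_File fh, MPI_Offset new_size)
{
#ifdef H5_MPI_FILE_SET_SIZE_BIG
    /* The MPI-IO implementation can handle >2GB offsets directly */
    return MPI_File_set_size(fh, new_size);
#else
    /* "Older" fallback: read a byte, then write a byte at the last offset
     * so the file system allocates out to the requested size. */
    char byte = 0;
    MPI_Status status;

    MPI_File_read_at(fh, new_size - 1, &byte, 1, MPI_BYTE, &status);
    return MPI_File_write_at(fh, new_size - 1, &byte, 1, MPI_BYTE, &status);
#endif
}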
Platforms tested:
FreeBSD 4.9 (sleipnir)
h5committest
Diffstat (limited to 'testpar/t_mdset.c')
-rw-r--r--   testpar/t_mdset.c   118
1 file changed, 118 insertions, 0 deletions
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index b8bf4b6..3d40307 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -211,6 +211,124 @@ void compact_dataset(char *filename)
     H5Fclose(iof);
 }
 
+/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB)
+ * Actual data is _not_ written to these datasets.  Dataspaces are exact
+ * sizes (2GB, 4GB, etc.), but the metadata for the file pushes the file over
+ * the boundary of interest.
+ */
+void big_dataset(const char *filename)
+{
+    int mpi_size, mpi_rank;     /* MPI info */
+    hbool_t use_gpfs = FALSE;   /* Don't use GPFS stuff for this test */
+    hid_t iof,                  /* File ID */
+        fapl,                   /* File access property list ID */
+        dataset,                /* Dataset ID */
+        filespace;              /* Dataset's dataspace ID */
+    hsize_t file_dims [4];      /* Dimensions of dataspace */
+    char dname[]="dataset";     /* Name of dataset */
+    MPI_Offset file_size;       /* Size of file on disk */
+    herr_t ret;                 /* Generic return value */
+
+    MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
+    MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);
+
+    VRFY((mpi_size <= SIZE), "mpi_size <= SIZE");
+
+    fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
+    VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+    /*
+     * Create >2GB HDF5 file
+     */
+    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    VRFY((iof >= 0), "H5Fcreate succeeded");
+
+    /* Define dataspace for 2GB dataspace */
+    file_dims[0]= 2;
+    file_dims[1]= 1024;
+    file_dims[2]= 1024;
+    file_dims[3]= 1024;
+    filespace = H5Screate_simple (4, file_dims, NULL);
+    VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+    dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
+    VRFY((dataset >= 0), "H5Dcreate succeeded");
+
+    /* Close all file objects */
+    ret=H5Dclose (dataset);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+    ret=H5Sclose (filespace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret=H5Fclose (iof);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+
+    /* Check that file of the correct size was created */
+    file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
+    VRFY((file_size == 2147485696ULL), "File is correct size");
+
+    /*
+     * Create >4GB HDF5 file
+     */
+    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    VRFY((iof >= 0), "H5Fcreate succeeded");
+
+    /* Define dataspace for 4GB dataspace */
+    file_dims[0]= 4;
+    file_dims[1]= 1024;
+    file_dims[2]= 1024;
+    file_dims[3]= 1024;
+    filespace = H5Screate_simple (4, file_dims, NULL);
+    VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+    dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
+    VRFY((dataset >= 0), "H5Dcreate succeeded");
+
+    /* Close all file objects */
+    ret=H5Dclose (dataset);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+    ret=H5Sclose (filespace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret=H5Fclose (iof);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+
+    /* Check that file of the correct size was created */
+    file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
+    VRFY((file_size == 4294969344ULL), "File is correct size");
+
+    /*
+     * Create >8GB HDF5 file
+     */
+    iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    VRFY((iof >= 0), "H5Fcreate succeeded");
+
+    /* Define dataspace for 8GB dataspace */
+    file_dims[0]= 8;
+    file_dims[1]= 1024;
+    file_dims[2]= 1024;
+    file_dims[3]= 1024;
+    filespace = H5Screate_simple (4, file_dims, NULL);
+    VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+    dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
+    VRFY((dataset >= 0), "H5Dcreate succeeded");
+
+    /* Close all file objects */
+    ret=H5Dclose (dataset);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+    ret=H5Sclose (filespace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret=H5Fclose (iof);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+
+    /* Check that file of the correct size was created */
+    file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
+    VRFY((file_size == 8589936640ULL), "File is correct size");
+
+    /* Close fapl */
+    ret=H5Pclose (fapl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
 /* Write multiple groups with a chunked dataset in each group collectively.
  * These groups and datasets are for testing independent read later.
  */
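
A note on the expected sizes the test verifies: each file holds the raw
dataset payload (2, 4, or 8 GiB of unsigned chars, allocated but never
written) plus what the checks imply is 2048 bytes of HDF5 file metadata
(2147485696 - 2^31 = 2048, and likewise for the 4GB and 8GB cases). A tiny
standalone check of that arithmetic, not part of the patch:

#include <stdio.h>

int main(void)
{
    /* One GiB in bytes */
    unsigned long long gib = 1024ULL * 1024ULL * 1024ULL;
    /* Apparent metadata overhead implied by the VRFY checks above */
    unsigned long long overhead = 2048ULL;

    printf("%llu\n", 2ULL * gib + overhead);  /* 2147485696 */
    printf("%llu\n", 4ULL * gib + overhead);  /* 4294969344 */
    printf("%llu\n", 8ULL * gib + overhead);  /* 8589936640 */
    return 0;
}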