path: root/testpar/t_mdset.c
author    Raymond Lu <songyulu@hdfgroup.org>    2003-02-18 21:50:42 (GMT)
committer Raymond Lu <songyulu@hdfgroup.org>    2003-02-18 21:50:42 (GMT)
commit    941509ab25e444f4e66f0f9d7e355e1ecef9b245 (patch)
tree      f9036787cf306f868242a2e22cf0839d8588f86f /testpar/t_mdset.c
parent    e384b7fa03c02475f23f1d4cd67011cd45de32d1 (diff)
[svn-r6419]
Purpose: More tests. Description: Test independent reads of groups and chunked datasets. Solution: This test is similar to the multiple-group test, so it is simply added to testphdf5.c,h. Platforms tested: modi4, eirene.
Diffstat (limited to 'testpar/t_mdset.c')
-rw-r--r-- testpar/t_mdset.c | 147
1 file changed, 147 insertions(+), 0 deletions(-)
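
Note that this diff covers only testpar/t_mdset.c; the matching declarations and calls in testphdf5.c,h mentioned in the commit message are not shown here. A rough, hypothetical sketch of that wiring (the dowrite/doread flags, banner strings, and the filenames[] index are assumptions, not taken from this commit):

    /* testphdf5.h (sketch): declare the new test routines */
    void collective_group_write(char *filename, int ngroups);
    void independent_group_read(char *filename, int ngroups);

    /* testphdf5.c (sketch): write the groups collectively, then read them
     * back independently */
    if (dowrite) {
        MPI_BANNER("collective group and chunked dataset write ...");
        collective_group_write(filenames[5], ngroups);
    }
    if (doread) {
        MPI_BANNER("independent group and chunked dataset read ...");
        independent_group_read(filenames[5], ngroups);
    }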
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index dae0e0f..e3ef984 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -12,6 +12,7 @@ void write_dataset(hid_t, hid_t, hid_t);
int read_dataset(hid_t, hid_t, hid_t);
void create_group_recursive(hid_t, hid_t, hid_t, int);
void recursive_read_group(hid_t, hid_t, hid_t, int);
+void group_dataset_read(hid_t fid, int mpi_rank, int mpi_size, int m);
void write_attribute(hid_t, int, int);
int read_attribute(hid_t, int, int);
int check_value(DATATYPE *, DATATYPE *);
@@ -201,6 +202,152 @@ void compact_dataset(char *filename)
H5Fclose(iof);
}
+/* Collectively write multiple groups, each containing one chunked dataset.
+ * These groups and datasets are used later to test independent reads.
+ */
+void collective_group_write(char *filename, int ngroups)
+{
+ int mpi_rank, mpi_size;
+ int i, j, m;
+ char gname[64], dname[32];
+ hid_t fid, gid, did, plist, dcpl, memspace, filespace;
+ DATATYPE outme[SIZE][SIZE];
+ hssize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ const hsize_t chunk_size[2] = {SIZE/2, SIZE/2}; /* Chunk dimensions */
+ herr_t ret1, ret2;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ H5Pclose(plist);
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims);
+
+ /* select hyperslabs in the memory and file dataspaces. The two selections
+ * are identical since the memory and file dataspaces have the same shape. */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
+ chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
+ chunk_dims, count, chunk_dims);
+ VRFY((memspace>=0), "memspace");
+ VRFY((filespace>=0), "filespace");
+ VRFY((ret1>=0), "mgroup memspace selection");
+ VRFY((ret2>=0), "mgroup filespace selection");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl>=0), "dataset creation property");
+ ret1 = H5Pset_chunk (dcpl, 2, chunk_size);
+ VRFY((ret1>=0), "set chunk for dataset creation property");
+
+ /* create ngroups groups under the root group and write one chunked
+ * dataset in each group in parallel. */
+ for(m = 0; m < ngroups; m++) {
+ sprintf(gname, "group%d", m);
+ gid = H5Gcreate(fid, gname, 0);
+ VRFY((gid > 0), gname);
+
+ sprintf(dname, "dataset%d", m);
+ did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace, dcpl);
+ VRFY((did > 0), dname);
+
+ for(i=0; i < SIZE; i++)
+ for(j=0; j < SIZE; j++)
+ outme[i][j] = (i+j)*1000 + mpi_rank;
+
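+ /* Note that H5P_DEFAULT is used as the transfer property list below, so the
+ * raw-data write itself uses the default (independent) transfer mode; the
+ * collective part of this test is that every process creates the same
+ * groups and datasets. */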
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
+ outme);
+
+ H5Dclose(did);
+ H5Gclose(gid);
+
+#ifdef BARRIER_CHECKS
+ if(! ((m+1) % 10)) {
+ printf("created %d groups\n", m+1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ H5Pclose(dcpl);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Fclose(fid);
+}
+
+/* Let two sets of processes open and read different groups and chunked
+ * datasets independently.
+ */
+void independent_group_read(char *filename, int ngroups)
+{
+ int mpi_rank, mpi_size, m;
+ hid_t plist, fid;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ H5Pclose(plist);
+
+ /* open groups and read datasets. Even-rank processes read groups from
+ * the end of the list; odd-rank processes read even-numbered groups from
+ * the beginning. */
+ if(mpi_rank%2==0) {
+ for(m=ngroups-1; m>=0; m-=2)
+ group_dataset_read(fid, mpi_rank, mpi_size, m);
+ } else {
+ for(m=0; m<ngroups; m+=2)
+ group_dataset_read(fid, mpi_rank, mpi_size, m);
+ }
+
+ H5Fclose(fid);
+}
+
+/* Open and read datasets and compare data */
+void group_dataset_read(hid_t fid, int mpi_rank, int mpi_size, int m)
+{
+ int ret, i, j;
+ char gname[64], dname[32];
+ hid_t gid, did;
+ DATATYPE *outdata, *indata;
+
+ indata = (DATATYPE*)malloc(SIZE*SIZE*sizeof(DATATYPE));
+ outdata = (DATATYPE*)malloc(SIZE*SIZE*sizeof(DATATYPE));
+
+ /* open the m-th group under the root group. */
+ sprintf(gname, "group%d", m);
+ gid = H5Gopen(fid, gname);
+ VRFY((gid > 0), gname);
+
+ /* open and read the dataset in the group. */
+ sprintf(dname, "dataset%d", m);
+ did = H5Dopen(gid, dname);
+ VRFY((did>0), dname);
+
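+ /* every process reads the whole dataset independently (H5S_ALL for both
+ * the memory and file dataspaces, default transfer mode). */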
+ H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
+
+ /* regenerate the values this process originally wrote */
+ for(i=0; i<SIZE; i++)
+ for(j=0; j<SIZE; j++)
+ outdata[i*SIZE + j] = (i+j)*1000 + mpi_rank;
+
+ /* compare the original values (outdata) to the values read from the file (indata). */
+ ret = check_value(indata, outdata);
+ VRFY((ret==0), "check the data");
+
+ H5Dclose(did);
+ H5Gclose(gid);
+
+ free(indata);
+ free(outdata);
+}
+
/*
* Example of using PHDF5 to create multiple groups. Under the root group,
* it creates ngroups groups. Under the first group just created, it creates