summaryrefslogtreecommitdiffstats
path: root/testpar
diff options
context:
space:
mode:
authorMohamad Chaarawi <chaarawi@hdfgroup.org>2014-07-10 21:32:20 (GMT)
committerMohamad Chaarawi <chaarawi@hdfgroup.org>2014-07-10 21:32:20 (GMT)
commit515aa0b93094e1a78911f792041a8e1dba912ada (patch)
treea19bb6d8d9f28dad6a703999c2cdbd4a696cffcb /testpar
parent2700d20859e67995145677af5e3c627c19bd87b4 (diff)
downloadhdf5-515aa0b93094e1a78911f792041a8e1dba912ada.zip
hdf5-515aa0b93094e1a78911f792041a8e1dba912ada.tar.gz
hdf5-515aa0b93094e1a78911f792041a8e1dba912ada.tar.bz2
[svn-r25404] add serial and parallel regression tests for zero dset read/write bug.
(merged from trunk)
Diffstat (limited to 'testpar')
-rw-r--r--testpar/t_mdset.c57
-rw-r--r--testpar/testphdf5.c3
-rw-r--r--testpar/testphdf5.h1
3 files changed, 61 insertions, 0 deletions
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index fa1a980..f294b93 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -74,6 +74,63 @@ get_size(void)
} /* get_size() */
/*
+ * Example of using PHDF5 to create a zero sized dataset.
+ *
+ */
+/* Regression test: collective write/read of a chunked dataset whose only
+ * dimension has extent 0 (zero elements selected on every rank). */
+void zero_dim_dset(void)
+{
+ int mpi_size, mpi_rank;
+ const char *filename;
+ hid_t fid, plist, dcpl, dsid, sid;
+ hsize_t dim, chunk_dim;
+ herr_t ret;
+ int data[1];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((plist>=0), "create_faccess_plist succeeded");
+
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid>=0), "H5Fcreate succeeded");
+ ret = H5Pclose(plist);
+ VRFY((ret>=0), "H5Pclose succeeded");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl>=0), "failed H5Pcreate");
+
+ /* Set 1 chunk size */
+ chunk_dim = 1;
+ ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
+ VRFY((ret>=0), "failed H5Pset_chunk");
+
+ /* Create 1D dataspace with 0 dim size */
+ dim = 0;
+ sid = H5Screate_simple(1, &dim, NULL);
+ VRFY((sid>=0), "failed H5Screate_simple");
+
+ /* Create chunked dataset */
+ dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dsid>=0), "failed H5Dcreate2");
+
+ /* Write 0 elements to the dataset (data buffer contents are irrelevant
+ * since the zero-sized dataspace selects no elements) */
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
+ VRFY((ret>=0), "failed H5Dwrite");
+
+ /* Read 0 elements from dataset */
+ ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
+ VRFY((ret>=0), "failed H5Dread");
+
+ /* NOTE(review): unlike every call above, the close calls below are not
+ * VRFY-checked — consider checking their return values for consistency */
+ H5Pclose(dcpl);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+}
+
+/*
* Example of using PHDF5 to create ndatasets datasets. Each process write
* a slab of array to the file.
*
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index d91c24c..713d6e1 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -377,6 +377,9 @@ int main(int argc, char **argv)
"compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */
+ AddTest("zerodsetr", zero_dim_dset, NULL,
+ "zero dim dset", PARATESTFILE);
+
ndsets_params.name = PARATESTFILE;
ndsets_params.count = ndatasets;
AddTest("ndsetw", multiple_dset_write, NULL,
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 100fa41..efd3424 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -236,6 +236,7 @@ extern int facc_type; /*Test file access type */
extern int dxfer_coll_type;
/* Test program prototypes */
+void zero_dim_dset(void);
void multiple_dset_write(void);
void multiple_group_write(void);
void multiple_group_read(void);