-rw-r--r--  config/cmake_ext_mod/runTest.cmake | 23
-rw-r--r--  hl/test/gen_test_ds.c              | 12
-rw-r--r--  hl/test/test_ds.c                  | 36
-rw-r--r--  hl/test/test_file_image.c          | 6
-rw-r--r--  hl/test/test_image.c               | 34
-rw-r--r--  hl/test/test_packet_vlen.c         | 78
-rw-r--r--  test/atomic_reader.c               | 30
-rw-r--r--  test/atomic_writer.c               | 10
-rw-r--r--  test/cmpd_dset.c                   | 2
-rw-r--r--  test/istore.c                      | 2
-rw-r--r--  test/tattr.c                       | 29
-rw-r--r--  test/testframe.c                   | 202
-rw-r--r--  test/testhdf5.h                    | 90
-rw-r--r--  test/testmeta.c                    | 110
-rw-r--r--  test/tfile.c                       | 5
-rw-r--r--  test/th5s.c                        | 36
-rw-r--r--  test/trefer.c                      | 4
-rw-r--r--  test/tsohm.c                       | 12
-rw-r--r--  testpar/t_bigio.c                  | 528
-rw-r--r--  testpar/t_cache.c                  | 38
-rw-r--r--  testpar/t_cache_image.c            | 70
-rw-r--r--  testpar/t_coll_chunk.c             | 30
-rw-r--r--  testpar/t_file.c                   | 101
-rw-r--r--  testpar/t_filter_read.c            | 66
-rw-r--r--  testpar/t_mdset.c                  | 976
-rw-r--r--  testpar/t_mpi.c                    | 192
-rw-r--r--  testpar/t_ph5basic.c               | 12
-rw-r--r--  testpar/t_prestart.c               | 2
-rw-r--r--  testpar/t_prop.c                   | 2
-rw-r--r--  testpar/t_shapesame.c              | 54
-rw-r--r--  testpar/testpar.h                  | 24
-rw-r--r--  testpar/testphdf5.h                | 58
-rw-r--r--  tools/src/h5import/h5import.c      | 2
-rw-r--r--  tools/test/h5repack/h5repacktst.c  | 2
-rw-r--r--  tools/test/misc/h5perf_gentest.c   | 666
-rw-r--r--  tools/test/misc/talign.c           | 16
36 files changed, 1777 insertions, 1783 deletions
diff --git a/config/cmake_ext_mod/runTest.cmake b/config/cmake_ext_mod/runTest.cmake
index a904855..21c8c18 100644
--- a/config/cmake_ext_mod/runTest.cmake
+++ b/config/cmake_ext_mod/runTest.cmake
@@ -125,12 +125,31 @@ message (STATUS "COMMAND Error: ${TEST_ERROR}")
# remove special output
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
-string (FIND TEST_STREAM "_pmi_alps" "${TEST_FIND_RESULT}")
-if (TEST_FIND_RESULT GREATER 0)
+string (FIND "${TEST_STREAM}" "_pmi_alps" TEST_FIND_RESULT)
+if (TEST_FIND_RESULT GREATER -1)
string (REGEX REPLACE "^.*_pmi_alps[^\n]+\n" "" TEST_STREAM "${TEST_STREAM}")
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} ${TEST_STREAM})
endif ()
+# remove special error output
+if (NOT TEST_ERRREF)
+ # the error stack has been appended to the output file
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
+else ()
+ # the error stack remains in the .err file
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
+endif ()
+string (FIND "${TEST_STREAM}" "no version information available" TEST_FIND_RESULT)
+if (TEST_FIND_RESULT GREATER -1)
+ string (REGEX REPLACE "^.*no version information available[^\n]+\n" "" TEST_STREAM "${TEST_STREAM}")
+ # write back the changes to the original files
+ if (NOT TEST_ERRREF)
+ file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
+ else ()
+ file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT}.err "${TEST_STREAM}")
+ endif ()
+endif ()
+
# if the output file needs Storage text removed
if (TEST_MASK)
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
diff --git a/hl/test/gen_test_ds.c b/hl/test/gen_test_ds.c
index 273e393..285ab77 100644
--- a/hl/test/gen_test_ds.c
+++ b/hl/test/gen_test_ds.c
@@ -79,12 +79,12 @@ int main(int argc , char **argv)
if (argc < 2) {
- printf("Usage: gen_test [le | be]\n");
+ HDprintf("Usage: gen_test [le | be]\n");
return 1;
}
if ( argv[1] && (strcmp("le",argv[1])!=0) && (strcmp("be",argv[1])!=0) ) {
- printf("Usage: gen_test [le | be]\n");
+ HDprintf("Usage: gen_test [le | be]\n");
return 1;
}
@@ -103,11 +103,11 @@ int main(int argc , char **argv)
nerrors += test_long_scalenames(filename) < 0 ? 1 : 0;
if(nerrors) goto error;
- printf("Dimension scales file generation passed.\n");
+ HDprintf("Dimension scales file generation passed.\n");
return 0;
error:
- printf("***** %d DIMENSION SCALES FILE GENERATION FAILED! *****\n",nerrors);
+ HDprintf("***** %d DIMENSION SCALES FILE GENERATION FAILED! *****\n",nerrors);
return 1;
}
@@ -188,11 +188,11 @@ herr_t test_attach_scale(hid_t fid, hid_t did, const char *name, unsigned int id
if(H5DSis_attached(did, dsid, idx) == 0) {
if(H5DSattach_scale(did, dsid, idx) >= 0) {
if(H5DSis_attached(did, dsid, idx) > 0) {
- /* printf(" scale attached "); */
+ /* HDprintf(" scale attached "); */
ret_value = SUCCEED;
}
else if(H5DSis_attached(did, dsid, idx) == 0) {
- printf(" scale not attached ");
+ HDprintf(" scale not attached ");
}
}
}
diff --git a/hl/test/test_ds.c b/hl/test/test_ds.c
index 9389a0d..79e6f45 100644
--- a/hl/test/test_ds.c
+++ b/hl/test/test_ds.c
@@ -1049,7 +1049,7 @@ herr_t test_attach_scale(hid_t fid, hid_t did, const char *name, unsigned int id
ret_value = SUCCEED;
}
else if(H5DSis_attached(did, dsid, idx) == 0) {
- printf(" scale not attached ");
+ HDprintf(" scale not attached ");
}
}
}
@@ -1153,14 +1153,14 @@ static int test_detachscales(void)
/* make datasets; they are three dimensional*/
for (i=0; i < 2; i++) {
- sprintf(dname,"D%d", i);
+ HDsprintf(dname,"D%d", i);
if(H5LTmake_dataset_int(fid, dname, rank3, dims, buf) < 0)
goto out;
}
/* create datasets and make them dim. scales */
for (i=0; i < 4; i++) {
- sprintf(dname, "DS%d", i);
+ HDsprintf(dname, "DS%d", i);
if(H5LTmake_dataset_int(fid, dname, rank1, dims, buf) < 0)
goto out;
}
@@ -2255,7 +2255,7 @@ static int test_simple(void)
char snames[3];
int i, j;
- printf("Testing API functions\n");
+ HDprintf("Testing API functions\n");
/*-------------------------------------------------------------------------
* create a file for the test
@@ -2919,7 +2919,7 @@ static int test_simple(void)
if((did = H5Dopen2(gid, dname, H5P_DEFAULT)) < 0)
goto out;
for(j = 0; j < 5; j++) {
- sprintf(sname, "ds_%d", j);
+ HDsprintf(sname, "ds_%d", j);
if((dsid = H5Dopen2(gid, sname, H5P_DEFAULT)) < 0)
goto out;
if(H5DSdetach_scale(did, dsid, DIM0) < 0)
@@ -2942,7 +2942,7 @@ static int test_simple(void)
if((did = H5Dopen2(gid,dname, H5P_DEFAULT)) < 0)
goto out;
for(j=0; j<5; j++) {
- sprintf(sname,"ds_%d",j);
+ HDsprintf(sname,"ds_%d",j);
if((dsid = H5Dopen2(gid,sname, H5P_DEFAULT)) < 0)
goto out;
if(H5DSattach_scale(did,dsid,DIM0) < 0)
@@ -3740,7 +3740,7 @@ static int test_errors(void)
hid_t sidds = -1; /* space ID */
hsize_t pal_dims[] = {9,3};
- printf("Testing error conditions\n");
+ HDprintf("Testing error conditions\n");
/*-------------------------------------------------------------------------
* create a file, spaces, dataset and group ids
@@ -4119,7 +4119,7 @@ static int test_iterators(void)
char dname[30]; /* dataset name */
int i;
- printf("Testing iterators\n");
+ HDprintf("Testing iterators\n");
/*-------------------------------------------------------------------------
* create a file, spaces, dataset and group ids
@@ -4198,7 +4198,7 @@ static int test_iterators(void)
for(i=0; i<100; i++) {
/* make a DS */
- sprintf(dname,"ds_%d",i);
+ HDsprintf(dname,"ds_%d",i);
if(H5LTmake_dataset_int(fid,dname,rankds,s1_dim,NULL) < 0)
goto out;
/* open */
@@ -4311,7 +4311,7 @@ static int test_rank(void)
float buff[1]={1};
int i;
- printf("Testing ranks\n");
+ HDprintf("Testing ranks\n");
/*-------------------------------------------------------------------------
* create a file, a dataset, scales
@@ -4349,7 +4349,7 @@ static int test_rank(void)
goto out;
for(i=0; i<3; i++) {
- sprintf(name,"ds_a_%d",i);
+ HDsprintf(name,"ds_a_%d",i);
if((dsid = H5Dopen2(fid,name, H5P_DEFAULT)) < 0)
goto out;
if(H5DSattach_scale(did,dsid,(unsigned)i) < 0)
@@ -4377,7 +4377,7 @@ static int test_rank(void)
goto out;
for(i=0; i<3; i++) {
- sprintf(name,"ds_a_%d",i);
+ HDsprintf(name,"ds_a_%d",i);
if((dsid = H5Dopen2(fid,name, H5P_DEFAULT)) < 0)
goto out;
if(H5DSdetach_scale(did,dsid,(unsigned)i) < 0)
@@ -4403,7 +4403,7 @@ static int test_rank(void)
goto out;
for(i=0; i<3; i++) {
- sprintf(name,"ds_a_%d",i);
+ HDsprintf(name,"ds_a_%d",i);
if((dsid = H5Dopen2(fid,name, H5P_DEFAULT)) < 0)
goto out;
if(H5DSset_scale(dsid,name) < 0)
@@ -4517,7 +4517,7 @@ static int test_types(void)
const char *s1_str = "ABC";
const char *s2_str = "ABCD";
- printf("Testing scales with several datatypes\n");
+ HDprintf("Testing scales with several datatypes\n");
/*-------------------------------------------------------------------------
* create a file for the test
@@ -4694,7 +4694,7 @@ static int test_data(void)
float fill=-99; /* fill value */
- printf("Testing reading ASCII data and generate HDF5 data with scales\n");
+ HDprintf("Testing reading ASCII data and generate HDF5 data with scales\n");
/*-------------------------------------------------------------------------
* create a file for the test
@@ -4870,7 +4870,7 @@ static int read_data( const char* fname,
/* read first data file */
f = HDfopen(data_file, "r");
if( f == NULL ) {
- printf( "Could not open file %s\n", data_file );
+ HDprintf( "Could not open file %s\n", data_file );
return -1;
}
@@ -4892,7 +4892,7 @@ static int read_data( const char* fname,
*buf = (float*) HDmalloc (nelms * sizeof( float ));
if ( *buf == NULL ) {
- printf( "memory allocation failed\n" );
+ HDprintf( "memory allocation failed\n" );
HDfclose(f);
return -1;
}
@@ -4930,7 +4930,7 @@ static int test_errors2(void)
int nscales; /* number of scales in DIM */
int count; /* visitor data */
- printf("Testing parameter errors\n");
+ HDprintf("Testing parameter errors\n");
/*-------------------------------------------------------------------------
* create a file, a dataset, scales
diff --git a/hl/test/test_file_image.c b/hl/test/test_file_image.c
index 6cf39e8..fd2d0d2 100644
--- a/hl/test/test_file_image.c
+++ b/hl/test/test_file_image.c
@@ -112,7 +112,7 @@ test_file_image(size_t open_images, size_t nflags, unsigned *flags)
filename[i] = (char *)HDmalloc(sizeof(char) * 32);
/* create file name */
- sprintf(filename[i], "image_file%d.h5", (int)i);
+ HDsprintf(filename[i], "image_file%d.h5", (int)i);
/* create file */
if ((file_id[i] = H5Fcreate(filename[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -536,11 +536,11 @@ int main( void )
nerrors += test_file_image(open_images, nflags, flags) < 0? 1 : 0;
if (nerrors) goto error;
- printf("File image tests passed.\n");
+ HDprintf("File image tests passed.\n");
return 0;
error:
- printf("***** %d IMAGE TEST%s FAILED! *****\n",nerrors, 1 == nerrors ? "" : "S");
+ HDprintf("***** %d IMAGE TEST%s FAILED! *****\n",nerrors, 1 == nerrors ? "" : "S");
return 1;
}
diff --git a/hl/test/test_image.c b/hl/test/test_image.c
index e350533..6d13419 100644
--- a/hl/test/test_image.c
+++ b/hl/test/test_image.c
@@ -75,11 +75,11 @@ int main(void)
nerrors += test_generate()<0 ?1:0;
if (nerrors) goto error;
- printf("All image tests passed.\n");
+ HDprintf("All image tests passed.\n");
return 0;
error:
- printf("***** %d IMAGE TEST%s FAILED! *****\n",nerrors, 1 == nerrors ? "" : "S");
+ HDprintf("***** %d IMAGE TEST%s FAILED! *****\n",nerrors, 1 == nerrors ? "" : "S");
return 1;
}
@@ -344,7 +344,7 @@ static int test_data(void)
if ((fid=H5Fcreate(FILE2,H5F_ACC_TRUNC,H5P_DEFAULT,H5P_DEFAULT))<0)
goto out;
- printf("Testing read ascii image data and generate images\n");
+ HDprintf("Testing read ascii image data and generate images\n");
/*-------------------------------------------------------------------------
* read 8bit image data
@@ -551,7 +551,7 @@ static int test_generate(void)
if ((fid=H5Fcreate(FILE3,H5F_ACC_TRUNC,H5P_DEFAULT,H5P_DEFAULT))<0)
goto out;
- printf("Testing read and process data and make indexed images\n");
+ HDprintf("Testing read and process data and make indexed images\n");
/*-------------------------------------------------------------------------
* read data; the file data format is described below
@@ -561,7 +561,7 @@ static int test_generate(void)
f = HDfopen( data_file, "r" ) ;
if ( f == NULL )
{
- printf( "Could not find file %s. Try set $srcdir \n", data_file );
+ HDprintf( "Could not find file %s. Try set $srcdir \n", data_file );
goto out;
}
@@ -602,11 +602,11 @@ static int test_generate(void)
if(fscanf( f, "%d %d %d", &imax, &jmax, &kmax ) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf( f, "%f %f %f", &valex, &xmin, &xmax ) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
@@ -638,7 +638,7 @@ static int test_generate(void)
for ( i = 0; i < n_elements; i++ )
{
if(fscanf( f, "%f ", &value ) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
data[i] = value;
@@ -797,37 +797,37 @@ static int read_data(const char* fname, /*IN*/
*/
if(NULL == (f = HDfopen(data_file, "r"))) {
- printf( "Could not open file %s. Try set $srcdir \n", data_file );
+ HDprintf( "Could not open file %s. Try set $srcdir \n", data_file );
goto out;
}
if(fscanf(f, "%s", str) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf(f, "%d", &color_planes) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf(f, "%s", str) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf(f, "%d", &h) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf(f, "%s", str) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
if(fscanf(f, "%d", &w) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
@@ -861,7 +861,7 @@ static int read_data(const char* fname, /*IN*/
/* Read data elements */
for(i = 0; i < n_elements; i++) {
if(fscanf(f, "%d", &n) < 0 && HDferror(f)) {
- printf( "fscanf error in file %s.\n", data_file );
+ HDprintf( "fscanf error in file %s.\n", data_file );
goto out;
} /* end if */
image_data[i] = (unsigned char)n;
@@ -917,7 +917,7 @@ static int read_palette(const char* fname,
/* open the input file */
if (!(file = HDfopen(data_file, "r")))
{
- printf( "Could not open file %s. Try set $srcdir \n", data_file );
+ HDprintf( "Could not open file %s. Try set $srcdir \n", data_file );
return -1;
}
diff --git a/hl/test/test_packet_vlen.c b/hl/test/test_packet_vlen.c
index 4d83ae6..b41e73d 100644
--- a/hl/test/test_packet_vlen.c
+++ b/hl/test/test_packet_vlen.c
@@ -69,7 +69,7 @@ static int test_VLof_atomic(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(unsigned int));
if (writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].len = uu + 1;
@@ -108,7 +108,7 @@ static int test_VLof_atomic(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -120,8 +120,8 @@ static int test_VLof_atomic(void)
for (vv = 0; vv < (uu + 1); vv++)
{
if (((unsigned int *)readBuf[uu].p)[vv] != ((unsigned int *)writeBuf[uu].p)[vv]) {
- printf("Packet %u's value should be %d\n", uu, ((unsigned int *)writeBuf[uu].p)[vv]);
- printf("Packet %u's value in readBuf is %d\n", uu, ((unsigned int *)readBuf[uu].p)[vv]);
+ HDprintf("Packet %u's value should be %d\n", uu, ((unsigned int *)writeBuf[uu].p)[vv]);
+ HDprintf("Packet %u's value in readBuf is %d\n", uu, ((unsigned int *)readBuf[uu].p)[vv]);
}
}
@@ -186,7 +186,7 @@ static int test_VLof_comptype(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(VLcomp_t));
if(writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].len = uu + 1;
@@ -242,7 +242,7 @@ static int test_VLof_comptype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -253,13 +253,13 @@ static int test_VLof_comptype(void)
/* Compare data read in */
for (uu = 0; uu < NRECORDS; uu++) {
if (writeBuf[uu].len != readBuf[uu].len) {
- fprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].len=%d, readBuf[%u].len=%d\n", __LINE__, uu, (int)writeBuf[uu].len, uu, (int)readBuf[uu].len);
+ HDfprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].len=%d, readBuf[%u].len=%d\n", __LINE__, uu, (int)writeBuf[uu].len, uu, (int)readBuf[uu].len);
continue;
} /* write len != read len */
for (vv = 0; vv < (uu + 1); vv++) {
if (((unsigned int *)writeBuf[uu].p)[vv] != ((unsigned int *)readBuf[uu].p)[vv] ) {
- fprintf(stderr, "VL data values don't match!, writeBuf[uu].p[%d]=%d, readBuf[uu].p[%d]=%d\n", vv, (int)((unsigned int *)writeBuf[uu].p)[vv], vv, (int)((unsigned int *)readBuf[uu].p)[vv]);
+ HDfprintf(stderr, "VL data values don't match!, writeBuf[uu].p[%d]=%d, readBuf[uu].p[%d]=%d\n", vv, (int)((unsigned int *)writeBuf[uu].p)[vv], vv, (int)((unsigned int *)readBuf[uu].p)[vv]);
continue;
} /* write value != read value */
}
@@ -334,7 +334,7 @@ static int test_compound_VL_VLtype(void)
writeBuf[uu].f = (float)(uu * 20) / 3.0F;
writeBuf[uu].v.p = HDmalloc((uu + L1_INCM) * sizeof(hvl_t));
if (writeBuf[uu].v.p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].v.len = uu + L1_INCM;
@@ -342,7 +342,7 @@ static int test_compound_VL_VLtype(void)
{
t1->p = HDmalloc((vv + L2_INCM) * sizeof(unsigned int));
if (t1->p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
t1->len = vv + L2_INCM;
@@ -407,7 +407,7 @@ static int test_compound_VL_VLtype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -418,27 +418,27 @@ static int test_compound_VL_VLtype(void)
/* Compare data read in */
for (uu = 0; uu < NRECORDS; uu++) {
if (writeBuf[uu].u != readBuf[uu].u) {
- fprintf(stderr, "Integer components don't match!, writeBuf[%u].u=%u, readBuf[%u].u=%u\n", uu, writeBuf[uu].u, uu, readBuf[uu].u);
+ HDfprintf(stderr, "Integer components don't match!, writeBuf[%u].u=%u, readBuf[%u].u=%u\n", uu, writeBuf[uu].u, uu, readBuf[uu].u);
continue;
} /* end if */
if (!H5_FLT_ABS_EQUAL(writeBuf[uu].f,readBuf[uu].f)) {
- fprintf(stderr, "Float components don't match!, writeBuf[%u].f=%f, readBuf[%u].f=%f\n", uu, (double)writeBuf[uu].f, uu, (double)readBuf[uu].f);
+ HDfprintf(stderr, "Float components don't match!, writeBuf[%u].f=%f, readBuf[%u].f=%f\n", uu, (double)writeBuf[uu].f, uu, (double)readBuf[uu].f);
continue;
} /* end if */
if (writeBuf[uu].v.len != readBuf[uu].v.len) {
- fprintf(stderr, "%d: VL data length don't match!, writeBuf[%d].v.len=%zu, readBuf[%d].v.len=%zu\n", __LINE__, uu, writeBuf[uu].v.len, uu, readBuf[uu].v.len);
+ HDfprintf(stderr, "%d: VL data length don't match!, writeBuf[%d].v.len=%zu, readBuf[%d].v.len=%zu\n", __LINE__, uu, writeBuf[uu].v.len, uu, readBuf[uu].v.len);
continue;
} /* end if */
for (t1 = (hvl_t *)(writeBuf[uu].v.p), t2 = (hvl_t *)(readBuf[uu].v.p), vv = 0; (size_t)vv < readBuf[uu].v.len; vv++, t1++, t2++) {
if (t1->len != t2->len) {
- fprintf(stderr, "%d: VL data length don't match!, uu=%u, vv=%u, t1->len=%zu, t2->len=%zu\n", __LINE__, uu, vv, t1->len, t2->len);
+ HDfprintf(stderr, "%d: VL data length don't match!, uu=%u, vv=%u, t1->len=%zu, t2->len=%zu\n", __LINE__, uu, vv, t1->len, t2->len);
continue;
} /* end if */
for (ww = 0; (size_t)ww < t2->len; ww++) {
if (((unsigned int *)t1->p)[ww] != ((unsigned int *)t2->p)[ww] ) {
- fprintf(stderr, "VL data values don't match!, t1->p[%u]=%u, t2->p[%u]=%u\n", ww, ((unsigned int *)t1->p)[ww], ww, ((unsigned int *)t2->p)[ww]);
+ HDfprintf(stderr, "VL data values don't match!, t1->p[%u]=%u, t2->p[%u]=%u\n", ww, ((unsigned int *)t1->p)[ww], ww, ((unsigned int *)t2->p)[ww]);
continue;
} /* end if */
} /* end for */
@@ -505,7 +505,7 @@ static int test_VLof_VLtype(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(hvl_t));
if (writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
} /* end if */
writeBuf[uu].len = uu + 1;
@@ -513,7 +513,7 @@ static int test_VLof_VLtype(void)
{
t1->p = HDmalloc((vv + 1) * sizeof(unsigned int));
if (t1->p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
t1->len = vv * 1;
@@ -559,7 +559,7 @@ static int test_VLof_VLtype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -628,7 +628,7 @@ static int verify_ptlengthtype(hid_t fid, const char *table_name, herr_t expecte
HDstrcpy(lenthtype, "fixed-length");
if (expected_value == 1)
HDstrcpy(lenthtype, "variable-length");
- fprintf(stderr, "\nPacket table '%s' should be %s but is not\n", table_name, lenthtype);
+ HDfprintf(stderr, "\nPacket table '%s' should be %s but is not\n", table_name, lenthtype);
ret = FAIL;
}
@@ -1062,7 +1062,7 @@ static int testfl_VLof_atomic(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(unsigned int));
if (writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].len = uu + 1;
@@ -1101,7 +1101,7 @@ static int testfl_VLof_atomic(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1113,8 +1113,8 @@ static int testfl_VLof_atomic(void)
for (vv = 0; vv < (uu + 1); vv++)
{
if (((unsigned int *)readBuf[uu].p)[vv] != ((unsigned int *)writeBuf[uu].p)[vv]) {
- printf("Packet %d's value should be %d\n", uu, ((unsigned int *)writeBuf[uu].p)[vv]);
- printf("Packet %d's value in readBuf is %d\n", uu, ((unsigned int *)readBuf[uu].p)[vv]);
+ HDprintf("Packet %d's value should be %d\n", uu, ((unsigned int *)writeBuf[uu].p)[vv]);
+ HDprintf("Packet %d's value in readBuf is %d\n", uu, ((unsigned int *)readBuf[uu].p)[vv]);
}
}
@@ -1179,7 +1179,7 @@ static int testfl_VLof_comptype(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(VLcomp_t));
if(writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].len = uu + 1;
@@ -1235,7 +1235,7 @@ static int testfl_VLof_comptype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1246,13 +1246,13 @@ static int testfl_VLof_comptype(void)
/* Compare data read in */
for (uu = 0; uu < NRECORDS; uu++) {
if (writeBuf[uu].len != readBuf[uu].len) {
- fprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].len=%zu, readBuf[%u].len=%zu\n",__LINE__, uu, writeBuf[uu].len, uu, readBuf[uu].len);
+ HDfprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].len=%zu, readBuf[%u].len=%zu\n",__LINE__, uu, writeBuf[uu].len, uu, readBuf[uu].len);
continue;
} /* write len != read len */
for (vv = 0; vv < (uu + 1); vv++) {
if (((unsigned int *)writeBuf[uu].p)[vv] != ((unsigned int *)readBuf[uu].p)[vv] ) {
- fprintf(stderr, "VL data values don't match!, writeBuf[uu].p[%u]=%u, readBuf[uu].p[%u]=%u\n", vv, ((unsigned int *)writeBuf[uu].p)[vv], vv, ((unsigned int *)readBuf[uu].p)[vv]);
+ HDfprintf(stderr, "VL data values don't match!, writeBuf[uu].p[%u]=%u, readBuf[uu].p[%u]=%u\n", vv, ((unsigned int *)writeBuf[uu].p)[vv], vv, ((unsigned int *)readBuf[uu].p)[vv]);
continue;
} /* write value != read value */
}
@@ -1327,7 +1327,7 @@ static int testfl_compound_VL_VLtype(void)
writeBuf[uu].f = (float)(uu * 20) / 3.0F;
writeBuf[uu].v.p = HDmalloc((uu + L1_INCM) * sizeof(hvl_t));
if (writeBuf[uu].v.p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
writeBuf[uu].v.len = uu + L1_INCM;
@@ -1335,7 +1335,7 @@ static int testfl_compound_VL_VLtype(void)
{
t1->p = HDmalloc((vv + L2_INCM) * sizeof(unsigned int));
if (t1->p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
t1->len = vv + L2_INCM;
@@ -1400,7 +1400,7 @@ static int testfl_compound_VL_VLtype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1411,27 +1411,27 @@ static int testfl_compound_VL_VLtype(void)
/* Compare data read in */
for (uu = 0; uu < NRECORDS; uu++) {
if (writeBuf[uu].u != readBuf[uu].u) {
- fprintf(stderr, "Integer components don't match!, writeBuf[%u].u=%u, readBuf[%u].u=%u\n", uu, writeBuf[uu].u, uu, readBuf[uu].u);
+ HDfprintf(stderr, "Integer components don't match!, writeBuf[%u].u=%u, readBuf[%u].u=%u\n", uu, writeBuf[uu].u, uu, readBuf[uu].u);
continue;
} /* end if */
if (!H5_FLT_ABS_EQUAL(writeBuf[uu].f, readBuf[uu].f)) {
- fprintf(stderr, "Float components don't match!, writeBuf[%u].f=%f, readBuf[%u].f=%f\n", uu, (double)writeBuf[uu].f, uu, (double)readBuf[uu].f);
+ HDfprintf(stderr, "Float components don't match!, writeBuf[%u].f=%f, readBuf[%u].f=%f\n", uu, (double)writeBuf[uu].f, uu, (double)readBuf[uu].f);
continue;
} /* end if */
if (writeBuf[uu].v.len != readBuf[uu].v.len) {
- fprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].v.len=%zu, readBuf[%u].v.len=%zu\n", __LINE__, uu, writeBuf[uu].v.len, uu, readBuf[uu].v.len);
+ HDfprintf(stderr, "%d: VL data length don't match!, writeBuf[%u].v.len=%zu, readBuf[%u].v.len=%zu\n", __LINE__, uu, writeBuf[uu].v.len, uu, readBuf[uu].v.len);
continue;
} /* end if */
for (t1 = (hvl_t *)(writeBuf[uu].v.p), t2 = (hvl_t *)(readBuf[uu].v.p), vv = 0; (size_t)vv < readBuf[uu].v.len; vv++, t1++, t2++) {
if (t1->len != t2->len) {
- fprintf(stderr, "%d: VL data length don't match!, uu=%u, vv=%u, t1->len=%zu, t2->len=%zu\n", __LINE__, uu, vv, t1->len, t2->len);
+ HDfprintf(stderr, "%d: VL data length don't match!, uu=%u, vv=%u, t1->len=%zu, t2->len=%zu\n", __LINE__, uu, vv, t1->len, t2->len);
continue;
} /* end if */
for (ww = 0; (size_t)ww < t2->len; ww++) {
if (((unsigned int *)t1->p)[ww] != ((unsigned int *)t2->p)[ww] ) {
- fprintf(stderr, "VL data values don't match!, t1->p[%u]=%u, t2->p[%u]=%u\n", ww, ((unsigned int *)t1->p)[ww], ww, ((unsigned int *)t2->p)[ww]);
+ HDfprintf(stderr, "VL data values don't match!, t1->p[%u]=%u, t2->p[%u]=%u\n", ww, ((unsigned int *)t1->p)[ww], ww, ((unsigned int *)t2->p)[ww]);
continue;
} /* end if */
} /* end for */
@@ -1498,7 +1498,7 @@ static int testfl_VLof_VLtype(void)
for (uu = 0; uu < NRECORDS; uu++) {
writeBuf[uu].p = HDmalloc((uu + 1) * sizeof(hvl_t));
if (writeBuf[uu].p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
} /* end if */
writeBuf[uu].len = uu + 1;
@@ -1506,7 +1506,7 @@ static int testfl_VLof_VLtype(void)
{
t1->p = HDmalloc((vv + 1) * sizeof(unsigned int));
if (t1->p == NULL) {
- fprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
+ HDfprintf(stderr, "Cannot allocate memory for VL data! uu=%u\n", uu);
goto error;
}
t1->len = vv + 1;
@@ -1552,7 +1552,7 @@ static int testfl_VLof_VLtype(void)
if (ret < 0)
goto error;
- sprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
diff --git a/test/atomic_reader.c b/test/atomic_reader.c
index 3e3a20a..5c1241f 100644
--- a/test/atomic_reader.c
+++ b/test/atomic_reader.c
@@ -55,7 +55,7 @@ int verify(int fd, unsigned int k);
void print_info(int *info, unsigned int lastr, unsigned iteration);
-
+
/*-------------------------------------------------------------------------
* Function: usage
*
@@ -63,7 +63,7 @@ void print_info(int *info, unsigned int lastr, unsigned iteration);
*
* Parameters: None
*
- * Return: void
+ * Return: void
*
*-------------------------------------------------------------------------
*/
@@ -78,13 +78,13 @@ usage(void)
printf("\n");
} /* usage() */
-
+
/*-------------------------------------------------------------------------
* Function: verify
*
* Purpose: To verify that the data read is the pattern expected.
* Each integer read should be the same as the index.
- * When a difference is encountered, the remaining integers
+ * When a difference is encountered, the remaining integers
* read should be the same as the previous index.
* For example, the pattern expected should be either:
* a) 01234567....n-1
@@ -93,11 +93,11 @@ usage(void)
* the remaining integers should be all "3"s as:
* 012333333333333
*
- * Parameters:
+ * Parameters:
* fd -- the file descriptor
* k -- the number of integers to read
*
- * Return:
+ * Return:
* positive on success
* negative on failure
*
@@ -106,9 +106,9 @@ usage(void)
int
verify(int fd, unsigned int k)
{
- unsigned int i; /* local index variable */
- ssize_t bytes_read; /* the number of bytes read */
- unsigned int *buf = NULL; /* buffer to hold data read */
+ unsigned int i; /* local index variable */
+ ssize_t bytes_read; /* the number of bytes read */
+ unsigned int *buf = NULL; /* buffer to hold data read */
/* Allocate buffer for data read */
if((buf = (unsigned int *)malloc(k * sizeof(unsigned int))) == NULL) {
@@ -165,13 +165,13 @@ error:
} /* end verify() */
-
+
/*-------------------------------------------------------------------------
* Function: print_info
*
* Purpose: To print the statistics gathered for re-reads
*
- * Parameters:
+ * Parameters:
* info -- the array storing the statistics for re-reads
* lastr -- the last read completed
* iteration -- the current iteration
@@ -183,7 +183,7 @@ error:
void
print_info(int *info, unsigned int lastr, unsigned iteration)
{
- unsigned j; /* local index variable */
+ unsigned j; /* local index variable */
printf("--------statistics for %u reads (iteration %u)--------\n", lastr, iteration);
@@ -193,13 +193,13 @@ print_info(int *info, unsigned int lastr, unsigned iteration)
printf("--------end statistics for %u reads (iteration %u)--------\n", lastr, iteration);
} /* print_info() */
-
+
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: To verify that the data read is the pattern expected.
- * (1) Make sure the file opens successfully and the # of bytes read is as expected
- * (2) Iterate the reader with i iterations
+ * (1) Make sure the file opens successfully and the # of bytes read is as expected
+ * (2) Iterate the reader with i iterations
* (3) Read and verify n integers for each iteration
* (4) On verification error, re-read the data at most READ_TRIES
* times to see if correct data can be obtained
diff --git a/test/atomic_writer.c b/test/atomic_writer.c
index 218d4da..b9ea03a 100644
--- a/test/atomic_writer.c
+++ b/test/atomic_writer.c
@@ -14,7 +14,7 @@
*
* Created: atomic_writer.c
*
- * Purpose: This is the "writer" part of the standalone test to check
+ * Purpose: This is the "writer" part of the standalone test to check
* atomic read-write operation on a system.
* a) atomic_writer.c--the writer (this file)
* b) atomic_reader.c--the reader
@@ -53,7 +53,7 @@
static void usage(void);
-
+
/*-------------------------------------------------------------------------
* Function: usage
*
@@ -77,12 +77,12 @@ usage(void)
} /* usage() */
-
+
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: To write a series of integers to a file for the reader to verify the data.
- * A write is atomic if the whole amount written in one operation is not interleaved
+ * A write is atomic if the whole amount written in one operation is not interleaved
* with data from any other process.
* (1) Iterate with i iterations
* (2) Write a series of integers (0 to n-1) to the file with this pattern:
@@ -196,7 +196,7 @@ main(int argc, char *argv[])
printf("WRITER: error from lseek\n");
goto error;
} /* end if */
-
+
/* Write the data */
if((bytes_wrote = write(fd, buf, ((num-n) * sizeof(unsigned int)))) < 0) {
printf("WRITER: error from write\n");
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index eef2070..ff3767c 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -2217,7 +2217,7 @@ main (int argc, char *argv[])
if (argc>1) {
if (argc>2 || strcmp("--noopt", argv[1])) {
HDfprintf(stderr, "usage: %s [--noopt]\n", argv[0]);
- exit(EXIT_FAILURE);
+ HDexit(EXIT_FAILURE);
}
H5Tunregister(H5T_PERS_DONTCARE, NULL, (hid_t)-1, (hid_t)-1, (H5T_conv_t)((void (*) (void))H5T__conv_struct_opt));
}
diff --git a/test/istore.c b/test/istore.c
index 7489b7e..8759be3 100644
--- a/test/istore.c
+++ b/test/istore.c
@@ -338,7 +338,7 @@ test_extend(hid_t f, const char *prefix,
HDfprintf(stderr,",%lu", (unsigned long)size[1]);
if (ndims > 2)
HDfprintf(stderr,",%lu", (unsigned long)size[2]);
- HD fprintf(stderr,"), %lu element%s", (unsigned long)nelmts, 1 == nelmts ? "" : "s");
+ HDfprintf(stderr,"), %lu element%s", (unsigned long)nelmts, 1 == nelmts ? "" : "s");
if (0 == nelmts)
HDfprintf(stderr," *SKIPPED*");
HDfprintf(stderr,"\n");
diff --git a/test/tattr.c b/test/tattr.c
index 50704c6..7c2b5ce 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -11163,17 +11163,18 @@ test_attr(void)
{
hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */
hid_t fcpl = (-1), fcpl2 = (-1); /* File creation property lists */
- hid_t dcpl = -1;
- unsigned new_format; /* Whether to use the new format or not */
- unsigned use_shared; /* Whether to use shared attributes or not */
- unsigned minimize_dset_oh; /* Whether to use minimized dataset object headers */
- herr_t ret; /* Generic return value */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ unsigned new_format; /* Whether to use the new format or not */
+ unsigned use_shared; /* Whether to use shared attributes or not */
+ unsigned minimize_dset_oh; /* Whether to use minimized dataset object headers */
+ herr_t ret; /* Generic return value */
MESSAGE(5, ("Testing Attributes\n"));
fapl = H5Pcreate(H5P_FILE_ACCESS);
CHECK(fapl, FAIL, "H5Pcreate");
+ /* fapl2 uses "latest version of the format" for creating objects in the file */
fapl2 = H5Pcopy(fapl);
CHECK(fapl2, FAIL, "H5Pcopy");
ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
@@ -11207,6 +11208,7 @@ test_attr(void)
for(new_format = FALSE; new_format <= TRUE; new_format++) {
hid_t my_fapl;
+ /* Set the FAPL for the type of format */
if(new_format) {
MESSAGE(7, ("testing with new file format\n"));
my_fapl = fapl2;
@@ -11262,8 +11264,7 @@ test_attr(void)
test_attr_rename_invalid_name(my_fcpl, my_fapl); /* Test passing a NULL or empty attribute name to H5Arename(_by_name) */
test_attr_get_name_invalid_buf(my_fcpl, my_fapl); /* Test passing NULL buffer to H5Aget_name(_by_idx) */
- /* New attribute API routine tests
- */
+ /* New attribute API routine tests */
test_attr_info_by_idx(new_format, my_fcpl, my_fapl); /* Test querying attribute info by index */
test_attr_delete_by_idx(new_format, my_fcpl, my_fapl); /* Test deleting attribute by index */
test_attr_iterate2(new_format, my_fcpl, my_fapl); /* Test iterating over attributes by index */
@@ -11271,8 +11272,7 @@ test_attr(void)
test_attr_open_by_name(new_format, my_fcpl, my_fapl); /* Test opening attributes by name */
test_attr_create_by_name(new_format, my_fcpl, my_fapl); /* Test creating attributes by name */
- /* Tests that address specific bugs
- */
+ /* Tests that address specific bugs */
test_attr_bug1(my_fcpl, my_fapl); /* Test odd allocation operations */
test_attr_bug2(my_fcpl, my_fapl); /* Test many deleted attributes */
test_attr_bug3(my_fcpl, my_fapl); /* Test "self referential" attributes */
@@ -11285,8 +11285,7 @@ test_attr(void)
test_attr_bug8(my_fcpl, my_fapl); /* Test attribute expanding object header with undecoded messages */
test_attr_bug9(my_fcpl, my_fapl); /* Test large attributes converting to dense storage */
- /* tests specific to the "new format"
- */
+ /* tests specific to the "new format" */
if (new_format == TRUE) {
/* General attribute tests */
test_attr_dense_create(my_fcpl, my_fapl); /* Test dense attribute storage creation */
@@ -11297,8 +11296,7 @@ test_attr(void)
test_attr_dense_limits(my_fcpl, my_fapl); /* Test dense attribute storage limits */
test_attr_dense_dup_ids(my_fcpl, my_fapl); /* Test duplicated IDs for dense attribute storage */
- /* Attribute creation order tests
- */
+ /* Attribute creation order tests */
test_attr_corder_create_basic(my_fcpl, my_fapl);/* Test creating an object w/attribute creation order info */
test_attr_corder_create_compact(my_fcpl, my_fapl); /* Test compact attribute storage on an object w/attribute creation order info */
test_attr_corder_create_dense(my_fcpl, my_fapl);/* Test dense attribute storage on an object w/attribute creation order info */
@@ -11306,8 +11304,7 @@ test_attr(void)
test_attr_corder_transition(my_fcpl, my_fapl); /* Test attribute storage transitions on an object w/attribute creation order info */
test_attr_corder_delete(my_fcpl, my_fapl); /* Test deleting object using dense storage w/attribute creation order info */
- /* More complex tests with exclusively both "new format" and "shared" attributes
- */
+ /* More complex tests with exclusively both "new format" and "shared" attributes */
if(use_shared == TRUE) {
test_attr_shared_write(my_fcpl, my_fapl); /* Test writing to shared attributes in compact & dense storage */
test_attr_shared_rename(my_fcpl, my_fapl); /* Test renaming shared attributes in compact & dense storage */
@@ -11365,6 +11362,6 @@ test_attr(void)
void
cleanup_attr(void)
{
- remove(FILENAME);
+ HDremove(FILENAME);
}
diff --git a/test/testframe.c b/test/testframe.c
index a096f36..68c66ec 100644
--- a/test/testframe.c
+++ b/test/testframe.c
@@ -42,10 +42,10 @@ typedef struct TestStruct {
* Variables used by testing framework.
*/
static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */
-static int num_errs = 0; /* Total number of errors during testing */
+static int num_errs = 0; /* Total number of errors during testing */
int TestVerbosity = VERBO_DEF; /* Default Verbosity is Low */
-static int Summary = 0; /* Show test summary. Default is no. */
-static int CleanUp = 1; /* Do cleanup or not. Default is yes. */
+static int Summary = 0; /* Show test summary. Default is no. */
+static int CleanUp = 1; /* Do cleanup or not. Default is yes. */
static int TestExpress = -1; /* Do TestExpress or not. -1 means not set yet. */
static TestStruct *Test = NULL; /* Array of tests */
static unsigned TestAlloc = 0; /* Size of the Test array */
@@ -74,14 +74,12 @@ AddTest(const char *TheName, void (*TheCall) (void), void (*Cleanup) (void), con
{
/* Sanity checking */
if (HDstrlen(TheDescr) >= MAXTESTDESC) {
- HDprintf("Test description ('%s') too long, increase MAXTESTDESC(%d).\n",
- TheDescr, MAXTESTDESC);
- exit(EXIT_FAILURE);
+ HDprintf("Test description ('%s') too long, increase MAXTESTDESC(%d).\n", TheDescr, MAXTESTDESC);
+ HDexit(EXIT_FAILURE);
} /* end if */
if (HDstrlen(TheName) >= MAXTESTNAME) {
- HDprintf("Test name too long, increase MAXTESTNAME(%d).\n",
- MAXTESTNAME);
- exit(EXIT_FAILURE);
+ HDprintf("Test name too long, increase MAXTESTNAME(%d).\n", MAXTESTNAME);
+ HDexit(EXIT_FAILURE);
} /* end if */
/* Check for increasing the Test array size */
@@ -167,7 +165,7 @@ void TestUsage(void)
unsigned i;
print_func("Usage: %s [-v[erbose] (l[ow]|m[edium]|h[igh]|0-9)] %s\n",
- TestProgName, (TestPrivateUsage ? "<extra options>" : ""));
+ TestProgName, (TestPrivateUsage ? "<extra options>" : ""));
print_func(" [-[e]x[clude] name]+ \n");
print_func(" [-o[nly] name]+ \n");
print_func(" [-b[egin] name] \n");
@@ -228,78 +226,80 @@ void TestParseCmdLine(int argc, char *argv[])
int ret_code;
while (argv++, --argc > 0){
- if ((HDstrcmp(*argv, "-verbose") == 0) ||
- (HDstrcmp(*argv, "-v") == 0)) {
- if (argc > 0){
- --argc; ++argv;
- ParseTestVerbosity(*argv);
- }else{
- TestUsage();
- exit(EXIT_FAILURE);
+ if ((HDstrcmp(*argv, "-verbose") == 0) || (HDstrcmp(*argv, "-v") == 0)) {
+ if (argc > 0){
+ --argc; ++argv;
+ ParseTestVerbosity(*argv);
+ }
+ else{
+ TestUsage();
+ HDexit(EXIT_FAILURE);
+ }
}
- }
- else if (((HDstrcmp(*argv, "-exclude") == 0) ||
- (HDstrcmp(*argv, "-x") == 0))) {
- if (argc > 0){
- --argc; ++argv;
- SetTest(*argv, SKIPTEST);
- }else{
- TestUsage();
- exit(EXIT_FAILURE);
+ else if (((HDstrcmp(*argv, "-exclude") == 0) ||
+ (HDstrcmp(*argv, "-x") == 0))) {
+ if (argc > 0){
+ --argc; ++argv;
+ SetTest(*argv, SKIPTEST);
+ }
+ else{
+ TestUsage();
+ HDexit(EXIT_FAILURE);
+ }
}
- }
- else if (((HDstrcmp(*argv, "-begin") == 0) ||
- (HDstrcmp(*argv, "-b") == 0))) {
- if (argc > 0){
- --argc; ++argv;
- SetTest(*argv, BEGINTEST);
- }else{
- TestUsage();
- exit(EXIT_FAILURE);
+ else if (((HDstrcmp(*argv, "-begin") == 0) ||
+ (HDstrcmp(*argv, "-b") == 0))) {
+ if (argc > 0){
+ --argc; ++argv;
+ SetTest(*argv, BEGINTEST);
+ }
+ else{
+ TestUsage();
+ HDexit(EXIT_FAILURE);
+ }
}
- }
- else if (((HDstrcmp(*argv, "-only") == 0) ||
- (HDstrcmp(*argv, "-o") == 0))) {
- if(argc > 0) {
- unsigned Loop;
+ else if (((HDstrcmp(*argv, "-only") == 0) ||
+ (HDstrcmp(*argv, "-o") == 0))) {
+ if(argc > 0) {
+ unsigned Loop;
- --argc; ++argv;
+ --argc; ++argv;
- /* Skip all tests, then activate only one. */
+ /* Skip all tests, then activate only one. */
if(!skipped_all) {
for(Loop = 0; Loop < Index; Loop++)
Test[Loop].SkipFlag = 1;
skipped_all = TRUE;
} /* end if */
- SetTest(*argv, ONLYTEST);
- } /* end if */
- else {
- TestUsage();
- exit(EXIT_FAILURE);
+ SetTest(*argv, ONLYTEST);
+ }
+ else{
+ TestUsage();
+ HDexit(EXIT_FAILURE);
+ }
+ }
+ else if ((HDstrcmp(*argv, "-summary") == 0) || (HDstrcmp(*argv, "-s") == 0))
+ Summary = 1;
+ else if (HDstrcmp(*argv, "-enable-error-stack") == 0)
+ enable_error_stack = 1;
+ else if ((HDstrcmp(*argv, "-help") == 0) || (HDstrcmp(*argv, "-h") == 0)) {
+ TestUsage();
+ HDexit(EXIT_SUCCESS);
+ }
+ else if ((HDstrcmp(*argv, "-cleanoff") == 0) || (HDstrcmp(*argv, "-c") == 0))
+ SetTestNoCleanup();
+ else {
+ /* non-standard option. Break out. */
+ break;
}
- }
- else if ((HDstrcmp(*argv, "-summary") == 0) || (HDstrcmp(*argv, "-s") == 0))
- Summary = 1;
- else if (HDstrcmp(*argv, "-enable-error-stack") == 0)
- enable_error_stack = 1;
- else if ((HDstrcmp(*argv, "-help") == 0) || (HDstrcmp(*argv, "-h") == 0)) {
- TestUsage();
- exit(EXIT_SUCCESS);
- }
- else if ((HDstrcmp(*argv, "-cleanoff") == 0) || (HDstrcmp(*argv, "-c") == 0))
- SetTestNoCleanup();
- else {
- /* non-standard option. Break out. */
- break;
- }
}
/* Call extra parsing function if provided. */
if (NULL != TestPrivateParser){
- ret_code=TestPrivateParser(argc+1, argv-1);
- if (ret_code != 0)
- exit(EXIT_FAILURE);
+ ret_code=TestPrivateParser(argc+1, argv-1);
+ if (ret_code != 0)
+ HDexit(EXIT_FAILURE);
}
}
@@ -318,10 +318,10 @@ void PerformTests(void)
MESSAGE(2, ("Testing -- %s (%s) \n", Test[Loop].Description, Test[Loop].Name));
MESSAGE(5, ("===============================================\n"));
Test[Loop].NumErrors = num_errs;
- Test_parameters = Test[Loop].Parameters;
- ALARM_ON;
+ Test_parameters = Test[Loop].Parameters;
+ ALARM_ON;
Test[Loop].Call();
- ALARM_OFF;
+ ALARM_OFF;
Test[Loop].NumErrors = num_errs - Test[Loop].NumErrors;
MESSAGE(5, ("===============================================\n"));
MESSAGE(5, ("There were %d errors detected.\n\n", (int)Test[Loop].NumErrors));
@@ -440,18 +440,18 @@ int GetTestExpress(void)
/* set it here for now. Should be done in something like h5test_init(). */
if(TestExpress==-1)
{
- env_val = getenv("HDF5TestExpress");
-
- if(env_val == NULL)
- SetTestExpress(1);
- else if(strcmp(env_val, "0") == 0)
- SetTestExpress(0);
- else if(strcmp(env_val, "1") == 0)
- SetTestExpress(1);
- else if(strcmp(env_val, "2") == 0)
- SetTestExpress(2);
- else
- SetTestExpress(3);
+ env_val = getenv("HDF5TestExpress");
+
+ if(env_val == NULL)
+ SetTestExpress(1);
+ else if(strcmp(env_val, "0") == 0)
+ SetTestExpress(0);
+ else if(strcmp(env_val, "1") == 0)
+ SetTestExpress(1);
+ else if(strcmp(env_val, "2") == 0)
+ SetTestExpress(2);
+ else
+ SetTestExpress(3);
}
return(TestExpress);
@@ -508,13 +508,13 @@ int SetTestNoCleanup(void)
void ParseTestVerbosity(char *argv)
{
if (*argv == 'l')
- SetTestVerbosity(VERBO_LO);
+ SetTestVerbosity(VERBO_LO);
else if (*argv == 'm')
- SetTestVerbosity(VERBO_MED);
+ SetTestVerbosity(VERBO_MED);
else if (*argv == 'h')
- SetTestVerbosity(VERBO_HI);
+ SetTestVerbosity(VERBO_HI);
else
- SetTestVerbosity(atoi(argv));
+ SetTestVerbosity(atoi(argv));
}
@@ -582,29 +582,29 @@ void SetTest(const char *testname, int action)
switch (action){
case SKIPTEST:
for (Loop = 0; Loop < Index; Loop++)
- if (HDstrcmp(testname, Test[Loop].Name) == 0){
- Test[Loop].SkipFlag = 1;
- break;
- }
+ if (HDstrcmp(testname, Test[Loop].Name) == 0){
+ Test[Loop].SkipFlag = 1;
+ break;
+ }
break;
case BEGINTEST:
for (Loop = 0; Loop < Index; Loop++) {
- if (HDstrcmp(testname, Test[Loop].Name) != 0)
- Test[Loop].SkipFlag = 1;
- else{
- /* Found it. Set it to run. Done. */
- Test[Loop].SkipFlag = 0;
- break;
- }
+ if (HDstrcmp(testname, Test[Loop].Name) != 0)
+ Test[Loop].SkipFlag = 1;
+ else{
+ /* Found it. Set it to run. Done. */
+ Test[Loop].SkipFlag = 0;
+ break;
+ }
}
break;
case ONLYTEST:
for (Loop = 0; Loop < Index; Loop++) {
- if (HDstrcmp(testname, Test[Loop].Name) == 0) {
- /* Found it. Set it to run. Break to skip the rest. */
- Test[Loop].SkipFlag = 0;
- break;
- }
+ if (HDstrcmp(testname, Test[Loop].Name) == 0) {
+ /* Found it. Set it to run. Break to skip the rest. */
+ Test[Loop].SkipFlag = 0;
+ break;
+ }
}
break;
default:
diff --git a/test/testhdf5.h b/test/testhdf5.h
index 1f3f6a0..ef3b784 100644
--- a/test/testhdf5.h
+++ b/test/testhdf5.h
@@ -177,53 +177,53 @@ extern "C" {
#endif
/* Prototypes for the test routines */
-void test_metadata(void);
-void test_checksum(void);
-void test_tst(void);
-void test_heap(void);
-void test_refstr(void);
-void test_file(void);
-void test_h5o(void);
-void test_h5t(void);
-void test_h5s(void);
-void test_coords(void);
-void test_h5d(void);
-void test_attr(void);
-void test_select(void);
-void test_time(void);
-void test_reference(void);
-void test_vltypes(void);
-void test_vlstrings(void);
-void test_iterate(void);
-void test_array(void);
-void test_genprop(void);
-void test_configure(void);
-void test_misc(void);
-void test_ids(void);
-void test_skiplist(void);
-void test_sohm(void);
-void test_unicode(void);
+void test_metadata(void);
+void test_checksum(void);
+void test_tst(void);
+void test_heap(void);
+void test_refstr(void);
+void test_file(void);
+void test_h5o(void);
+void test_h5t(void);
+void test_h5s(void);
+void test_coords(void);
+void test_h5d(void);
+void test_attr(void);
+void test_select(void);
+void test_time(void);
+void test_reference(void);
+void test_vltypes(void);
+void test_vlstrings(void);
+void test_iterate(void);
+void test_array(void);
+void test_genprop(void);
+void test_configure(void);
+void test_misc(void);
+void test_ids(void);
+void test_skiplist(void);
+void test_sohm(void);
+void test_unicode(void);
/* Prototypes for the cleanup routines */
-void cleanup_metadata(void);
-void cleanup_checksum(void);
-void cleanup_file(void);
-void cleanup_h5o(void);
-void cleanup_h5s(void);
-void cleanup_coords(void);
-void cleanup_attr(void);
-void cleanup_select(void);
-void cleanup_time(void);
-void cleanup_reference(void);
-void cleanup_vltypes(void);
-void cleanup_vlstrings(void);
-void cleanup_iterate(void);
-void cleanup_array(void);
-void cleanup_genprop(void);
-void cleanup_configure(void);
-void cleanup_sohm(void);
-void cleanup_misc(void);
-void cleanup_unicode(void);
+void cleanup_metadata(void);
+void cleanup_checksum(void);
+void cleanup_file(void);
+void cleanup_h5o(void);
+void cleanup_h5s(void);
+void cleanup_coords(void);
+void cleanup_attr(void);
+void cleanup_select(void);
+void cleanup_time(void);
+void cleanup_reference(void);
+void cleanup_vltypes(void);
+void cleanup_vlstrings(void);
+void cleanup_iterate(void);
+void cleanup_array(void);
+void cleanup_genprop(void);
+void cleanup_configure(void);
+void cleanup_sohm(void);
+void cleanup_misc(void);
+void cleanup_unicode(void);
#ifdef __cplusplus
}
diff --git a/test/testmeta.c b/test/testmeta.c
index b97eb68..9a40e6f 100644
--- a/test/testmeta.c
+++ b/test/testmeta.c
@@ -23,34 +23,34 @@
*/
-#include "hdf5.h"
+#include "h5test.h"
-#define FILEN "testmeta.h5"
+#define FILEN "testmeta.h5"
-#define CHUNK_SIZE 512
+#define CHUNK_SIZE 512
-#define NDATAARRAYS 3
-/*#define NPOINTS 2048*/
-#define NPOINTS 20
-#define NEXTARRAYS 10
-#define NDATAOBJECTS 100000
+#define NDATAARRAYS 3
+/*#define NPOINTS 2048*/
+#define NPOINTS 20
+#define NEXTARRAYS 10
+#define NDATAOBJECTS 100000
int main(void)
{
- hid_t file_id, prop_id, memspace_id, type_id;
- hid_t group_id;
- hid_t dataset_id, dataspace_id;
- herr_t status;
- hsize_t dims[1];
- hsize_t maxdims[1];
- float data[NPOINTS];
- float floatval;
- unsigned numdataobj = 0;
- unsigned i, j;
- char name[80];
- hsize_t start[1] = {0};
- hsize_t stride[1] = {1};
- hsize_t count[1] = {1};
+ hid_t file_id, prop_id, memspace_id, type_id;
+ hid_t group_id;
+ hid_t dataset_id, dataspace_id;
+ herr_t status;
+ hsize_t dims[1];
+ hsize_t maxdims[1];
+ float data[NPOINTS];
+ float floatval;
+ unsigned numdataobj = 0;
+ unsigned i, j;
+ char name[80];
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {1};
+ hsize_t count[1] = {1};
/* Create a file */
file_id = H5Fcreate(FILEN, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -64,8 +64,7 @@ int main(void)
H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
/* Write value to NumDataObj dataset */
- status = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL,
- H5S_ALL, H5P_DEFAULT, &numdataobj);
+ status = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &numdataobj);
/* Close the identifiers */
status = H5Dclose(dataset_id);
@@ -78,16 +77,14 @@ int main(void)
status = H5Pset_chunk(prop_id, 1, dims);
/* Create dataspace */
- dims[0]=1;
- maxdims[0]=H5S_UNLIMITED;
+ dims[0] = 1;
+ maxdims[0] = H5S_UNLIMITED;
dataspace_id = H5Screate_simple(1, dims, maxdims);
- for(i=0; i<NEXTARRAYS; i++)
- {
+ for(i = 0; i < NEXTARRAYS; i++) {
/* Create dataset */
sprintf(name, "/ExtArray%06d", i);
- dataset_id = H5Dcreate2(file_id, name,
- H5T_NATIVE_FLOAT, dataspace_id, H5P_DEFAULT, prop_id, H5P_DEFAULT);
+ dataset_id = H5Dcreate2(file_id, name, H5T_NATIVE_FLOAT, dataspace_id, H5P_DEFAULT, prop_id, H5P_DEFAULT);
/* Close the identifier */
status = H5Dclose(dataset_id);
@@ -101,47 +98,43 @@ int main(void)
group_id = H5Gcreate2(file_id, "/DataArray", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
H5Gclose(group_id);
- for(j=0; j<NDATAOBJECTS; j++)
- {
+ for(j = 0; j < NDATAOBJECTS; j++) {
/* Removed print statement as it would lock system resources on Windows */
/*
- * printf("\rWriting Object #%d of %d", j+1, NDATAOBJECTS);
+ * HDprintf("\rWriting Object #%d of %d", j+1, NDATAOBJECTS);
* fflush(stdout);
*/
floatval = (float)j;
/* Create group to hold data arrays for this object */
- sprintf(name, "/DataArray/%06d", j);
+ HDsprintf(name, "/DataArray/%06d", j);
group_id = H5Gcreate2(file_id, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
if(group_id < 0) {
- fprintf(stderr, "Failed to create DataArray group.\n");
+ HDfprintf(stderr, "Failed to create DataArray group.\n");
status = H5Fclose(file_id);
return -1;
}
/* Loop over data arrays */
- for(i=0; i<NDATAARRAYS; i++)
- {
+ for(i = 0; i < NDATAARRAYS; i++) {
/* Create dataspace */
dims[0]=NPOINTS;
maxdims[0]=NPOINTS;
dataspace_id = H5Screate_simple(1 ,dims, maxdims);
/* Create dataset */
- sprintf(name, "DataArray%06d", i);
- dataset_id = H5Dcreate2(group_id, name,
- H5T_NATIVE_FLOAT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ HDsprintf(name, "DataArray%06d", i);
+ dataset_id = H5Dcreate2(group_id, name, H5T_NATIVE_FLOAT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
if(dataset_id < 0) {
- fprintf(stderr, "Failed to create DataArray dataset.\n");
+ HDfprintf(stderr, "Failed to create DataArray dataset.\n");
status = H5Fclose(file_id);
return -1;
}
/* Write the data array data */
- status = H5Dwrite(dataset_id, H5T_NATIVE_FLOAT, H5S_ALL,
- H5S_ALL, H5P_DEFAULT, data);
+ status = H5Dwrite(dataset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
if(status < 0) {
- fprintf(stderr, "Failed to write DataArray dataset.\n");
+ HDfprintf(stderr, "Failed to write DataArray dataset.\n");
status = H5Fclose(file_id);
return -1;
}
@@ -154,17 +147,16 @@ int main(void)
/* Open NumDataObj dataset */
dataset_id = H5Dopen2(file_id, "/NumDataObj", H5P_DEFAULT);
if(dataset_id < 0) {
- fprintf(stderr, "Failed to open NumDataObj dataset.\n");
+ HDfprintf(stderr, "Failed to open NumDataObj dataset.\n");
status = H5Fclose(file_id);
return -1;
}
/* Write value to NumDataObj dataset */
numdataobj = j + 1;
- status = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL,
- H5S_ALL, H5P_DEFAULT, &numdataobj);
+ status = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &numdataobj);
if(status < 0) {
- fprintf(stderr, "Failed to write NumDataObj dataset.\n");
+ HDfprintf(stderr, "Failed to write NumDataObj dataset.\n");
status = H5Fclose(file_id);
return -1;
}
@@ -176,10 +168,10 @@ int main(void)
/* Extend attribute arrays */
for(i = 0; i < NEXTARRAYS; i++) {
/* Open extendable dataset */
- sprintf(name, "/ExtArray%06d", i);
+ HDsprintf(name, "/ExtArray%06d", i);
dataset_id = H5Dopen2(file_id, name, H5P_DEFAULT);
if(dataset_id < 0) {
- fprintf(stderr, "Failed to open ExtArray dataset.\n");
+ HDfprintf(stderr, "Failed to open ExtArray dataset.\n");
status = H5Fclose(file_id);
return -1;
} /* end if */
@@ -188,7 +180,7 @@ int main(void)
dims[0] = (hsize_t)j + 1;
status = H5Dset_extent(dataset_id, dims);
if(status < 0) {
- fprintf(stderr, "Failed to extend DataArray dataset.\n");
+ HDfprintf(stderr, "Failed to extend DataArray dataset.\n");
status = H5Fclose(file_id);
return -1;
} /* end if */
@@ -200,16 +192,12 @@ int main(void)
type_id = H5Dget_type(dataset_id);
start[0] = 0;
- status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET,
- start, stride, count, NULL);
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, start, stride, count, NULL);
start[0] = (hssize_t)j;
- status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET,
- start, stride, count, NULL);
- status = H5Dwrite(dataset_id, type_id, memspace_id,
- dataspace_id, H5P_DEFAULT, &floatval);
- if(status < 0)
- {
- fprintf(stderr, "Failed to write DataArray dataset.\n");
+ status = H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, start, stride, count, NULL);
+ status = H5Dwrite(dataset_id, type_id, memspace_id, dataspace_id, H5P_DEFAULT, &floatval);
+ if(status < 0) {
+ HDfprintf(stderr, "Failed to write DataArray dataset.\n");
status = H5Fclose(file_id);
return -1;
}
@@ -226,7 +214,7 @@ int main(void)
/* Close the file */
status = H5Fclose(file_id);
- printf("\n");
+ HDprintf("\n");
return 0;
}
diff --git a/test/tfile.c b/test/tfile.c
index 40c2d4b..f39da5a 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -966,7 +966,7 @@ test_file_close(void)
****************************************************************/
static void
create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1,
- hid_t *ret_gid2, hid_t *ret_gid3)
+ hid_t *ret_gid2, hid_t *ret_gid3)
{
ssize_t oid_count;
herr_t ret;
@@ -1120,8 +1120,7 @@ test_get_obj_ids(void)
/* Call the public function H5F_get_obj_ids to use H5F__get_objects. User reported having problem here.
* that the returned size (ret_count) from H5Fget_obj_ids is one greater than the size passed in
- * (oid_list_size).
- */
+ * (oid_list_size) */
ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list);
CHECK(ret_count, FAIL, "H5Fget_obj_ids");
VERIFY(ret_count, oid_list_size, "H5Fget_obj_count");
diff --git a/test/th5s.c b/test/th5s.c
index 16e20b1..1a4456a 100644
--- a/test/th5s.c
+++ b/test/th5s.c
@@ -1184,24 +1184,24 @@ test_h5s_zero_dim(void)
static void
test_h5s_encode(H5F_libver_t low, H5F_libver_t high)
{
- hid_t sid1, sid2, sid3; /* Dataspace ID */
- hid_t decoded_sid1, decoded_sid2, decoded_sid3;
- int rank; /* Logical rank of dataspace */
- hid_t fapl = -1; /* File access property list ID */
- hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
- size_t sbuf_size=0, null_size=0, scalar_size=0;
- unsigned char *sbuf=NULL, *null_sbuf=NULL, *scalar_buf=NULL;
- hsize_t tdims[4]; /* Dimension array to test with */
- hssize_t n; /* Number of dataspace elements */
- hsize_t start[] = {0, 0, 0};
- hsize_t stride[] = {2, 5, 3};
- hsize_t count[] = {2, 2, 2};
- hsize_t block[] = {1, 3, 1};
- H5S_sel_type sel_type;
- H5S_class_t space_type;
- hssize_t nblocks;
- hid_t ret_id; /* Generic hid_t return value */
- herr_t ret; /* Generic return value */
+ hid_t sid1, sid2, sid3; /* Dataspace ID */
+ hid_t decoded_sid1, decoded_sid2, decoded_sid3;
+ int rank; /* Logical rank of dataspace */
+ hid_t fapl = -1; /* File access property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ size_t sbuf_size=0, null_size=0, scalar_size=0;
+ unsigned char *sbuf=NULL, *null_sbuf=NULL, *scalar_buf=NULL;
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ hsize_t start[] = {0, 0, 0};
+ hsize_t stride[] = {2, 5, 3};
+ hsize_t count[] = {2, 2, 2};
+ hsize_t block[] = {1, 3, 1};
+ H5S_sel_type sel_type;
+ H5S_class_t space_type;
+ hssize_t nblocks;
+ hid_t ret_id; /* Generic hid_t return value */
+ herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing Dataspace Encoding and Decoding\n"));
diff --git a/test/trefer.c b/test/trefer.c
index 9af5c89..7158984 100644
--- a/test/trefer.c
+++ b/test/trefer.c
@@ -21,7 +21,7 @@
#include "testhdf5.h"
-#define FILE1 "trefer1.h5"
+#define FILE1 "trefer1.h5"
#define FILE2 "trefer2.h5"
#define FILE3 "trefer3.h5"
@@ -941,7 +941,7 @@ test_reference_region_1D(H5F_libver_t libver_low, H5F_libver_t libver_high)
uint8_t *tu8; /* Temporary pointer to uint8 data */
H5O_type_t obj_type; /* Object type */
int i; /* counting variables */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing 1-D Dataset Region Reference Functions\n"));
diff --git a/test/tsohm.c b/test/tsohm.c
index 9216b3e..a064940 100644
--- a/test/tsohm.c
+++ b/test/tsohm.c
@@ -3730,8 +3730,7 @@ test_sohm_external_dtype(void)
ret = H5Tinsert (s1_tid, "b", HOFFSET(s1_t,b), H5T_NATIVE_INT);
CHECK_I(ret, "H5Tinsert");
- /* Set up dataset in first file
- */
+ /* Set up dataset in first file */
file1 = H5Fcreate(FILENAME_SRC, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
CHECK_I(file1, "H5Fcreate");
@@ -3767,8 +3766,7 @@ test_sohm_external_dtype(void)
ret = H5Dclose(dataset1);
CHECK_I(ret, "H5Dclose");
- /* Create dataset in second file using datatype from dataset in the first file
- */
+ /* Create dataset in second file using datatype from dataset in the first file */
file2 = H5Fcreate(FILENAME_DST, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
CHECK_I(file2, "H5Fcreate");
@@ -3797,8 +3795,7 @@ test_sohm_external_dtype(void)
ret = H5Fclose(file1);
CHECK_I(ret, "H5Fclose");
- /* Verify that datatype details are still accessible by second file
- */
+ /* Verify that datatype details are still accessible by second file */
dataset2 = H5Dopen2(file2, "dataset_2", H5P_DEFAULT);
CHECK_I(dataset2, "H5Dopen2");
@@ -3808,8 +3805,7 @@ test_sohm_external_dtype(void)
dtype_class = H5Tget_class(dset2_tid);
VERIFY(dtype_class, H5T_COMPOUND, "H5Tget_class");
- /* Cleanup
- */
+ /* Cleanup */
ret = H5Tclose(dset2_tid);
CHECK_I(ret, "H5Tclose");
ret = H5Dclose(dataset2);
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 1d882b8..9ca077c 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1,7 +1,7 @@
#include "hdf5.h"
#include "testphdf5.h"
-#include "H5Dprivate.h" /* For Chunk tests */
+#include "H5Dprivate.h" /* For Chunk tests */
/* FILENAME and filenames must have the same number of names */
const char *FILENAME[2]={ "bigio_test.h5",
@@ -17,7 +17,7 @@ const char *FILENAME[2]={ "bigio_test.h5",
#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
/* Constants definitions */
-#define RANK 2
+#define RANK 2
#define IN_ORDER 1
#define OUT_OF_ORDER 2
@@ -33,12 +33,12 @@ const char *FILENAME[2]={ "bigio_test.h5",
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* Dataset data type. Int's can be easily octo dumped. */
typedef hsize_t B_DATATYPE;
-int facc_type = FACC_MPIO; /*Test file access type */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
size_t bigcount = DXFER_BIGCOUNT;
int nerrors = 0;
@@ -54,12 +54,12 @@ hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
/*
* Setup the coordinates for point selection.
*/
-static void
+static void
set_coords(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -99,10 +99,10 @@ fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
/* put some trivial data in the data_array */
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (j=0; j < block[1]; j++){
+ *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
}
}
@@ -113,7 +113,7 @@ void point_set(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -143,13 +143,13 @@ void point_set(hsize_t start[],
}
if(VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
for(i = 0; i < num_points ; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
@@ -165,19 +165,19 @@ dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
hsize_t i, j;
/* print the column heading */
- printf("%-8s", "Cols:");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%llu ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%llu ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -193,90 +193,90 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
/* print it if VERBOSE_MED */
if(VERBOSE_MED) {
- printf("verify_data dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
+ HDprintf("verify_data dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
+ for (j=0; j < block[1]; j++){
+ if(*dataset != *original){
+ if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
(unsigned long)i, (unsigned long)j,
(unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
*(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
+ }
+ dataset++;
+ original++;
+ }
+ }
}
if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(vrfyerrs)
- printf("%d errors found in verify_data\n", vrfyerrs);
+ HDprintf("%d errors found in verify_data\n", vrfyerrs);
return(vrfyerrs);
}
/* Set up the selection */
static void
ccslab_set(int mpi_rank,
- int mpi_size,
- hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- int mode)
+ int mpi_size,
+ hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ int mode)
{
switch (mode){
case BYROW_CONT:
- /* Each process takes a slabs of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = space_dim1;
- count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
- start[1] = 0;
-
- break;
+        /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = mpi_rank*count[0];
+ start[1] = 0;
+
+ break;
case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
stride[0] = 3;
stride[1] = 3;
count[0] = space_dim1/(stride[0]*block[0]);
count[1] = (space_dim2)/(stride[1]*block[1]);
- start[0] = space_dim1*mpi_rank;
- start[1] = 0;
+ start[0] = space_dim1*mpi_rank;
+ start[1] = 0;
- break;
+ break;
case BYROW_SELECTNONE:
- /* Each process takes a slabs of rows, there are
+       /* Each process takes a slab of rows; there are
no selections for the last process. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
- count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
- start[1] = 0;
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
+ count[1] = space_dim2;
+ start[0] = mpi_rank*count[0];
+ start[1] = 0;
- break;
+ break;
case BYROW_SELECTUNBALANCE:
/* The first one-third of the number of processes only
@@ -284,14 +284,14 @@ ccslab_set(int mpi_rank,
half of the domain. */
block[0] = 1;
- count[0] = 2;
+ count[0] = 2;
stride[0] = space_dim1*mpi_size/4+1;
block[1] = space_dim2;
count[1] = 1;
start[1] = 0;
stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
+ else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
break;
case BYROW_SELECTINCHUNK:
@@ -299,33 +299,33 @@ ccslab_set(int mpi_rank,
block[0] = 1;
count[0] = 1;
- start[0] = mpi_rank*space_dim1;
+ start[0] = mpi_rank*space_dim1;
stride[0]= 1;
- block[1] = space_dim2;
- count[1] = 1;
- stride[1]= 1;
- start[1] = 0;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1]= 1;
+ start[1] = 0;
break;
default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = space_dim1*mpi_size;
- block[1] = space_dim2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1*mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
}
if (VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0]*block[1]*count[0]*count[1]));
}
}
@@ -336,10 +336,10 @@ ccslab_set(int mpi_rank,
*/
static void
ccdataset_fill(hsize_t start[],
- hsize_t stride[],
- hsize_t count[],
- hsize_t block[],
- DATATYPE * dataset,
+ hsize_t stride[],
+ hsize_t count[],
+ hsize_t block[],
+ DATATYPE * dataset,
int mem_selection)
{
DATATYPE *dataptr = dataset;
@@ -377,28 +377,28 @@ ccdataset_fill(hsize_t start[],
*/
static void
ccdataset_print(hsize_t start[],
- hsize_t block[],
- DATATYPE * dataset)
+ hsize_t block[],
+ DATATYPE * dataset)
{
DATATYPE *dataptr = dataset;
hsize_t i, j;
/* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -407,11 +407,11 @@ ccdataset_print(hsize_t start[],
*/
static int
ccdataset_vrfy(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- DATATYPE *dataset,
- DATATYPE *original,
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ DATATYPE *dataset,
+ DATATYPE *original,
int mem_selection)
{
hsize_t i, j,k1,k2,k=0;
@@ -420,14 +420,14 @@ ccdataset_vrfy(hsize_t start[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- ccdataset_print(start, block, original);
- printf("compared values:\n");
- ccdataset_print(start, block, dataset);
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
}
vrfyerrs = 0;
@@ -449,7 +449,7 @@ ccdataset_vrfy(hsize_t start[],
}
if (*dataptr != *oriptr){
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
*(oriptr), *(dataptr));
}
@@ -459,9 +459,9 @@ ccdataset_vrfy(hsize_t start[],
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
@@ -478,28 +478,28 @@ static void
dataset_big_write(void)
{
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset;
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
hsize_t *coords = NULL;
int i;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
+ hid_t acc_tpl; /* File access templates */
hsize_t h;
size_t num_points;
B_DATATYPE * wdata;
/* allocate memory for data buffer */
- wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
VRFY((wdata != NULL), "wdata malloc succeeded");
/* setup file access template */
@@ -552,8 +552,8 @@ dataset_big_write(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
@@ -580,7 +580,7 @@ dataset_big_write(void)
/* Each process takes a slabs of cols. */
- if (mpi_rank == 0)
+ if (mpi_rank == 0)
HDprintf("\nTesting Dataset2 write by COL\n");
/* Create a large dataset */
dims[0] = bigcount;
@@ -615,8 +615,8 @@ dataset_big_write(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
@@ -690,7 +690,7 @@ dataset_big_write(void)
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
@@ -729,7 +729,7 @@ dataset_big_write(void)
num_points = bigcount;
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
@@ -744,8 +744,8 @@ dataset_big_write(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* create a memory dataspace */
@@ -778,7 +778,7 @@ dataset_big_write(void)
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose1 succeeded");
- free(wdata);
+ HDfree(wdata);
H5Fclose(fid);
}
@@ -795,30 +795,30 @@ static void
dataset_big_read(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset;
- B_DATATYPE *rdata = NULL; /* data buffer */
- B_DATATYPE *wdata = NULL; /* expected data buffer */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
int i,j,k;
hsize_t h;
size_t num_points;
hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* allocate memory for data buffer */
- rdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
VRFY((rdata != NULL), "rdata malloc succeeded");
- wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
VRFY((wdata != NULL), "wdata malloc succeeded");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
/* setup file access template */
acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
@@ -865,7 +865,7 @@ dataset_big_read(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
@@ -885,7 +885,7 @@ dataset_big_read(void)
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -897,7 +897,7 @@ dataset_big_read(void)
if (mpi_rank == 0)
HDprintf("\nRead Testing Dataset2 by ROW\n");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2 succeeded");
@@ -927,7 +927,7 @@ dataset_big_read(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
@@ -947,7 +947,7 @@ dataset_big_read(void)
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -958,7 +958,7 @@ dataset_big_read(void)
if (mpi_rank == 0)
HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2 succeeded");
@@ -989,7 +989,7 @@ dataset_big_read(void)
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
@@ -1010,7 +1010,7 @@ dataset_big_read(void)
if(mpi_rank == 0) {
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
}
/* release all temporary handles. */
@@ -1040,13 +1040,13 @@ dataset_big_read(void)
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
num_points = bigcount;
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
@@ -1056,7 +1056,7 @@ dataset_big_read(void)
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
- if(coords) free(coords);
+ if(coords) HDfree(coords);
/* create a memory dataspace */
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1082,7 +1082,7 @@ dataset_big_read(void)
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -1091,8 +1091,8 @@ dataset_big_read(void)
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose1 succeeded");
- free(wdata);
- free(rdata);
+ HDfree(wdata);
+ HDfree(rdata);
wdata = NULL;
rdata = NULL;
@@ -1115,8 +1115,8 @@ dataset_big_read(void)
H5Fclose(fid);
/* release data buffers */
- if(rdata) free(rdata);
- if(wdata) free(wdata);
+ if(rdata) HDfree(rdata);
+ if(wdata) HDfree(wdata);
} /* dataset_large_readAll */
@@ -1129,7 +1129,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
hid_t ret_pl = -1;
herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -1138,36 +1138,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
}
if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
}
/* unknown file access types */
@@ -1176,17 +1176,17 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/*-------------------------------------------------------------------------
- * Function: coll_chunk1
+ * Function: coll_chunk1
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
selection with a single chunk
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1215,7 +1215,7 @@ coll_chunk1(void)
{
const char *filename = FILENAME[0];
if (mpi_rank == 0)
- printf("coll_chunk1\n");
+ HDprintf("coll_chunk1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1230,17 +1230,17 @@ coll_chunk1(void)
/*-------------------------------------------------------------------------
- * Function: coll_chunk2
+ * Function: coll_chunk2
*
- * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
selection with a single chunk
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1269,7 +1269,7 @@ coll_chunk2(void)
{
const char *filename = FILENAME[0];
if (mpi_rank == 0)
- printf("coll_chunk2\n");
+ HDprintf("coll_chunk2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1284,17 +1284,17 @@ coll_chunk2(void)
/*-------------------------------------------------------------------------
- * Function: coll_chunk3
+ * Function: coll_chunk3
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
selection with at least number of 2*mpi_size chunks
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1324,7 +1324,7 @@ coll_chunk3(void)
{
const char *filename = FILENAME[0];
if (mpi_rank == 0)
- printf("coll_chunk3\n");
+ HDprintf("coll_chunk3\n");
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1341,17 +1341,17 @@ coll_chunk3(void)
//-------------------------------------------------------------------------
// Borrowed/Modified (slightly) from t_coll_chunk.c
/*-------------------------------------------------------------------------
- * Function: coll_chunktest
+ * Function: coll_chunktest
*
* Purpose: The real testing routine for regular selection of collective
chunking storage
testing both write and read,
- If anything fails, it may be read or write. There is no
- separation test between read and write.
+ If anything fails, it may be read or write. There is no
+ separation test between read and write.
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
* Modifications:
* Remove invalid temporary property checkings for API_LINK_HARD and
@@ -1359,8 +1359,8 @@ coll_chunk3(void)
* Programmer: Jonathan Kim
* Date: 2012-10-10
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1369,14 +1369,14 @@ coll_chunk3(void)
static void
coll_chunktest(const char* filename,
- int chunk_factor,
- int select_factor,
+ int chunk_factor,
+ int select_factor,
int api_option,
int file_selection,
int mem_selection,
int mode)
{
- hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t file, dataset, file_dataspace, mem_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
hsize_t dims[RANK], chunk_dims[RANK];
@@ -1524,41 +1524,41 @@ coll_chunktest(const char* filename,
}
switch(api_option){
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
VRFY((status>= 0),"collective chunk optimization succeeded");
break;
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded ");
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((status>= 0),"collective chunk optimization succeeded ");
break;
- case API_LINK_TRUE:
+ case API_LINK_TRUE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
- case API_LINK_FALSE:
+ case API_LINK_FALSE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
- case API_MULTI_COLL:
+ case API_MULTI_COLL:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
- case API_MULTI_IND:
+ case API_MULTI_IND:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
- default:
+ default:
;
}
@@ -1615,7 +1615,7 @@ coll_chunktest(const char* filename,
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((status >= 0),"dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
@@ -1820,22 +1820,22 @@ coll_chunktest(const char* filename,
/*****************************************************************************
*
- * Function: do_express_test()
+ * Function: do_express_test()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
*
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
+ *              Environment variables can be different across different
+ * processes. This function ensures that all processes agree
+ * on whether to do an express test.
*
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 4/25/06
+ * Programmer: JRM -- 4/25/06
*
*****************************************************************************/
static int
@@ -1868,7 +1868,7 @@ do_express_test(int world_mpi_rank)
} /* do_express_test() */
-int main(int argc, char **argv)
+int main(int argc, char **argv)
{
int ExpressMode = 0;
hsize_t newsize = 1048576;
@@ -1894,12 +1894,12 @@ int main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- HDprintf("Failed to turn off atexit processing. Continue.\n");
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
};
/* set alarm. */
ALARM_ON;
-
+
ExpressMode = do_express_test(mpi_rank);
dataset_big_write();
@@ -1923,7 +1923,7 @@ int main(int argc, char **argv)
/* turn off alarm */
ALARM_OFF;
- if (mpi_rank == 0)
+ if (mpi_rank == 0)
HDremove(FILENAME[0]);
/* close HDF5 library */
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 50e6d50..cde19fe 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -622,7 +622,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_group() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -641,7 +641,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Group_excl() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -658,7 +658,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_create() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -672,7 +672,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: file_mpi_comm == MPI_COMM_NULL.\n",
world_mpi_rank, FUNC);
}
@@ -686,7 +686,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: file_mpi_comm != MPI_COMM_NULL.\n",
world_mpi_rank, FUNC);
}
@@ -704,7 +704,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_size() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -720,7 +720,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_rank() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -7457,7 +7457,7 @@ main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d:Failed to turn off atexit processing. Continue.\n",
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
mpi_rank);
};
H5open();
@@ -7476,24 +7476,24 @@ main(int argc, char **argv)
}
#ifdef H5_HAVE_MPE
- if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); }
+ if ( MAINPROCESS ) { HDprintf(" Tests compiled for MPE.\n"); }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
if (MAINPROCESS){
- printf("===================================\n");
- printf("Parallel metadata cache tests\n");
- printf(" mpi_size = %d\n", mpi_size);
- printf(" express_test = %d\n", express_test);
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Parallel metadata cache tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf(" express_test = %d\n", express_test);
+ HDprintf("===================================\n");
}
if ( mpi_size < 3 ) {
if ( MAINPROCESS ) {
- printf(" Need at least 3 processes. Exiting.\n");
+ HDprintf(" Need at least 3 processes. Exiting.\n");
}
goto finish;
}
@@ -7639,15 +7639,15 @@ finish:
*/
MPI_Barrier(MPI_COMM_WORLD);
if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
+ HDprintf("===================================\n");
if (failures){
- printf("***metadata cache tests detected %d failures***\n",
+ HDprintf("***metadata cache tests detected %d failures***\n",
failures);
}
else{
- printf("metadata cache tests finished with no failures\n");
+ HDprintf("metadata cache tests finished with no failures\n");
}
- printf("===================================\n");
+ HDprintf("===================================\n");
}
takedown_derived_types();
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index e158d69..08d455d 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -519,7 +519,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if ( pass ) {
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
@@ -713,7 +713,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
}
@@ -830,7 +830,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
while ( ( pass ) && ( i <= max_dset ) )
{
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
@@ -1187,7 +1187,7 @@ open_hdf5_file(const hbool_t create_file,
} else {
- file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE);
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
if ( file_ptr == NULL ) {
@@ -1446,7 +1446,7 @@ par_create_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1750,7 +1750,7 @@ par_create_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, dset_num);
}
@@ -1848,7 +1848,7 @@ par_delete_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1932,8 +1932,8 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
int child_status;
pid_t child_pid;
- sprintf(file_name_idx_str, "%d", file_name_idx);
- sprintf(mpi_size_str, "%d", mpi_size);
+ HDsprintf(file_name_idx_str, "%d", file_name_idx);
+ HDsprintf(mpi_size_str, "%d", mpi_size);
child_pid = fork();
@@ -1954,7 +1954,7 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
HDfprintf(stdout,
"execl() of ici process failed. errno = %d(%s)\n",
errno, strerror(errno));
- exit(1);
+ HDexit(1);
}
} else if ( child_pid != -1 ) {
@@ -2056,7 +2056,7 @@ par_verify_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2233,7 +2233,7 @@ par_verify_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, dset_num);
}
@@ -2448,7 +2448,7 @@ serial_verify_dataset(int dset_num,
hid_t dset_id = -1;
hid_t filespace_id = -1;
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2603,7 +2603,7 @@ serial_verify_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
j, k, dset_num);
}
@@ -2801,7 +2801,7 @@ usage(void)
int i = 0;
while(s[i] != NULL) {
- fprintf(stdout, "%s", s[i]);
+ HDfprintf(stdout, "%s", s[i]);
i++;
}
@@ -2876,7 +2876,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if ( pass ) {
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if ( dataset_ids[i] < 0 ) {
@@ -3016,7 +3016,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
}
@@ -4137,31 +4137,31 @@ main(int argc, char **argv)
if ( pass ) {
- printf("done.\n");
+ HDprintf("done.\n");
HDfflush(stdout);
} else {
- printf("failed.\n");
- exit(1);
+ HDprintf("failed.\n");
+ HDexit(1);
}
i++;
}
HDfprintf(stdout, "Test file construction complete.\n");
- exit(0);
+ HDexit(0);
} else if ( ici ) {
if ( serial_insert_cache_image(file_idx, mpi_size) ) {
- exit(0);
+            HDexit(0);
} else {
HDfprintf(stderr, "\n\nCache image insertion failed.\n");
HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
- exit(1);
+ HDexit(1);
}
}
@@ -4178,24 +4178,24 @@ main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d:Failed to turn off atexit processing. Continue.\n",
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
mpi_rank);
};
H5open();
if ( mpi_rank == 0 ) {
- printf("===================================\n");
- printf("Parallel metadata cache image tests\n");
- printf(" mpi_size = %d\n", mpi_size);
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Parallel metadata cache image tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf("===================================\n");
}
if ( mpi_size < 2 ) {
if ( mpi_rank == 0 ) {
- printf(" Need at least 2 processes. Exiting.\n");
+ HDprintf(" Need at least 2 processes. Exiting.\n");
}
goto finish;
}
@@ -4220,7 +4220,7 @@ main(int argc, char **argv)
HDfprintf(stdout,
"execl() of setup process failed. errno = %d(%s)\n",
errno, strerror(errno));
- exit(1);
+ HDexit(1);
}
} else if ( child_pid != -1 ) {
@@ -4271,16 +4271,16 @@ finish:
MPI_Barrier(MPI_COMM_WORLD);
if ( mpi_rank == 0 ) { /* only process 0 reports */
- sleep(10);
- printf("===================================\n");
+ HDsleep(10);
+ HDprintf("===================================\n");
if ( nerrs > 0 ) {
- printf("***metadata cache image tests detected %d failures***\n",
+ HDprintf("***metadata cache image tests detected %d failures***\n",
nerrs);
}
else {
- printf("metadata cache image tests finished with no failures\n");
+ HDprintf("metadata cache image tests finished with no failures\n");
}
- printf("===================================\n");
+ HDprintf("===================================\n");
}
/* takedown_derived_types(); */
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index c6fa3d4..40cc1ca 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -1134,7 +1134,7 @@ ccslab_set(int mpi_rank,
break;
}
if (VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0]*block[1]*count[0]*count[1]));
@@ -1197,20 +1197,20 @@ ccdataset_print(hsize_t start[],
hsize_t i, j;
/* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
+ HDprintf("%03d ", *dataptr++);
}
- printf("\n");
+ HDprintf("\n");
}
}
@@ -1233,13 +1233,13 @@ ccdataset_vrfy(hsize_t start[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
+ HDprintf("original values:\n");
ccdataset_print(start, block, original);
- printf("compared values:\n");
+ HDprintf("compared values:\n");
ccdataset_print(start, block, dataset);
}
@@ -1262,7 +1262,7 @@ ccdataset_vrfy(hsize_t start[],
}
if (*dataptr != *oriptr){
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
*(oriptr), *(dataptr));
}
@@ -1272,8 +1272,8 @@ ccdataset_vrfy(hsize_t start[],
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
diff --git a/testpar/t_file.c b/testpar/t_file.c
index a3c007d..e3ce346 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -24,14 +24,14 @@
/*
* This file needs to access private information from the H5F package.
*/
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
#include "H5ACpkg.h"
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
#include "H5Cpkg.h"
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#define H5F_TESTING
#include "H5Fpkg.h"
-#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
#include "H5MFpkg.h"
#define NUM_DSETS 5
@@ -39,7 +39,7 @@
int mpi_size, mpi_rank;
static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
-static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
hsize_t page_size, size_t page_buffer_size);
/*
@@ -60,15 +60,15 @@ test_split_comm_access(void)
MPI_Info info = MPI_INFO_NULL;
int is_old, mrc;
int newrank, newprocs;
- hid_t fid; /* file IDs */
- hid_t acc_tpl; /* File access properties */
- herr_t ret; /* generic return value */
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
const char *filename;
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- printf("Split Communicator access test on file %s\n",
- filename);
+ HDprintf("Split Communicator access test on file %s\n",
+ filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -80,35 +80,35 @@ test_split_comm_access(void)
MPI_Comm_rank(comm,&newrank);
if (is_old){
- /* odd-rank processes */
- mrc = MPI_Barrier(comm);
- VRFY((mrc==MPI_SUCCESS), "");
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ VRFY((mrc==MPI_SUCCESS), "");
}else{
- /* even-rank processes */
- int sub_mpi_rank; /* rank in the sub-comm */
- MPI_Comm_rank(comm,&sub_mpi_rank);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret=H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* close the file */
- ret=H5Fclose(fid);
- VRFY((ret >= 0), "");
-
- /* delete the test file */
- if (sub_mpi_rank == 0){
- mrc = MPI_File_delete((char *)filename, info);
- /*VRFY((mrc==MPI_SUCCESS), ""); */
- }
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+ MPI_Comm_rank(comm,&sub_mpi_rank);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret=H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* close the file */
+ ret=H5Fclose(fid);
+ VRFY((ret >= 0), "");
+
+ /* delete the test file */
+ if (sub_mpi_rank == 0){
+ mrc = MPI_File_delete((char *)filename, info);
+ /*VRFY((mrc==MPI_SUCCESS), ""); */
+ }
}
mrc = MPI_Comm_free(&comm);
VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free succeeded");
@@ -126,7 +126,7 @@ test_page_buffer_access(void)
haddr_t raw_addr, meta_addr;
int *data;
H5F_t *f = NULL;
- herr_t ret; /* generic return value */
+ herr_t ret; /* generic return value */
const char *filename;
hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
@@ -136,7 +136,7 @@ test_page_buffer_access(void)
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- printf("Page Buffer Usage in Parallel %s\n", filename);
+ HDprintf("Page Buffer Usage in Parallel %s\n", filename);
fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
VRFY((fapl >= 0), "create_faccess_plist succeeded");
@@ -178,7 +178,6 @@ test_page_buffer_access(void)
/* intialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
-
if(MAINPROCESS) {
hid_t fapl_self;
@@ -400,7 +399,7 @@ test_page_buffer_access(void)
ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i=0; i < 50; i++)
VRFY((data[i] == -1), "Read different values than written");
/* close the file */
@@ -504,28 +503,28 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for(k=0 ; k<NUM_DSETS; k++) {
- sprintf(dset_name, "D1dset%d", k);
+ HDsprintf(dset_name, "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "D2dset%d", k);
+ HDsprintf(dset_name, "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "D3dset%d", k);
+ HDsprintf(dset_name, "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "dset%d", k);
+ HDsprintf(dset_name, "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -549,13 +548,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i=0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
- sprintf(dset_name, "D1dset%d", k);
+ HDsprintf(dset_name, "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- sprintf(dset_name, "D2dset%d", k);
+ HDsprintf(dset_name, "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- sprintf(dset_name, "D3dset%d", k);
+ HDsprintf(dset_name, "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
@@ -657,7 +656,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
VRFY((mem_dataspace >= 0), "");
for(k=0 ; k<NUM_DSETS; k++) {
- sprintf(dset_name, "dset%d", k);
+ HDsprintf(dset_name, "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -741,7 +740,7 @@ void
test_file_properties(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t fapl_id; /* File access plist */
+ hid_t fapl_id; /* File access plist */
hbool_t is_coll;
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 4556b01..28baed5 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -37,9 +37,9 @@ static int mpi_size, mpi_rank;
#define HS_DIM1 200
#define HS_DIM2 100
-
+
/*-------------------------------------------------------------------------
- * Function: filter_read_internal
+ * Function: filter_read_internal
*
* Purpose: Tests parallel reading of a 2D dataset written serially using
* filters. During the parallel reading phase, the dataset is
@@ -54,13 +54,13 @@ static void
filter_read_internal(const char *filename, hid_t dcpl,
hsize_t *dset_size)
{
- hid_t file, dataset; /* HDF5 IDs */
- hid_t access_plist; /* Access property list ID */
- hid_t sid, memspace; /* Dataspace IDs */
- hsize_t size[2]; /* Dataspace dimensions */
- hsize_t hs_offset[2]; /* Hyperslab offset */
- hsize_t hs_size[2]; /* Hyperslab size */
- size_t i, j; /* Local index variables */
+ hid_t file, dataset; /* HDF5 IDs */
+ hid_t access_plist; /* Access property list ID */
+ hid_t sid, memspace; /* Dataspace IDs */
+ hsize_t size[2]; /* Dataspace dimensions */
+ hsize_t hs_offset[2]; /* Hyperslab offset */
+ hsize_t hs_size[2]; /* Hyperslab size */
+ size_t i, j; /* Local index variables */
char name[32] = "dataset";
herr_t hrc; /* Error status */
int *points = NULL; /* Writing buffer for entire dataset */
@@ -151,17 +151,17 @@ filter_read_internal(const char *filename, hid_t dcpl,
for (j=0; j<hs_size[1]; j++) {
if(points[i*size[1]+(size_t)hs_offset[1]+j] !=
check[i*hs_size[1]+j]) {
- fprintf(stderr," Read different values than written.\n");
- fprintf(stderr," At index %lu,%lu\n",
- (unsigned long)(i),
- (unsigned long)(hs_offset[1]+j));
- fprintf(stderr," At original: %d\n",
- (int)points[i*size[1]+(size_t)hs_offset[1]+j]);
- fprintf(stderr," At returned: %d\n",
- (int)check[i*hs_size[1]+j]);
+ HDfprintf(stderr," Read different values than written.\n");
+ HDfprintf(stderr," At index %lu,%lu\n",
+ (unsigned long)(i),
+ (unsigned long)(hs_offset[1]+j));
+ HDfprintf(stderr," At original: %d\n",
+ (int)points[i*size[1]+(size_t)hs_offset[1]+j]);
+ HDfprintf(stderr," At returned: %d\n",
+ (int)check[i*hs_size[1]+j]);
VRFY(FALSE, "");
- }
- }
+ }
+ }
}
/* Get the storage size of the dataset */
@@ -194,10 +194,10 @@ filter_read_internal(const char *filename, hid_t dcpl,
/*-------------------------------------------------------------------------
* Function: test_filter_read
*
- * Purpose: Tests parallel reading of datasets written serially using
+ * Purpose: Tests parallel reading of datasets written serially using
* several (combinations of) filters.
*
- * Programmer: Christian Chilan
+ * Programmer: Christian Chilan
* Tuesday, May 15, 2007
*
* Modifications:
@@ -208,7 +208,7 @@ filter_read_internal(const char *filename, hid_t dcpl,
void
test_filter_read(void)
{
- hid_t dc; /* HDF5 IDs */
+ hid_t dc; /* HDF5 IDs */
const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset without filters */
unsigned chunk_opts; /* Chunk options */
@@ -236,7 +236,7 @@ test_filter_read(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Parallel reading of dataset written with filters %s\n", filename);
+ HDprintf("Parallel reading of dataset written with filters %s\n", filename);
/*----------------------------------------------------------
* STEP 0: Test without filters.
@@ -448,10 +448,10 @@ test_filter_read(void)
/* Make sure encoding is enabled */
if(h5_szip_can_encode() == 1) {
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
VRFY(hrc>=0, "H5Pset_szip");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename,dc,&combo_size);
}
/* Clean up objects used for this test */
@@ -461,25 +461,25 @@ test_filter_read(void)
/* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
/* Make sure encoding is enabled */
if(h5_szip_can_encode() == 1) {
- dc = H5Pcreate(H5P_DATASET_CREATE);
+ dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
VRFY(hrc>=0, "H5Pset_chunk");
- hrc = H5Pset_shuffle (dc);
+ hrc = H5Pset_shuffle (dc);
VRFY(hrc>=0, "H5Pset_shuffle");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
VRFY(hrc>=0, "H5Pset_szip");
- hrc = H5Pset_fletcher32 (dc);
+ hrc = H5Pset_fletcher32 (dc);
VRFY(hrc>=0, "H5Pset_fletcher32");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename,dc,&combo_size);
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
}
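For reference, a self-contained sketch of the shuffle + szip + fletcher32 pipeline exercised above, assuming the szip encoder is available; the helper name, chunk dimensions, and szip parameters are illustrative and not taken from the test:

#include "hdf5.h"

/* Hypothetical helper: builds the shuffle + szip + fletcher32 dcpl.
 * Error checking omitted for brevity. */
static hid_t
build_combo_dcpl(void)
{
    const hsize_t chunk_size[2] = {7, 27};          /* placeholder chunk dims */
    hid_t         dc;

    dc = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dc, 2, chunk_size);
    H5Pset_shuffle(dc);                             /* byte shuffle first */
    H5Pset_szip(dc, H5_SZIP_NN_OPTION_MASK, 4);     /* szip, 4 pixels per block */
    H5Pset_fletcher32(dc);                          /* checksum last */

    return dc;                                      /* caller closes with H5Pclose() */
}

Applying the checksum filter last matches the "checksum last" ordering that this part of the test verifies.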
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 7f75d20..e9f4101 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -21,7 +21,7 @@
enum obj_type { is_group, is_dset };
-static int get_size(void);
+static int get_size(void);
static void write_dataset(hid_t, hid_t, hid_t);
static int read_dataset(hid_t, hid_t, hid_t);
static void create_group_recursive(hid_t, hid_t, hid_t, int);
@@ -54,13 +54,9 @@ get_size(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
if(mpi_size > size ) {
-
if((mpi_size % 2) == 0 ) {
-
size = mpi_size;
-
} else {
-
size = mpi_size + 1;
}
}
@@ -79,7 +75,7 @@ get_size(void)
void zero_dim_dset(void)
{
int mpi_size, mpi_rank;
- const char *filename;
+ const char *filename;
hid_t fid, plist, dcpl, dsid, sid;
hsize_t dim, chunk_dim;
herr_t ret;
@@ -133,27 +129,27 @@ void zero_dim_dset(void)
 * Example of using PHDF5 to create ndatasets datasets. Each process writes
 * a slab of the array to the file.
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
+ * Changes: Updated function to use a dynamically calculated size,
+ * instead of the old SIZE #define. This should allow it
+ * to function with an arbitrary number of processors.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
void multiple_dset_write(void)
{
- int i, j, n, mpi_size, mpi_rank, size;
+ int i, j, n, mpi_size, mpi_rank, size;
hid_t iof, plist, dataset, memspace, filespace;
hid_t dcpl; /* Dataset creation property list */
hsize_t chunk_origin [DIM];
hsize_t chunk_dims [DIM], file_dims [DIM];
hsize_t count[DIM]={1,1};
- double * outme = NULL;
+ double *outme = NULL;
double fill=1.0; /* Fill value */
- char dname [100];
+ char dname [100];
herr_t ret;
- const H5Ptest_param_t *pt;
- char *filename;
- int ndatasets;
+ const H5Ptest_param_t *pt;
+ char *filename;
+ int ndatasets;
pt = GetTestParameters();
filename = pt->name;
@@ -190,23 +186,23 @@ void multiple_dset_write(void)
VRFY((ret>=0), "set fill-value succeeded");
for(n = 0; n < ndatasets; n++) {
- sprintf(dname, "dataset %d", n);
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), dname);
+ HDsprintf(dname, "dataset %d", n);
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), dname);
- /* calculate data to write */
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- outme [(i * size) + j] = n*1000 + mpi_rank;
+ /* calculate data to write */
+ for(i = 0; i < size; i++)
+ for(j = 0; j < size; j++)
+ outme [(i * size) + j] = n*1000 + mpi_rank;
- H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
+ H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
- H5Dclose(dataset);
+ H5Dclose(dataset);
#ifdef BARRIER_CHECKS
- if(!((n+1) % 10)) {
- printf("created %d datasets\n", n+1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ if(!((n+1) % 10)) {
+ HDprintf("created %d datasets\n", n+1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
#endif /* BARRIER_CHECKS */
}
@@ -221,21 +217,21 @@ void multiple_dset_write(void)
/* Example of using PHDF5 to create, write, and read compact dataset.
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
+ * Changes: Updated function to use a dynamically calculated size,
+ * instead of the old SIZE #define. This should allow it
+ * to function with an arbitrary number of processors.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
void compact_dataset(void)
{
- int i, j, mpi_size, mpi_rank, size, err_num=0;
- hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+ int i, j, mpi_size, mpi_rank, size, err_num=0;
+ hid_t iof, plist, dcpl, dxpl, dataset, filespace;
hsize_t file_dims [DIM];
- double * outme;
- double * inme;
- char dname[]="dataset";
- herr_t ret;
+ double *outme;
+ double *inme;
+ char dname[]="dataset";
+ herr_t ret;
const char *filename;
size = get_size();
@@ -278,15 +274,15 @@ void compact_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* Recalculate data to write. Each process writes the same data. */
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
- outme[(i * size) + j] =(i + j) * 1000;
+ outme[(i * size) + j] =(i + j) * 1000;
ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -308,8 +304,8 @@ void compact_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
@@ -338,7 +334,7 @@ void compact_dataset(void)
for(j = 0; j < size; j++)
if(inme[(i * size) + j] != outme[(i * size) + j])
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
+ HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
H5Pclose(plist);
H5Pclose(dxpl);
@@ -352,24 +348,24 @@ void compact_dataset(void)
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
* JRM - 8/24/04
*/
void null_dataset(void)
{
- int mpi_size, mpi_rank;
- hid_t iof, plist, dxpl, dataset, attr, sid;
+ int mpi_size, mpi_rank;
+ hid_t iof, plist, dxpl, dataset, attr, sid;
unsigned uval=2; /* Buffer for writing to dataset */
- int val=1; /* Buffer for writing to attribute */
- int nelem;
- char dname[]="dataset";
- char attr_name[]="attribute";
- herr_t ret;
+ int val=1; /* Buffer for writing to attribute */
+ int nelem;
+ char dname[]="dataset";
+ char attr_name[]="attribute";
+ herr_t ret;
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -397,8 +393,8 @@ void null_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
@@ -431,8 +427,8 @@ void null_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
@@ -464,11 +460,11 @@ void null_dataset(void)
 * sizes (2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
* JRM - 8/11/04
*/
@@ -592,13 +588,13 @@ void big_dataset(void)
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
- * Also added code to free dynamically allocated buffers.
+ * Also added code to free dynamically allocated buffers.
*
* JRM - 8/11/04
*/
@@ -619,7 +615,7 @@ void dataset_fillvalue(void)
hsize_t dset_size; /* Dataset size */
int *rdata, *wdata; /* Buffers for data to read and write */
int *twdata, *trdata; /* Temporary pointer into buffer */
- int acc, i, j, k, l, ii; /* Local index variables */
+ int acc, i, ii, j, k, l; /* Local index variables */
herr_t ret; /* Generic return value */
const char *filename;
@@ -707,11 +703,11 @@ void dataset_fillvalue(void)
for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
if(*trdata != 0)
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(err_num) {
- printf("%d errors found in check_value\n", err_num);
+ HDprintf("%d errors found in check_value\n", err_num);
nerrors++;
}
}
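The fill-value checks above use the tests' capped error-reporting idiom; a condensed, hypothetical helper showing the same pattern (MAX_ERR_REPORT, VERBOSE_MED, and HDprintf come from the parallel test headers; verify_filled itself is not part of the patch):

/* Illustrative helper: report mismatches, but stop printing after
 * MAX_ERR_REPORT errors unless medium verbosity is enabled. */
static int
verify_filled(const int *rdata, size_t nelem)
{
    int    err_num = 0;
    size_t i;

    for (i = 0; i < nelem; i++)
        if (rdata[i] != 0)                          /* expect the default fill value */
            if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
                HDprintf("Verify failed at [%lu]: expect 0, got %d\n",
                         (unsigned long)i, rdata[i]);

    if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
        HDprintf("[more errors ...]\n");

    return err_num;
}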
@@ -798,21 +794,21 @@ void dataset_fillvalue(void)
if(i<mpi_size) {
if(*twdata != *trdata )
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
} /* end if */
else {
if(*trdata != 0)
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
} /* end else */
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(err_num){
- printf("%d errors found in check_value\n", err_num);
+ HDprintf("%d errors found in check_value\n", err_num);
nerrors++;
}
}
-
+
/* Close all file objects */
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose succeeded");
@@ -866,8 +862,8 @@ void collective_group_write(void)
hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
herr_t ret1, ret2;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -912,11 +908,11 @@ void collective_group_write(void)
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for(m = 0; m < ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
- sprintf(dname, "dataset%d", m);
+ HDsprintf(dname, "dataset%d", m);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -932,7 +928,7 @@ void collective_group_write(void)
#ifdef BARRIER_CHECKS
if(!((m+1) % 10)) {
- printf("created %d groups\n", m+1);
+ HDprintf("created %d groups\n", m+1);
MPI_Barrier(MPI_COMM_WORLD);
}
#endif /* BARRIER_CHECKS */
@@ -954,8 +950,8 @@ void independent_group_read(void)
int mpi_rank, m;
hid_t plist, fid;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -989,9 +985,9 @@ void independent_group_read(void)
* instead of the old SIZE #define. This should allow it
* to function with an arbitrary number of processors.
*
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memeory
- * when we are done with it.
+ * Also added code to verify the results of dynamic memory
+ *        allocations, and to free dynamically allocated memory
+ * when we are done with it.
*
* JRM - 8/16/04
*/
@@ -1013,12 +1009,12 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
/* open every group under root group. */
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- sprintf(dname, "dataset%d", m);
+ HDsprintf(dname, "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did>0), dname);
@@ -1083,8 +1079,8 @@ void multiple_group_write(void)
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
herr_t ret;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1119,23 +1115,23 @@ void multiple_group_write(void)
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
for(m = 0; m < ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* create attribute for these groups. */
- write_attribute(gid, is_group, m);
+ write_attribute(gid, is_group, m);
if(m != 0)
- write_dataset(memspace, filespace, gid);
+ write_dataset(memspace, filespace, gid);
H5Gclose(gid);
#ifdef BARRIER_CHECKS
if(!((m+1) % 10)) {
- printf("created %d groups\n", m+1);
+ HDprintf("created %d groups\n", m+1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
}
@@ -1181,13 +1177,13 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outme != NULL), "HDmalloc succeeded for outme");
for(n = 0; n < NDATASET; n++) {
- sprintf(dname, "dataset%d", n);
+ HDsprintf(dname, "dataset%d", n);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((did > 0), dname);
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
- outme[(i * size) + j] = n * 1000 + mpi_rank;
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
@@ -1214,12 +1210,12 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
#ifdef BARRIER_CHECKS
if(!((counter+1) % 10)) {
- printf("created %dth child groups\n", counter+1);
+ HDprintf("created %dth child groups\n", counter+1);
MPI_Barrier(MPI_COMM_WORLD);
}
#endif /* BARRIER_CHECKS */
- sprintf(gname, "%dth_child_group", counter+1);
+ HDsprintf(gname, "%dth_child_group", counter+1);
child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
@@ -1251,8 +1247,8 @@ void multiple_group_read(void)
hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1280,19 +1276,19 @@ void multiple_group_read(void)
/* open every group under root group. */
for(m=0; m<ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
if(m != 0)
if((error_num = read_dataset(memspace, filespace, gid))>0)
- nerrors += error_num;
+ nerrors += error_num;
/* check attribute.*/
error_num = 0;
if((error_num = read_attribute(gid, is_group, m))>0 )
- nerrors += error_num;
+ nerrors += error_num;
H5Gclose(gid);
@@ -1344,7 +1340,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
for(n=0; n<NDATASET; n++) {
- sprintf(dname, "dataset%d", n);
+ HDsprintf(dname, "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did>0), dname);
@@ -1353,10 +1349,10 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
/* this is the original value */
for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
- *outdata = n*1000 + mpi_rank;
+ for(j=0; j<size; j++) {
+ *outdata = n*1000 + mpi_rank;
outdata++;
- }
+ }
outdata -= size * size;
/* compare the original value(outdata) to the value in file(indata).*/
@@ -1396,7 +1392,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
nerrors += err_num;
if(counter < GROUP_DEPTH ) {
- sprintf(gname, "%dth_child_group", counter+1);
+ HDsprintf(gname, "%dth_child_group", counter+1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
VRFY((child_gid>0), gname);
recursive_read_group(memspace, filespace, child_gid, counter+1);
@@ -1418,7 +1414,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if(this_type == is_group) {
- sprintf(attr_name, "Group Attribute %d", num);
+ HDsprintf(attr_name, "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(aid, H5T_NATIVE_INT, &num);
@@ -1426,7 +1422,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Sclose(sid);
} /* end if */
else if(this_type == is_dset) {
- sprintf(attr_name, "Dataset Attribute %d", num);
+ HDsprintf(attr_name, "Dataset Attribute %d", num);
for(i=0; i<8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
@@ -1450,23 +1446,23 @@ read_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if(this_type == is_group) {
- sprintf(attr_name, "Group Attribute %d", num);
+ HDsprintf(attr_name, "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
- }
+ }
H5Aclose(aid);
}
else if(this_type == is_dset) {
- sprintf(attr_name, "Dataset Attribute %d", num);
+ HDsprintf(attr_name, "Dataset Attribute %d", num);
for(i=0; i<8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, in_data);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
- }
+ }
H5Aclose(aid);
}
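A condensed sketch of the group branch of write_attribute() shown above -- a scalar integer attribute named after the group index. The helper name is illustrative; error checking is omitted:

#include "hdf5.h"

/* Hypothetical helper mirroring the is_group branch of write_attribute(). */
static void
write_group_attribute(hid_t gid, int num)
{
    char  attr_name[32];
    hid_t sid, aid;

    HDsprintf(attr_name, "Group Attribute %d", num);
    sid = H5Screate(H5S_SCALAR);
    aid = H5Acreate2(gid, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(aid, H5T_NATIVE_INT, &num);
    H5Aclose(aid);
    H5Sclose(sid);
}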
@@ -1476,11 +1472,11 @@ read_attribute(hid_t obj_id, int this_type, int num)
/* This function compares the original data with the read-in data for its
* hyperslab part only by process ID.
*
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
+ * Changes: Modified function to use a passed in size parameter
+ * instead of the old SIZE #define. This should let us
+ * run with an arbitrary number of processes.
*
- * JRM - 8/16/04
+ * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
@@ -1500,23 +1496,23 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++)
for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
if(*indata != *outdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
- }
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
+ }
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(err_num)
- printf("%d errors found in check_value\n", err_num);
+ HDprintf("%d errors found in check_value\n", err_num);
return err_num;
}
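A hedged sketch of a row-block decomposition consistent with the per-rank checks above; the test's own get_slab(), described next, is the authoritative version and may differ in detail. row_block_slab is a made-up name, and the sketch ignores any remainder rows:

#include "hdf5.h"

/* Illustrative only: split a size x size dataset into one block of whole
 * rows per rank. */
static void
row_block_slab(int mpi_rank, int mpi_size, int size,
               hsize_t chunk_origin[2], hsize_t chunk_dims[2])
{
    hsize_t rows = (hsize_t)(size / mpi_size);    /* rows per rank */

    chunk_origin[0] = (hsize_t)mpi_rank * rows;   /* starting row for this rank */
    chunk_origin[1] = 0;
    chunk_dims[0]   = rows;
    chunk_dims[1]   = (hsize_t)size;
}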
/* Decide the portion of data chunk in dataset by process ID.
*
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
+ * Changes: Modified function to use a passed in size parameter
+ * instead of the old SIZE #define. This should let us
+ * run with an arbitrary number of processes.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
static void
@@ -1560,7 +1556,7 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
* on failure.
* JRM - 9/13/04
*
- * Changes: None.
+ * Changes: None.
*/
#define N 4
@@ -1595,10 +1591,10 @@ void io_mode_confusion(void)
* test bed related variables
*/
- const char * fcn_name = "io_mode_confusion";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char * fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t * pt;
+ char * filename;
pt = GetTestParameters();
@@ -1799,13 +1795,13 @@ void io_mode_confusion(void)
/*
* At present, the object header code maintains an image of its on disk
 * representation, which is updated as necessary instead of generated on
- * request.
+ * request.
*
 * Prior to the fix that this test is designed to verify, the image of the
* on disk representation was only updated on flush -- not when the object
* header was marked clean.
*
- * This worked perfectly well as long as all writes of a given object
+ * This worked perfectly well as long as all writes of a given object
* header were written from a single process. However, with the implementation
 * of round robin metadata writes in parallel HDF5, this is no longer
* the case -- it is possible for a given object header to be flushed from
@@ -1813,14 +1809,14 @@ void io_mode_confusion(void)
* clean in all other processes on each flush. This resulted in NULL or
 * out-of-date object header information being written to disk.
*
- * To repair this, I modified the object header code to update its
- * on disk image both on flush on when marked clean.
+ * To repair this, I modified the object header code to update its
+ *        on disk image both on flush and when marked clean.
*
* This test is directed at verifying that the fix performs as expected.
*
* The test functions by creating a HDF5 file with several small datasets,
- * and then flushing the file. This should result of at least one of
- * the associated object headers being flushed by a process other than
+ *        and then flushing the file. This should result in at least one of
+ * the associated object headers being flushed by a process other than
* process 0.
*
* Then for each data set, add an attribute and flush the file again.
@@ -1830,26 +1826,26 @@ void io_mode_confusion(void)
 * Open each of the data sets in turn. If all opens are successful,
* the test passes. Otherwise the test fails.
*
- * Note that this test will probably become irrelevent shortly, when we
+ *        Note that this test will probably become irrelevant shortly, when we
* land the journaling modifications on the trunk -- at which point all
* cache clients will have to construct on disk images on demand.
*
- * JRM -- 10/13/10
+ * JRM -- 10/13/10
*
* Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
+ *        Break it into two parts, a writer to write the file and a reader
+ *        to verify the correctness of the writer. AKC -- 2010/10/27
*/
-#define NUM_DATA_SETS 4
-#define LOCAL_DATA_SIZE 4
-#define LARGE_ATTR_SIZE 256
+#define NUM_DATA_SETS 4
+#define LOCAL_DATA_SIZE 4
+#define LARGE_ATTR_SIZE 256
/* Since all even and odd processes are split into writer and reader comm
* respectively, process 0 and 1 in COMM_WORLD become the root process of
* the writer and reader comm respectively.
*/
-#define Writer_Root 0
-#define Reader_Root 1
+#define Writer_Root 0
+#define Reader_Root 1
#define Reader_wait(mpi_err, xsteps) \
mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
#define Reader_result(mpi_err, xsteps_done) \
@@ -1861,26 +1857,26 @@ void io_mode_confusion(void)
/* object names used by both rr_obj_hdr_flush_confusion and
* rr_obj_hdr_flush_confusion_reader.
*/
-const char * dataset_name[NUM_DATA_SETS] =
- {
- "dataset_0",
- "dataset_1",
- "dataset_2",
- "dataset_3"
+const char * dataset_name[NUM_DATA_SETS] =
+ {
+ "dataset_0",
+ "dataset_1",
+ "dataset_2",
+ "dataset_3"
};
-const char * att_name[NUM_DATA_SETS] =
- {
- "attribute_0",
- "attribute_1",
- "attribute_2",
- "attribute_3"
+const char * att_name[NUM_DATA_SETS] =
+ {
+ "attribute_0",
+ "attribute_1",
+ "attribute_2",
+ "attribute_3"
};
-const char * lg_att_name[NUM_DATA_SETS] =
- {
- "large_attribute_0",
- "large_attribute_1",
- "large_attribute_2",
- "large_attribute_3"
+const char * lg_att_name[NUM_DATA_SETS] =
+ {
+ "large_attribute_0",
+ "large_attribute_1",
+ "large_attribute_2",
+ "large_attribute_3"
};
void rr_obj_hdr_flush_confusion(void)
@@ -1889,14 +1885,14 @@ void rr_obj_hdr_flush_confusion(void)
/* private communicator size and rank */
int mpi_size;
int mpi_rank;
- int mrc; /* mpi error code */
- int is_reader; /* 1 for reader process; 0 for writer process. */
+ int mrc; /* mpi error code */
+ int is_reader; /* 1 for reader process; 0 for writer process. */
MPI_Comm comm;
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion";
- const hbool_t verbose = FALSE;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion";
+ const hbool_t verbose = FALSE;
/* Create two new private communicators from MPI_COMM_WORLD.
* Even and odd ranked processes go to comm_writers and comm_readers
@@ -1919,9 +1915,9 @@ void rr_obj_hdr_flush_confusion(void)
* step. When all steps are done, they inform readers to end.
*/
if (is_reader)
- rr_obj_hdr_flush_confusion_reader(comm);
+ rr_obj_hdr_flush_confusion_reader(comm);
else
- rr_obj_hdr_flush_confusion_writer(comm);
+ rr_obj_hdr_flush_confusion_writer(comm);
MPI_Comm_free(&comm);
if(verbose )
@@ -1965,16 +1961,16 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* private communicator size and rank */
int mpi_size;
int mpi_rank;
- int mrc; /* mpi error code */
+ int mrc; /* mpi error code */
/* steps to verify and have been verified */
int steps = 0;
int steps_done = 0;
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t * pt;
+ char * filename;
/*
* setup test bed related variables:
@@ -2008,7 +2004,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
+ HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
mpi_rank, fcn_name, filename);
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -2023,7 +2019,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
+ HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
mpi_rank, fcn_name);
disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
@@ -2032,15 +2028,15 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
disk_space[i] = H5Screate_simple(1, disk_size, NULL);
- VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
+ VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
- dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
+ dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
}
- /*
+ /*
* setup data transfer property list
*/
@@ -2051,11 +2047,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
+ VRFY((err >= 0),
"H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
- /*
- * write data to the data sets
+ /*
+ * write data to the data sets
*/
if(verbose )
@@ -2071,22 +2067,22 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
NULL, disk_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
mem_start, NULL, mem_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
disk_space[i], dxpl_id, data);
VRFY((err >= 0), "H5Dwrite(1) failed.\n");
for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
+ data[j] *= 10.0;
}
- /*
+ /*
* close the data spaces
*/
@@ -2102,12 +2098,12 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 1: create the data sets and write data. */
- /*
+ /*
* flush the metadata cache
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(1) failed.\n");
@@ -2131,7 +2127,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
att_space[i] = H5Screate_simple(1, att_size, NULL);
VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
- att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
+ att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
@@ -2142,11 +2138,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
/*
- * close attribute IDs and spaces
+ * close attribute IDs and spaces
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
+ HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
mpi_rank, fcn_name);
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
@@ -2159,12 +2155,12 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 2: write attributes to each dataset */
- /*
+ /*
* flush the metadata cache again
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(2) failed.\n");
@@ -2178,7 +2174,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
+ HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
mpi_rank, fcn_name);
lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
@@ -2190,7 +2186,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
- lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
+ lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
@@ -2199,21 +2195,21 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
lg_att[j] /= 10.0;
}
}
-
+
/* Step 3: write large attributes to each dataset */
- /*
+ /*
* flush the metadata cache yet again to clean the object headers.
*
 * This is an attempt to create a situation where we have dirty
 * object header continuation chunks, but clean object headers
* to verify a speculative bug fix -- it doesn't seem to work,
- * but I will leave the code in anyway, as the object header
+ * but I will leave the code in anyway, as the object header
* code is going to change a lot in the near future.
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2227,7 +2223,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
+ HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
mpi_rank, fcn_name);
for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
@@ -2244,11 +2240,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 4: write different large attributes to each dataset */
- /*
+ /*
* flush the metadata cache again
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2260,11 +2256,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* Step 5: Close all objects and the file */
/*
- * close large attribute IDs and spaces
+ * close large attribute IDs and spaces
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
+ HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
mpi_rank, fcn_name);
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
@@ -2276,7 +2272,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
- /*
+ /*
* close the data sets
*/
@@ -2308,7 +2304,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
err = H5Fclose(file_id);
VRFY((err >= 0 ), "H5Fclose(1) failed");
-
+
/* End of Step 5: Close all objects and the file */
/* Tell the reader to check the file up to steps. */
steps++;
@@ -2359,15 +2355,15 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* private communicator size and rank */
int mpi_size;
int mpi_rank;
- int mrc; /* mpi error code */
- int steps = -1; /* How far (steps) to verify the file */
- int steps_done = -1; /* How far (steps) have been verified */
+ int mrc; /* mpi error code */
+ int steps = -1; /* How far (steps) to verify the file */
+ int steps_done = -1; /* How far (steps) have been verified */
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t * pt;
+ char * filename;
/*
* setup test bed related variables:
@@ -2384,291 +2380,291 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* Repeatedly re-open the file and verify its contents until it is */
/* told to end (when steps=0). */
while (steps_done != 0){
- Reader_wait(mrc, steps);
- VRFY((mrc >= 0), "Reader_wait failed");
- steps_done = 0;
+ Reader_wait(mrc, steps);
+ VRFY((mrc >= 0), "Reader_wait failed");
+ steps_done = 0;
- if (steps > 0 ){
- /*
- * Set up file access property list with parallel I/O access
- */
+ if (steps > 0 ){
+ /*
+ * Set up file access property list with parallel I/O access
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
+ mpi_rank, fcn_name);
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
- err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
- /*
- * Create a new file collectively and release property list identifier.
- */
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
- mpi_rank, fcn_name, filename);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
+ mpi_rank, fcn_name, filename);
- file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
- VRFY((file_id >= 0 ), "H5Fopen() failed");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0 ), "H5Fopen() failed");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
#if 1
- if (steps >= 1){
- /*=====================================================*
- * Step 1: open the data sets and read data.
- *=====================================================*/
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
- mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = -1;
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
- VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
- disk_space[i] = H5Dget_space(dataset[i]);
- VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
- }
-
- /*
- * setup data transfer property list
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
- err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
- "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
-
- /*
- * read data from the data sets
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
-
- disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
-
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
-
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
-
- /* set up expected data for verification */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
- data[j] = (double)(mpi_rank + 1);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
- mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data_read);
- VRFY((err >= 0), "H5Dread(1) failed.\n");
-
- /* compare read data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (data_read[j] != data[j]){
- HDfprintf(stdout,
- "%0d:%s: Reading datasets value failed in "
- "Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, data[j], data_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
- }
-
- /*
- * close the data spaces
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sclose(disk_space[i]);
- VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
- err = H5Sclose(mem_space[i]);
- VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
- }
- steps_done++;
- }
- /* End of Step 1: open the data sets and read data. */
+ if (steps >= 1){
+ /*=====================================================*
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
+ mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = -1;
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
+ VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
+ disk_space[i] = H5Dget_space(dataset[i]);
+ VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0),
+ "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * read data from the data sets
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ /* set up expected data for verification */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ NULL, disk_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ disk_space[i], dxpl_id, data_read);
+ VRFY((err >= 0), "H5Dread(1) failed.\n");
+
+ /* compare read data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (data_read[j] != data[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 1: open the data sets and read data. */
#endif
#if 1
- /*=====================================================*
- * Step 2: reading attributes from each dataset
- *=====================================================*/
-
- if (steps >= 2){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
-
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
-
- att[j] = (double)(j + 1);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- hid_t att_id, att_type;
-
- att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
- VRFY((att_id >= 0), "H5Aopen failed.\n");
- att_type = H5Aget_type(att_id);
- VRFY((att_type >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify attribute size before H5Aread */
- err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (att_read[j] != att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, att[j], att_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
-
- att[j] /= 10.0;
- }
- }
- err = H5Aclose(att_id);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- steps_done++;
- }
- /* End of Step 2: reading attributes from each dataset */
+ /*=====================================================*
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
+
+ if (steps >= 2){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
+
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+
+ att[j] = (double)(j + 1);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ hid_t att_id, att_type;
+
+ att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
+ VRFY((att_id >= 0), "H5Aopen failed.\n");
+ att_type = H5Aget_type(att_id);
+ VRFY((att_type >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }else{
+ /* should verify attribute size before H5Aread */
+ err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (att_read[j] != att[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+
+ att[j] /= 10.0;
+ }
+ }
+ err = H5Aclose(att_id);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 2: reading attributes from each dataset */
#endif
#if 1
- /*=====================================================*
- * Step 3 or 4: read large attributes from each dataset.
- * Step 4 has different attribute value from step 3.
- *=====================================================*/
-
- if (steps >= 3){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
-
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
-
- lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
- VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
- lg_att_type[i] = H5Aget_type(lg_att_id[i]);
- VRFY((err >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify large attribute size before H5Aread */
- err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
- if (lg_att_read[j] != lg_att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
-
- lg_att[j] /= 10.0;
- }
- }
- err = H5Tclose(lg_att_type[i]);
- VRFY((err >= 0), "H5Tclose failed.\n");
- err = H5Aclose(lg_att_id[i]);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- /* Both step 3 and 4 use this same read checking code. */
- steps_done = (steps==3) ? 3 : 4;
- }
-
- /* End of Step 3 or 4: read large attributes from each dataset */
+ /*=====================================================*
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
+
+ if (steps >= 3){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
+
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+
+ lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
+ lg_att_type[i] = H5Aget_type(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }else{
+ /* should verify large attribute size before H5Aread */
+ err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
+ if (lg_att_read[j] != lg_att[j]){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+
+ lg_att[j] /= 10.0;
+ }
+ }
+ err = H5Tclose(lg_att_type[i]);
+ VRFY((err >= 0), "H5Tclose failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ /* Both step 3 and 4 use this same read checking code. */
+ steps_done = (steps==3) ? 3 : 4;
+ }
+
+ /* End of Step 3 or 4: read large attributes from each dataset */
#endif
- /*=====================================================*
- * Step 5: read all objects from the file
- *=====================================================*/
- if (steps>=5){
- /* nothing extra to verify. The file is closed normally. */
- /* Just increment steps_done */
- steps_done++;
- }
-
- /*
- * Close the data sets
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
- mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- if ( dataset[i] >= 0 ) {
- err = H5Dclose(dataset[i]);
- VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
- }
- }
-
- /*
- * close the data transfer property list.
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
-
- err = H5Pclose(dxpl_id);
- VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
-
- /*
- * Close the file
- */
- if(verbose)
- HDfprintf(stdout, "%0d:%s: closing file again.\n",
- mpi_rank, fcn_name);
- err = H5Fclose(file_id);
- VRFY((err >= 0 ), "H5Fclose(1) failed");
-
- } /* else if (steps_done==0) */
- Reader_result(mrc, steps_done);
+ /*=====================================================*
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps>=5){
+ /* nothing extra to verify. The file is closed normally. */
+ /* Just increment steps_done */
+ steps_done++;
+ }
+
+ /*
+ * Close the data sets
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
+ mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ if ( dataset[i] >= 0 ) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close the file
+ */
+ if(verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n",
+ mpi_rank, fcn_name);
+ err = H5Fclose(file_id);
+ VRFY((err >= 0 ), "H5Fclose(1) failed");
+
+ } /* else if (steps_done==0) */
+ Reader_result(mrc, steps_done);
} /* end while(1) */
if(verbose )
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 94ecbfa..890a918 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -53,7 +53,7 @@ static int test_mpio_overlap_writes(char *filename) {
MPI_Status mpi_stat;
if (VERBOSE_MED)
- printf("MPIO independent overlapping writes test on file %s\n",
+ HDprintf("MPIO independent overlapping writes test on file %s\n",
filename);
nerrs = 0;
@@ -64,8 +64,8 @@ static int test_mpio_overlap_writes(char *filename) {
/* Need at least 2 processes */
if (mpi_size < 2) {
if (MAINPROCESS)
- printf("Need at least 2 processes to run MPIO test.\n");
- printf(" -SKIP- \n");
+ HDprintf("Need at least 2 processes to run MPIO test.\n");
+ HDprintf(" -SKIP- \n");
return 0;
}
@@ -138,13 +138,13 @@ static int test_mpio_overlap_writes(char *filename) {
expected = (unsigned char) (mpi_off + i);
if ((expected != buf[i])
&& (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
- printf(
+ HDprintf(
"proc %d: found data error at [%ld], expect %u, got %u\n",
mpi_rank, (long) (mpi_off + i), expected, buf[i]);
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("proc %d: [more errors ...]\n", mpi_rank);
+ HDprintf("proc %d: [more errors ...]\n", mpi_rank);
nerrs += vrfyerrs;
}
@@ -204,7 +204,7 @@ static int test_mpio_gb_file(char *filename) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (VERBOSE_MED)
- printf("MPI_Offset range test\n");
+ HDprintf("MPI_Offset range test\n");
/* figure out the signness and sizeof MPI_Offset */
mpi_off = 0;
@@ -216,10 +216,10 @@ static int test_mpio_gb_file(char *filename) {
* sizes.
*/
if (MAINPROCESS) { /* only process 0 needs to check it*/
- printf("MPI_Offset is %s %d bytes integeral type\n",
+        HDprintf("MPI_Offset is %s %d bytes integral type\n",
is_signed ? "signed" : "unsigned", (int) sizeof(MPI_Offset));
if (sizeof_mpi_offset <= 4 && is_signed) {
- printf("Skipped 2GB range test "
+ HDprintf("Skipped 2GB range test "
"because MPI_Offset cannot support it\n");
} else {
/* verify correctness of assigning 2GB sizes */
@@ -241,7 +241,7 @@ static int test_mpio_gb_file(char *filename) {
}
if (sizeof_mpi_offset <= 4) {
- printf("Skipped 4GB range test "
+ HDprintf("Skipped 4GB range test "
"because MPI_Offset cannot support it\n");
} else {
/* verify correctness of assigning 4GB sizes */
@@ -267,10 +267,10 @@ static int test_mpio_gb_file(char *filename) {
* Verify if we can write to a file of multiple GB sizes.
*/
if (VERBOSE_MED)
- printf("MPIO GB file test %s\n", filename);
+ HDprintf("MPIO GB file test %s\n", filename);
if (sizeof_mpi_offset <= 4) {
- printf("Skipped GB file range test "
+ HDprintf("Skipped GB file range test "
"because MPI_Offset cannot support it\n");
} else {
buf = (char *) HDmalloc(MB);
@@ -286,7 +286,7 @@ static int test_mpio_gb_file(char *filename) {
MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN");
- printf("MPIO GB file write test %s\n", filename);
+ HDprintf("MPIO GB file write test %s\n", filename);
/* instead of writing every bytes of the file, we will just write
* some data around the 2 and 4 GB boundaries. That should cover
@@ -328,9 +328,9 @@ static int test_mpio_gb_file(char *filename) {
*/
/* open it again to verify the data written */
/* but only if there was no write errors */
- printf("MPIO GB file read test %s\n", filename);
+ HDprintf("MPIO GB file read test %s\n", filename);
if (errors_sum(writerrs) > 0) {
- printf("proc %d: Skip read test due to previous write errors\n",
+ HDprintf("proc %d: Skip read test due to previous write errors\n",
mpi_rank);
goto finish;
}
@@ -356,14 +356,14 @@ static int test_mpio_gb_file(char *filename) {
for (j = 0; j < MB; j++) {
if ((*(buf + j) != expected)
&& (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
- printf(
+ HDprintf(
"proc %d: found data error at [%ld+%d], expect %d, got %d\n",
mpi_rank, (long) mpi_off, j, expected,
*(buf + j));
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("proc %d: [more errors ...]\n", mpi_rank);
+ HDprintf("proc %d: [more errors ...]\n", mpi_rank);
nerrs += vrfyerrs;
}
@@ -380,7 +380,7 @@ static int test_mpio_gb_file(char *filename) {
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
- printf("Test if MPI_File_get_size works correctly with %s\n", filename);
+ HDprintf("Test if MPI_File_get_size works correctly with %s\n", filename);
mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info,
&fh);
@@ -428,7 +428,7 @@ static int test_mpio_gb_file(char *filename) {
*/
#define DIMSIZE 32 /* Dimension size. */
-#define PRINTID printf("Proc %d: ", mpi_rank)
+#define PRINTID HDprintf("Proc %d: ", mpi_rank)
#define USENONE 0
#define USEATOM 1 /* request atomic I/O */
#define USEFSYNC 2 /* request file_sync */
@@ -452,24 +452,24 @@ static int test_mpio_1wMr(char *filename, int special_request) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (MAINPROCESS && VERBOSE_MED) {
- printf("Testing one process writes, all processes read.\n");
- printf("Using %d processes accessing file %s\n", mpi_size, filename);
- printf(" (Filename can be specified via program argument)\n");
+ HDprintf("Testing one process writes, all processes read.\n");
+ HDprintf("Using %d processes accessing file %s\n", mpi_size, filename);
+ HDprintf(" (Filename can be specified via program argument)\n");
}
/* show the hostname so that we can tell where the processes are running */
if (VERBOSE_DEF) {
#ifdef H5_HAVE_GETHOSTNAME
if(HDgethostname(hostname, sizeof(hostname)) < 0) {
- printf("gethostname failed\n");
+ HDprintf("gethostname failed\n");
hostname[0] = '\0';
}
#else
- printf("gethostname unavailable\n");
+ HDprintf("gethostname unavailable\n");
hostname[0] = '\0';
#endif
PRINTID;
- printf("hostname=%s\n", hostname);
+ HDprintf("hostname=%s\n", hostname);
}
/* Delete any old file in order to start anew. */
@@ -483,7 +483,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
!= MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
@@ -495,29 +495,29 @@ static int test_mpio_1wMr(char *filename, int special_request) {
if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
}
if (VERBOSE_HI)
- printf("Initial atomicity = %d\n", atomicity);
+ HDprintf("Initial atomicity = %d\n", atomicity);
if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
}
if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
}
if (VERBOSE_HI)
- printf("After set_atomicity atomicity = %d\n", atomicity);
+ HDprintf("After set_atomicity atomicity = %d\n", atomicity);
}
/* This barrier is not necessary but do it anyway. */
MPI_Barrier(MPI_COMM_WORLD);
if (VERBOSE_HI) {
PRINTID;
- printf("between MPI_Barrier and MPI_File_write_at\n");
+ HDprintf("between MPI_Barrier and MPI_File_write_at\n");
}
/* ==================================================
@@ -533,13 +533,13 @@ static int test_mpio_1wMr(char *filename, int special_request) {
if (mpi_rank == irank) {
if (VERBOSE_HI) {
PRINTID;
- printf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off);
+ HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off);
}
if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
(long) mpi_off, DIMSIZE, mpi_err_str);
return 1;
};
@@ -550,7 +550,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD);
if (VERBOSE_HI) {
PRINTID;
- printf("MPI_Bcast: mpi_err = %d\n", mpi_err);
+ HDprintf("MPI_Bcast: mpi_err = %d\n", mpi_err);
}
if (special_request & USEFSYNC) {
@@ -559,19 +559,19 @@ static int test_mpio_1wMr(char *filename, int special_request) {
* should not need this.
* ==================================================*/
if (VERBOSE_HI)
- printf("Apply MPI_File_sync\n");
+ HDprintf("Apply MPI_File_sync\n");
/* call file_sync to force the write out */
if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_sync failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str);
}
MPI_Barrier(MPI_COMM_WORLD);
/* call file_sync to force the write out */
if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_sync failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str);
}
}
@@ -580,7 +580,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
MPI_Barrier(MPI_COMM_WORLD);
if (VERBOSE_HI) {
PRINTID;
- printf("after MPI_Barrier\n");
+ HDprintf("after MPI_Barrier\n");
}
/* ==================================================
@@ -592,7 +592,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
&mpi_stat)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
PRINTID;
- printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
+ HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
(long) mpi_off, DIMSIZE, mpi_err_str);
return 1;
};
@@ -600,7 +600,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
expect_val = irank * DIMSIZE + i;
if (readdata[i] != expect_val) {
PRINTID;
- printf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
+ HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
readdata[i], expect_val);
nerrs++;
}
@@ -610,7 +610,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
if (VERBOSE_HI) {
PRINTID;
- printf("%d data errors detected\n", nerrs);
+ HDprintf("%d data errors detected\n", nerrs);
}
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
@@ -664,7 +664,7 @@ static int test_mpio_1wMr(char *filename, int special_request) {
2. This test will verify whether the complicated derived datatype is working on
the current platform.
- If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change
+   If this bug has been fixed in the previous not-working package, this test will issue an HDprintf message to tell the developer to change
the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections.
If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
@@ -703,7 +703,7 @@ static int test_mpio_derived_dtype(char *filename) {
MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
!= MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
@@ -717,13 +717,13 @@ static int test_mpio_derived_dtype(char *filename) {
if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
}
@@ -733,13 +733,13 @@ static int test_mpio_derived_dtype(char *filename) {
if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
&filetypenew)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_Type_commit(&filetypenew)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
}
@@ -754,52 +754,52 @@ static int test_mpio_derived_dtype(char *filename) {
if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp,
bas_filetype, &adv_filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_Type_commit(&adv_filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native",
MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status))
!= MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_write failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_write failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_close failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY,
MPI_INFO_NULL, &fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native",
MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
}
if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status))
!= MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_read failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_read failed (%s)\n", mpi_err_str);
return 1;
}
@@ -807,9 +807,9 @@ static int test_mpio_derived_dtype(char *filename) {
retcode = 0;
} else {
/* if(mpi_rank == 0) {
- printf("complicated derived datatype is NOT working at this platform\n");
- printf("go back to hdf5/config and find the corresponding\n");
- printf("configure-specific file and change ?????\n");
+ HDprintf("complicated derived datatype is NOT working at this platform\n");
+ HDprintf("go back to hdf5/config and find the corresponding\n");
+ HDprintf("configure-specific file and change ?????\n");
}
*/
retcode = -1;
@@ -817,16 +817,16 @@ static int test_mpio_derived_dtype(char *filename) {
if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_close failed (%s)\n", mpi_err_str);
return 1;
}
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
if (retcode == -1) {
if (mpi_rank == 0) {
- printf(
+ HDprintf(
"Complicated derived datatype is NOT working at this platform\n");
- printf(" Please report to help@hdfgroup.org about this problem.\n");
+ HDprintf(" Please report to help@hdfgroup.org about this problem.\n");
}
retcode = 1;
}
@@ -851,7 +851,7 @@ static int test_mpio_derived_dtype(char *filename) {
2. This test will fail with the MPI-IO package that doesn't support this. For example,
mpich 1.2.6.
- If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change
+   If this bug has been fixed in the previous not-working package, this test will issue an HDprintf message to tell the developer to change
the configuration specific file of HDF5 so that we can change our configurationsetting to support special collective IO; currently only special collective IO.
If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
@@ -898,26 +898,26 @@ static int test_mpio_special_collective(char *filename) {
if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
&buftype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
if ((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
} /* end if */
@@ -931,7 +931,7 @@ static int test_mpio_special_collective(char *filename) {
MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
!= MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
@@ -943,7 +943,7 @@ static int test_mpio_special_collective(char *filename) {
if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep,
MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
@@ -951,7 +951,7 @@ static int test_mpio_special_collective(char *filename) {
if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount,
buftype, &mpi_stat)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
(long) mpi_off, bufcount, mpi_err_str);
return 1;
} /* end if */
@@ -959,7 +959,7 @@ static int test_mpio_special_collective(char *filename) {
/* Close the file */
if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed. \n");
+ HDprintf("MPI_File_close failed. \n");
return 1;
} /* end if */
@@ -967,8 +967,8 @@ static int test_mpio_special_collective(char *filename) {
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
if (retcode != 0) {
if (mpi_rank == 0) {
- printf("special collective IO is NOT working at this platform\n");
- printf(" Please report to help@hdfgroup.org about this problem.\n");
+ HDprintf("special collective IO is NOT working at this platform\n");
+ HDprintf(" Please report to help@hdfgroup.org about this problem.\n");
} /* end if */
retcode = 1;
} /* end if */
@@ -1024,15 +1024,15 @@ static int parse_options(int argc, char **argv) {
for (i = 0; i < n; i++)
if (h5_fixname(FILENAME[i], plist, filenames[i],
sizeof(filenames[i])) == NULL) {
- printf("h5_fixname failed\n");
+ HDprintf("h5_fixname failed\n");
nerrors++;
return (1);
}
H5Pclose(plist);
if (VERBOSE_MED) {
- printf("Test filenames are:\n");
+ HDprintf("Test filenames are:\n");
for (i = 0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ HDprintf(" %s\n", filenames[i]);
}
}
@@ -1043,10 +1043,10 @@ static int parse_options(int argc, char **argv) {
* Show command usage
*/
static void usage(void) {
- printf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n");
- printf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n");
- printf("\t-f <prefix>\tfilename prefix\n");
- printf("\n");
+ HDprintf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n");
+ HDprintf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n");
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\n");
}
/*
@@ -1072,7 +1072,7 @@ int main(int argc, char **argv) {
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0) {
- printf("Failed to turn off atexit processing. Continue.\n");
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
if (parse_options(argc, argv) != 0) {
@@ -1082,9 +1082,9 @@ int main(int argc, char **argv) {
}
if (MAINPROCESS) {
- printf("===================================\n");
- printf("MPI functionality tests\n");
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("MPI functionality tests\n");
+ HDprintf("===================================\n");
}
if (VERBOSE_MED)
@@ -1103,7 +1103,7 @@ int main(int argc, char **argv) {
ret_code = test_mpio_1wMr(filenames[0], USENONE);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
@@ -1114,7 +1114,7 @@ int main(int argc, char **argv) {
ret_code = test_mpio_1wMr(filenames[0], USEATOM);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
@@ -1122,7 +1122,7 @@ int main(int argc, char **argv) {
ret_code = test_mpio_1wMr(filenames[0], USEFSYNC);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
}
@@ -1135,12 +1135,12 @@ int main(int argc, char **argv) {
ret_code = test_mpio_gb_file(filenames[0]);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
#else
if (mpi_rank==0)
- printf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
+ HDprintf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif
/*=======================================
@@ -1150,7 +1150,7 @@ int main(int argc, char **argv) {
ret_code = test_mpio_overlap_writes(filenames[0]);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
@@ -1161,7 +1161,7 @@ int main(int argc, char **argv) {
ret_code = test_mpio_derived_dtype(filenames[0]);
ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
@@ -1171,7 +1171,7 @@ int main(int argc, char **argv) {
if (mpi_size < 4) {
MPI_BANNER("MPIO special collective io test SKIPPED.");
if (mpi_rank == 0)
- printf("This test needs at least four processes to run.\n");
+ HDprintf("This test needs at least four processes to run.\n");
ret_code = 0;
goto sc_finish;
} /* end if */
@@ -1181,7 +1181,7 @@ int main(int argc, char **argv) {
sc_finish: ret_code = errors_sum(ret_code);
if (mpi_rank == 0 && ret_code > 0) {
- printf("***FAILED with %d total errors\n", ret_code);
+ HDprintf("***FAILED with %d total errors\n", ret_code);
nerrors += ret_code;
}
@@ -1191,14 +1191,14 @@ int main(int argc, char **argv) {
*/
MPI_Barrier(MPI_COMM_WORLD);
if (MAINPROCESS) { /* only process 0 reports */
- printf("===================================\n");
+ HDprintf("===================================\n");
if (nerrors) {
- printf("***MPI tests detected %d errors***\n", nerrors);
+ HDprintf("***MPI tests detected %d errors***\n", nerrors);
}
else {
- printf("MPI tests finished with no errors\n");
+ HDprintf("MPI tests finished with no errors\n");
}
- printf("===================================\n");
+ HDprintf("===================================\n");
}
/* turn off alarm */
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 574591c..8e554ee 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -50,13 +50,13 @@ test_fapl_mpio_dup(void)
int nkeys, nkeys_tmp;
if (VERBOSE_MED)
- printf("Verify fapl_mpio duplicates communicator and INFO objects\n");
+ HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n");
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
if (VERBOSE_MED)
- printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
+ HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
/* Create a new communicator that has the same processes as MPI_COMM_WORLD.
* Use MPI_Comm_split because it is simplier than MPI_Comm_create
@@ -66,7 +66,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm,&mpi_size_old);
MPI_Comm_rank(comm,&mpi_rank_old);
if (VERBOSE_MED)
- printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
+ HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
/* create a new INFO object with some trivial information. */
mrc = MPI_Info_create(&info);
@@ -103,7 +103,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm_tmp,&mpi_size_tmp);
MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
+ HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
mpi_rank_tmp, mpi_size_tmp);
VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
@@ -151,7 +151,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm_tmp,&mpi_size_tmp);
MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
+ HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
mpi_rank_tmp, mpi_size_tmp);
VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
@@ -171,7 +171,7 @@ test_fapl_mpio_dup(void)
MPI_Comm_size(comm_tmp,&mpi_size_tmp);
MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After Property list closed: rank/size of comm are %d/%d\n",
+ HDprintf("After Property list closed: rank/size of comm are %d/%d\n",
mpi_rank_tmp, mpi_size_tmp);
if (MPI_INFO_NULL != info_tmp){
mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index 719d150..d75e627 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -107,7 +107,7 @@ main (int argc, char **argv)
for (i=0; i < block[0]; i++){
for (j=0; j < block[1]; j++){
if(*dataptr != mpi_rank+1) {
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
(unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
mpi_rank+1, *(dataptr));
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 2eb3914..fd89c6a 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -141,7 +141,7 @@ test_plist_ed(void)
herr_t ret; /* Generic return value */
if(VERBOSE_MED)
- printf("Encode/Decode DCPLs\n");
+ HDprintf("Encode/Decode DCPLs\n");
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index b65e219..abbfbb3 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -4656,10 +4656,10 @@ void pause_proc(void)
if (MAINPROCESS)
while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
if (!loops++){
- printf("Proc %d (%*s, %d): to debug, attach %d\n",
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n",
mpi_rank, mpi_namelen, mpi_name, pid, pid);
}
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
fflush(stdout);
HDsleep(time_int);
}
@@ -4683,18 +4683,18 @@ int MPI_Init(int *argc, char ***argv)
static void
usage(void)
{
- printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
"[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
- printf("\t-m<n_datasets>"
+ HDprintf("\t-m<n_datasets>"
"\tset number of datasets for the multiple dataset test\n");
- printf("\t-n<n_groups>"
+ HDprintf("\t-n<n_groups>"
"\tset number of groups for the multiple group test\n");
- printf("\t-f <prefix>\tfilename prefix\n");
- printf("\t-2\t\tuse Split-file together with MPIO\n");
- printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
ROW_FACTOR, COL_FACTOR);
- printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
- printf("\n");
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
}
@@ -4772,7 +4772,7 @@ parse_options(int argc, char **argv)
break;
case 'h': /* print help message--return with nerrors set */
return(1);
- default: printf("Illegal option(%s)\n", *argv);
+ default: HDprintf("Illegal option(%s)\n", *argv);
nerrors++;
return(1);
}
@@ -4781,12 +4781,12 @@ parse_options(int argc, char **argv)
/* check validity of dimension and chunk sizes */
if (dim0 <= 0 || dim1 <= 0){
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
nerrors++;
return(1);
}
if (chunkdim0 <= 0 || chunkdim1 <= 0){
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
nerrors++;
return(1);
}
@@ -4794,7 +4794,7 @@ parse_options(int argc, char **argv)
/* Make sure datasets can be divided into equal portions by the processes */
if ((dim0 % mpi_size) || (dim1 % mpi_size)){
if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
dim0, dim1, mpi_size);
nerrors++;
return(1);
@@ -4809,13 +4809,13 @@ parse_options(int argc, char **argv)
for (i=0; i < n; i++)
if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
== NULL){
- printf("h5_fixname failed\n");
+ HDprintf("h5_fixname failed\n");
nerrors++;
return(1);
}
- printf("Test filenames are:\n");
+ HDprintf("Test filenames are:\n");
for (i=0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ HDprintf(" %s\n", filenames[i]);
}
return(0);
@@ -4952,10 +4952,10 @@ int main(int argc, char **argv)
dim1 = COL_FACTOR*mpi_size;
if (MAINPROCESS){
- printf("===================================\n");
- printf("Shape Same Tests Start\n");
- printf(" express_test = %d.\n", GetTestExpress());
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Shape Same Tests Start\n");
+ HDprintf(" express_test = %d.\n", GetTestExpress());
+ HDprintf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
@@ -4964,7 +4964,7 @@ int main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
h5_show_hostname();
@@ -5003,7 +5003,7 @@ int main(int argc, char **argv)
TestParseCmdLine(argc, argv);
if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
- printf("===================================\n"
+ HDprintf("===================================\n"
" Using Independent I/O with file set view to replace collective I/O \n"
"===================================\n");
}
@@ -5034,12 +5034,12 @@ int main(int argc, char **argv)
}
if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
+ HDprintf("===================================\n");
if (nerrors)
- printf("***Shape Same tests detected %d errors***\n", nerrors);
+ HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
else
- printf("Shape Same tests finished with no errors\n");
- printf("===================================\n");
+ HDprintf("Shape Same tests finished with no errors\n");
+ HDprintf("===================================\n");
}
/* close HDF5 library */
diff --git a/testpar/testpar.h b/testpar/testpar.h
index 84c073f..4fbe8d8 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -32,7 +32,7 @@
*/
#define MESG(mesg) \
if (VERBOSE_MED && *mesg != '\0') \
- printf("%s\n", mesg)
+ HDprintf("%s\n", mesg)
/*
* VRFY: Verify if the condition val is true.
@@ -48,14 +48,14 @@
if (val) { \
MESG(mesg); \
} else { \
- printf("Proc %d: ", mpi_rank); \
- printf("*** Parallel ERROR ***\n"); \
- printf(" VRFY (%s) failed at line %4d in %s\n", \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** Parallel ERROR ***\n"); \
+ HDprintf(" VRFY (%s) failed at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
++nerrors; \
fflush(stdout); \
if (!VERBOSE_MED) { \
- printf("aborting MPI processes\n"); \
+ HDprintf("aborting MPI processes\n"); \
MPI_Abort(MPI_COMM_WORLD, 1); \
} \
} \
@@ -70,9 +70,9 @@
if (val) { \
MESG(mesg); \
} else { \
- printf("Proc %d: ", mpi_rank); \
- printf("*** PHDF5 REMARK (not an error) ***\n"); \
- printf(" Condition (%s) failed at line %4d in %s\n", \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \
+ HDprintf(" Condition (%s) failed at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
fflush(stdout); \
} \
@@ -80,10 +80,10 @@
#define MPI_BANNER(mesg) do { \
if (VERBOSE_MED || MAINPROCESS){ \
- printf("--------------------------------\n"); \
- printf("Proc %d: ", mpi_rank); \
- printf("*** %s\n", mesg); \
- printf("--------------------------------\n"); \
+ HDprintf("--------------------------------\n"); \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** %s\n", mesg); \
+ HDprintf("--------------------------------\n"); \
} \
} while(0)
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 9fece12..cf611b7 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -19,7 +19,7 @@
#include "testpar.h"
enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
- API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
+ API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
API_MULTI_COLL,API_MULTI_IND};
#ifndef FALSE
@@ -32,20 +32,20 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* Constants definitions */
-#define DIM0 600 /* Default dataset sizes. */
-#define DIM1 1200 /* Values are from a monitor pixel sizes */
-#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
-#define COL_FACTOR 16 /* Nominal column factor for dataset size */
-#define RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-#define DATASETNAME4 "Data4"
-#define DATASETNAME5 "Data5"
-#define DATASETNAME6 "Data6"
-#define DATASETNAME7 "Data7"
-#define DATASETNAME8 "Data8"
-#define DATASETNAME9 "Data9"
+#define DIM0 600 /* Default dataset sizes. */
+#define DIM1 1200 /* Values are from a monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
+#define RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
/* point selection order */
#define IN_ORDER 1
@@ -179,14 +179,14 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* Definitions of the selection mode for the no_collective_cause_tests function. */
#define TEST_COLLECTIVE 0x001
-#define TEST_SET_INDEPENDENT 0x002
+#define TEST_SET_INDEPENDENT 0x002
#define TEST_DATATYPE_CONVERSION 0x004
#define TEST_DATA_TRANSFORMS 0x008
#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
#define TEST_FILTERS 0x080
-/* TEST_FILTERS will take place of this after supporting mpio + filter for
+/* TEST_FILTERS will take place of this after supporting mpio + filter for
* H5Dcreate and H5Dwrite */
#define TEST_FILTERS_READ 0x100
@@ -209,8 +209,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* type definitions */
typedef struct H5Ptest_param_t /* holds extra test parameters */
{
- char *name;
- int count;
+ char *name;
+ int count;
} H5Ptest_param_t;
/* Dataset data type. Int's can be easily octo dumped. */
@@ -218,19 +218,19 @@ typedef int DATATYPE;
/* Shape Same Tests Definitions */
typedef enum {
- IND_CONTIG, /* Independent IO on contigous datasets */
- COL_CONTIG, /* Collective IO on contigous datasets */
- IND_CHUNKED, /* Independent IO on chunked datasets */
- COL_CHUNKED /* Collective IO on chunked datasets */
+    IND_CONTIG,    /* Independent IO on contiguous datasets */
+    COL_CONTIG,    /* Collective IO on contiguous datasets */
+ IND_CHUNKED, /* Independent IO on chunked datasets */
+ COL_CHUNKED /* Collective IO on chunked datasets */
} ShapeSameTestMethods;
/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void *old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern H5E_auto2_t old_func; /* previous error handler */
+extern void *old_client_data; /*previous error handler arg.*/
+extern int facc_type; /*Test file access type */
extern int dxfer_coll_type;
/* Test program prototypes */
diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c
index 16c0d8c..1eef5ab 100644
--- a/tools/src/h5import/h5import.c
+++ b/tools/src/h5import/h5import.c
@@ -3753,7 +3753,7 @@ void setDefaultValues(struct Input *in, int count)
in->path.count = 1;
HDstrcpy(temp, "dataset");
- sprintf(num, "%d", count);
+ HDsprintf(num, "%d", count);
HDstrcat(temp, num);
HDstrcpy(in->path.group[0], temp);
diff --git a/tools/test/h5repack/h5repacktst.c b/tools/test/h5repack/h5repacktst.c
index 057cbbd..ec8df3c 100644
--- a/tools/test/h5repack/h5repacktst.c
+++ b/tools/test/h5repack/h5repacktst.c
@@ -4120,7 +4120,7 @@ int write_dset_in(hid_t loc_id,
dbuf = (double*)HDmalloc( size );
if (NULL == dbuf)
{
- printf ("\nError: Cannot allocate memory for \"arrayd\" data buffer size %dMB.\n", (int) size / 1000000 );
+ HDprintf ("\nError: Cannot allocate memory for \"arrayd\" data buffer size %dMB.\n", (int) size / 1000000 );
goto out;
}
diff --git a/tools/test/misc/h5perf_gentest.c b/tools/test/misc/h5perf_gentest.c
index 3784278..f50e5fb 100644
--- a/tools/test/misc/h5perf_gentest.c
+++ b/tools/test/misc/h5perf_gentest.c
@@ -10,8 +10,8 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
- This test generates attributes, groups, and datasets of many types. It
- creates a large number of attributes, groups, and datasets by specifying
+ This test generates attributes, groups, and datasets of many types. It
+ creates a large number of attributes, groups, and datasets by specifying
-a, -g, -d options respectively. Using "-h" option to see details.
Programmer: Peter Cao <xcao@hdfgroup.org>, Jan. 2013
@@ -31,7 +31,7 @@
#define MAXVLEN 10
#define FIXED_LEN 8
-typedef enum { SOLID=0, LIQUID, GAS, PLASMA } phase_t;
+typedef enum { SOLID=0, LIQUID, GAS, PLASMA } phase_t;
typedef struct {
int i;
@@ -43,24 +43,24 @@ typedef struct {
float f_array[FIXED_LEN];
hvl_t i_vlen;
char *s_vlen;
-} test_comp_t;
+} test_comp_t;
typedef struct {
int zipcode;
char *city;
-} zipcode_t;
+} zipcode_t;
int add_attrs(hid_t oid, int idx);
int add_attr(hid_t oid, const char *name, hid_t tid, hid_t sid, void *buf) ;
-herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
- int nattrs, hsize_t nrows, hsize_t dim0, hsize_t chunk, int vlen,
- int compressed, int latest);
+herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
+ int nattrs, hsize_t nrows, hsize_t dim0, hsize_t chunk, int vlen,
+ int compressed, int latest);
int main (int argc, char *argv[])
{
char fname[32];
- int i, ngrps=NGROUPS, ndsets=NDSETS, nattrs=NATTRS, dim0=DIM0,
- chunk=DIM0/10+1, nrows=NROWS, vlen=MAXVLEN, l=0, z=0;
+ int i, ngrps=NGROUPS, ndsets=NDSETS, nattrs=NATTRS, dim0=DIM0,
+ chunk=DIM0/10+1, nrows=NROWS, vlen=MAXVLEN, l=0, z=0;
memset(fname, 0, 32);
for (i=1; i<argc; i++) {
@@ -83,92 +83,92 @@ int main (int argc, char *argv[])
else if (strcmp(argv[i], "-l")==0)
l = 1;
else if (strcmp(argv[i], "-z")==0)
- z = 1;
+ z = 1;
else if (strcmp(argv[i], "-h")==0) {
- printf("\nOPTONS:\n");
- printf("\t-f F:\tname of the test file (default: %s).\n", FNAME);
- printf("\t-g N:\tnumber of top level groups (default: %d).\n", NGROUPS);
- printf("\t-d N:\tnumber of datasets (default: %d).\n", NDSETS);
- printf("\t-a N:\tnumber of attributes (default: %d).\n", NATTRS);
- printf("\t-r N:\tnumber of rows in the large compound dataset (default: %d).\n", NROWS);
- printf("\t-s N:\tsize of dim0 in datasets (default: %d).\n", DIM0);
- printf("\t-c N:\tchunk size of dim0 (default: %d).\n", (DIM0/10+1));
- printf("\t-v N:\tmax vlen size (default: %d).\n", MAXVLEN);
- printf("\t-l:\tuse latest format (default: no).\n");
- printf("\t-z:\tuse gzip compression (default: no).\n");
- printf("\t-h:\tthis help information.\n");
- printf("Example:\n");
- printf("\t./a.out -f test.h5 -g 10000 -d 5000 -a 500 -r 10000 -s 200 -c 20 -v 40 -l -z\n\n");
+            HDprintf("\nOPTIONS:\n");
+ HDprintf("\t-f F:\tname of the test file (default: %s).\n", FNAME);
+ HDprintf("\t-g N:\tnumber of top level groups (default: %d).\n", NGROUPS);
+ HDprintf("\t-d N:\tnumber of datasets (default: %d).\n", NDSETS);
+ HDprintf("\t-a N:\tnumber of attributes (default: %d).\n", NATTRS);
+ HDprintf("\t-r N:\tnumber of rows in the large compound dataset (default: %d).\n", NROWS);
+ HDprintf("\t-s N:\tsize of dim0 in datasets (default: %d).\n", DIM0);
+ HDprintf("\t-c N:\tchunk size of dim0 (default: %d).\n", (DIM0/10+1));
+ HDprintf("\t-v N:\tmax vlen size (default: %d).\n", MAXVLEN);
+ HDprintf("\t-l:\tuse latest format (default: no).\n");
+ HDprintf("\t-z:\tuse gzip compression (default: no).\n");
+ HDprintf("\t-h:\tthis help information.\n");
+ HDprintf("Example:\n");
+ HDprintf("\t./a.out -f test.h5 -g 10000 -d 5000 -a 500 -r 10000 -s 200 -c 20 -v 40 -l -z\n\n");
exit(0);
}
}
-
+
if (strlen(fname)<=0)
- sprintf(fname, FNAME);
+ HDsprintf(fname, FNAME);
+
+ create_perf_test_file(fname, ngrps, ndsets, nattrs, (hsize_t)nrows,
+ (hsize_t)dim0, (hsize_t)chunk, vlen, z, l);
- create_perf_test_file(fname, ngrps, ndsets, nattrs, (hsize_t)nrows,
- (hsize_t)dim0, (hsize_t)chunk, vlen, z, l);
-
return 0;
}
/*****************************************************************************
- This function generates attributes, groups, and datasets of many types.
+ This function generates attributes, groups, and datasets of many types.
Parameters:
- fname: file_name.
- ngrps: number of top level groups.
- ndsets: number of datasets.
- attrs: number of attributes.
- nrow: number of rows in a dataset.
- chunk: chunk size (single number).
- vlen: max vlen size.
- comp: use latest format.
- latest: use gzip comnpression.
-
+ fname: file_name.
+ ngrps: number of top level groups.
+ ndsets: number of datasets.
+ attrs: number of attributes.
+ nrow: number of rows in a dataset.
+ chunk: chunk size (single number).
+ vlen: max vlen size.
+      comp:    use gzip compression.
+      latest:  use the latest file format.
+
Return: Non-negative on success/Negative on failure
-
+
Programmer: Peter Cao <xcao@hdfgroup.org>, Jan. 2013
****************************************************************************/
-herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
- int nattrs, hsize_t nrows, hsize_t dim0, hsize_t chunk, int vlen,
- int compressed, int latest)
+herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
+ int nattrs, hsize_t nrows, hsize_t dim0, hsize_t chunk, int vlen,
+ int compressed, int latest)
{
int i, j, k;
- hid_t fid, sid_null, sid_scalar, sid_1d, sid_2d, did, aid, sid_2, sid_large,
- fapl=H5P_DEFAULT, dcpl=H5P_DEFAULT, gid1, gid2, cmp_tid, tid_str,
- tid_enum, tid_array_f, tid_vlen_i, tid_vlen_s;
+ hid_t fid, sid_null, sid_scalar, sid_1d, sid_2d, did, aid, sid_2, sid_large,
+ fapl=H5P_DEFAULT, dcpl=H5P_DEFAULT, gid1, gid2, cmp_tid, tid_str,
+ tid_enum, tid_array_f, tid_vlen_i, tid_vlen_s;
char name[32], tmp_name1[32], tmp_name2[32], tmp_name3[32];
- hsize_t dims[1]={dim0}, dims2d[2]={dim0, (dim0/4+1)}, dims_array[1]={FIXED_LEN},
- dim1[1]={2};
+ hsize_t dims[1]={dim0}, dims2d[2]={dim0, (dim0/4+1)}, dims_array[1]={FIXED_LEN},
+ dim1[1]={2};
char *enum_names[4] = {"SOLID", "LIQUID", "GAS", "PLASMA"};
test_comp_t *buf_comp=NULL, *buf_comp_large=NULL;
int *buf_int=NULL;
float (*buf_float_a)[FIXED_LEN]=NULL;
double **buf_double2d=NULL;
hvl_t *buf_vlen_i=NULL;
- char (*buf_str)[FIXED_LEN];
- char **buf_vlen_s=NULL;
- hobj_ref_t buf_ref[2];
- hdset_reg_ref_t buf_reg_ref[2];
+ char (*buf_str)[FIXED_LEN];
+ char **buf_vlen_s=NULL;
+ hobj_ref_t buf_ref[2];
+ hdset_reg_ref_t buf_reg_ref[2];
size_t offset, len;
herr_t status;
- char *names[NTYPES] = { "int", "ulong", "float", "double", "fixed string",
- "enum", "fixed float array", "vlen int array", "vlen strings"};
- hid_t types[NTYPES] = { H5T_NATIVE_INT, H5T_NATIVE_UINT64, H5T_NATIVE_FLOAT,
+ char *names[NTYPES] = { "int", "ulong", "float", "double", "fixed string",
+ "enum", "fixed float array", "vlen int array", "vlen strings"};
+ hid_t types[NTYPES] = { H5T_NATIVE_INT, H5T_NATIVE_UINT64, H5T_NATIVE_FLOAT,
H5T_NATIVE_DOUBLE, tid_str, tid_enum, tid_array_f, tid_vlen_i, tid_vlen_s};
- hsize_t coords[4][2] = { {0, 1}, {3, 5}, {1, 0}, {2, 4}}, start=0, stride=1, count=1;
-
- if (nrows < NROWS) nrows = NROWS;
+ hsize_t coords[4][2] = { {0, 1}, {3, 5}, {1, 0}, {2, 4}}, start=0, stride=1, count=1;
+
+ if (nrows < NROWS) nrows = NROWS;
if (ngrps<NGROUPS) ngrps=NGROUPS;
- if (ndsets<NDSETS) ndsets=NDSETS;
- if (nattrs<NATTRS) nattrs=NATTRS;
- if (dim0<DIM0) dim0=DIM0;
+ if (ndsets<NDSETS) ndsets=NDSETS;
+ if (nattrs<NATTRS) nattrs=NATTRS;
+ if (dim0<DIM0) dim0=DIM0;
if (chunk>dim0) chunk=dim0/4;
if (chunk<1) chunk = 1;
- if (vlen<1) vlen = MAXVLEN;
+ if (vlen<1) vlen = MAXVLEN;
- /* create fixed string datatype */
+ /* create fixed string datatype */
types[4] = tid_str = H5Tcopy (H5T_C_S1);
H5Tset_size (tid_str, FIXED_LEN);
@@ -181,15 +181,15 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
/* create float array datatype */
types[6] = tid_array_f = H5Tarray_create (H5T_NATIVE_FLOAT, 1, dims_array);
-
+
/* create variable length integer datatypes */
types[7] = tid_vlen_i = H5Tvlen_create (H5T_NATIVE_INT);
-
+
/* create variable length string datatype */
types[8] = tid_vlen_s = H5Tcopy (H5T_C_S1);
H5Tset_size (tid_vlen_s, H5T_VARIABLE);
-
- /* create compound datatypes */
+
+ /* create compound datatypes */
cmp_tid = H5Tcreate (H5T_COMPOUND, sizeof (test_comp_t));
offset = 0;
for (i=0; i<NTYPES-2; i++) {
@@ -197,31 +197,31 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
offset += H5Tget_size(types[i]);
}
- H5Tinsert(cmp_tid, names[7], offset, types[7]);
- offset += sizeof (hvl_t);
- H5Tinsert(cmp_tid, names[8], offset, types[8]);
+ H5Tinsert(cmp_tid, names[7], offset, types[7]);
+ offset += sizeof (hvl_t);
+ H5Tinsert(cmp_tid, names[8], offset, types[8]);
- /* create dataspace */
+ /* create dataspace */
sid_1d = H5Screate_simple (1, dims, NULL);
sid_2d = H5Screate_simple (2, dims2d, NULL);
sid_2 = H5Screate_simple (1, dim1, NULL);
- sid_large = H5Screate_simple (1, &nrows, NULL);
- sid_null = H5Screate (H5S_NULL);
- sid_scalar = H5Screate (H5S_SCALAR);
-
- /* create fid access property */
- fapl = H5Pcreate (H5P_FILE_ACCESS);
+ sid_large = H5Screate_simple (1, &nrows, NULL);
+ sid_null = H5Screate (H5S_NULL);
+ sid_scalar = H5Screate (H5S_SCALAR);
+
+    /* create file access property list */
+ fapl = H5Pcreate (H5P_FILE_ACCESS);
H5Pset_libver_bounds (fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- /* create dataset creation property */
+ /* create dataset creation property */
dcpl = H5Pcreate (H5P_DATASET_CREATE);
- /* set dataset chunk */
+ /* set dataset chunk */
if (chunk>0) {
H5Pset_chunk (dcpl, 1, &chunk);
}
- /* set dataset compression */
+ /* set dataset compression */
if (compressed) {
if (chunk<=0) {
chunk = dim0/10+1;;
@@ -229,54 +229,54 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
}
H5Pset_shuffle (dcpl);
H5Pset_deflate (dcpl, 6);
- }
+ }
- /* allocate buffers */
+ /* allocate buffers */
buf_comp = (test_comp_t *)calloc(dim0, sizeof(test_comp_t));
buf_comp_large = (test_comp_t *)calloc(nrows, sizeof(test_comp_t));
buf_int = (int *)calloc(dim0, sizeof(int));
buf_float_a = malloc(dim0*sizeof(*buf_float_a));
- buf_vlen_i = (hvl_t *)calloc(dim0, sizeof (hvl_t));
+ buf_vlen_i = (hvl_t *)calloc(dim0, sizeof (hvl_t));
buf_vlen_s = (char **)calloc(dim0, sizeof(char *));
- buf_str = malloc(dim0*sizeof (*buf_str));
+ buf_str = malloc(dim0*sizeof (*buf_str));
- /* allocate array of doulbe pointers */
- buf_double2d = (double **)calloc(dims2d[0],sizeof(double *));
- /* allocate a contigous chunk of memory for the data */
- buf_double2d[0] = (double *)calloc( dims2d[0]*dims2d[1],sizeof(double) );
- /* assign memory city to pointer array */
- for (i=1; i <dims2d[0]; i++) buf_double2d[i] = buf_double2d[0]+i*dims2d[1];
+    /* allocate array of double pointers */
+ buf_double2d = (double **)calloc(dims2d[0],sizeof(double *));
+    /* allocate a contiguous chunk of memory for the data */
+ buf_double2d[0] = (double *)calloc( dims2d[0]*dims2d[1],sizeof(double) );
+    /* point each row pointer into the contiguous block */
+ for (i=1; i <dims2d[0]; i++) buf_double2d[i] = buf_double2d[0]+i*dims2d[1];
- /* fill buffer values */
- len = 1;
+ /* fill buffer values */
+ len = 1;
for (i=0; i<dims[0]; i++) {
buf_comp[i].i = buf_int[i] = i-2147483648;
buf_comp[i].l = 0xffffffffffffffff-i;
buf_comp[i].f = 1.0/(i+1.0);
buf_comp[i].d = 987654321.0*i+1.0/(i+1.0);
buf_comp[i].e = (phase_t) (i % (int) (PLASMA + 1));
-
- for (j=0; j<FIXED_LEN; j++) {
- buf_comp[i].f_array[j] = buf_float_a[i][j] = i*100+j;
- buf_str[i][j] = 'a' + (i%26);
- }
- buf_str[i][FIXED_LEN-1] = 0;
+
+ for (j=0; j<FIXED_LEN; j++) {
+ buf_comp[i].f_array[j] = buf_float_a[i][j] = i*100+j;
+ buf_str[i][j] = 'a' + (i%26);
+ }
+ buf_str[i][FIXED_LEN-1] = 0;
strcpy(buf_comp[i].s, buf_str[i]);
-
- len = (1-cos(i/8.0))/2*vlen+1;
- if (!i) len = vlen;
- buf_vlen_i[i].len = len;
- buf_vlen_i[i].p = (int *)calloc(len, sizeof(int));
- for (j=0; j<len; j++) ((int*)(buf_vlen_i[i].p))[j] = i*100+j;
- buf_comp[i].i_vlen = buf_vlen_i[i];
-
- buf_vlen_s[i] = (char *)calloc(len, sizeof(char));
- for (j=0; j<len-1; j++)
- buf_vlen_s[i][j] = j%26+'A';
- buf_comp[i].s_vlen = buf_vlen_s[i];
-
- for (j=0; j<dims2d[1]; j++)
- buf_double2d[i][j] = i+j/10000.0;
+
+ len = (1-cos(i/8.0))/2*vlen+1;
+ if (!i) len = vlen;
+ buf_vlen_i[i].len = len;
+ buf_vlen_i[i].p = (int *)calloc(len, sizeof(int));
+ for (j=0; j<len; j++) ((int*)(buf_vlen_i[i].p))[j] = i*100+j;
+ buf_comp[i].i_vlen = buf_vlen_i[i];
+
+ buf_vlen_s[i] = (char *)calloc(len, sizeof(char));
+ for (j=0; j<len-1; j++)
+ buf_vlen_s[i][j] = j%26+'A';
+ buf_comp[i].s_vlen = buf_vlen_s[i];
+
+ for (j=0; j<dims2d[1]; j++)
+ buf_double2d[i][j] = i+j/10000.0;
}
for (i=0; i<nrows; i++) {
@@ -289,147 +289,147 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
buf_comp_large[i].f_array[j] = i*100+j;
buf_comp_large[i].s[j] = 'a' + (i%26);
}
- len = i%vlen+1;
+ len = i%vlen+1;
buf_comp_large[i].i_vlen.len = len;
buf_comp_large[i].i_vlen.p = (int *)calloc(len, sizeof(int));
for (j=0; j<len; j++) ((int*)(buf_comp_large[i].i_vlen.p))[j] = i*100+j;
buf_comp_large[i].s_vlen = (char *)calloc(i+2, sizeof(char));
for (j=0; j<i+1; j++) (buf_comp_large[i].s_vlen)[j] = j%26+'A';
}
-
- /* create file */
+
+ /* create file */
if (latest)
fid = H5Fcreate (fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
else
fid = H5Fcreate (fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- add_attrs(fid, 0);
+ add_attrs(fid, 0);
- sprintf(name, "a cmp ds of %d rows", nrows);
- did = H5Dcreate (fid, name, cmp_tid, sid_large, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, cmp_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_comp_large);
- add_attrs(did, 0);
- H5Dclose(did);
+    HDsprintf(name, "a cmp ds of %d rows", (int)nrows);
+ did = H5Dcreate (fid, name, cmp_tid, sid_large, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ H5Dwrite (did, cmp_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_comp_large);
+ add_attrs(did, 0);
+ H5Dclose(did);
- // /* add attributes*/
+    /* add attributes */
gid1 = H5Gcreate (fid, "attributes", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- if (nattrs<1) nattrs = 1;
- i=0;
- while (i<nattrs) i += add_attrs(gid1, i);
- H5Gclose(gid1);
-
- /* add many sub groups to a group*/
+ if (nattrs<1) nattrs = 1;
+ i=0;
+ while (i<nattrs) i += add_attrs(gid1, i);
+ H5Gclose(gid1);
+
+ /* add many sub groups to a group*/
gid1 = H5Gcreate (fid, "groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- add_attrs(gid1, 0);
+ add_attrs(gid1, 0);
for (i=0; i<ngrps; i++) {
- /* create sub groups */
- sprintf(name, "g%02d", i);
+ /* create sub groups */
+ HDsprintf(name, "g%02d", i);
gid2 = H5Gcreate (gid1, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- if (i<10) add_attrs(gid2, 0);
- H5Gclose(gid2);
- }
- H5Gclose(gid1);
-
- /* add many datasets to a group */
- gid1 = H5Gcreate (fid, "datasets", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- add_attrs(gid1, 0);
+ if (i<10) add_attrs(gid2, 0);
+ H5Gclose(gid2);
+ }
+ H5Gclose(gid1);
+
+ /* add many datasets to a group */
+ gid1 = H5Gcreate (fid, "datasets", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ add_attrs(gid1, 0);
for (j=0; j<ndsets; j+=12) {
- /* 1 add a null dataset */
- sprintf(name, "%05d null dataset", j);
+ /* 1 add a null dataset */
+ HDsprintf(name, "%05d null dataset", j);
did = H5Dcreate (gid1, name, H5T_STD_I32LE, sid_null, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
- /* 2 add scalar int point */
- sprintf(name, "%05d scalar int point", j);
+ /* 2 add scalar int point */
+ HDsprintf(name, "%05d scalar int point", j);
did = H5Dcreate (gid1, name, H5T_NATIVE_INT, sid_scalar, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- H5Dwrite (did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &j);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 3 scalar vlen string */
- sprintf(name, "%05d scalar vlen string", j);
+ H5Dwrite (did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &j);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 3 scalar vlen string */
+ HDsprintf(name, "%05d scalar vlen string", j);
did = H5Dcreate (gid1, name, tid_vlen_s, sid_scalar, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- H5Dwrite (did, tid_vlen_s, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf_vlen_s[0]);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 4 add fixed-length float array */
- sprintf(name, "%05d fixed-length float array", j);
- did = H5Dcreate (gid1, name, tid_array_f, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, tid_array_f, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_float_a);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 5 add fixed-length strings */
- sprintf(name, "%05d fixed-length strings", j);
- did = H5Dcreate (gid1, name, tid_str, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, tid_str, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_str);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 6 add compound data */
- sprintf(name, "%05d compund data", j);
- did = H5Dcreate (gid1, name, cmp_tid, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, cmp_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_comp);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 7 add 2D double */
- sprintf(name, "%05d 2D double", j);
- strcpy (tmp_name1, name);
- did = H5Dcreate (gid1, name, H5T_NATIVE_DOUBLE, sid_2d, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- H5Dwrite (did, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_double2d[0]);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 8 add 1D int array */
- sprintf(name, "%05d 1D int array", j);
+ H5Dwrite (did, tid_vlen_s, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf_vlen_s[0]);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 4 add fixed-length float array */
+ HDsprintf(name, "%05d fixed-length float array", j);
+ did = H5Dcreate (gid1, name, tid_array_f, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ H5Dwrite (did, tid_array_f, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_float_a);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 5 add fixed-length strings */
+ HDsprintf(name, "%05d fixed-length strings", j);
+ did = H5Dcreate (gid1, name, tid_str, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ H5Dwrite (did, tid_str, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_str);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 6 add compound data */
+ HDsprintf(name, "%05d compund data", j);
+ did = H5Dcreate (gid1, name, cmp_tid, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ H5Dwrite (did, cmp_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_comp);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 7 add 2D double */
+ HDsprintf(name, "%05d 2D double", j);
+ strcpy (tmp_name1, name);
+ did = H5Dcreate (gid1, name, H5T_NATIVE_DOUBLE, sid_2d, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite (did, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_double2d[0]);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 8 add 1D int array */
+ HDsprintf(name, "%05d 1D int array", j);
did = H5Dcreate (gid1, name, H5T_NATIVE_INT, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_int);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 9 add vlen int array */
- sprintf(name, "%05d vlen int array", j);
- strcpy (tmp_name2, name);
+ H5Dwrite (did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_int);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 9 add vlen int array */
+ HDsprintf(name, "%05d vlen int array", j);
+ strcpy (tmp_name2, name);
did = H5Dcreate (gid1, name, tid_vlen_i, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, tid_vlen_i, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_vlen_i);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
+ H5Dwrite (did, tid_vlen_i, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_vlen_i);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
- /* 10 add vlen strings */
- sprintf(name, "%05d vlen strings", j);
- strcpy (tmp_name3, name);
+ /* 10 add vlen strings */
+ HDsprintf(name, "%05d vlen strings", j);
+ strcpy (tmp_name3, name);
did = H5Dcreate (gid1, name, tid_vlen_s, sid_1d, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- H5Dwrite (did, tid_vlen_s, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_vlen_s);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 11 add object refs */
- H5Rcreate(&buf_ref[0],gid1, ".", H5R_OBJECT, (hid_t)-1);
- H5Rcreate(&buf_ref[1],gid1, tmp_name3, H5R_OBJECT, (hid_t)-1);
- sprintf(name, "%05d obj refs", j);
+ H5Dwrite (did, tid_vlen_s, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_vlen_s);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 11 add object refs */
+ H5Rcreate(&buf_ref[0],gid1, ".", H5R_OBJECT, (hid_t)-1);
+ H5Rcreate(&buf_ref[1],gid1, tmp_name3, H5R_OBJECT, (hid_t)-1);
+ HDsprintf(name, "%05d obj refs", j);
did = H5Dcreate (gid1, name, H5T_STD_REF_OBJ, sid_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- H5Dwrite (did, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_ref);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
-
- /* 12 add region refs */
- H5Sselect_elements (sid_2d, H5S_SELECT_SET, 4, coords[0]);
- H5Rcreate(&buf_reg_ref[0],gid1, tmp_name1, H5R_DATASET_REGION, sid_2d);
- H5Sselect_none(sid_2d);
- count = dims[0]/2+1;
- H5Sselect_hyperslab (sid_1d, H5S_SELECT_SET, &start, &stride, &count,NULL);
- H5Rcreate(&buf_reg_ref[1],gid1, tmp_name2, H5R_DATASET_REGION, sid_1d);
- H5Sselect_none(sid_1d);
- sprintf(name, "%05d region refs", j);
+ H5Dwrite (did, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_ref);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+
+ /* 12 add region refs */
+ H5Sselect_elements (sid_2d, H5S_SELECT_SET, 4, coords[0]);
+ H5Rcreate(&buf_reg_ref[0],gid1, tmp_name1, H5R_DATASET_REGION, sid_2d);
+ H5Sselect_none(sid_2d);
+ count = dims[0]/2+1;
+ H5Sselect_hyperslab (sid_1d, H5S_SELECT_SET, &start, &stride, &count,NULL);
+ H5Rcreate(&buf_reg_ref[1],gid1, tmp_name2, H5R_DATASET_REGION, sid_1d);
+ H5Sselect_none(sid_1d);
+ HDsprintf(name, "%05d region refs", j);
did = H5Dcreate (gid1, name, H5T_STD_REF_DSETREG, sid_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- H5Dwrite (did, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_reg_ref);
- if (!j) add_attrs(did, j);
- H5Dclose(did);
- }
- H5Gclose(gid1);
-
+ H5Dwrite (did, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_reg_ref);
+ if (!j) add_attrs(did, j);
+ H5Dclose(did);
+ }
+ H5Gclose(gid1);
+
H5Tclose (tid_array_f);
H5Tclose (tid_vlen_i);
H5Tclose (tid_vlen_s);
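This hunk replaces bare sprintf() calls with the HD-prefixed form used across the rest of the test suite. The HD names are assumed here to be thin pass-through macros over the matching C library functions (their real definitions live in the library's private headers); a minimal sketch of that convention and of the %05d name pattern the hunk relies on:

    #include <stdio.h>

    /* Assumption: pass-through macros, stand-ins for the library's own definitions. */
    #ifndef HDsprintf
    #define HDsprintf sprintf
    #endif
    #ifndef HDprintf
    #define HDprintf printf
    #endif

    int main(void)
    {
        char name[32];

        HDsprintf(name, "%05d scalar int point", 12);  /* -> "00012 scalar int point" */
        HDprintf("%s\n", name);
        return 0;
    }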
@@ -447,22 +447,22 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
H5Fclose (fid);
for (i=0; i<dims[0]; i++) {
- if (buf_vlen_i[i].p) free(buf_vlen_i[i].p);
- if (buf_vlen_s[i]) free(buf_vlen_s[i]);
- }
+ if (buf_vlen_i[i].p) free(buf_vlen_i[i].p);
+ if (buf_vlen_s[i]) free(buf_vlen_s[i]);
+ }
for (i=0; i<nrows; i++) {
- if (buf_comp_large[i].i_vlen.p) free(buf_comp_large[i].i_vlen.p);
- if (buf_comp_large[i].s_vlen) free(buf_comp_large[i].s_vlen);
- }
-
+ if (buf_comp_large[i].i_vlen.p) free(buf_comp_large[i].i_vlen.p);
+ if (buf_comp_large[i].s_vlen) free(buf_comp_large[i].s_vlen);
+ }
+
free (buf_comp);
free (buf_comp_large);
free (buf_int);
free (buf_float_a);
free (buf_double2d[0]);
free (buf_double2d);
- free (buf_str);
+ free (buf_str);
free(buf_vlen_i);
free(buf_vlen_s);
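The test allocated every hvl_t payload and every variable-length string itself with calloc(), so it releases them with plain free(), element by element, before freeing the containing arrays. A small sketch of that cleanup (names assumed; buffers filled by H5Dread would instead be released with H5Dvlen_reclaim(), or H5Treclaim() in newer releases):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Free application-allocated variable-length payloads, then reset the
     * descriptors so a stray second pass is harmless. */
    static void free_vlen_int_buf(hvl_t *buf, size_t n)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            free(buf[i].p);
            buf[i].p   = NULL;
            buf[i].len = 0;
        }
    }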
@@ -470,34 +470,34 @@ herr_t create_perf_test_file(const char *fname, int ngrps, int ndsets,
}
/* add a single attribute */
-int add_attr(hid_t oid, const char *name, hid_t tid, hid_t sid, void *buf)
+int add_attr(hid_t oid, const char *name, hid_t tid, hid_t sid, void *buf)
{
hid_t aid;
aid = H5Acreate (oid, name, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
- if (aid <0)
- return 0;
-
- H5Awrite(aid, tid, buf);
+ if (aid <0)
+ return 0;
+
+ H5Awrite(aid, tid, buf);
H5Aclose(aid);
-
- return 1;
+
+ return 1;
}
-/*
+/*
adds different types of attributes to an object.
-
- returns the number of attributes added to the objects.
+
+ returns the number of attributes added to the objects.
*/
-int add_attrs(hid_t oid, int idx)
+int add_attrs(hid_t oid, int idx)
{
char name[32];
int i0, i1, i2, j, nattrs=0;
- hid_t aid, tid, tid1, sid;
+ hid_t aid, tid, tid1, sid;
hvl_t i_vlen[4];
- hobj_ref_t ref;
- zipcode_t cmp_data[4];
+ hobj_ref_t ref;
+ zipcode_t cmp_data[4];
unsigned int i = 0xffffffff;
long long l = -2147483647;
float f = 123456789.987654321;
@@ -505,22 +505,22 @@ int add_attrs(hid_t oid, int idx)
char *s[7] = {"Parting", "is such", "sweeter", "sorrow."};
float f_array[4] = {1.0, 2.22, 3.333, 4.444};
char *s_vlen[4] = {"Parting", "is such", "sweet", "sorrow."};
- hsize_t dims1[1]={1}, dims2[1]={4}, dims3[2]={3,5};
- int int3d[4][3][5];
- size_t offset = 0;
-
- for (i0=0; i0<4; i0++) {
- i_vlen[i0].len = (i0+1);
- i_vlen[i0].p = (int *)calloc(i_vlen[i0].len, sizeof(int));
- for (j=0; j<i_vlen[i0].len; j++)
- ((int *)i_vlen[i0].p)[j] = i0*100+j;
- for (i1=0; i1<3; i1++) {
- for (i2=0; i2<5; i2++)
- int3d[i0][i1][i2] = i0*i1-i1*i2+i0*i2;
- }
+ hsize_t dims1[1]={1}, dims2[1]={4}, dims3[2]={3,5};
+ int int3d[4][3][5];
+ size_t offset = 0;
+
+ for (i0=0; i0<4; i0++) {
+ i_vlen[i0].len = (i0+1);
+ i_vlen[i0].p = (int *)calloc(i_vlen[i0].len, sizeof(int));
+ for (j=0; j<i_vlen[i0].len; j++)
+ ((int *)i_vlen[i0].p)[j] = i0*100+j;
+ for (i1=0; i1<3; i1++) {
+ for (i2=0; i2<5; i2++)
+ int3d[i0][i1][i2] = i0*i1-i1*i2+i0*i2;
+ }
}
- cmp_data[0].zipcode = 01001;
+ cmp_data[0].zipcode = 01001;
cmp_data[0].city = "Agawam, Massachusetts";
cmp_data[1].zipcode = 99950;
cmp_data[1].city = "Ketchikan, Alaska";
@@ -528,82 +528,82 @@ int add_attrs(hid_t oid, int idx)
cmp_data[2].city = "Holtsville, New York";
cmp_data[3].zipcode = 61820;
cmp_data[3].city = "Champaign, Illinois";
-
- /* 1 scalar point */
- sid = H5Screate (H5S_SCALAR);
- sprintf(name, "%05d scalar int", idx);
- nattrs += add_attr(oid, name, H5T_NATIVE_UINT, sid, &i);
- sprintf(name, "%05d scalar ulong", idx);
- nattrs += add_attr(oid, name, H5T_NATIVE_INT64, sid, &l);
- sprintf(name, "%05d scalar str", idx);
- tid = H5Tcopy (H5T_C_S1);
+
+ /* 1 scalar point */
+ sid = H5Screate (H5S_SCALAR);
+ HDsprintf(name, "%05d scalar int", idx);
+ nattrs += add_attr(oid, name, H5T_NATIVE_UINT, sid, &i);
+ HDsprintf(name, "%05d scalar ulong", idx);
+ nattrs += add_attr(oid, name, H5T_NATIVE_INT64, sid, &l);
+ HDsprintf(name, "%05d scalar str", idx);
+ tid = H5Tcopy (H5T_C_S1);
H5Tset_size (tid, H5T_VARIABLE);
- nattrs += add_attr(oid, name, tid, sid, &s[2]);
- H5Tclose(tid);
- H5Sclose(sid);
+ nattrs += add_attr(oid, name, tid, sid, &s[2]);
+ H5Tclose(tid);
+ H5Sclose(sid);
- /* 4 single point */
- sid = H5Screate_simple (1, dims1, NULL);
+ /* 4 single point */
+ sid = H5Screate_simple (1, dims1, NULL);
H5Rcreate(&ref, oid, ".", H5R_OBJECT, (hid_t)-1);
- sprintf(name, "%05d single float", idx);
- nattrs += add_attr(oid, name, H5T_NATIVE_FLOAT, sid, &f);
- sprintf(name, "%05d single double", idx);
- nattrs += add_attr(oid, name, H5T_NATIVE_DOUBLE, sid, &d);
- sprintf(name, "%05d single obj_ref", idx);
- nattrs += add_attr(oid, name, H5T_STD_REF_OBJ, sid, &ref);
- H5Sclose(sid);
-
- /* 7 fixed length 1D array */
- sid = H5Screate_simple (1, dims1, NULL);
- tid = H5Tarray_create (H5T_NATIVE_FLOAT, 1, dims2);
- sprintf(name, "%05d array float", idx);
+ HDsprintf(name, "%05d single float", idx);
+ nattrs += add_attr(oid, name, H5T_NATIVE_FLOAT, sid, &f);
+ HDsprintf(name, "%05d single double", idx);
+ nattrs += add_attr(oid, name, H5T_NATIVE_DOUBLE, sid, &d);
+ HDsprintf(name, "%05d single obj_ref", idx);
+ nattrs += add_attr(oid, name, H5T_STD_REF_OBJ, sid, &ref);
+ H5Sclose(sid);
+
+ /* 7 fixed length 1D array */
+ sid = H5Screate_simple (1, dims1, NULL);
+ tid = H5Tarray_create (H5T_NATIVE_FLOAT, 1, dims2);
+ HDsprintf(name, "%05d array float", idx);
nattrs += add_attr(oid, name, tid, sid, &f_array[0]);
- H5Tclose(tid);
- tid = H5Tcopy (H5T_C_S1);
- H5Tset_size (tid, strlen(s[0])+1);
- tid1 = H5Tarray_create (tid, 1, dims2);
- sprintf(name, "%05d array str", idx);
- nattrs += add_attr(oid, name, tid1, sid, s);
- H5Tclose(tid1);
- H5Tclose(tid);
- H5Sclose(sid);
-
- /* 9 fixed length 2D int arrays */
- sid = H5Screate_simple (1, dims2, NULL);
- tid = H5Tarray_create (H5T_NATIVE_INT, 2, dims3);
- sprintf(name, "%05d array int 2D", idx);
+ H5Tclose(tid);
+ tid = H5Tcopy (H5T_C_S1);
+ H5Tset_size (tid, strlen(s[0])+1);
+ tid1 = H5Tarray_create (tid, 1, dims2);
+ HDsprintf(name, "%05d array str", idx);
+ nattrs += add_attr(oid, name, tid1, sid, s);
+ H5Tclose(tid1);
+ H5Tclose(tid);
+ H5Sclose(sid);
+
+ /* 9 fixed length 2D int arrays */
+ sid = H5Screate_simple (1, dims2, NULL);
+ tid = H5Tarray_create (H5T_NATIVE_INT, 2, dims3);
+ HDsprintf(name, "%05d array int 2D", idx);
nattrs += add_attr(oid, name, tid, sid, int3d[0][0]);
- H5Tclose(tid);
- H5Sclose(sid);
-
- /* 10 variable length arrays */
- sid = H5Screate_simple (1, dims2, NULL);
- tid = H5Tcopy (H5T_C_S1);
- H5Tset_size (tid, H5T_VARIABLE);
- sprintf(name, "%05d vlen strings", idx);
+ H5Tclose(tid);
+ H5Sclose(sid);
+
+ /* 10 variable length arrays */
+ sid = H5Screate_simple (1, dims2, NULL);
+ tid = H5Tcopy (H5T_C_S1);
+ H5Tset_size (tid, H5T_VARIABLE);
+ HDsprintf(name, "%05d vlen strings", idx);
nattrs += add_attr(oid, name, tid, sid, s_vlen);
- H5Tclose(tid);
- tid = H5Tvlen_create (H5T_NATIVE_INT);;
- sprintf(name, "%05d vlen int array", idx);
+ H5Tclose(tid);
+ tid = H5Tvlen_create (H5T_NATIVE_INT);;
+ HDsprintf(name, "%05d vlen int array", idx);
nattrs += add_attr(oid, name, tid, sid, i_vlen);
- H5Tclose(tid);
- H5Sclose(sid);
-
- /* 12 compound data */
- sid = H5Screate_simple (1, dims2, NULL);
- tid = H5Tcreate (H5T_COMPOUND, sizeof (zipcode_t));
- tid1 = H5Tcopy (H5T_C_S1);
- H5Tset_size (tid1, H5T_VARIABLE);
+ H5Tclose(tid);
+ H5Sclose(sid);
+
+ /* 12 compound data */
+ sid = H5Screate_simple (1, dims2, NULL);
+ tid = H5Tcreate (H5T_COMPOUND, sizeof (zipcode_t));
+ tid1 = H5Tcopy (H5T_C_S1);
+ H5Tset_size (tid1, H5T_VARIABLE);
H5Tinsert (tid, "zip code", 0, H5T_NATIVE_INT); offset += sizeof(H5T_NATIVE_INT);
H5Tinsert (tid, "City", offset, tid1); offset += sizeof(char *);
- sprintf(name, "%05d compound data", idx);
+ HDsprintf(name, "%05d compound data", idx);
nattrs += add_attr(oid, name, tid, sid, cmp_data);
- H5Tclose(tid1);
- H5Tclose(tid);
- H5Sclose(sid);
-
- for (i0=0; i0<4; i0++)
- free(i_vlen[i0].p);
-
- return nattrs;
+ H5Tclose(tid1);
+ H5Tclose(tid);
+ H5Sclose(sid);
+
+ for (i0=0; i0<4; i0++)
+ free(i_vlen[i0].p);
+
+ return nattrs;
}
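add_attr() above is the usual create/write/close attribute sequence, returning 1 on success so add_attrs() can count what it attached. A self-contained sketch of the same pattern (the _sketch name and the early-close error path are illustrative, not the test's own code):

    #include "hdf5.h"

    static int add_attr_sketch(hid_t oid, const char *name, hid_t tid, hid_t sid, const void *buf)
    {
        hid_t aid = H5Acreate2(oid, name, tid, sid, H5P_DEFAULT, H5P_DEFAULT);

        if (aid < 0)
            return 0;                      /* creation failed: nothing attached */
        if (H5Awrite(aid, tid, buf) < 0) {
            H5Aclose(aid);
            return 0;
        }
        H5Aclose(aid);
        return 1;                          /* one attribute added */
    }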
diff --git a/tools/test/misc/talign.c b/tools/test/misc/talign.c
index 7f1f038..944674d 100644
--- a/tools/test/misc/talign.c
+++ b/tools/test/misc/talign.c
@@ -53,12 +53,12 @@ int main(void)
HDprintf("%-70s", "Testing alignment in compound datatypes");
- HDstrcpy(string5, "Hi!");
+ strcpy(string5, "Hi!");
HDunlink(fname);
fil = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
if (fil < 0) {
- HDputs("*FAILED*");
+ puts("*FAILED*");
return 1;
}
@@ -123,8 +123,8 @@ int main(void)
data = (char *)HDmalloc(H5Tget_size(fix));
if(!data) {
- HDperror("malloc() failed");
- HDabort();
+ perror("malloc() failed");
+ abort();
}
set = H5Dopen2(fil, setname, H5P_DEFAULT);
@@ -136,7 +136,7 @@ int main(void)
out:
if(error < 0) {
result = 1;
- HDputs("*FAILED - HDF5 library error*");
+ puts("*FAILED - HDF5 library error*");
} else if(!(H5_FLT_ABS_EQUAL(fok[0], fptr[0]))
|| !(H5_FLT_ABS_EQUAL(fok[1], fptr[1]))
|| !(H5_FLT_ABS_EQUAL(fnok[0], fptr[2]))
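The result check compares floats with H5_FLT_ABS_EQUAL rather than ==. That macro is assumed here to be an absolute-epsilon comparison along these lines (the real definition lives in the library's private headers):

    #include <math.h>
    #include <float.h>

    /* Assumption: stand-in for the library's own macro. */
    #ifndef H5_FLT_ABS_EQUAL
    #define H5_FLT_ABS_EQUAL(X, Y) (fabsf((X) - (Y)) < FLT_EPSILON)
    #endif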
@@ -183,9 +183,9 @@ out:
(double)fok[1], (double)fptr[1],
(double)fnok[0], (double)fptr[2],
(double)fnok[1], (double)fptr[3]);
- HDputs("*FAILED - compound type alignmnent problem*");
+ puts("*FAILED - compound type alignmnent problem*");
} else {
- HDputs(" PASSED");
+ puts(" PASSED");
}
if(data)
@@ -200,7 +200,7 @@ out:
H5Pclose(plist);
H5Fclose(fil);
HDunlink(fname);
- HDfflush(stdout);
+ fflush(stdout);
return result;
}
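talign.c verifies that member alignment in a compound datatype survives a write/read round trip. The usual way to keep the file datatype in step with the in-memory struct is to take each member offset with HOFFSET, so compiler padding between members is accounted for; a minimal sketch, with the struct and member names assumed:

    #include "hdf5.h"

    typedef struct {
        char   tag;     /* typically forces padding before the doubles */
        double x;
        double y;
    } point_t;

    /* Build a compound datatype whose offsets mirror the C struct layout. */
    static hid_t make_point_type(void)
    {
        hid_t tid = H5Tcreate(H5T_COMPOUND, sizeof(point_t));

        H5Tinsert(tid, "tag", HOFFSET(point_t, tag), H5T_NATIVE_CHAR);
        H5Tinsert(tid, "x",   HOFFSET(point_t, x),   H5T_NATIVE_DOUBLE);
        H5Tinsert(tid, "y",   HOFFSET(point_t, y),   H5T_NATIVE_DOUBLE);
        return tid;
    }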