author    | Allen Byrne <byrn@hdfgroup.org> | 2019-08-15 15:28:40 (GMT)
committer | Allen Byrne <byrn@hdfgroup.org> | 2019-08-15 15:28:40 (GMT)
commit    | 98c8c6e45b1cc0603d9730fa235956712373701a (patch)
tree      | 6ad63118dd770ab2edff764a461beb93c41c288d /testpar
parent    | f5a250e77cf0455e14fc3c2bcd78d5373cf6d01e (diff)
Mostly whitespace changes in testpar; additional changes for the temporary directory
Diffstat (limited to 'testpar')
-rw-r--r-- | testpar/t_chunk_alloc.c | 159
-rw-r--r-- | testpar/testphdf5.c     | 366
2 files changed, 262 insertions, 263 deletions
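Aside from indentation, the recurring substantive change in both files is routing bare C library calls through HDF5's HD-prefixed wrappers (printf -> HDprintf, memset -> HDmemset, fflush -> HDfflush, plus HDassert and HDsleep on the changed lines). The fragment below is only a sketch of that pattern, not code from the patch: the report_rank() helper and the #ifndef fallbacks are assumptions added so it compiles on its own, whereas inside the source tree the real macros come from the headers pulled in via "testphdf5.h".

```c
/* Sketch of the HD-prefixed wrapper convention applied by this commit. */
#include <stdio.h>
#include <string.h>

#ifndef HDprintf
#define HDprintf printf   /* assumed fallback; inside the HDF5 tree this is a library macro */
#endif
#ifndef HDmemset
#define HDmemset memset
#endif
#ifndef HDfflush
#define HDfflush fflush
#endif

static void
report_rank(int mpi_rank, unsigned char *buffer, size_t len)
{
    /* Same call pattern the patch introduces in t_chunk_alloc.c and
     * testphdf5.c: wrapper macros instead of bare libc calls. */
    HDmemset(buffer, mpi_rank + 1, len);
    HDprintf("rank %d initialized %zu bytes\n", mpi_rank, len);
    HDfflush(stdout);
}

int
main(void)
{
    unsigned char buf[16];
    report_rank(0, buf, sizeof(buf));
    return 0;
}
```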
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c index 2340ae0..bfa0bfe 100644 --- a/testpar/t_chunk_alloc.c +++ b/testpar/t_chunk_alloc.c @@ -20,23 +20,23 @@ */ #include "testphdf5.h" -static int mpi_size, mpi_rank; +static int mpi_size, mpi_rank; #define DSET_NAME "ExtendibleArray" -#define CHUNK_SIZE 1000 /* #elements per chunk */ -#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ +#define CHUNK_SIZE 1000 /* #elements per chunk */ +#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ #define CLOSE 1 #define NO_CLOSE 0 static MPI_Offset get_filesize(const char *filename) { - int mpierr; - MPI_File fd; - MPI_Offset filesize; + int mpierr; + MPI_File fd; + MPI_Offset filesize; mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, - MPI_INFO_NULL, &fd); + MPI_INFO_NULL, &fd); VRFY((mpierr == MPI_SUCCESS), ""); mpierr = MPI_File_get_size(fd, &filesize); @@ -85,8 +85,8 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ long nchunks; herr_t hrc; - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); @@ -95,34 +95,33 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ /* Only MAINPROCESS should create the file. Others just wait. */ if (MAINPROCESS){ nchunks=chunk_factor*mpi_size; - dims[0]=nchunks*CHUNK_SIZE; - /* Create the data space with unlimited dimensions. */ - dataspace = H5Screate_simple (1, dims, maxdims); - VRFY((dataspace >= 0), ""); + dims[0]=nchunks*CHUNK_SIZE; + /* Create the data space with unlimited dimensions. */ + dataspace = H5Screate_simple (1, dims, maxdims); + VRFY((dataspace >= 0), ""); - memspace = H5Screate_simple(1, chunk_dims, NULL); - VRFY((memspace >= 0), ""); + memspace = H5Screate_simple(1, chunk_dims, NULL); + VRFY((memspace >= 0), ""); - /* Create a new file. If file exists its contents will be overwritten. */ - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, - H5P_DEFAULT); - VRFY((file_id >= 0), "H5Fcreate"); + /* Create a new file. If file exists its contents will be overwritten. */ + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((file_id >= 0), "H5Fcreate"); - /* Modify dataset creation properties, i.e. enable chunking */ - cparms = H5Pcreate(H5P_DATASET_CREATE); - VRFY((cparms >= 0), ""); + /* Modify dataset creation properties, i.e. enable chunking */ + cparms = H5Pcreate(H5P_DATASET_CREATE); + VRFY((cparms >= 0), ""); - hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); - VRFY((hrc >= 0), ""); + hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); + VRFY((hrc >= 0), ""); - hrc = H5Pset_chunk(cparms, 1, chunk_dims); - VRFY((hrc >= 0), ""); + hrc = H5Pset_chunk(cparms, 1, chunk_dims); + VRFY((hrc >= 0), ""); - /* Create a new dataset within the file using cparms creation properties. */ - dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); - VRFY((dataset >= 0), ""); + /* Create a new dataset within the file using cparms creation properties. 
*/ + dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); + VRFY((dataset >= 0), ""); - if(write_pattern == sec_last) { + if(write_pattern == sec_last) { HDmemset(buffer, 100, CHUNK_SIZE); count[0] = 1; @@ -138,28 +137,28 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ VRFY((hrc >= 0), "H5Dwrite"); } /* end if */ - /* Close resources */ - hrc = H5Dclose (dataset); - VRFY((hrc >= 0), ""); - dataset = -1; + /* Close resources */ + hrc = H5Dclose (dataset); + VRFY((hrc >= 0), ""); + dataset = -1; - hrc = H5Sclose (dataspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose (dataspace); + VRFY((hrc >= 0), ""); - hrc = H5Sclose (memspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose (memspace); + VRFY((hrc >= 0), ""); - hrc = H5Pclose (cparms); - VRFY((hrc >= 0), ""); + hrc = H5Pclose (cparms); + VRFY((hrc >= 0), ""); - hrc = H5Fclose (file_id); - VRFY((hrc >= 0), ""); - file_id = -1; + hrc = H5Fclose (file_id); + VRFY((hrc >= 0), ""); + file_id = -1; - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); } @@ -200,8 +199,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti int i; long nchunks; /* MPI Gubbins */ - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ /* Initialize MPI */ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); @@ -241,19 +240,19 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti /* all chunks are written by all the processes in an interleaved way*/ case write_all: - memset(buffer, mpi_rank+1, CHUNK_SIZE); - count[0] = 1; - stride[0] = 1; - block[0] = chunk_dims[0]; - for (i=0; i<nchunks/mpi_size; i++){ - offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0]; + HDmemset(buffer, mpi_rank+1, CHUNK_SIZE); + count[0] = 1; + stride[0] = 1; + block[0] = chunk_dims[0]; + for (i=0; i<nchunks/mpi_size; i++) { + offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0]; - hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); + hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); + VRFY((hrc >= 0), ""); - /* Write the buffer out */ - hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); - VRFY((hrc >= 0), "H5Dwrite"); + /* Write the buffer out */ + hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); + VRFY((hrc >= 0), "H5Dwrite"); } break; @@ -318,7 +317,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti * interleaved pattern. 
*/ static void -verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, +verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id, hid_t *dataset) { /* HDF5 gubbins */ @@ -372,8 +371,8 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in stride[0] = 1; block[0] = chunk_dims[0]; for (i=0; i<nchunks; i++){ - /* reset buffer values */ - memset(buffer, -1, CHUNK_SIZE); + /* reset buffer values */ + HDmemset(buffer, -1, CHUNK_SIZE); offset[0] = i*chunk_dims[0]; @@ -385,22 +384,22 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in VRFY((hrc >= 0), "H5Dread"); /* set expected value according the write pattern */ - switch (write_pattern) { - case all: - value = i%mpi_size + 1; - break; - case none: - value = 0; - break; - case sec_last: - if (i==nchunks-2) - value = 100; - else - value = 0; + switch (write_pattern) { + case all: + value = i%mpi_size + 1; + break; + case none: + value = 0; + break; + case sec_last: + if (i==nchunks-2) + value = 100; + else + value = 0; break; default: HDassert(0); - } + } /* verify content of the chunk */ for (index_l = 0; index_l < CHUNK_SIZE; index_l++) @@ -408,10 +407,10 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in } hrc = H5Sclose (dataspace); - VRFY((hrc >= 0), ""); + VRFY((hrc >= 0), ""); - hrc = H5Sclose (memspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose (memspace); + VRFY((hrc >= 0), ""); /* Can close some plists */ hrc = H5Pclose(access_plist); @@ -469,7 +468,7 @@ test_chunk_alloc(void) filename = (const char*)GetTestParameters(); if (VERBOSE_MED) - printf("Extend Chunked allocation test on file %s\n", filename); + HDprintf("Extend Chunked allocation test on file %s\n", filename); /* Case 1 */ /* Create chunked dataset without writing anything.*/ diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index e1ccbed..b89c790 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -29,7 +29,7 @@ int chunkdim1; int nerrors = 0; /* errors count */ int ndatasets = 300; /* number of datasets to create*/ int ngroups = 512; /* number of groups to create in root - * group. */ + * group. 
*/ int facc_type = FACC_MPIO; /*Test file access type */ int dxfer_coll_type = DXFER_COLLECTIVE_IO; @@ -79,15 +79,15 @@ void pause_proc(void) MPI_Get_processor_name(mpi_name, &mpi_namelen); if (MAINPROCESS) - while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){ - if (!loops++){ - printf("Proc %d (%*s, %d): to debug, attach %d\n", - mpi_rank, mpi_namelen, mpi_name, pid, pid); - } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); - fflush(stdout); + while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){ + if (!loops++){ + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", + mpi_rank, mpi_namelen, mpi_name, pid, pid); + } + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); + HDfflush(stdout); HDsleep(time_int); - } + } MPI_Barrier(MPI_COMM_WORLD); } @@ -108,18 +108,18 @@ int MPI_Init(int *argc, char ***argv) static void usage(void) { - printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " - "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); - printf("\t-m<n_datasets>" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n<n_groups>" - "\tset number of groups for the multiple group test\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", - ROW_FACTOR, COL_FACTOR); - printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " + "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); + HDprintf("\t-m<n_datasets>" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n<n_groups>" + "\tset number of groups for the multiple group test\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", + ROW_FACTOR, COL_FACTOR); + HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); } @@ -140,44 +140,44 @@ parse_options(int argc, char **argv) chunkdim1 = (dim1+9)/10; while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'm': ndatasets = atoi((*argv+1)+1); - if (ndatasets < 0){ + if (**(++argv) != '-'){ + break; + }else{ + switch(*(*argv+1)){ + case 'm': ndatasets = atoi((*argv+1)+1); + if (ndatasets < 0){ nerrors++; return(1); - } - break; + } + break; case 'n': ngroups = atoi((*argv+1)+1); - if (ngroups < 0){ - nerrors++; - return(1); - } - break; - case 'f': if (--argc < 1) { + if (ngroups < 0){ nerrors++; return(1); - } - if (**(++argv) == '-') { + } + break; + case 'f': if (--argc < 1) { nerrors++; return(1); - } - paraprefix = *argv; - break; - case 'i': /* Collective MPI-IO access with independent IO */ + } + if (**(++argv) == '-') { + nerrors++; + return(1); + } + paraprefix = *argv; + break; + case 'i': /* Collective MPI-IO access with independent IO */ dxfer_coll_type = DXFER_INDEPENDENT_IO; break; - case '2': /* Use the split-file driver with MPIO access */ + case '2': /* Use the split-file driver with MPIO access */ /* Can use $HDF5_METAPREFIX to define the */ /* meta-file-prefix. 
*/ facc_type = FACC_MPIO | FACC_SPLIT; break; - case 'd': /* dimensizes */ + case 'd': /* dimensizes */ if (--argc < 2){ - nerrors++; - return(1); + nerrors++; + return(1); } dim0 = atoi(*(++argv))*mpi_size; argc--; @@ -186,61 +186,61 @@ parse_options(int argc, char **argv) chunkdim0 = (dim0+9)/10; chunkdim1 = (dim1+9)/10; break; - case 'c': /* chunk dimensions */ + case 'c': /* chunk dimensions */ if (--argc < 2){ - nerrors++; - return(1); + nerrors++; + return(1); } chunkdim0 = atoi(*(++argv)); argc--; chunkdim1 = atoi(*(++argv)); break; - case 'h': /* print help message--return with nerrors set */ - return(1); - default: printf("Illegal option(%s)\n", *argv); - nerrors++; + case 'h': /* print help message--return with nerrors set */ return(1); + default: HDprintf("Illegal option(%s)\n", *argv); + nerrors++; + return(1); + } } - } } /*while*/ /* check validity of dimension and chunk sizes */ if (dim0 <= 0 || dim1 <= 0){ - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return(1); + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + nerrors++; + return(1); } if (chunkdim0 <= 0 || chunkdim1 <= 0){ - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return(1); + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + nerrors++; + return(1); } /* Make sure datasets can be divided into equal portions by the processes */ if ((dim0 % mpi_size) || (dim1 % mpi_size)){ - if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", - dim0, dim1, mpi_size); - nerrors++; - return(1); + if (MAINPROCESS) + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", + dim0, dim1, mpi_size); + nerrors++; + return(1); } /* compose the test filenames */ { - int i, n; + int i, n; - n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - for (i=0; i < n; i++) - if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) - == NULL){ - printf("h5_fixname failed\n"); - nerrors++; - return(1); - } - printf("Test filenames are:\n"); - for (i=0; i < n; i++) - printf(" %s\n", filenames[i]); + for (i=0; i < n; i++) + if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) + == NULL){ + HDprintf("h5_fixname failed\n"); + nerrors++; + return(1); + } + HDprintf("Test filenames are:\n"); + for (i=0; i < n; i++) + HDprintf(" %s\n", filenames[i]); } return(0); @@ -264,36 +264,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); if (l_facc_type == FACC_DEFAULT) - return (ret_pl); + return (ret_pl); if (l_facc_type == FACC_MPIO){ - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); - VRFY((ret >= 0), ""); + VRFY((ret >= 0), ""); ret = H5Pset_coll_metadata_write(ret_pl, TRUE); - VRFY((ret >= 0), ""); - return(ret_pl); + VRFY((ret >= 0), ""); + return(ret_pl); } if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){ - hid_t mpio_pl; - - mpio_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel 
access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return(ret_pl); + hid_t mpio_pl; + + mpio_pl = H5Pcreate (H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate (H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return(ret_pl); } /* unknown file access types */ @@ -323,18 +323,18 @@ int main(int argc, char **argv) dim1 = COL_FACTOR*mpi_size; if (MAINPROCESS){ - printf("===================================\n"); - printf("PHDF5 TESTS START\n"); - printf("===================================\n"); + HDprintf("===================================\n"); + HDprintf("PHDF5 TESTS START\n"); + HDprintf("===================================\n"); } /* Attempt to turn off atexit post processing so that in case errors - * happen during the test and the process is aborted, it will not get - * hang in the atexit post processing in which it may try to make MPI - * calls. By then, MPI calls may not work. - */ + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ if (H5dont_atexit() < 0){ - printf("Failed to turn off atexit processing. Continue.\n"); + HDprintf("Failed to turn off atexit processing. Continue.\n"); }; H5open(); h5_show_hostname(); @@ -344,10 +344,10 @@ int main(int argc, char **argv) /* Tests are generally arranged from least to most complexity... 
*/ AddTest("mpiodup", test_fapl_mpio_dup, NULL, - "fapl_mpio duplicate", NULL); + "fapl_mpio duplicate", NULL); AddTest("split", test_split_comm_access, NULL, - "dataset using split communicators", PARATESTFILE); + "dataset using split communicators", PARATESTFILE); #ifdef PB_OUT /* temporary: disable page buffering when parallel */ AddTest("page_buffer", test_page_buffer_access, NULL, @@ -355,57 +355,57 @@ int main(int argc, char **argv) #endif AddTest("props", test_file_properties, NULL, - "Coll Metadata file property settings", PARATESTFILE); + "Coll Metadata file property settings", PARATESTFILE); AddTest("idsetw", dataset_writeInd, NULL, - "dataset independent write", PARATESTFILE); + "dataset independent write", PARATESTFILE); AddTest("idsetr", dataset_readInd, NULL, - "dataset independent read", PARATESTFILE); + "dataset independent read", PARATESTFILE); AddTest("cdsetw", dataset_writeAll, NULL, - "dataset collective write", PARATESTFILE); + "dataset collective write", PARATESTFILE); AddTest("cdsetr", dataset_readAll, NULL, - "dataset collective read", PARATESTFILE); + "dataset collective read", PARATESTFILE); AddTest("eidsetw", extend_writeInd, NULL, - "extendible dataset independent write", PARATESTFILE); + "extendible dataset independent write", PARATESTFILE); AddTest("eidsetr", extend_readInd, NULL, - "extendible dataset independent read", PARATESTFILE); + "extendible dataset independent read", PARATESTFILE); AddTest("ecdsetw", extend_writeAll, NULL, - "extendible dataset collective write", PARATESTFILE); + "extendible dataset collective write", PARATESTFILE); AddTest("ecdsetr", extend_readAll, NULL, - "extendible dataset collective read", PARATESTFILE); + "extendible dataset collective read", PARATESTFILE); AddTest("eidsetw2", extend_writeInd2, NULL, - "extendible dataset independent write #2", PARATESTFILE); + "extendible dataset independent write #2", PARATESTFILE); AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE); AddTest("calloc", test_chunk_alloc, NULL, "parallel extend Chunked allocation on serial file", PARATESTFILE); AddTest("fltread", test_filter_read, NULL, - "parallel read of dataset written serially with filters", PARATESTFILE); + "parallel read of dataset written serially with filters", PARATESTFILE); #ifdef H5_HAVE_FILTER_DEFLATE AddTest("cmpdsetr", compress_readAll, NULL, - "compressed dataset collective read", PARATESTFILE); + "compressed dataset collective read", PARATESTFILE); #endif /* H5_HAVE_FILTER_DEFLATE */ AddTest("zerodsetr", zero_dim_dset, NULL, - "zero dim dset", PARATESTFILE); + "zero dim dset", PARATESTFILE); ndsets_params.name = PARATESTFILE; ndsets_params.count = ndatasets; AddTest("ndsetw", multiple_dset_write, NULL, - "multiple datasets write", &ndsets_params); + "multiple datasets write", &ndsets_params); ngroups_params.name = PARATESTFILE; ngroups_params.count = ngroups; AddTest("ngrpw", multiple_group_write, NULL, - "multiple groups write", &ngroups_params); + "multiple groups write", &ngroups_params); AddTest("ngrpr", multiple_group_read, NULL, - "multiple groups read", &ngroups_params); + "multiple groups read", &ngroups_params); AddTest("compact", compact_dataset, NULL, - "compact dataset test", PARATESTFILE); + "compact dataset test", PARATESTFILE); collngroups_params.name = PARATESTFILE; collngroups_params.count = ngroups; @@ -417,79 +417,79 @@ int main(int argc, char **argv) AddTest("bigdset", big_dataset, NULL, "big dataset test", PARATESTFILE); #else - printf("big dataset test will be 
skipped on Windows (JIRA HDDFV-8064)\n"); + HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); #endif AddTest("fill", dataset_fillvalue, NULL, - "dataset fill value", PARATESTFILE); + "dataset fill value", PARATESTFILE); AddTest("cchunk1", - coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE); + coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE); AddTest("cchunk2", - coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE); + coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE); AddTest("cchunk3", - coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE); + coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE); AddTest("cchunk4", - coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE); + coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE); if((mpi_size < 3)&& MAINPROCESS ) { - printf("Collective chunk IO optimization APIs "); - printf("needs at least 3 processes to participate\n"); - printf("Collective chunk IO API tests will be skipped \n"); + HDprintf("Collective chunk IO optimization APIs "); + HDprintf("needs at least 3 processes to participate\n"); + HDprintf("Collective chunk IO API tests will be skipped \n"); } AddTest((mpi_size <3)? "-cchunk5":"cchunk5" , - coll_chunk5,NULL, - "linked chunk collective IO without optimization",PARATESTFILE); + coll_chunk5,NULL, + "linked chunk collective IO without optimization",PARATESTFILE); AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6", - coll_chunk6,NULL, - "multi-chunk collective IO with direct request",PARATESTFILE); + coll_chunk6,NULL, + "multi-chunk collective IO with direct request",PARATESTFILE); AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7", - coll_chunk7,NULL, - "linked chunk collective IO with optimization",PARATESTFILE); + coll_chunk7,NULL, + "linked chunk collective IO with optimization",PARATESTFILE); AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8", - coll_chunk8,NULL, - "linked chunk collective IO transferring to multi-chunk",PARATESTFILE); + coll_chunk8,NULL, + "linked chunk collective IO transferring to multi-chunk",PARATESTFILE); AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9", - coll_chunk9,NULL, - "multiple chunk collective IO with optimization",PARATESTFILE); + coll_chunk9,NULL, + "multiple chunk collective IO with optimization",PARATESTFILE); AddTest((mpi_size < 3)? 
"-cchunk10" : "cchunk10", - coll_chunk10,NULL, - "multiple chunk collective IO transferring to independent IO",PARATESTFILE); + coll_chunk10,NULL, + "multiple chunk collective IO transferring to independent IO",PARATESTFILE); -/* irregular collective IO tests*/ + /* irregular collective IO tests*/ AddTest("ccontw", - coll_irregular_cont_write,NULL, - "collective irregular contiguous write",PARATESTFILE); + coll_irregular_cont_write,NULL, + "collective irregular contiguous write",PARATESTFILE); AddTest("ccontr", - coll_irregular_cont_read,NULL, - "collective irregular contiguous read",PARATESTFILE); + coll_irregular_cont_read,NULL, + "collective irregular contiguous read",PARATESTFILE); AddTest("cschunkw", - coll_irregular_simple_chunk_write,NULL, - "collective irregular simple chunk write",PARATESTFILE); + coll_irregular_simple_chunk_write,NULL, + "collective irregular simple chunk write",PARATESTFILE); AddTest("cschunkr", - coll_irregular_simple_chunk_read,NULL, - "collective irregular simple chunk read",PARATESTFILE); + coll_irregular_simple_chunk_read,NULL, + "collective irregular simple chunk read",PARATESTFILE); AddTest("ccchunkw", - coll_irregular_complex_chunk_write,NULL, - "collective irregular complex chunk write",PARATESTFILE); + coll_irregular_complex_chunk_write,NULL, + "collective irregular complex chunk write",PARATESTFILE); AddTest("ccchunkr", - coll_irregular_complex_chunk_read,NULL, - "collective irregular complex chunk read",PARATESTFILE); + coll_irregular_complex_chunk_read,NULL, + "collective irregular complex chunk read",PARATESTFILE); AddTest("null", null_dataset, NULL, - "null dataset test", PARATESTFILE); + "null dataset test", PARATESTFILE); io_mode_confusion_params.name = PARATESTFILE; io_mode_confusion_params.count = 0; /* value not used */ AddTest("I/Omodeconf", io_mode_confusion, NULL, - "I/O mode confusion test -- hangs quickly on failure", + "I/O mode confusion test -- hangs quickly on failure", &io_mode_confusion_params); if((mpi_size < 3) && MAINPROCESS) { - printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); - printf("rr_obj_hdr_flush_confusion test will be skipped \n"); + HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); + HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n"); } if(mpi_size > 2) { rr_obj_flush_confusion_params.name = PARATESTFILE; @@ -523,21 +523,21 @@ int main(int argc, char **argv) PARATESTFILE); AddTest("edpl", test_plist_ed, NULL, - "encode/decode Property Lists", NULL); + "encode/decode Property Lists", NULL); if((mpi_size < 2) && MAINPROCESS) { - printf("File Image Ops daisy chain test needs at least 2 processes.\n"); - printf("File Image Ops daisy chain test will be skipped \n"); + HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n"); + HDprintf("File Image Ops daisy chain test will be skipped \n"); } AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, "file image ops daisy chain", NULL); if((mpi_size < 2)&& MAINPROCESS ) { - printf("Atomicity tests need at least 2 processes to participate\n"); - printf("8 is more recommended.. Atomicity tests will be skipped \n"); + HDprintf("Atomicity tests need at least 2 processes to participate\n"); + HDprintf("8 is more recommended.. 
Atomicity tests will be skipped \n"); } else if (facc_type != FACC_MPIO && MAINPROCESS) { - printf("Atomicity tests will not work with a non MPIO VFD\n"); + HDprintf("Atomicity tests will not work with a non MPIO VFD\n"); } else if(mpi_size >= 2 && facc_type == FACC_MPIO){ AddTest("atomicity", dataset_atomicity, NULL, @@ -545,7 +545,7 @@ int main(int argc, char **argv) } AddTest("denseattr", test_dense_attr, NULL, - "Store Dense Attributes", PARATESTFILE); + "Store Dense Attributes", PARATESTFILE); AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, "Collective Metadata read with some ranks having no selection", PARATESTFILE); @@ -565,9 +565,9 @@ int main(int argc, char **argv) TestParseCmdLine(argc, argv); if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){ - printf("===================================\n" - " Using Independent I/O with file set view to replace collective I/O \n" - "===================================\n"); + HDprintf("===================================\n" + " Using Independent I/O with file set view to replace collective I/O \n" + "===================================\n"); } @@ -575,8 +575,8 @@ int main(int argc, char **argv) PerformTests(); /* make sure all processes are finished before final report, cleanup - * and exit. - */ + * and exit. + */ MPI_Barrier(MPI_COMM_WORLD); /* Display test summary, if requested */ @@ -592,16 +592,16 @@ int main(int argc, char **argv) { int temp; MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - nerrors=temp; + nerrors=temp; } if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); - if (nerrors) - printf("***PHDF5 tests detected %d errors***\n", nerrors); - else - printf("PHDF5 tests finished with no errors\n"); - printf("===================================\n"); + HDprintf("===================================\n"); + if (nerrors) + HDprintf("***PHDF5 tests detected %d errors***\n", nerrors); + else + HDprintf("PHDF5 tests finished with no errors\n"); + HDprintf("===================================\n"); } /* close HDF5 library */ |
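For context on the first file in the patch: t_chunk_alloc.c builds an extendible, chunked dataset whose storage is allocated eagerly, then checks that the file on disk has already grown to at least nchunks * CHUNK_SIZE bytes before any data is written. Below is a standalone sketch of that creation pattern using the same public API calls that appear in the diff; the output file name, the chunk count, and the omitted error checking are illustrative choices, not the test's own values.

```c
/* Sketch: extendible 1-D dataset with chunked layout and early allocation. */
#include "hdf5.h"

#define SKETCH_CHUNK_SIZE 1000  /* elements per chunk (same value the test uses) */
#define SKETCH_NCHUNKS    200   /* hypothetical chunk count for this example */

int
main(void)
{
    hsize_t dims[1]       = {SKETCH_NCHUNKS * SKETCH_CHUNK_SIZE};
    hsize_t maxdims[1]    = {H5S_UNLIMITED};
    hsize_t chunk_dims[1] = {SKETCH_CHUNK_SIZE};

    /* The real test wraps every call in its VRFY() assertion macro;
     * error checking is omitted here for brevity. */
    hid_t file  = H5Fcreate("chunk_alloc_sketch.h5", H5F_ACC_TRUNC,
                            H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(1, dims, maxdims);

    /* Dataset creation property list: chunked layout + early space allocation */
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, chunk_dims);
    H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);

    hid_t dset = H5Dcreate2(file, "ExtendibleArray", H5T_NATIVE_UCHAR, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* All chunks now exist on disk, so the file size can be compared
     * against the estimated size even though nothing has been written. */
    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);

    return 0;
}
```

With H5D_ALLOC_TIME_EARLY the chunk storage is reserved at H5Dcreate2() time, which is what lets the test compare the physical file size against the estimated size before any rank performs a write.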