Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_bigio.c        |  528
-rw-r--r--  testpar/t_cache.c        |   38
-rw-r--r--  testpar/t_cache_image.c  |   70
-rw-r--r--  testpar/t_coll_chunk.c   |   30
-rw-r--r--  testpar/t_file.c         |  101
-rw-r--r--  testpar/t_filter_read.c  |   66
-rw-r--r--  testpar/t_mdset.c        |  976
-rw-r--r--  testpar/t_mpi.c          |  192
-rw-r--r--  testpar/t_ph5basic.c     |   12
-rw-r--r--  testpar/t_prestart.c     |    2
-rw-r--r--  testpar/t_prop.c         |    2
-rw-r--r--  testpar/t_shapesame.c    |   54
-rw-r--r--  testpar/testpar.h        |   24
-rw-r--r--  testpar/testphdf5.h      |   58
14 files changed, 1074 insertions(+), 1079 deletions(-)
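The bulk of the changes below replace direct C library calls (printf, fprintf, sprintf, malloc, free, memset, exit) in the parallel tests with the library's HD-prefixed wrappers, alongside whitespace and indentation cleanup. As a minimal sketch of that convention, assuming thin pass-through definitions along the lines of those in H5private.h (the actual HDF5 macros may carry extra portability handling), the wrappers look roughly like this:

/* Hypothetical pass-through wrappers illustrating the HD-prefix convention;
 * the real definitions live in H5private.h and may differ in detail. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HDprintf(...)          printf(__VA_ARGS__)
#define HDfprintf(F, ...)      fprintf(F, __VA_ARGS__)
#define HDsprintf(S, ...)      sprintf(S, __VA_ARGS__)
#define HDmalloc(size)         malloc(size)
#define HDfree(ptr)            free(ptr)
#define HDmemset(dst, c, n)    memset(dst, c, n)
#define HDexit(status)         exit(status)

With wrappers of this kind in scope, a call such as printf("done.\n") in the tests becomes HDprintf("done.\n") with no change in behavior, which is the pattern repeated throughout the hunks that follow.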
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c index 1d882b8..9ca077c 100644 --- a/testpar/t_bigio.c +++ b/testpar/t_bigio.c @@ -1,7 +1,7 @@ #include "hdf5.h" #include "testphdf5.h" -#include "H5Dprivate.h" /* For Chunk tests */ +#include "H5Dprivate.h" /* For Chunk tests */ /* FILENAME and filenames must have the same number of names */ const char *FILENAME[2]={ "bigio_test.h5", @@ -17,7 +17,7 @@ const char *FILENAME[2]={ "bigio_test.h5", #define MAINPROCESS (!mpi_rank) /* define process 0 as main process */ /* Constants definitions */ -#define RANK 2 +#define RANK 2 #define IN_ORDER 1 #define OUT_OF_ORDER 2 @@ -33,12 +33,12 @@ const char *FILENAME[2]={ "bigio_test.h5", #define HYPER 1 #define POINT 2 -#define ALL 3 +#define ALL 3 /* Dataset data type. Int's can be easily octo dumped. */ typedef hsize_t B_DATATYPE; -int facc_type = FACC_MPIO; /*Test file access type */ +int facc_type = FACC_MPIO; /*Test file access type */ int dxfer_coll_type = DXFER_COLLECTIVE_IO; size_t bigcount = DXFER_BIGCOUNT; int nerrors = 0; @@ -54,12 +54,12 @@ hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type); /* * Setup the coordinates for point selection. */ -static void +static void set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - size_t num_points, + size_t num_points, hsize_t coords[], int order) { @@ -99,10 +99,10 @@ fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset) /* put some trivial data in the data_array */ for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1)); - dataptr++; - } + for (j=0; j < block[1]; j++){ + *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1)); + dataptr++; + } } } @@ -113,7 +113,7 @@ void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], - size_t num_points, + size_t num_points, hsize_t coords[], int order) { @@ -143,13 +143,13 @@ void point_set(hsize_t start[], } if(VERBOSE_MED) { - printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n", + HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n", (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], (unsigned long)(block[0] * block[1] * count[0] * count[1])); k = 0; for(i = 0; i < num_points ; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); k += 2; } } @@ -165,19 +165,19 @@ dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset) hsize_t i, j; /* print the column heading */ - printf("%-8s", "Cols:"); + HDprintf("%-8s", "Cols:"); for (j=0; j < block[1]; j++){ - printf("%3lu ", (unsigned long)(start[1]+j)); + HDprintf("%3lu ", (unsigned long)(start[1]+j)); } - printf("\n"); + HDprintf("\n"); /* print the slab data */ for (i=0; i < block[0]; i++){ - printf("Row %2lu: ", (unsigned long)(i+start[0])); - for (j=0; j < block[1]; j++){ - printf("%llu ", *dataptr++); - } - printf("\n"); + HDprintf("Row %2lu: ", (unsigned long)(i+start[0])); + for (j=0; j < block[1]; j++){ + HDprintf("%llu ", *dataptr++); + } + HDprintf("\n"); } } @@ -193,90 +193,90 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], /* print it if VERBOSE_MED */ if(VERBOSE_MED) { - printf("verify_data 
dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - dataset_print(start, block, original); - printf("compared values:\n"); - dataset_print(start, block, dataset); + HDprintf("verify_data dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], + (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + dataset_print(start, block, original); + HDprintf("compared values:\n"); + dataset_print(start, block, dataset); } vrfyerrs = 0; for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - if(*dataset != *original){ - if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n", + for (j=0; j < block[1]; j++){ + if(*dataset != *original){ + if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n", (unsigned long)i, (unsigned long)j, (unsigned long)(i+start[0]), (unsigned long)(j+start[1]), *(original), *(dataset)); - } - dataset++; - original++; - } - } + } + dataset++; + original++; + } + } } if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if(vrfyerrs) - printf("%d errors found in verify_data\n", vrfyerrs); + HDprintf("%d errors found in verify_data\n", vrfyerrs); return(vrfyerrs); } /* Set up the selection */ static void ccslab_set(int mpi_rank, - int mpi_size, - hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - int mode) + int mpi_size, + hsize_t start[], + hsize_t count[], + hsize_t stride[], + hsize_t block[], + int mode) { switch (mode){ case BYROW_CONT: - /* Each process takes a slabs of rows. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = space_dim1; - count[1] = space_dim2; - start[0] = mpi_rank*count[0]; - start[1] = 0; - - break; + /* Each process takes a slabs of rows. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = space_dim1; + count[1] = space_dim2; + start[0] = mpi_rank*count[0]; + start[1] = 0; + + break; case BYROW_DISCONT: - /* Each process takes several disjoint blocks. */ - block[0] = 1; - block[1] = 1; + /* Each process takes several disjoint blocks. */ + block[0] = 1; + block[1] = 1; stride[0] = 3; stride[1] = 3; count[0] = space_dim1/(stride[0]*block[0]); count[1] = (space_dim2)/(stride[1]*block[1]); - start[0] = space_dim1*mpi_rank; - start[1] = 0; + start[0] = space_dim1*mpi_rank; + start[1] = 0; - break; + break; case BYROW_SELECTNONE: - /* Each process takes a slabs of rows, there are + /* Each process takes a slabs of rows, there are no selections for the last process. 
*/ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1); - count[1] = space_dim2; - start[0] = mpi_rank*count[0]; - start[1] = 0; + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1); + count[1] = space_dim2; + start[0] = mpi_rank*count[0]; + start[1] = 0; - break; + break; case BYROW_SELECTUNBALANCE: /* The first one-third of the number of processes only @@ -284,14 +284,14 @@ ccslab_set(int mpi_rank, half of the domain. */ block[0] = 1; - count[0] = 2; + count[0] = 2; stride[0] = space_dim1*mpi_size/4+1; block[1] = space_dim2; count[1] = 1; start[1] = 0; stride[1] = 1; - if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank; - else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3); + if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank; + else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3); break; case BYROW_SELECTINCHUNK: @@ -299,33 +299,33 @@ ccslab_set(int mpi_rank, block[0] = 1; count[0] = 1; - start[0] = mpi_rank*space_dim1; + start[0] = mpi_rank*space_dim1; stride[0]= 1; - block[1] = space_dim2; - count[1] = 1; - stride[1]= 1; - start[1] = 0; + block[1] = space_dim2; + count[1] = 1; + stride[1]= 1; + start[1] = 0; break; default: - /* Unknown mode. Set it to cover the whole dataset. */ - block[0] = space_dim1*mpi_size; - block[1] = space_dim2; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - - break; + /* Unknown mode. Set it to cover the whole dataset. */ + block[0] = space_dim1*mpi_size; + block[1] = space_dim2; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + + break; } if (VERBOSE_MED){ - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0]*block[1]*count[0]*count[1])); + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], + (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0]*block[1]*count[0]*count[1])); } } @@ -336,10 +336,10 @@ ccslab_set(int mpi_rank, */ static void ccdataset_fill(hsize_t start[], - hsize_t stride[], - hsize_t count[], - hsize_t block[], - DATATYPE * dataset, + hsize_t stride[], + hsize_t count[], + hsize_t block[], + DATATYPE * dataset, int mem_selection) { DATATYPE *dataptr = dataset; @@ -377,28 +377,28 @@ ccdataset_fill(hsize_t start[], */ static void ccdataset_print(hsize_t start[], - hsize_t block[], - DATATYPE * dataset) + hsize_t block[], + DATATYPE * dataset) { DATATYPE *dataptr = dataset; hsize_t i, j; /* print the column heading */ - printf("Print only the first block of the dataset\n"); - printf("%-8s", "Cols:"); + HDprintf("Print only the first block of the dataset\n"); + HDprintf("%-8s", "Cols:"); for (j=0; j < block[1]; j++){ - printf("%3lu ", (unsigned long)(start[1]+j)); + HDprintf("%3lu ", (unsigned long)(start[1]+j)); } - printf("\n"); + HDprintf("\n"); /* print the slab data */ for (i=0; i < block[0]; i++){ - printf("Row 
%2lu: ", (unsigned long)(i+start[0])); - for (j=0; j < block[1]; j++){ - printf("%03d ", *dataptr++); - } - printf("\n"); + HDprintf("Row %2lu: ", (unsigned long)(i+start[0])); + for (j=0; j < block[1]; j++){ + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); } } @@ -407,11 +407,11 @@ ccdataset_print(hsize_t start[], */ static int ccdataset_vrfy(hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - DATATYPE *dataset, - DATATYPE *original, + hsize_t count[], + hsize_t stride[], + hsize_t block[], + DATATYPE *dataset, + DATATYPE *original, int mem_selection) { hsize_t i, j,k1,k2,k=0; @@ -420,14 +420,14 @@ ccdataset_vrfy(hsize_t start[], /* print it if VERBOSE_MED */ if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - ccdataset_print(start, block, original); - printf("compared values:\n"); - ccdataset_print(start, block, dataset); + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], + (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + ccdataset_print(start, block, original); + HDprintf("compared values:\n"); + ccdataset_print(start, block, dataset); } vrfyerrs = 0; @@ -449,7 +449,7 @@ ccdataset_vrfy(hsize_t start[], } if (*dataptr != *oriptr){ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", + HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); } @@ -459,9 +459,9 @@ ccdataset_vrfy(hsize_t start[], } } if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if (vrfyerrs) - printf("%d errors found in ccdataset_vrfy\n", vrfyerrs); + HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs); return(vrfyerrs); } @@ -478,28 +478,28 @@ static void dataset_big_write(void) { - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ hid_t dataset; - hid_t datatype; /* Datatype ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hid_t datatype; /* Datatype ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ hsize_t *coords = NULL; int i; - herr_t ret; /* Generic return value */ + herr_t ret; /* Generic return value */ hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ + hid_t acc_tpl; /* File access templates */ hsize_t h; size_t 
num_points; B_DATATYPE * wdata; /* allocate memory for data buffer */ - wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE)); + wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE)); VRFY((wdata != NULL), "wdata malloc succeeded"); /* setup file access template */ @@ -552,8 +552,8 @@ dataset_big_write(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, wdata); + MESG("data_array created"); + dataset_print(start, block, wdata); } /* set up the collective transfer properties list */ @@ -580,7 +580,7 @@ dataset_big_write(void) /* Each process takes a slabs of cols. */ - if (mpi_rank == 0) + if (mpi_rank == 0) HDprintf("\nTesting Dataset2 write by COL\n"); /* Create a large dataset */ dims[0] = bigcount; @@ -615,8 +615,8 @@ dataset_big_write(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, wdata); + MESG("data_array created"); + dataset_print(start, block, wdata); } /* set up the collective transfer properties list */ @@ -690,7 +690,7 @@ dataset_big_write(void) fill_datasets(start, dims, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); + MESG("data_array created"); } ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, @@ -729,7 +729,7 @@ dataset_big_write(void) num_points = bigcount; - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); VRFY((coords != NULL), "coords malloc succeeded"); set_coords (start, count, stride, block, num_points, coords, IN_ORDER); @@ -744,8 +744,8 @@ dataset_big_write(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, wdata); + MESG("data_array created"); + dataset_print(start, block, wdata); } /* create a memory dataspace */ @@ -778,7 +778,7 @@ dataset_big_write(void) ret = H5Dclose(dataset); VRFY((ret >= 0), "H5Dclose1 succeeded"); - free(wdata); + HDfree(wdata); H5Fclose(fid); } @@ -795,30 +795,30 @@ static void dataset_big_read(void) { hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ hid_t dataset; - B_DATATYPE *rdata = NULL; /* data buffer */ - B_DATATYPE *wdata = NULL; /* expected data buffer */ - hsize_t dims[RANK]; /* dataset dim sizes */ - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + B_DATATYPE *rdata = NULL; /* data buffer */ + B_DATATYPE *wdata = NULL; /* expected data buffer */ + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ int i,j,k; hsize_t h; size_t num_points; hsize_t *coords = NULL; - herr_t ret; /* Generic return value */ + herr_t ret; /* Generic return value */ /* allocate memory for data buffer */ - rdata = (B_DATATYPE 
*)malloc(bigcount*sizeof(B_DATATYPE)); + rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE)); VRFY((rdata != NULL), "rdata malloc succeeded"); - wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE)); + wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE)); VRFY((wdata != NULL), "wdata malloc succeeded"); - memset(rdata, 0, bigcount*sizeof(B_DATATYPE)); + HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE)); /* setup file access template */ acc_tpl = H5Pcreate (H5P_FILE_ACCESS); @@ -865,7 +865,7 @@ dataset_big_read(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); + MESG("data_array created"); } /* set up the collective transfer properties list */ @@ -885,7 +885,7 @@ dataset_big_read(void) /* verify the read data with original expected data */ ret = verify_data(start, count, stride, block, rdata, wdata); - if(ret) {fprintf(stderr, "verify failed\n"); exit(1);} + if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);} /* release all temporary handles. */ H5Sclose(file_dataspace); @@ -897,7 +897,7 @@ dataset_big_read(void) if (mpi_rank == 0) HDprintf("\nRead Testing Dataset2 by ROW\n"); - memset(rdata, 0, bigcount*sizeof(B_DATATYPE)); + HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE)); dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2 succeeded"); @@ -927,7 +927,7 @@ dataset_big_read(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); + MESG("data_array created"); } /* set up the collective transfer properties list */ @@ -947,7 +947,7 @@ dataset_big_read(void) /* verify the read data with original expected data */ ret = verify_data(start, count, stride, block, rdata, wdata); - if(ret) {fprintf(stderr, "verify failed\n"); exit(1);} + if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);} /* release all temporary handles. */ H5Sclose(file_dataspace); @@ -958,7 +958,7 @@ dataset_big_read(void) if (mpi_rank == 0) HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n"); - memset(rdata, 0, bigcount*sizeof(B_DATATYPE)); + HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE)); dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2 succeeded"); @@ -989,7 +989,7 @@ dataset_big_read(void) fill_datasets(start, dims, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); + MESG("data_array created"); } /* set up the collective transfer properties list */ @@ -1010,7 +1010,7 @@ dataset_big_read(void) if(mpi_rank == 0) { /* verify the read data with original expected data */ ret = verify_data(start, count, stride, block, rdata, wdata); - if(ret) {fprintf(stderr, "verify failed\n"); exit(1);} + if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);} } /* release all temporary handles. 
*/ @@ -1040,13 +1040,13 @@ dataset_big_read(void) fill_datasets(start, block, wdata); MESG("data_array initialized"); if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, wdata); + MESG("data_array created"); + dataset_print(start, block, wdata); } num_points = bigcount; - coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t)); + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); VRFY((coords != NULL), "coords malloc succeeded"); set_coords (start, count, stride, block, num_points, coords, IN_ORDER); @@ -1056,7 +1056,7 @@ dataset_big_read(void) ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); - if(coords) free(coords); + if(coords) HDfree(coords); /* create a memory dataspace */ /* Warning: H5Screate_simple requires an array of hsize_t elements @@ -1082,7 +1082,7 @@ dataset_big_read(void) VRFY((ret >= 0), "H5Dread dataset1 succeeded"); ret = verify_data(start, count, stride, block, rdata, wdata); - if(ret) {fprintf(stderr, "verify failed\n"); exit(1);} + if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);} /* release all temporary handles. */ H5Sclose(file_dataspace); @@ -1091,8 +1091,8 @@ dataset_big_read(void) ret = H5Dclose(dataset); VRFY((ret >= 0), "H5Dclose1 succeeded"); - free(wdata); - free(rdata); + HDfree(wdata); + HDfree(rdata); wdata = NULL; rdata = NULL; @@ -1115,8 +1115,8 @@ dataset_big_read(void) H5Fclose(fid); /* release data buffers */ - if(rdata) free(rdata); - if(wdata) free(wdata); + if(rdata) HDfree(rdata); + if(wdata) HDfree(wdata); } /* dataset_large_readAll */ @@ -1129,7 +1129,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) { hid_t ret_pl = -1; herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ + int mpi_rank; /* mpi variables */ /* need the rank for error checking macros */ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -1138,36 +1138,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); if (l_facc_type == FACC_DEFAULT) - return (ret_pl); + return (ret_pl); if (l_facc_type == FACC_MPIO){ - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); - VRFY((ret >= 0), ""); + VRFY((ret >= 0), ""); ret = H5Pset_coll_metadata_write(ret_pl, TRUE); - VRFY((ret >= 0), ""); - return(ret_pl); + VRFY((ret >= 0), ""); + return(ret_pl); } if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){ - hid_t mpio_pl; - - mpio_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return(ret_pl); + hid_t mpio_pl; + + mpio_pl = H5Pcreate (H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate (H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with 
communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return(ret_pl); } /* unknown file access types */ @@ -1176,17 +1176,17 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) /*------------------------------------------------------------------------- - * Function: coll_chunk1 + * Function: coll_chunk1 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with a single chunk * - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * July 12th, 2004 + * Programmer: Unknown + * July 12th, 2004 * * Modifications: * @@ -1215,7 +1215,7 @@ coll_chunk1(void) { const char *filename = FILENAME[0]; if (mpi_rank == 0) - printf("coll_chunk1\n"); + HDprintf("coll_chunk1\n"); coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1230,17 +1230,17 @@ coll_chunk1(void) /*------------------------------------------------------------------------- - * Function: coll_chunk2 + * Function: coll_chunk2 * - * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT + * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT selection with a single chunk * - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * July 12th, 2004 + * Programmer: Unknown + * July 12th, 2004 * * Modifications: * @@ -1269,7 +1269,7 @@ coll_chunk2(void) { const char *filename = FILENAME[0]; if (mpi_rank == 0) - printf("coll_chunk2\n"); + HDprintf("coll_chunk2\n"); coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1284,17 +1284,17 @@ coll_chunk2(void) /*------------------------------------------------------------------------- - * Function: coll_chunk3 + * Function: coll_chunk3 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * July 12th, 2004 + * Programmer: Unknown + * July 12th, 2004 * * Modifications: * @@ -1324,7 +1324,7 @@ coll_chunk3(void) { const char *filename = FILENAME[0]; if (mpi_rank == 0) - printf("coll_chunk3\n"); + HDprintf("coll_chunk3\n"); coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); @@ -1341,17 +1341,17 @@ coll_chunk3(void) //------------------------------------------------------------------------- // Borrowed/Modified (slightly) from t_coll_chunk.c /*------------------------------------------------------------------------- - * Function: coll_chunktest + * Function: coll_chunktest * * Purpose: The real testing routine for regular selection of collective chunking storage testing both write and read, - If anything fails, it may be read or write. There is no - separation test between read and write. + If anything fails, it may be read or write. There is no + separation test between read and write. 
* - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * * Modifications: * Remove invalid temporary property checkings for API_LINK_HARD and @@ -1359,8 +1359,8 @@ coll_chunk3(void) * Programmer: Jonathan Kim * Date: 2012-10-10 * - * Programmer: Unknown - * July 12th, 2004 + * Programmer: Unknown + * July 12th, 2004 * * Modifications: * @@ -1369,14 +1369,14 @@ coll_chunk3(void) static void coll_chunktest(const char* filename, - int chunk_factor, - int select_factor, + int chunk_factor, + int select_factor, int api_option, int file_selection, int mem_selection, int mode) { - hid_t file, dataset, file_dataspace, mem_dataspace; + hid_t file, dataset, file_dataspace, mem_dataspace; hid_t acc_plist,xfer_plist,crp_plist; hsize_t dims[RANK], chunk_dims[RANK]; @@ -1524,41 +1524,41 @@ coll_chunktest(const char* filename, } switch(api_option){ - case API_LINK_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO); + case API_LINK_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO); VRFY((status>= 0),"collective chunk optimization succeeded"); break; - case API_MULTI_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((status>= 0),"collective chunk optimization succeeded "); + case API_MULTI_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((status>= 0),"collective chunk optimization succeeded "); break; - case API_LINK_TRUE: + case API_LINK_TRUE: status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2); - VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); + VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); break; - case API_LINK_FALSE: + case API_LINK_FALSE: status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6); VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); break; - case API_MULTI_COLL: + case API_MULTI_COLL: status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50); VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded"); break; - case API_MULTI_IND: + case API_MULTI_IND: status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100); VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded"); break; - default: + default: ; } @@ -1615,7 +1615,7 @@ coll_chunktest(const char* filename, /* write data collectively */ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + xfer_plist, data_array1); VRFY((status >= 0),"dataset write succeeded"); #ifdef H5_HAVE_INSTRUMENTED_LIBRARY @@ -1820,22 +1820,22 @@ coll_chunktest(const char* filename, /***************************************************************************** * - * Function: do_express_test() + * Function: do_express_test() * - * Purpose: Do an MPI_Allreduce to obtain the maximum value returned - * by GetTestExpress() across all processes. Return this - * value. 
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned + * by GetTestExpress() across all processes. Return this + * value. * - * Envirmoment variables can be different across different - * processes. This function ensures that all processes agree - * on whether to do an express test. + * Envirmoment variables can be different across different + * processes. This function ensures that all processes agree + * on whether to do an express test. * - * Return: Success: Maximum of the values returned by - * GetTestExpress() across all processes. + * Return: Success: Maximum of the values returned by + * GetTestExpress() across all processes. * - * Failure: -1 + * Failure: -1 * - * Programmer: JRM -- 4/25/06 + * Programmer: JRM -- 4/25/06 * *****************************************************************************/ static int @@ -1868,7 +1868,7 @@ do_express_test(int world_mpi_rank) } /* do_express_test() */ -int main(int argc, char **argv) +int main(int argc, char **argv) { int ExpressMode = 0; hsize_t newsize = 1048576; @@ -1894,12 +1894,12 @@ int main(int argc, char **argv) * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0){ - HDprintf("Failed to turn off atexit processing. Continue.\n"); + HDprintf("Failed to turn off atexit processing. Continue.\n"); }; /* set alarm. */ ALARM_ON; - + ExpressMode = do_express_test(mpi_rank); dataset_big_write(); @@ -1923,7 +1923,7 @@ int main(int argc, char **argv) /* turn off alarm */ ALARM_OFF; - if (mpi_rank == 0) + if (mpi_rank == 0) HDremove(FILENAME[0]); /* close HDF5 library */ diff --git a/testpar/t_cache.c b/testpar/t_cache.c index 50e6d50..cde19fe 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -622,7 +622,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, FUNC, mpi_result); } @@ -641,7 +641,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, FUNC, mpi_result); } @@ -658,7 +658,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank, FUNC, mpi_result); } @@ -672,7 +672,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank, FUNC); } @@ -686,7 +686,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank, FUNC); } @@ -704,7 +704,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, FUNC, mpi_result); } @@ -720,7 +720,7 @@ set_up_file_communicator(void) nerrors++; success = FALSE; if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, FUNC, mpi_result); } @@ -7457,7 +7457,7 @@ main(int argc, char **argv) * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0){ - printf("%d:Failed to turn off atexit processing. Continue.\n", + HDprintf("%d:Failed to turn off atexit processing. 
Continue.\n", mpi_rank); }; H5open(); @@ -7476,24 +7476,24 @@ main(int argc, char **argv) } #ifdef H5_HAVE_MPE - if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); } + if ( MAINPROCESS ) { HDprintf(" Tests compiled for MPE.\n"); } virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES; #endif /* H5_HAVE_MPE */ if (MAINPROCESS){ - printf("===================================\n"); - printf("Parallel metadata cache tests\n"); - printf(" mpi_size = %d\n", mpi_size); - printf(" express_test = %d\n", express_test); - printf("===================================\n"); + HDprintf("===================================\n"); + HDprintf("Parallel metadata cache tests\n"); + HDprintf(" mpi_size = %d\n", mpi_size); + HDprintf(" express_test = %d\n", express_test); + HDprintf("===================================\n"); } if ( mpi_size < 3 ) { if ( MAINPROCESS ) { - printf(" Need at least 3 processes. Exiting.\n"); + HDprintf(" Need at least 3 processes. Exiting.\n"); } goto finish; } @@ -7639,15 +7639,15 @@ finish: */ MPI_Barrier(MPI_COMM_WORLD); if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); + HDprintf("===================================\n"); if (failures){ - printf("***metadata cache tests detected %d failures***\n", + HDprintf("***metadata cache tests detected %d failures***\n", failures); } else{ - printf("metadata cache tests finished with no failures\n"); + HDprintf("metadata cache tests finished with no failures\n"); } - printf("===================================\n"); + HDprintf("===================================\n"); } takedown_derived_types(); diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c index e158d69..08d455d 100644 --- a/testpar/t_cache_image.c +++ b/testpar/t_cache_image.c @@ -519,7 +519,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset) /* create the dataset */ if ( pass ) { - sprintf(dset_name, "/dset%03d", i); + HDsprintf(dset_name, "/dset%03d", i); dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, properties, H5P_DEFAULT); @@ -713,7 +713,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset) if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m); } @@ -830,7 +830,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset) while ( ( pass ) && ( i <= max_dset ) ) { - sprintf(dset_name, "/dset%03d", i); + HDsprintf(dset_name, "/dset%03d", i); if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) { @@ -1187,7 +1187,7 @@ open_hdf5_file(const hbool_t create_file, } else { - file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE); + file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE); if ( file_ptr == NULL ) { @@ -1446,7 +1446,7 @@ par_create_dataset(int dset_num, show_progress = (show_progress && (mpi_rank == 0)); verbose = (verbose && (mpi_rank == 0)); - sprintf(dset_name, "/dset%03d", dset_num); + HDsprintf(dset_name, "/dset%03d", dset_num); if ( show_progress ) { HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name); @@ -1750,7 +1750,7 @@ par_create_dataset(int dset_num, if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num); } @@ -1848,7 +1848,7 @@ par_delete_dataset(int dset_num, show_progress = (show_progress && (mpi_rank == 0)); - sprintf(dset_name, "/dset%03d", dset_num); + HDsprintf(dset_name, "/dset%03d", dset_num); if ( show_progress ) { HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, 
dset_name); @@ -1932,8 +1932,8 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size ) int child_status; pid_t child_pid; - sprintf(file_name_idx_str, "%d", file_name_idx); - sprintf(mpi_size_str, "%d", mpi_size); + HDsprintf(file_name_idx_str, "%d", file_name_idx); + HDsprintf(mpi_size_str, "%d", mpi_size); child_pid = fork(); @@ -1954,7 +1954,7 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size ) HDfprintf(stdout, "execl() of ici process failed. errno = %d(%s)\n", errno, strerror(errno)); - exit(1); + HDexit(1); } } else if ( child_pid != -1 ) { @@ -2056,7 +2056,7 @@ par_verify_dataset(int dset_num, show_progress = (show_progress && (mpi_rank == 0)); verbose = (verbose && (mpi_rank == 0)); - sprintf(dset_name, "/dset%03d", dset_num); + HDsprintf(dset_name, "/dset%03d", dset_num); if ( show_progress ) { HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name); @@ -2233,7 +2233,7 @@ par_verify_dataset(int dset_num, if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num); } @@ -2448,7 +2448,7 @@ serial_verify_dataset(int dset_num, hid_t dset_id = -1; hid_t filespace_id = -1; - sprintf(dset_name, "/dset%03d", dset_num); + HDsprintf(dset_name, "/dset%03d", dset_num); if ( show_progress ) { HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name); @@ -2603,7 +2603,7 @@ serial_verify_dataset(int dset_num, if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", j, k, dset_num); } @@ -2801,7 +2801,7 @@ usage(void) int i = 0; while(s[i] != NULL) { - fprintf(stdout, "%s", s[i]); + HDfprintf(stdout, "%s", s[i]); i++; } @@ -2876,7 +2876,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset) /* open the dataset */ if ( pass ) { - sprintf(dset_name, "/dset%03d", i); + HDsprintf(dset_name, "/dset%03d", i); dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT); if ( dataset_ids[i] < 0 ) { @@ -3016,7 +3016,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset) if ( verbose ) { - fprintf(stdout, + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m); } @@ -4137,31 +4137,31 @@ main(int argc, char **argv) if ( pass ) { - printf("done.\n"); + HDprintf("done.\n"); HDfflush(stdout); } else { - printf("failed.\n"); - exit(1); + HDprintf("failed.\n"); + HDexit(1); } i++; } HDfprintf(stdout, "Test file construction complete.\n"); - exit(0); + HDexit(0); } else if ( ici ) { if ( serial_insert_cache_image(file_idx, mpi_size) ) { - exit(0); + HD exit(0); } else { HDfprintf(stderr, "\n\nCache image insertion failed.\n"); HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg); - exit(1); + HDexit(1); } } @@ -4178,24 +4178,24 @@ main(int argc, char **argv) * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0){ - printf("%d:Failed to turn off atexit processing. Continue.\n", + HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank); }; H5open(); if ( mpi_rank == 0 ) { - printf("===================================\n"); - printf("Parallel metadata cache image tests\n"); - printf(" mpi_size = %d\n", mpi_size); - printf("===================================\n"); + HDprintf("===================================\n"); + HDprintf("Parallel metadata cache image tests\n"); + HDprintf(" mpi_size = %d\n", mpi_size); + HDprintf("===================================\n"); } if ( mpi_size < 2 ) { if ( mpi_rank == 0 ) { - printf(" Need at least 2 processes. 
Exiting.\n"); + HDprintf(" Need at least 2 processes. Exiting.\n"); } goto finish; } @@ -4220,7 +4220,7 @@ main(int argc, char **argv) HDfprintf(stdout, "execl() of setup process failed. errno = %d(%s)\n", errno, strerror(errno)); - exit(1); + HDexit(1); } } else if ( child_pid != -1 ) { @@ -4271,16 +4271,16 @@ finish: MPI_Barrier(MPI_COMM_WORLD); if ( mpi_rank == 0 ) { /* only process 0 reports */ - sleep(10); - printf("===================================\n"); + HDsleep(10); + HDprintf("===================================\n"); if ( nerrs > 0 ) { - printf("***metadata cache image tests detected %d failures***\n", + HDprintf("***metadata cache image tests detected %d failures***\n", nerrs); } else { - printf("metadata cache image tests finished with no failures\n"); + HDprintf("metadata cache image tests finished with no failures\n"); } - printf("===================================\n"); + HDprintf("===================================\n"); } /* takedown_derived_types(); */ diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index c6fa3d4..40cc1ca 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -1134,7 +1134,7 @@ ccslab_set(int mpi_rank, break; } if (VERBOSE_MED){ - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], (unsigned long)(block[0]*block[1]*count[0]*count[1])); @@ -1197,20 +1197,20 @@ ccdataset_print(hsize_t start[], hsize_t i, j; /* print the column heading */ - printf("Print only the first block of the dataset\n"); - printf("%-8s", "Cols:"); + HDprintf("Print only the first block of the dataset\n"); + HDprintf("%-8s", "Cols:"); for (j=0; j < block[1]; j++){ - printf("%3lu ", (unsigned long)(start[1]+j)); + HDprintf("%3lu ", (unsigned long)(start[1]+j)); } - printf("\n"); + HDprintf("\n"); /* print the slab data */ for (i=0; i < block[0]; i++){ - printf("Row %2lu: ", (unsigned long)(i+start[0])); + HDprintf("Row %2lu: ", (unsigned long)(i+start[0])); for (j=0; j < block[1]; j++){ - printf("%03d ", *dataptr++); + HDprintf("%03d ", *dataptr++); } - printf("\n"); + HDprintf("\n"); } } @@ -1233,13 +1233,13 @@ ccdataset_vrfy(hsize_t start[], /* print it if VERBOSE_MED */ if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); + HDprintf("original values:\n"); ccdataset_print(start, block, original); - printf("compared values:\n"); + HDprintf("compared values:\n"); ccdataset_print(start, block, dataset); } @@ -1262,7 +1262,7 @@ ccdataset_vrfy(hsize_t start[], } if (*dataptr != *oriptr){ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", + HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); } @@ -1272,8 +1272,8 @@ 
ccdataset_vrfy(hsize_t start[], } } if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if (vrfyerrs) - printf("%d errors found in ccdataset_vrfy\n", vrfyerrs); + HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs); return(vrfyerrs); } diff --git a/testpar/t_file.c b/testpar/t_file.c index a3c007d..e3ce346 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -24,14 +24,14 @@ /* * This file needs to access private information from the H5F package. */ -#define H5AC_FRIEND /*suppress error about including H5ACpkg */ +#define H5AC_FRIEND /*suppress error about including H5ACpkg */ #include "H5ACpkg.h" -#define H5C_FRIEND /*suppress error about including H5Cpkg */ +#define H5C_FRIEND /*suppress error about including H5Cpkg */ #include "H5Cpkg.h" -#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_FRIEND /*suppress error about including H5Fpkg */ #define H5F_TESTING #include "H5Fpkg.h" -#define H5MF_FRIEND /*suppress error about including H5MFpkg */ +#define H5MF_FRIEND /*suppress error about including H5MFpkg */ #include "H5MFpkg.h" #define NUM_DSETS 5 @@ -39,7 +39,7 @@ int mpi_size, mpi_rank; static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy); -static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, +static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, size_t page_buffer_size); /* @@ -60,15 +60,15 @@ test_split_comm_access(void) MPI_Info info = MPI_INFO_NULL; int is_old, mrc; int newrank, newprocs; - hid_t fid; /* file IDs */ - hid_t acc_tpl; /* File access properties */ - herr_t ret; /* generic return value */ + hid_t fid; /* file IDs */ + hid_t acc_tpl; /* File access properties */ + herr_t ret; /* generic return value */ const char *filename; filename = (const char *)GetTestParameters(); if (VERBOSE_MED) - printf("Split Communicator access test on file %s\n", - filename); + HDprintf("Split Communicator access test on file %s\n", + filename); /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); @@ -80,35 +80,35 @@ test_split_comm_access(void) MPI_Comm_rank(comm,&newrank); if (is_old){ - /* odd-rank processes */ - mrc = MPI_Barrier(comm); - VRFY((mrc==MPI_SUCCESS), ""); + /* odd-rank processes */ + mrc = MPI_Barrier(comm); + VRFY((mrc==MPI_SUCCESS), ""); }else{ - /* even-rank processes */ - int sub_mpi_rank; /* rank in the sub-comm */ - MPI_Comm_rank(comm,&sub_mpi_rank); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* close the file */ - ret=H5Fclose(fid); - VRFY((ret >= 0), ""); - - /* delete the test file */ - if (sub_mpi_rank == 0){ - mrc = MPI_File_delete((char *)filename, info); - /*VRFY((mrc==MPI_SUCCESS), ""); */ - } + /* even-rank processes */ + int sub_mpi_rank; /* rank in the sub-comm */ + MPI_Comm_rank(comm,&sub_mpi_rank); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + 
ret=H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* close the file */ + ret=H5Fclose(fid); + VRFY((ret >= 0), ""); + + /* delete the test file */ + if (sub_mpi_rank == 0){ + mrc = MPI_File_delete((char *)filename, info); + /*VRFY((mrc==MPI_SUCCESS), ""); */ + } } mrc = MPI_Comm_free(&comm); VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free succeeded"); @@ -126,7 +126,7 @@ test_page_buffer_access(void) haddr_t raw_addr, meta_addr; int *data; H5F_t *f = NULL; - herr_t ret; /* generic return value */ + herr_t ret; /* generic return value */ const char *filename; hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ @@ -136,7 +136,7 @@ test_page_buffer_access(void) filename = (const char *)GetTestParameters(); if (VERBOSE_MED) - printf("Page Buffer Usage in Parallel %s\n", filename); + HDprintf("Page Buffer Usage in Parallel %s\n", filename); fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); VRFY((fapl >= 0), "create_faccess_plist succeeded"); @@ -178,7 +178,6 @@ test_page_buffer_access(void) /* intialize all the elements to have a value of -1 */ for(i=0 ; i<num_elements ; i++) data[i] = -1; - if(MAINPROCESS) { hid_t fapl_self; @@ -400,7 +399,7 @@ test_page_buffer_access(void) ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data); VRFY((ret == 0), ""); VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); - for (i=0; i < 50; i++) + for (i=0; i < 50; i++) VRFY((data[i] == -1), "Read different values than written"); /* close the file */ @@ -504,28 +503,28 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str VRFY((mem_dataspace >= 0), ""); for(k=0 ; k<NUM_DSETS; k++) { - sprintf(dset_name, "D1dset%d", k); + HDsprintf(dset_name, "D1dset%d", k); dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dset_id >= 0), ""); ret = H5Dclose(dset_id); VRFY((ret == 0), ""); - sprintf(dset_name, "D2dset%d", k); + HDsprintf(dset_name, "D2dset%d", k); dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dset_id >= 0), ""); ret = H5Dclose(dset_id); VRFY((ret == 0), ""); - sprintf(dset_name, "D3dset%d", k); + HDsprintf(dset_name, "D3dset%d", k); dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dset_id >= 0), ""); ret = H5Dclose(dset_id); VRFY((ret == 0), ""); - sprintf(dset_name, "dset%d", k); + HDsprintf(dset_name, "dset%d", k); dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dset_id >= 0), ""); @@ -549,13 +548,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str for (i=0; i < num_elements; i++) VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed"); - sprintf(dset_name, "D1dset%d", k); + HDsprintf(dset_name, "D1dset%d", k); ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); VRFY((ret == 0), ""); - sprintf(dset_name, "D2dset%d", k); + HDsprintf(dset_name, "D2dset%d", k); ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); VRFY((ret == 0), ""); - sprintf(dset_name, "D3dset%d", k); + HDsprintf(dset_name, "D3dset%d", k); ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); VRFY((ret == 0), ""); } @@ -657,7 +656,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, VRFY((mem_dataspace >= 0), ""); for(k=0 ; k<NUM_DSETS; k++) { - sprintf(dset_name, "dset%d", k); + HDsprintf(dset_name, "dset%d", k); dset_id = H5Dopen2(grp_id, 
dset_name, H5P_DEFAULT); VRFY((dset_id >= 0), ""); @@ -741,7 +740,7 @@ void test_file_properties(void) { hid_t fid; /* HDF5 file ID */ - hid_t fapl_id; /* File access plist */ + hid_t fapl_id; /* File access plist */ hbool_t is_coll; const char *filename; MPI_Comm comm = MPI_COMM_WORLD; diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index 4556b01..28baed5 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -37,9 +37,9 @@ static int mpi_size, mpi_rank; #define HS_DIM1 200 #define HS_DIM2 100 - + /*------------------------------------------------------------------------- - * Function: filter_read_internal + * Function: filter_read_internal * * Purpose: Tests parallel reading of a 2D dataset written serially using * filters. During the parallel reading phase, the dataset is @@ -54,13 +54,13 @@ static void filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) { - hid_t file, dataset; /* HDF5 IDs */ - hid_t access_plist; /* Access property list ID */ - hid_t sid, memspace; /* Dataspace IDs */ - hsize_t size[2]; /* Dataspace dimensions */ - hsize_t hs_offset[2]; /* Hyperslab offset */ - hsize_t hs_size[2]; /* Hyperslab size */ - size_t i, j; /* Local index variables */ + hid_t file, dataset; /* HDF5 IDs */ + hid_t access_plist; /* Access property list ID */ + hid_t sid, memspace; /* Dataspace IDs */ + hsize_t size[2]; /* Dataspace dimensions */ + hsize_t hs_offset[2]; /* Hyperslab offset */ + hsize_t hs_size[2]; /* Hyperslab size */ + size_t i, j; /* Local index variables */ char name[32] = "dataset"; herr_t hrc; /* Error status */ int *points = NULL; /* Writing buffer for entire dataset */ @@ -151,17 +151,17 @@ filter_read_internal(const char *filename, hid_t dcpl, for (j=0; j<hs_size[1]; j++) { if(points[i*size[1]+(size_t)hs_offset[1]+j] != check[i*hs_size[1]+j]) { - fprintf(stderr," Read different values than written.\n"); - fprintf(stderr," At index %lu,%lu\n", - (unsigned long)(i), - (unsigned long)(hs_offset[1]+j)); - fprintf(stderr," At original: %d\n", - (int)points[i*size[1]+(size_t)hs_offset[1]+j]); - fprintf(stderr," At returned: %d\n", - (int)check[i*hs_size[1]+j]); + HDfprintf(stderr," Read different values than written.\n"); + HDfprintf(stderr," At index %lu,%lu\n", + (unsigned long)(i), + (unsigned long)(hs_offset[1]+j)); + HDfprintf(stderr," At original: %d\n", + (int)points[i*size[1]+(size_t)hs_offset[1]+j]); + HDfprintf(stderr," At returned: %d\n", + (int)check[i*hs_size[1]+j]); VRFY(FALSE, ""); - } - } + } + } } /* Get the storage size of the dataset */ @@ -194,10 +194,10 @@ filter_read_internal(const char *filename, hid_t dcpl, /*------------------------------------------------------------------------- * Function: test_filter_read * - * Purpose: Tests parallel reading of datasets written serially using + * Purpose: Tests parallel reading of datasets written serially using * several (combinations of) filters. 
* - * Programmer: Christian Chilan + * Programmer: Christian Chilan * Tuesday, May 15, 2007 * * Modifications: @@ -208,7 +208,7 @@ filter_read_internal(const char *filename, hid_t dcpl, void test_filter_read(void) { - hid_t dc; /* HDF5 IDs */ + hid_t dc; /* HDF5 IDs */ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ hsize_t null_size; /* Size of dataset without filters */ unsigned chunk_opts; /* Chunk options */ @@ -236,7 +236,7 @@ test_filter_read(void) filename = GetTestParameters(); if(VERBOSE_MED) - printf("Parallel reading of dataset written with filters %s\n", filename); + HDprintf("Parallel reading of dataset written with filters %s\n", filename); /*---------------------------------------------------------- * STEP 0: Test without filters. @@ -448,10 +448,10 @@ test_filter_read(void) /* Make sure encoding is enabled */ if(h5_szip_can_encode() == 1) { - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); VRFY(hrc>=0, "H5Pset_szip"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename,dc,&combo_size); } /* Clean up objects used for this test */ @@ -461,25 +461,25 @@ test_filter_read(void) /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ /* Make sure encoding is enabled */ if(h5_szip_can_encode() == 1) { - dc = H5Pcreate(H5P_DATASET_CREATE); + dc = H5Pcreate(H5P_DATASET_CREATE); VRFY(dc>=0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); + hrc = H5Pset_chunk (dc, 2, chunk_size); VRFY(hrc>=0, "H5Pset_chunk"); - hrc = H5Pset_shuffle (dc); + hrc = H5Pset_shuffle (dc); VRFY(hrc>=0, "H5Pset_shuffle"); - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); VRFY(hrc>=0, "H5Pset_szip"); - hrc = H5Pset_fletcher32 (dc); + hrc = H5Pset_fletcher32 (dc); VRFY(hrc>=0, "H5Pset_fletcher32"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename,dc,&combo_size); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); + /* Clean up objects used for this test */ + hrc = H5Pclose (dc); VRFY(hrc>=0, "H5Pclose"); } diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index 7f75d20..e9f4101 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -21,7 +21,7 @@ enum obj_type { is_group, is_dset }; -static int get_size(void); +static int get_size(void); static void write_dataset(hid_t, hid_t, hid_t); static int read_dataset(hid_t, hid_t, hid_t); static void create_group_recursive(hid_t, hid_t, hid_t, int); @@ -54,13 +54,9 @@ get_size(void) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); if(mpi_size > size ) { - if((mpi_size % 2) == 0 ) { - size = mpi_size; - } else { - size = mpi_size + 1; } } @@ -79,7 +75,7 @@ get_size(void) void zero_dim_dset(void) { int mpi_size, mpi_rank; - const char *filename; + const char *filename; hid_t fid, plist, dcpl, dsid, sid; hsize_t dim, chunk_dim; herr_t ret; @@ -133,27 +129,27 @@ void zero_dim_dset(void) * Example of using PHDF5 to create ndatasets datasets. Each process write * a slab of array to the file. * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. + * Changes: Updated function to use a dynamically calculated size, + * instead of the old SIZE #define. This should allow it + * to function with an arbitrary number of processors. 
* - * JRM - 8/11/04 + * JRM - 8/11/04 */ void multiple_dset_write(void) { - int i, j, n, mpi_size, mpi_rank, size; + int i, j, n, mpi_size, mpi_rank, size; hid_t iof, plist, dataset, memspace, filespace; hid_t dcpl; /* Dataset creation property list */ hsize_t chunk_origin [DIM]; hsize_t chunk_dims [DIM], file_dims [DIM]; hsize_t count[DIM]={1,1}; - double * outme = NULL; + double *outme = NULL; double fill=1.0; /* Fill value */ - char dname [100]; + char dname [100]; herr_t ret; - const H5Ptest_param_t *pt; - char *filename; - int ndatasets; + const H5Ptest_param_t *pt; + char *filename; + int ndatasets; pt = GetTestParameters(); filename = pt->name; @@ -190,23 +186,23 @@ void multiple_dset_write(void) VRFY((ret>=0), "set fill-value succeeded"); for(n = 0; n < ndatasets; n++) { - sprintf(dname, "dataset %d", n); - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), dname); + HDsprintf(dname, "dataset %d", n); + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), dname); - /* calculate data to write */ - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - outme [(i * size) + j] = n*1000 + mpi_rank; + /* calculate data to write */ + for(i = 0; i < size; i++) + for(j = 0; j < size; j++) + outme [(i * size) + j] = n*1000 + mpi_rank; - H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); + H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); - H5Dclose(dataset); + H5Dclose(dataset); #ifdef BARRIER_CHECKS - if(!((n+1) % 10)) { - printf("created %d datasets\n", n+1); - MPI_Barrier(MPI_COMM_WORLD); - } + if(!((n+1) % 10)) { + HDprintf("created %d datasets\n", n+1); + MPI_Barrier(MPI_COMM_WORLD); + } #endif /* BARRIER_CHECKS */ } @@ -221,21 +217,21 @@ void multiple_dset_write(void) /* Example of using PHDF5 to create, write, and read compact dataset. * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. + * Changes: Updated function to use a dynamically calculated size, + * instead of the old SIZE #define. This should allow it + * to function with an arbitrary number of processors. * - * JRM - 8/11/04 + * JRM - 8/11/04 */ void compact_dataset(void) { - int i, j, mpi_size, mpi_rank, size, err_num=0; - hid_t iof, plist, dcpl, dxpl, dataset, filespace; + int i, j, mpi_size, mpi_rank, size, err_num=0; + hid_t iof, plist, dcpl, dxpl, dataset, filespace; hsize_t file_dims [DIM]; - double * outme; - double * inme; - char dname[]="dataset"; - herr_t ret; + double *outme; + double *inme; + char dname[]="dataset"; + herr_t ret; const char *filename; size = get_size(); @@ -278,15 +274,15 @@ void compact_dataset(void) ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret>= 0),"set independent IO collectively succeeded"); } /* Recalculate data to write. Each process writes the same data. 
*/ for(i = 0; i < size; i++) for(j = 0; j < size; j++) - outme[(i * size) + j] =(i + j) * 1000; + outme[(i * size) + j] =(i + j) * 1000; ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme); VRFY((ret >= 0), "H5Dwrite succeeded"); @@ -308,8 +304,8 @@ void compact_dataset(void) ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret>= 0),"set independent IO collectively succeeded"); } dataset = H5Dopen2(iof, dname, H5P_DEFAULT); @@ -338,7 +334,7 @@ void compact_dataset(void) for(j = 0; j < size; j++) if(inme[(i * size) + j] != outme[(i * size) + j]) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]); + HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]); H5Pclose(plist); H5Pclose(dxpl); @@ -352,24 +348,24 @@ void compact_dataset(void) * Example of using PHDF5 to create, write, and read dataset and attribute * of Null dataspace. * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. + * Changes: Removed the assert that mpi_size <= the SIZE #define. + * As best I can tell, this assert isn't needed here, + * and in any case, the SIZE #define is being removed + * in an update of the functions in this file to run + * with an arbitrary number of processes. * * JRM - 8/24/04 */ void null_dataset(void) { - int mpi_size, mpi_rank; - hid_t iof, plist, dxpl, dataset, attr, sid; + int mpi_size, mpi_rank; + hid_t iof, plist, dxpl, dataset, attr, sid; unsigned uval=2; /* Buffer for writing to dataset */ - int val=1; /* Buffer for writing to attribute */ - int nelem; - char dname[]="dataset"; - char attr_name[]="attribute"; - herr_t ret; + int val=1; /* Buffer for writing to attribute */ + int nelem; + char dname[]="dataset"; + char attr_name[]="attribute"; + herr_t ret; const char *filename; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -397,8 +393,8 @@ void null_dataset(void) ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret>= 0),"set independent IO collectively succeeded"); } @@ -431,8 +427,8 @@ void null_dataset(void) ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret>= 0),"set independent IO collectively succeeded"); } @@ -464,11 +460,11 @@ void null_dataset(void) * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over * the boundary of interest. 
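Nearly every test in this file sets up its transfer property list the same way: ask for collective MPI-IO, then, when the run is configured for DXFER_INDEPENDENT_IO, keep the collective call chain but perform the actual I/O independently. Condensed (VRFY error checks omitted), the recurring pattern is:

    hid_t  dxpl = H5Pcreate(H5P_DATASET_XFER);                  /* transfer plist    */
    herr_t ret  = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); /* collective MPI-IO */

    if (dxfer_coll_type == DXFER_INDEPENDENT_IO)
        /* stay on the collective code path but do independent I/O per process */
        ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
    H5Pclose(dxpl);

Here dataset, outme, and dxfer_coll_type stand in for the variables of whichever test is using the pattern.
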
* - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. + * Changes: Removed the assert that mpi_size <= the SIZE #define. + * As best I can tell, this assert isn't needed here, + * and in any case, the SIZE #define is being removed + * in an update of the functions in this file to run + * with an arbitrary number of processes. * * JRM - 8/11/04 */ @@ -592,13 +588,13 @@ void big_dataset(void) * not have actual data written to the entire raw data area and relies on the * default fill value of zeros to work correctly. * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. + * Changes: Removed the assert that mpi_size <= the SIZE #define. + * As best I can tell, this assert isn't needed here, + * and in any case, the SIZE #define is being removed + * in an update of the functions in this file to run + * with an arbitrary number of processes. * - * Also added code to free dynamically allocated buffers. + * Also added code to free dynamically allocated buffers. * * JRM - 8/11/04 */ @@ -619,7 +615,7 @@ void dataset_fillvalue(void) hsize_t dset_size; /* Dataset size */ int *rdata, *wdata; /* Buffers for data to read and write */ int *twdata, *trdata; /* Temporary pointer into buffer */ - int acc, i, j, k, l, ii; /* Local index variables */ + int acc, i, ii, j, k, l; /* Local index variables */ herr_t ret; /* Generic return value */ const char *filename; @@ -707,11 +703,11 @@ void dataset_fillvalue(void) for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) if(*trdata != 0) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata); + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata); if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if(err_num) { - printf("%d errors found in check_value\n", err_num); + HDprintf("%d errors found in check_value\n", err_num); nerrors++; } } @@ -798,21 +794,21 @@ void dataset_fillvalue(void) if(i<mpi_size) { if(*twdata != *trdata ) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata); + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata); } /* end if */ else { if(*trdata != 0) if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata); + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata); } /* end else */ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if(err_num){ - printf("%d errors found in check_value\n", err_num); + HDprintf("%d errors found in check_value\n", err_num); nerrors++; } } - + /* Close all file objects */ ret = H5Dclose(dataset); VRFY((ret >= 0), "H5Dclose succeeded"); @@ -866,8 +862,8 @@ void collective_group_write(void) hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ herr_t ret1, 
ret2; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; pt = GetTestParameters(); filename = pt->name; @@ -912,11 +908,11 @@ void collective_group_write(void) /* creates ngroups groups under the root group, writes chunked * datasets in parallel. */ for(m = 0; m < ngroups; m++) { - sprintf(gname, "group%d", m); + HDsprintf(gname, "group%d", m); gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((gid > 0), gname); - sprintf(dname, "dataset%d", m); + HDsprintf(dname, "dataset%d", m); did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); VRFY((did > 0), dname); @@ -932,7 +928,7 @@ void collective_group_write(void) #ifdef BARRIER_CHECKS if(!((m+1) % 10)) { - printf("created %d groups\n", m+1); + HDprintf("created %d groups\n", m+1); MPI_Barrier(MPI_COMM_WORLD); } #endif /* BARRIER_CHECKS */ @@ -954,8 +950,8 @@ void independent_group_read(void) int mpi_rank, m; hid_t plist, fid; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; pt = GetTestParameters(); filename = pt->name; @@ -989,9 +985,9 @@ void independent_group_read(void) * instead of the old SIZE #define. This should allow it * to function with an arbitrary number of processors. * - * Also added code to verify the results of dynamic memory - * allocations, and to free dynamically allocated memeory - * when we are done with it. + * Also added code to verify the results of dynamic memory + * allocations, and to free dynamically allocated memeory + * when we are done with it. * * JRM - 8/16/04 */ @@ -1013,12 +1009,12 @@ group_dataset_read(hid_t fid, int mpi_rank, int m) VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); /* open every group under root group. */ - sprintf(gname, "group%d", m); + HDsprintf(gname, "group%d", m); gid = H5Gopen2(fid, gname, H5P_DEFAULT); VRFY((gid > 0), gname); /* check the data. */ - sprintf(dname, "dataset%d", m); + HDsprintf(dname, "dataset%d", m); did = H5Dopen2(gid, dname, H5P_DEFAULT); VRFY((did>0), dname); @@ -1083,8 +1079,8 @@ void multiple_group_write(void) hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; herr_t ret; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; pt = GetTestParameters(); filename = pt->name; @@ -1119,23 +1115,23 @@ void multiple_group_write(void) /* creates ngroups groups under the root group, writes datasets in * parallel. */ for(m = 0; m < ngroups; m++) { - sprintf(gname, "group%d", m); + HDsprintf(gname, "group%d", m); gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((gid > 0), gname); /* create attribute for these groups. 
*/ - write_attribute(gid, is_group, m); + write_attribute(gid, is_group, m); if(m != 0) - write_dataset(memspace, filespace, gid); + write_dataset(memspace, filespace, gid); H5Gclose(gid); #ifdef BARRIER_CHECKS if(!((m+1) % 10)) { - printf("created %d groups\n", m+1); + HDprintf("created %d groups\n", m+1); MPI_Barrier(MPI_COMM_WORLD); - } + } #endif /* BARRIER_CHECKS */ } @@ -1181,13 +1177,13 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid) VRFY((outme != NULL), "HDmalloc succeeded for outme"); for(n = 0; n < NDATASET; n++) { - sprintf(dname, "dataset%d", n); + HDsprintf(dname, "dataset%d", n); did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((did > 0), dname); for(i = 0; i < size; i++) for(j = 0; j < size; j++) - outme[(i * size) + j] = n * 1000 + mpi_rank; + outme[(i * size) + j] = n * 1000 + mpi_rank; H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); @@ -1214,12 +1210,12 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) #ifdef BARRIER_CHECKS if(!((counter+1) % 10)) { - printf("created %dth child groups\n", counter+1); + HDprintf("created %dth child groups\n", counter+1); MPI_Barrier(MPI_COMM_WORLD); } #endif /* BARRIER_CHECKS */ - sprintf(gname, "%dth_child_group", counter+1); + HDsprintf(gname, "%dth_child_group", counter+1); child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((child_gid > 0), gname); @@ -1251,8 +1247,8 @@ void multiple_group_read(void) hsize_t chunk_origin[DIM]; hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; pt = GetTestParameters(); filename = pt->name; @@ -1280,19 +1276,19 @@ void multiple_group_read(void) /* open every group under root group. */ for(m=0; m<ngroups; m++) { - sprintf(gname, "group%d", m); + HDsprintf(gname, "group%d", m); gid = H5Gopen2(fid, gname, H5P_DEFAULT); VRFY((gid > 0), gname); /* check the data. 
*/ if(m != 0) if((error_num = read_dataset(memspace, filespace, gid))>0) - nerrors += error_num; + nerrors += error_num; /* check attribute.*/ error_num = 0; if((error_num = read_attribute(gid, is_group, m))>0 ) - nerrors += error_num; + nerrors += error_num; H5Gclose(gid); @@ -1344,7 +1340,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid) VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); for(n=0; n<NDATASET; n++) { - sprintf(dname, "dataset%d", n); + HDsprintf(dname, "dataset%d", n); did = H5Dopen2(gid, dname, H5P_DEFAULT); VRFY((did>0), dname); @@ -1353,10 +1349,10 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid) /* this is the original value */ for(i=0; i<size; i++) - for(j=0; j<size; j++) { - *outdata = n*1000 + mpi_rank; + for(j=0; j<size; j++) { + *outdata = n*1000 + mpi_rank; outdata++; - } + } outdata -= size * size; /* compare the original value(outdata) to the value in file(indata).*/ @@ -1396,7 +1392,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter) nerrors += err_num; if(counter < GROUP_DEPTH ) { - sprintf(gname, "%dth_child_group", counter+1); + HDsprintf(gname, "%dth_child_group", counter+1); child_gid = H5Gopen2(gid, gname, H5P_DEFAULT); VRFY((child_gid>0), gname); recursive_read_group(memspace, filespace, child_gid, counter+1); @@ -1418,7 +1414,7 @@ write_attribute(hid_t obj_id, int this_type, int num) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if(this_type == is_group) { - sprintf(attr_name, "Group Attribute %d", num); + HDsprintf(attr_name, "Group Attribute %d", num); sid = H5Screate(H5S_SCALAR); aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); H5Awrite(aid, H5T_NATIVE_INT, &num); @@ -1426,7 +1422,7 @@ write_attribute(hid_t obj_id, int this_type, int num) H5Sclose(sid); } /* end if */ else if(this_type == is_dset) { - sprintf(attr_name, "Dataset Attribute %d", num); + HDsprintf(attr_name, "Dataset Attribute %d", num); for(i=0; i<8; i++) attr_data[i] = i; sid = H5Screate_simple(dspace_rank, dspace_dims, NULL); @@ -1450,23 +1446,23 @@ read_attribute(hid_t obj_id, int this_type, int num) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if(this_type == is_group) { - sprintf(attr_name, "Group Attribute %d", num); + HDsprintf(attr_name, "Group Attribute %d", num); aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); if(MAINPROCESS) { H5Aread(aid, H5T_NATIVE_INT, &in_num); vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num); - } + } H5Aclose(aid); } else if(this_type == is_dset) { - sprintf(attr_name, "Dataset Attribute %d", num); + HDsprintf(attr_name, "Dataset Attribute %d", num); for(i=0; i<8; i++) out_data[i] = i; aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); if(MAINPROCESS) { H5Aread(aid, H5T_NATIVE_INT, in_data); vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data); - } + } H5Aclose(aid); } @@ -1476,11 +1472,11 @@ read_attribute(hid_t obj_id, int this_type, int num) /* This functions compares the original data with the read-in data for its * hyperslab part only by process ID. * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. + * Changes: Modified function to use a passed in size parameter + * instead of the old SIZE #define. This should let us + * run with an arbitrary number of processes. 
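check_value() and get_slab() both rely on each rank knowing which rectangular block of the size x size array it owns. The helper below is a hypothetical row-block decomposition, shown only to make that ownership idea concrete; the file's own get_slab() sits in an elided hunk and may split the array differently.

    /* Hypothetical decomposition for illustration only. */
    static void
    example_slab(hsize_t origin[2], hsize_t dims[2],
                 int mpi_rank, int mpi_size, int size)
    {
        dims[0]   = (hsize_t)(size / mpi_size);   /* rows owned by this rank  */
        dims[1]   = (hsize_t)size;                /* every rank sees all cols */
        origin[0] = (hsize_t)mpi_rank * dims[0];  /* first row for this rank  */
        origin[1] = 0;
    }
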
* - * JRM - 8/16/04 + * JRM - 8/16/04 */ static int check_value(DATATYPE *indata, DATATYPE *outdata, int size) @@ -1500,23 +1496,23 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size) for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++) for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) { if(*indata != *outdata ) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata); - } + if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata); + } if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if(err_num) - printf("%d errors found in check_value\n", err_num); + HDprintf("%d errors found in check_value\n", err_num); return err_num; } /* Decide the portion of data chunk in dataset by process ID. * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. + * Changes: Modified function to use a passed in size parameter + * instead of the old SIZE #define. This should let us + * run with an arbitrary number of processes. * - * JRM - 8/11/04 + * JRM - 8/11/04 */ static void @@ -1560,7 +1556,7 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], * on failure. * JRM - 9/13/04 * - * Changes: None. + * Changes: None. */ #define N 4 @@ -1595,10 +1591,10 @@ void io_mode_confusion(void) * test bed related variables */ - const char * fcn_name = "io_mode_confusion"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; + const char * fcn_name = "io_mode_confusion"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t * pt; + char * filename; pt = GetTestParameters(); @@ -1799,13 +1795,13 @@ void io_mode_confusion(void) /* * At present, the object header code maintains an image of its on disk * representation, which is updates as necessary instead of generating on - * request. + * request. * * Prior to the fix that this test in designed to verify, the image of the * on disk representation was only updated on flush -- not when the object * header was marked clean. * - * This worked perfectly well as long as all writes of a given object + * This worked perfectly well as long as all writes of a given object * header were written from a single process. However, with the implementation * of round robin metadata data writes in parallel HDF5, this is no longer * the case -- it is possible for a given object header to be flushed from @@ -1813,14 +1809,14 @@ void io_mode_confusion(void) * clean in all other processes on each flush. This resulted in NULL or * out of data object header information being written to disk. * - * To repair this, I modified the object header code to update its - * on disk image both on flush on when marked clean. + * To repair this, I modified the object header code to update its + * on disk image both on flush on when marked clean. * * This test is directed at verifying that the fix performs as expected. * * The test functions by creating a HDF5 file with several small datasets, - * and then flushing the file. 
This should result of at least one of - * the associated object headers being flushed by a process other than + * and then flushing the file. This should result of at least one of + * the associated object headers being flushed by a process other than * process 0. * * Then for each data set, add an attribute and flush the file again. @@ -1830,26 +1826,26 @@ void io_mode_confusion(void) * Open the each of the data sets in turn. If all opens are successful, * the test passes. Otherwise the test fails. * - * Note that this test will probably become irrelevent shortly, when we + * Note that this test will probably become irrelevent shortly, when we * land the journaling modifications on the trunk -- at which point all * cache clients will have to construct on disk images on demand. * - * JRM -- 10/13/10 + * JRM -- 10/13/10 * * Changes: - * Break it into two parts, a writer to write the file and a reader - * the correctness of the writer. AKC -- 2010/10/27 + * Break it into two parts, a writer to write the file and a reader + * the correctness of the writer. AKC -- 2010/10/27 */ -#define NUM_DATA_SETS 4 -#define LOCAL_DATA_SIZE 4 -#define LARGE_ATTR_SIZE 256 +#define NUM_DATA_SETS 4 +#define LOCAL_DATA_SIZE 4 +#define LARGE_ATTR_SIZE 256 /* Since all even and odd processes are split into writer and reader comm * respectively, process 0 and 1 in COMM_WORLD become the root process of * the writer and reader comm respectively. */ -#define Writer_Root 0 -#define Reader_Root 1 +#define Writer_Root 0 +#define Reader_Root 1 #define Reader_wait(mpi_err, xsteps) \ mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD) #define Reader_result(mpi_err, xsteps_done) \ @@ -1861,26 +1857,26 @@ void io_mode_confusion(void) /* object names used by both rr_obj_hdr_flush_confusion and * rr_obj_hdr_flush_confusion_reader. */ -const char * dataset_name[NUM_DATA_SETS] = - { - "dataset_0", - "dataset_1", - "dataset_2", - "dataset_3" +const char * dataset_name[NUM_DATA_SETS] = + { + "dataset_0", + "dataset_1", + "dataset_2", + "dataset_3" }; -const char * att_name[NUM_DATA_SETS] = - { - "attribute_0", - "attribute_1", - "attribute_2", - "attribute_3" +const char * att_name[NUM_DATA_SETS] = + { + "attribute_0", + "attribute_1", + "attribute_2", + "attribute_3" }; -const char * lg_att_name[NUM_DATA_SETS] = - { - "large_attribute_0", - "large_attribute_1", - "large_attribute_2", - "large_attribute_3" +const char * lg_att_name[NUM_DATA_SETS] = + { + "large_attribute_0", + "large_attribute_1", + "large_attribute_2", + "large_attribute_3" }; void rr_obj_hdr_flush_confusion(void) @@ -1889,14 +1885,14 @@ void rr_obj_hdr_flush_confusion(void) /* private communicator size and rank */ int mpi_size; int mpi_rank; - int mrc; /* mpi error code */ - int is_reader; /* 1 for reader process; 0 for writer process. */ + int mrc; /* mpi error code */ + int is_reader; /* 1 for reader process; 0 for writer process. */ MPI_Comm comm; /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion"; - const hbool_t verbose = FALSE; + const char * fcn_name = "rr_obj_hdr_flush_confusion"; + const hbool_t verbose = FALSE; /* Create two new private communicators from MPI_COMM_WORLD. * Even and odd ranked processes go to comm_writers and comm_readers @@ -1919,9 +1915,9 @@ void rr_obj_hdr_flush_confusion(void) * step. When all steps are done, they inform readers to end. 
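The even/odd writer/reader split described above is the standard MPI_Comm_split idiom. A minimal sketch follows; the test's actual communicator setup is in an elided hunk and may differ in detail.

    MPI_Comm comm;
    int      mpi_rank, is_reader;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    is_reader = mpi_rank % 2;                    /* odd ranks become readers  */
    MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);

    if (is_reader)
        rr_obj_hdr_flush_confusion_reader(comm); /* odd ranks verify the file */
    else
        rr_obj_hdr_flush_confusion_writer(comm); /* even ranks write the file */

    MPI_Comm_free(&comm);
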
*/ if (is_reader) - rr_obj_hdr_flush_confusion_reader(comm); + rr_obj_hdr_flush_confusion_reader(comm); else - rr_obj_hdr_flush_confusion_writer(comm); + rr_obj_hdr_flush_confusion_writer(comm); MPI_Comm_free(&comm); if(verbose ) @@ -1965,16 +1961,16 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* private communicator size and rank */ int mpi_size; int mpi_rank; - int mrc; /* mpi error code */ + int mrc; /* mpi error code */ /* steps to verify and have been verified */ int steps = 0; int steps_done = 0; /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion_writer"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; + const char * fcn_name = "rr_obj_hdr_flush_confusion_writer"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t * pt; + char * filename; /* * setup test bed related variables: @@ -2008,7 +2004,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) */ if(verbose ) - HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", + HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename); file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); @@ -2023,7 +2019,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) */ if(verbose ) - HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", + HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name); disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size); @@ -2032,15 +2028,15 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) for ( i = 0; i < NUM_DATA_SETS; i++ ) { disk_space[i] = H5Screate_simple(1, disk_size, NULL); - VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); + VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); - dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, + dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n"); } - /* + /* * setup data transfer property list */ @@ -2051,11 +2047,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), + VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - /* - * write data to the data sets + /* + * write data to the data sets */ if(verbose ) @@ -2071,22 +2067,22 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) } for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL); VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], + err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data); VRFY((err >= 0), "H5Dwrite(1) failed.\n"); for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - data[j] *= 10.0; + data[j] *= 10.0; } - /* + /* * close the data spaces */ @@ -2102,12 +2098,12 @@ void 
rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* End of Step 1: create the data sets and write data. */ - /* + /* * flush the metadata cache */ if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(1) failed.\n"); @@ -2131,7 +2127,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) for ( i = 0; i < NUM_DATA_SETS; i++ ) { att_space[i] = H5Screate_simple(1, att_size, NULL); VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n"); - att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, + att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT); VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n"); err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att); @@ -2142,11 +2138,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) } /* - * close attribute IDs and spaces + * close attribute IDs and spaces */ if(verbose ) - HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", + HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name); for ( i = 0; i < NUM_DATA_SETS; i++ ) { @@ -2159,12 +2155,12 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* End of Step 2: write attributes to each dataset */ - /* + /* * flush the metadata cache again */ if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(2) failed.\n"); @@ -2178,7 +2174,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) */ if(verbose ) - HDfprintf(stdout, "%0d:%s: writing large attributes.\n", + HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name); lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE); @@ -2190,7 +2186,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) for ( i = 0; i < NUM_DATA_SETS; i++ ) { lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL); VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n"); - lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, + lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT); VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n"); err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); @@ -2199,21 +2195,21 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) lg_att[j] /= 10.0; } } - + /* Step 3: write large attributes to each dataset */ - /* + /* * flush the metadata cache yet again to clean the object headers. * * This is an attempt to crate a situation where we have dirty * object header continuation chunks, but clean opject headers * to verify a speculative bug fix -- it doesn't seem to work, - * but I will leave the code in anyway, as the object header + * but I will leave the code in anyway, as the object header * code is going to change a lot in the near future. 
*/ if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(3) failed.\n"); @@ -2227,7 +2223,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) */ if(verbose ) - HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", + HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name); for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { @@ -2244,11 +2240,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* End of Step 4: write different large attributes to each dataset */ - /* + /* * flush the metadata cache again */ if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(3) failed.\n"); @@ -2260,11 +2256,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* Step 5: Close all objects and the file */ /* - * close large attribute IDs and spaces + * close large attribute IDs and spaces */ if(verbose ) - HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", + HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name); for ( i = 0; i < NUM_DATA_SETS; i++ ) { @@ -2276,7 +2272,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) } - /* + /* * close the data sets */ @@ -2308,7 +2304,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) err = H5Fclose(file_id); VRFY((err >= 0 ), "H5Fclose(1) failed"); - + /* End of Step 5: Close all objects and the file */ /* Tell the reader to check the file up to steps. */ steps++; @@ -2359,15 +2355,15 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) /* private communicator size and rank */ int mpi_size; int mpi_rank; - int mrc; /* mpi error code */ - int steps = -1; /* How far (steps) to verify the file */ - int steps_done = -1; /* How far (steps) have been verified */ + int mrc; /* mpi error code */ + int steps = -1; /* How far (steps) to verify the file */ + int steps_done = -1; /* How far (steps) have been verified */ /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion_reader"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; + const char * fcn_name = "rr_obj_hdr_flush_confusion_reader"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t * pt; + char * filename; /* * setup test bed related variables: @@ -2384,291 +2380,291 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) /* Repeatedly re-open the file and verify its contents until it is */ /* told to end (when steps=0). 
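The verification loop that follows is easier to read as a skeleton. Reader_wait() and Reader_result() are the broadcast macros defined earlier; the writer-side counterparts live in an elided hunk, so their exact form is not shown here.

    /* Reader-side handshake skeleton, condensed from the loop below. */
    while (steps_done != 0) {
        Reader_wait(mrc, steps);        /* writer says how many steps to verify */
        steps_done = 0;
        if (steps > 0) {
            /* re-open the file, verify stages 1..steps, and count each
             * successfully verified stage in steps_done */
        }
        Reader_result(mrc, steps_done); /* report back how far verification got */
    }
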
*/ while (steps_done != 0){ - Reader_wait(mrc, steps); - VRFY((mrc >= 0), "Reader_wait failed"); - steps_done = 0; + Reader_wait(mrc, steps); + VRFY((mrc >= 0), "Reader_wait failed"); + steps_done = 0; - if (steps > 0 ){ - /* - * Set up file access property list with parallel I/O access - */ + if (steps > 0 ){ + /* + * Set up file access property list with parallel I/O access + */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up property list.\n", - mpi_rank, fcn_name); + if(verbose ) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", + mpi_rank, fcn_name); - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); - err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); - VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed"); + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); + err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); + VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed"); - /* - * Create a new file collectively and release property list identifier. - */ + /* + * Create a new file collectively and release property list identifier. + */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", - mpi_rank, fcn_name, filename); + if(verbose ) + HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", + mpi_rank, fcn_name, filename); - file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); - VRFY((file_id >= 0 ), "H5Fopen() failed"); - err = H5Pclose(fapl_id); - VRFY((err >= 0 ), "H5Pclose(fapl_id) failed"); + file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0 ), "H5Fopen() failed"); + err = H5Pclose(fapl_id); + VRFY((err >= 0 ), "H5Pclose(fapl_id) failed"); #if 1 - if (steps >= 1){ - /*=====================================================* - * Step 1: open the data sets and read data. 
- *=====================================================*/ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: opening the datasets.\n", - mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - dataset[i] = -1; - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); - VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); - disk_space[i] = H5Dget_space(dataset[i]); - VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); - } - - /* - * setup data transfer property list - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); - err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), - "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - - /* - * read data from the data sets - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); - - disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); - - mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - - mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - mem_start[0] = (hsize_t)(0); - - /* set up expected data for verification */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - data[j] = (double)(mpi_rank + 1); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, - NULL, disk_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); - mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, - mem_start, NULL, mem_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], - disk_space[i], dxpl_id, data_read); - VRFY((err >= 0), "H5Dread(1) failed.\n"); - - /* compare read data with expected data */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - if (data_read[j] != data[j]){ - HDfprintf(stdout, - "%0d:%s: Reading datasets value failed in " - "Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, data[j], data_read[j]); - nerrors++; - } - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - data[j] *= 10.0; - } - - /* - * close the data spaces - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sclose(disk_space[i]); - VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); - err = H5Sclose(mem_space[i]); - VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); - } - steps_done++; - } - /* End of Step 1: open the data sets and read data. */ + if (steps >= 1){ + /*=====================================================* + * Step 1: open the data sets and read data. 
+ *=====================================================*/ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: opening the datasets.\n", + mpi_rank, fcn_name); + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + dataset[i] = -1; + } + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); + VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); + disk_space[i] = H5Dget_space(dataset[i]); + VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); + } + + /* + * setup data transfer property list + */ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); + err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); + VRFY((err >= 0), + "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); + + /* + * read data from the data sets + */ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); + + disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); + + mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + + mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_start[0] = (hsize_t)(0); + + /* set up expected data for verification */ + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + data[j] = (double)(mpi_rank + 1); + } + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, + NULL, disk_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); + mem_space[i] = H5Screate_simple(1, mem_size, NULL); + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, + mem_start, NULL, mem_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); + err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], + disk_space[i], dxpl_id, data_read); + VRFY((err >= 0), "H5Dread(1) failed.\n"); + + /* compare read data with expected data */ + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) + if (data_read[j] != data[j]){ + HDfprintf(stdout, + "%0d:%s: Reading datasets value failed in " + "Dataset %d, at position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, data[j], data_read[j]); + nerrors++; + } + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) + data[j] *= 10.0; + } + + /* + * close the data spaces + */ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + err = H5Sclose(disk_space[i]); + VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); + err = H5Sclose(mem_space[i]); + VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); + } + steps_done++; + } + /* End of Step 1: open the data sets and read data. 
*/ #endif #if 1 - /*=====================================================* - * Step 2: reading attributes from each dataset - *=====================================================*/ - - if (steps >= 2){ - if(verbose ) - HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); - - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - - att[j] = (double)(j + 1); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - hid_t att_id, att_type; - - att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); - VRFY((att_id >= 0), "H5Aopen failed.\n"); - att_type = H5Aget_type(att_id); - VRFY((att_type >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err==0){ - HDfprintf(stdout, - "%0d:%s: Mismatched Attribute type of Dataset %d.\n", - mpi_rank, fcn_name, i); - nerrors++; - }else{ - /* should verify attribute size before H5Aread */ - err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - if (att_read[j] != att[j]){ - HDfprintf(stdout, - "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, att[j], att_read[j]); - nerrors++; - } - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - - att[j] /= 10.0; - } - } - err = H5Aclose(att_id); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - steps_done++; - } - /* End of Step 2: reading attributes from each dataset */ + /*=====================================================* + * Step 2: reading attributes from each dataset + *=====================================================*/ + + if (steps >= 2){ + if(verbose ) + HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); + + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + + att[j] = (double)(j + 1); + } + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + hid_t att_id, att_type; + + att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); + VRFY((att_id >= 0), "H5Aopen failed.\n"); + att_type = H5Aget_type(att_id); + VRFY((att_type >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err==0){ + HDfprintf(stdout, + "%0d:%s: Mismatched Attribute type of Dataset %d.\n", + mpi_rank, fcn_name, i); + nerrors++; + }else{ + /* should verify attribute size before H5Aread */ + err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) + if (att_read[j] != att[j]){ + HDfprintf(stdout, + "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, att[j], att_read[j]); + nerrors++; + } + for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + + att[j] /= 10.0; + } + } + err = H5Aclose(att_id); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + steps_done++; + } + /* End of Step 2: reading attributes from each dataset */ #endif #if 1 - /*=====================================================* - * Step 3 or 4: read large attributes from each dataset. - * Step 4 has different attribute value from step 3. - *=====================================================*/ - - if (steps >= 3){ - if(verbose ) - HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); - - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { - - lg_att[j] = (steps==3) ? 
(double)(j + 1) : (double)(j+2); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); - VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); - lg_att_type[i] = H5Aget_type(lg_att_id[i]); - VRFY((err >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err==0){ - HDfprintf(stdout, - "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", - mpi_rank, fcn_name, i); - nerrors++; - }else{ - /* should verify large attribute size before H5Aread */ - err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) - if (lg_att_read[j] != lg_att[j]){ - HDfprintf(stdout, - "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); - nerrors++; - } - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { - - lg_att[j] /= 10.0; - } - } - err = H5Tclose(lg_att_type[i]); - VRFY((err >= 0), "H5Tclose failed.\n"); - err = H5Aclose(lg_att_id[i]); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - /* Both step 3 and 4 use this same read checking code. */ - steps_done = (steps==3) ? 3 : 4; - } - - /* End of Step 3 or 4: read large attributes from each dataset */ + /*=====================================================* + * Step 3 or 4: read large attributes from each dataset. + * Step 4 has different attribute value from step 3. + *=====================================================*/ + + if (steps >= 3){ + if(verbose ) + HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); + + for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + + lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2); + } + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); + VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); + lg_att_type[i] = H5Aget_type(lg_att_id[i]); + VRFY((err >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err==0){ + HDfprintf(stdout, + "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", + mpi_rank, fcn_name, i); + nerrors++; + }else{ + /* should verify large attribute size before H5Aread */ + err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) + if (lg_att_read[j] != lg_att[j]){ + HDfprintf(stdout, + "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); + nerrors++; + } + for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + + lg_att[j] /= 10.0; + } + } + err = H5Tclose(lg_att_type[i]); + VRFY((err >= 0), "H5Tclose failed.\n"); + err = H5Aclose(lg_att_id[i]); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + /* Both step 3 and 4 use this same read checking code. */ + steps_done = (steps==3) ? 3 : 4; + } + + /* End of Step 3 or 4: read large attributes from each dataset */ #endif - /*=====================================================* - * Step 5: read all objects from the file - *=====================================================*/ - if (steps>=5){ - /* nothing extra to verify. The file is closed normally. 
*/ - /* Just increment steps_done */ - steps_done++; - } - - /* - * Close the data sets - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing datasets again.\n", - mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - if ( dataset[i] >= 0 ) { - err = H5Dclose(dataset[i]); - VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); - } - } - - /* - * close the data transfer property list. - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); - - err = H5Pclose(dxpl_id); - VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); - - /* - * Close the file - */ - if(verbose) - HDfprintf(stdout, "%0d:%s: closing file again.\n", - mpi_rank, fcn_name); - err = H5Fclose(file_id); - VRFY((err >= 0 ), "H5Fclose(1) failed"); - - } /* else if (steps_done==0) */ - Reader_result(mrc, steps_done); + /*=====================================================* + * Step 5: read all objects from the file + *=====================================================*/ + if (steps>=5){ + /* nothing extra to verify. The file is closed normally. */ + /* Just increment steps_done */ + steps_done++; + } + + /* + * Close the data sets + */ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: closing datasets again.\n", + mpi_rank, fcn_name); + + for ( i = 0; i < NUM_DATA_SETS; i++ ) { + if ( dataset[i] >= 0 ) { + err = H5Dclose(dataset[i]); + VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); + } + } + + /* + * close the data transfer property list. + */ + + if(verbose ) + HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); + + err = H5Pclose(dxpl_id); + VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); + + /* + * Close the file + */ + if(verbose) + HDfprintf(stdout, "%0d:%s: closing file again.\n", + mpi_rank, fcn_name); + err = H5Fclose(file_id); + VRFY((err >= 0 ), "H5Fclose(1) failed"); + + } /* else if (steps_done==0) */ + Reader_result(mrc, steps_done); } /* end while(1) */ if(verbose ) diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index 94ecbfa..890a918 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -53,7 +53,7 @@ static int test_mpio_overlap_writes(char *filename) { MPI_Status mpi_stat; if (VERBOSE_MED) - printf("MPIO independent overlapping writes test on file %s\n", + HDprintf("MPIO independent overlapping writes test on file %s\n", filename); nerrs = 0; @@ -64,8 +64,8 @@ static int test_mpio_overlap_writes(char *filename) { /* Need at least 2 processes */ if (mpi_size < 2) { if (MAINPROCESS) - printf("Need at least 2 processes to run MPIO test.\n"); - printf(" -SKIP- \n"); + HDprintf("Need at least 2 processes to run MPIO test.\n"); + HDprintf(" -SKIP- \n"); return 0; } @@ -138,13 +138,13 @@ static int test_mpio_overlap_writes(char *filename) { expected = (unsigned char) (mpi_off + i); if ((expected != buf[i]) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) { - printf( + HDprintf( "proc %d: found data error at [%ld], expect %u, got %u\n", mpi_rank, (long) (mpi_off + i), expected, buf[i]); } } if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("proc %d: [more errors ...]\n", mpi_rank); + HDprintf("proc %d: [more errors ...]\n", mpi_rank); nerrs += vrfyerrs; } @@ -204,7 +204,7 @@ static int test_mpio_gb_file(char *filename) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if (VERBOSE_MED) - printf("MPI_Offset range test\n"); + HDprintf("MPI_Offset range test\n"); /* figure out the signness and sizeof MPI_Offset */ mpi_off = 0; @@ -216,10 +216,10 @@ static int test_mpio_gb_file(char *filename) { * sizes. 
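The signedness and width of MPI_Offset are probed at run time before any of the large-offset assignments are attempted. One conventional probe looks like the sketch below; the test's own expressions sit in an elided line, so treat this as an assumption rather than the file's exact code.

    MPI_Offset mpi_off           = 0;
    int        is_signed         = ((MPI_Offset)(mpi_off - 1)) < 0; /* wraps high if unsigned */
    int        sizeof_mpi_offset = (int)sizeof(MPI_Offset);
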
*/ if (MAINPROCESS) { /* only process 0 needs to check it*/ - printf("MPI_Offset is %s %d bytes integeral type\n", + HDprintf("MPI_Offset is %s %d bytes integeral type\n", is_signed ? "signed" : "unsigned", (int) sizeof(MPI_Offset)); if (sizeof_mpi_offset <= 4 && is_signed) { - printf("Skipped 2GB range test " + HDprintf("Skipped 2GB range test " "because MPI_Offset cannot support it\n"); } else { /* verify correctness of assigning 2GB sizes */ @@ -241,7 +241,7 @@ static int test_mpio_gb_file(char *filename) { } if (sizeof_mpi_offset <= 4) { - printf("Skipped 4GB range test " + HDprintf("Skipped 4GB range test " "because MPI_Offset cannot support it\n"); } else { /* verify correctness of assigning 4GB sizes */ @@ -267,10 +267,10 @@ static int test_mpio_gb_file(char *filename) { * Verify if we can write to a file of multiple GB sizes. */ if (VERBOSE_MED) - printf("MPIO GB file test %s\n", filename); + HDprintf("MPIO GB file test %s\n", filename); if (sizeof_mpi_offset <= 4) { - printf("Skipped GB file range test " + HDprintf("Skipped GB file range test " "because MPI_Offset cannot support it\n"); } else { buf = (char *) HDmalloc(MB); @@ -286,7 +286,7 @@ static int test_mpio_gb_file(char *filename) { MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh); VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN"); - printf("MPIO GB file write test %s\n", filename); + HDprintf("MPIO GB file write test %s\n", filename); /* instead of writing every bytes of the file, we will just write * some data around the 2 and 4 GB boundaries. That should cover @@ -328,9 +328,9 @@ static int test_mpio_gb_file(char *filename) { */ /* open it again to verify the data written */ /* but only if there was no write errors */ - printf("MPIO GB file read test %s\n", filename); + HDprintf("MPIO GB file read test %s\n", filename); if (errors_sum(writerrs) > 0) { - printf("proc %d: Skip read test due to previous write errors\n", + HDprintf("proc %d: Skip read test due to previous write errors\n", mpi_rank); goto finish; } @@ -356,14 +356,14 @@ static int test_mpio_gb_file(char *filename) { for (j = 0; j < MB; j++) { if ((*(buf + j) != expected) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) { - printf( + HDprintf( "proc %d: found data error at [%ld+%d], expect %d, got %d\n", mpi_rank, (long) mpi_off, j, expected, *(buf + j)); } } if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("proc %d: [more errors ...]\n", mpi_rank); + HDprintf("proc %d: [more errors ...]\n", mpi_rank); nerrs += vrfyerrs; } @@ -380,7 +380,7 @@ static int test_mpio_gb_file(char *filename) { mrc = MPI_Barrier(MPI_COMM_WORLD); VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); - printf("Test if MPI_File_get_size works correctly with %s\n", filename); + HDprintf("Test if MPI_File_get_size works correctly with %s\n", filename); mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); @@ -428,7 +428,7 @@ static int test_mpio_gb_file(char *filename) { */ #define DIMSIZE 32 /* Dimension size. 
*/ -#define PRINTID printf("Proc %d: ", mpi_rank) +#define PRINTID HDprintf("Proc %d: ", mpi_rank) #define USENONE 0 #define USEATOM 1 /* request atomic I/O */ #define USEFSYNC 2 /* request file_sync */ @@ -452,24 +452,24 @@ static int test_mpio_1wMr(char *filename, int special_request) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if (MAINPROCESS && VERBOSE_MED) { - printf("Testing one process writes, all processes read.\n"); - printf("Using %d processes accessing file %s\n", mpi_size, filename); - printf(" (Filename can be specified via program argument)\n"); + HDprintf("Testing one process writes, all processes read.\n"); + HDprintf("Using %d processes accessing file %s\n", mpi_size, filename); + HDprintf(" (Filename can be specified via program argument)\n"); } /* show the hostname so that we can tell where the processes are running */ if (VERBOSE_DEF) { #ifdef H5_HAVE_GETHOSTNAME if(HDgethostname(hostname, sizeof(hostname)) < 0) { - printf("gethostname failed\n"); + HDprintf("gethostname failed\n"); hostname[0] = '\0'; } #else - printf("gethostname unavailable\n"); + HDprintf("gethostname unavailable\n"); hostname[0] = '\0'; #endif PRINTID; - printf("hostname=%s\n", hostname); + HDprintf("hostname=%s\n", hostname); } /* Delete any old file in order to start anew. */ @@ -483,7 +483,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_open failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); return 1; } @@ -495,29 +495,29 @@ static int test_mpio_1wMr(char *filename, int special_request) { if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); } if (VERBOSE_HI) - printf("Initial atomicity = %d\n", atomicity); + HDprintf("Initial atomicity = %d\n", atomicity); if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str); } if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); } if (VERBOSE_HI) - printf("After set_atomicity atomicity = %d\n", atomicity); + HDprintf("After set_atomicity atomicity = %d\n", atomicity); } /* This barrier is not necessary but do it anyway. 
*/ MPI_Barrier(MPI_COMM_WORLD); if (VERBOSE_HI) { PRINTID; - printf("between MPI_Barrier and MPI_File_write_at\n"); + HDprintf("between MPI_Barrier and MPI_File_write_at\n"); } /* ================================================== @@ -533,13 +533,13 @@ static int test_mpio_1wMr(char *filename, int special_request) { if (mpi_rank == irank) { if (VERBOSE_HI) { PRINTID; - printf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off); + HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off); } if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", + HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long) mpi_off, DIMSIZE, mpi_err_str); return 1; }; @@ -550,7 +550,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD); if (VERBOSE_HI) { PRINTID; - printf("MPI_Bcast: mpi_err = %d\n", mpi_err); + HDprintf("MPI_Bcast: mpi_err = %d\n", mpi_err); } if (special_request & USEFSYNC) { @@ -559,19 +559,19 @@ static int test_mpio_1wMr(char *filename, int special_request) { * should not need this. * ==================================================*/ if (VERBOSE_HI) - printf("Apply MPI_File_sync\n"); + HDprintf("Apply MPI_File_sync\n"); /* call file_sync to force the write out */ if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_sync failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str); } MPI_Barrier(MPI_COMM_WORLD); /* call file_sync to force the write out */ if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_sync failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str); } } @@ -580,7 +580,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { MPI_Barrier(MPI_COMM_WORLD); if (VERBOSE_HI) { PRINTID; - printf("after MPI_Barrier\n"); + HDprintf("after MPI_Barrier\n"); } /* ================================================== @@ -592,7 +592,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { &mpi_stat)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); PRINTID; - printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n", + HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n", (long) mpi_off, DIMSIZE, mpi_err_str); return 1; }; @@ -600,7 +600,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { expect_val = irank * DIMSIZE + i; if (readdata[i] != expect_val) { PRINTID; - printf("read data[%d:%d] got %02x, expect %02x\n", irank, i, + HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i, readdata[i], expect_val); nerrs++; } @@ -610,7 +610,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { if (VERBOSE_HI) { PRINTID; - printf("%d data errors detected\n", nerrs); + HDprintf("%d data errors detected\n", nerrs); } mpi_err = MPI_Barrier(MPI_COMM_WORLD); @@ -664,7 +664,7 @@ static int test_mpio_1wMr(char *filename, int special_request) { 2. This test will verify whether the complicated derived datatype is working on the current platform. 
- If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change + If this bug has been fixed in the previous not-working package, this test will issue a HDprintf message to tell the developer to change the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections. If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that @@ -703,7 +703,7 @@ static int test_mpio_derived_dtype(char *filename) { MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_open failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); return 1; } @@ -717,13 +717,13 @@ static int test_mpio_derived_dtype(char *filename) { if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } @@ -733,13 +733,13 @@ static int test_mpio_derived_dtype(char *filename) { if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetypenew)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_Type_commit(&filetypenew)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } @@ -754,52 +754,52 @@ static int test_mpio_derived_dtype(char *filename) { if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp, bas_filetype, &adv_filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_create_struct failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_create_struct failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_Type_commit(&adv_filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native", MPI_INFO_NULL)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_write failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_write failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_close failed (%s)\n", mpi_err_str); 
return 1; } if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_open failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); return 1; } if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_read failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_read failed (%s)\n", mpi_err_str); return 1; } @@ -807,9 +807,9 @@ static int test_mpio_derived_dtype(char *filename) { retcode = 0; } else { /* if(mpi_rank == 0) { - printf("complicated derived datatype is NOT working at this platform\n"); - printf("go back to hdf5/config and find the corresponding\n"); - printf("configure-specific file and change ?????\n"); + HDprintf("complicated derived datatype is NOT working at this platform\n"); + HDprintf("go back to hdf5/config and find the corresponding\n"); + HDprintf("configure-specific file and change ?????\n"); } */ retcode = -1; @@ -817,16 +817,16 @@ static int test_mpio_derived_dtype(char *filename) { if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_close failed (%s)\n", mpi_err_str); return 1; } mpi_err = MPI_Barrier(MPI_COMM_WORLD); if (retcode == -1) { if (mpi_rank == 0) { - printf( + HDprintf( "Complicated derived datatype is NOT working at this platform\n"); - printf(" Please report to help@hdfgroup.org about this problem.\n"); + HDprintf(" Please report to help@hdfgroup.org about this problem.\n"); } retcode = 1; } @@ -851,7 +851,7 @@ static int test_mpio_derived_dtype(char *filename) { 2. This test will fail with the MPI-IO package that doesn't support this. For example, mpich 1.2.6. - If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change + If this bug has been fixed in the previous not-working package, this test will issue a HDprintf message to tell the developer to change the configuration specific file of HDF5 so that we can change our configurationsetting to support special collective IO; currently only special collective IO. 
If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that @@ -898,26 +898,26 @@ static int test_mpio_special_collective(char *filename) { if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } /* end if */ if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } /* end if */ if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &buftype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } /* end if */ if ((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } /* end if */ } /* end if */ @@ -931,7 +931,7 @@ static int test_mpio_special_collective(char *filename) { MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_open failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); return 1; } /* end if */ @@ -943,7 +943,7 @@ static int test_mpio_special_collective(char *filename) { if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep, MPI_INFO_NULL)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); return 1; } /* end if */ @@ -951,7 +951,7 @@ static int test_mpio_special_collective(char *filename) { if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount, buftype, &mpi_stat)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", + HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long) mpi_off, bufcount, mpi_err_str); return 1; } /* end if */ @@ -959,7 +959,7 @@ static int test_mpio_special_collective(char *filename) { /* Close the file */ if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed. \n"); + HDprintf("MPI_File_close failed. 
\n"); return 1; } /* end if */ @@ -967,8 +967,8 @@ static int test_mpio_special_collective(char *filename) { mpi_err = MPI_Barrier(MPI_COMM_WORLD); if (retcode != 0) { if (mpi_rank == 0) { - printf("special collective IO is NOT working at this platform\n"); - printf(" Please report to help@hdfgroup.org about this problem.\n"); + HDprintf("special collective IO is NOT working at this platform\n"); + HDprintf(" Please report to help@hdfgroup.org about this problem.\n"); } /* end if */ retcode = 1; } /* end if */ @@ -1024,15 +1024,15 @@ static int parse_options(int argc, char **argv) { for (i = 0; i < n; i++) if (h5_fixname(FILENAME[i], plist, filenames[i], sizeof(filenames[i])) == NULL) { - printf("h5_fixname failed\n"); + HDprintf("h5_fixname failed\n"); nerrors++; return (1); } H5Pclose(plist); if (VERBOSE_MED) { - printf("Test filenames are:\n"); + HDprintf("Test filenames are:\n"); for (i = 0; i < n; i++) - printf(" %s\n", filenames[i]); + HDprintf(" %s\n", filenames[i]); } } @@ -1043,10 +1043,10 @@ static int parse_options(int argc, char **argv) { * Show command usage */ static void usage(void) { - printf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n"); - printf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\n"); + HDprintf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n"); + HDprintf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\n"); } /* @@ -1072,7 +1072,7 @@ int main(int argc, char **argv) { * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0) { - printf("Failed to turn off atexit processing. Continue.\n"); + HDprintf("Failed to turn off atexit processing. Continue.\n"); }; H5open(); if (parse_options(argc, argv) != 0) { @@ -1082,9 +1082,9 @@ int main(int argc, char **argv) { } if (MAINPROCESS) { - printf("===================================\n"); - printf("MPI functionality tests\n"); - printf("===================================\n"); + HDprintf("===================================\n"); + HDprintf("MPI functionality tests\n"); + HDprintf("===================================\n"); } if (VERBOSE_MED) @@ -1103,7 +1103,7 @@ int main(int argc, char **argv) { ret_code = test_mpio_1wMr(filenames[0], USENONE); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } @@ -1114,7 +1114,7 @@ int main(int argc, char **argv) { ret_code = test_mpio_1wMr(filenames[0], USEATOM); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } @@ -1122,7 +1122,7 @@ int main(int argc, char **argv) { ret_code = test_mpio_1wMr(filenames[0], USEFSYNC); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } } @@ -1135,12 +1135,12 @@ int main(int argc, char **argv) { ret_code = test_mpio_gb_file(filenames[0]); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } #else if (mpi_rank==0) - printf(" will be skipped on Windows (JIRA HDDFV-8064)\n"); + HDprintf(" will be skipped on 
Windows (JIRA HDDFV-8064)\n"); #endif /*======================================= @@ -1150,7 +1150,7 @@ int main(int argc, char **argv) { ret_code = test_mpio_overlap_writes(filenames[0]); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } @@ -1161,7 +1161,7 @@ int main(int argc, char **argv) { ret_code = test_mpio_derived_dtype(filenames[0]); ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } @@ -1171,7 +1171,7 @@ int main(int argc, char **argv) { if (mpi_size < 4) { MPI_BANNER("MPIO special collective io test SKIPPED."); if (mpi_rank == 0) - printf("This test needs at least four processes to run.\n"); + HDprintf("This test needs at least four processes to run.\n"); ret_code = 0; goto sc_finish; } /* end if */ @@ -1181,7 +1181,7 @@ int main(int argc, char **argv) { sc_finish: ret_code = errors_sum(ret_code); if (mpi_rank == 0 && ret_code > 0) { - printf("***FAILED with %d total errors\n", ret_code); + HDprintf("***FAILED with %d total errors\n", ret_code); nerrors += ret_code; } @@ -1191,14 +1191,14 @@ int main(int argc, char **argv) { */ MPI_Barrier(MPI_COMM_WORLD); if (MAINPROCESS) { /* only process 0 reports */ - printf("===================================\n"); + HDprintf("===================================\n"); if (nerrors) { - printf("***MPI tests detected %d errors***\n", nerrors); + HDprintf("***MPI tests detected %d errors***\n", nerrors); } else { - printf("MPI tests finished with no errors\n"); + HDprintf("MPI tests finished with no errors\n"); } - printf("===================================\n"); + HDprintf("===================================\n"); } /* turn off alarm */ diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c index 574591c..8e554ee 100644 --- a/testpar/t_ph5basic.c +++ b/testpar/t_ph5basic.c @@ -50,13 +50,13 @@ test_fapl_mpio_dup(void) int nkeys, nkeys_tmp; if (VERBOSE_MED) - printf("Verify fapl_mpio duplicates communicator and INFO objects\n"); + HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n"); /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); if (VERBOSE_MED) - printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); + HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); /* Create a new communicator that has the same processes as MPI_COMM_WORLD. * Use MPI_Comm_split because it is simplier than MPI_Comm_create @@ -66,7 +66,7 @@ test_fapl_mpio_dup(void) MPI_Comm_size(comm,&mpi_size_old); MPI_Comm_rank(comm,&mpi_rank_old); if (VERBOSE_MED) - printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); + HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); /* create a new INFO object with some trivial information. 
*/ mrc = MPI_Info_create(&info); @@ -103,7 +103,7 @@ test_fapl_mpio_dup(void) MPI_Comm_size(comm_tmp,&mpi_size_tmp); MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); if (VERBOSE_MED) - printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", + HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size"); VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank"); @@ -151,7 +151,7 @@ test_fapl_mpio_dup(void) MPI_Comm_size(comm_tmp,&mpi_size_tmp); MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); if (VERBOSE_MED) - printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", + HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size"); VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank"); @@ -171,7 +171,7 @@ test_fapl_mpio_dup(void) MPI_Comm_size(comm_tmp,&mpi_size_tmp); MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); if (VERBOSE_MED) - printf("After Property list closed: rank/size of comm are %d/%d\n", + HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); if (MPI_INFO_NULL != info_tmp){ mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c index 719d150..d75e627 100644 --- a/testpar/t_prestart.c +++ b/testpar/t_prestart.c @@ -107,7 +107,7 @@ main (int argc, char **argv) for (i=0; i < block[0]; i++){ for (j=0; j < block[1]; j++){ if(*dataptr != mpi_rank+1) { - printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", (unsigned long)i, (unsigned long)j, (unsigned long)(i+start[0]), (unsigned long)(j+start[1]), mpi_rank+1, *(dataptr)); diff --git a/testpar/t_prop.c b/testpar/t_prop.c index 2eb3914..fd89c6a 100644 --- a/testpar/t_prop.c +++ b/testpar/t_prop.c @@ -141,7 +141,7 @@ test_plist_ed(void) herr_t ret; /* Generic return value */ if(VERBOSE_MED) - printf("Encode/Decode DCPLs\n"); + HDprintf("Encode/Decode DCPLs\n"); /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index b65e219..abbfbb3 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -4656,10 +4656,10 @@ void pause_proc(void) if (MAINPROCESS) while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){ if (!loops++){ - printf("Proc %d (%*s, %d): to debug, attach %d\n", + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, pid); } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); fflush(stdout); HDsleep(time_int); } @@ -4683,18 +4683,18 @@ int MPI_Init(int *argc, char ***argv) static void usage(void) { - printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " + HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); - printf("\t-m<n_datasets>" + HDprintf("\t-m<n_datasets>" "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n<n_groups>" + HDprintf("\t-n<n_groups>" "\tset number of groups for the multiple group test\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d <factor0> <factor1>\tdataset dimensions factors. 
Defaults (%d,%d)\n", + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, COL_FACTOR); - printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); } @@ -4772,7 +4772,7 @@ parse_options(int argc, char **argv) break; case 'h': /* print help message--return with nerrors set */ return(1); - default: printf("Illegal option(%s)\n", *argv); + default: HDprintf("Illegal option(%s)\n", *argv); nerrors++; return(1); } @@ -4781,12 +4781,12 @@ parse_options(int argc, char **argv) /* check validity of dimension and chunk sizes */ if (dim0 <= 0 || dim1 <= 0){ - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); nerrors++; return(1); } if (chunkdim0 <= 0 || chunkdim1 <= 0){ - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); nerrors++; return(1); } @@ -4794,7 +4794,7 @@ parse_options(int argc, char **argv) /* Make sure datasets can be divided into equal portions by the processes */ if ((dim0 % mpi_size) || (dim1 % mpi_size)){ if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); nerrors++; return(1); @@ -4809,13 +4809,13 @@ parse_options(int argc, char **argv) for (i=0; i < n; i++) if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) == NULL){ - printf("h5_fixname failed\n"); + HDprintf("h5_fixname failed\n"); nerrors++; return(1); } - printf("Test filenames are:\n"); + HDprintf("Test filenames are:\n"); for (i=0; i < n; i++) - printf(" %s\n", filenames[i]); + HDprintf(" %s\n", filenames[i]); } return(0); @@ -4952,10 +4952,10 @@ int main(int argc, char **argv) dim1 = COL_FACTOR*mpi_size; if (MAINPROCESS){ - printf("===================================\n"); - printf("Shape Same Tests Start\n"); - printf(" express_test = %d.\n", GetTestExpress()); - printf("===================================\n"); + HDprintf("===================================\n"); + HDprintf("Shape Same Tests Start\n"); + HDprintf(" express_test = %d.\n", GetTestExpress()); + HDprintf("===================================\n"); } /* Attempt to turn off atexit post processing so that in case errors @@ -4964,7 +4964,7 @@ int main(int argc, char **argv) * calls. By then, MPI calls may not work. */ if (H5dont_atexit() < 0){ - printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); + HDprintf("%d: Failed to turn off atexit processing. 
Continue.\n", mpi_rank); }; H5open(); h5_show_hostname(); @@ -5003,7 +5003,7 @@ int main(int argc, char **argv) TestParseCmdLine(argc, argv); if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){ - printf("===================================\n" + HDprintf("===================================\n" " Using Independent I/O with file set view to replace collective I/O \n" "===================================\n"); } @@ -5034,12 +5034,12 @@ int main(int argc, char **argv) } if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); + HDprintf("===================================\n"); if (nerrors) - printf("***Shape Same tests detected %d errors***\n", nerrors); + HDprintf("***Shape Same tests detected %d errors***\n", nerrors); else - printf("Shape Same tests finished with no errors\n"); - printf("===================================\n"); + HDprintf("Shape Same tests finished with no errors\n"); + HDprintf("===================================\n"); } /* close HDF5 library */ diff --git a/testpar/testpar.h b/testpar/testpar.h index 84c073f..4fbe8d8 100644 --- a/testpar/testpar.h +++ b/testpar/testpar.h @@ -32,7 +32,7 @@ */ #define MESG(mesg) \ if (VERBOSE_MED && *mesg != '\0') \ - printf("%s\n", mesg) + HDprintf("%s\n", mesg) /* * VRFY: Verify if the condition val is true. @@ -48,14 +48,14 @@ if (val) { \ MESG(mesg); \ } else { \ - printf("Proc %d: ", mpi_rank); \ - printf("*** Parallel ERROR ***\n"); \ - printf(" VRFY (%s) failed at line %4d in %s\n", \ + HDprintf("Proc %d: ", mpi_rank); \ + HDprintf("*** Parallel ERROR ***\n"); \ + HDprintf(" VRFY (%s) failed at line %4d in %s\n", \ mesg, (int)__LINE__, __FILE__); \ ++nerrors; \ fflush(stdout); \ if (!VERBOSE_MED) { \ - printf("aborting MPI processes\n"); \ + HDprintf("aborting MPI processes\n"); \ MPI_Abort(MPI_COMM_WORLD, 1); \ } \ } \ @@ -70,9 +70,9 @@ if (val) { \ MESG(mesg); \ } else { \ - printf("Proc %d: ", mpi_rank); \ - printf("*** PHDF5 REMARK (not an error) ***\n"); \ - printf(" Condition (%s) failed at line %4d in %s\n", \ + HDprintf("Proc %d: ", mpi_rank); \ + HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \ + HDprintf(" Condition (%s) failed at line %4d in %s\n", \ mesg, (int)__LINE__, __FILE__); \ fflush(stdout); \ } \ @@ -80,10 +80,10 @@ #define MPI_BANNER(mesg) do { \ if (VERBOSE_MED || MAINPROCESS){ \ - printf("--------------------------------\n"); \ - printf("Proc %d: ", mpi_rank); \ - printf("*** %s\n", mesg); \ - printf("--------------------------------\n"); \ + HDprintf("--------------------------------\n"); \ + HDprintf("Proc %d: ", mpi_rank); \ + HDprintf("*** %s\n", mesg); \ + HDprintf("--------------------------------\n"); \ } \ } while(0) diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 9fece12..cf611b7 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -19,7 +19,7 @@ #include "testpar.h" enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, - API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE, + API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE, API_MULTI_COLL,API_MULTI_IND}; #ifndef FALSE @@ -32,20 +32,20 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, /* Constants definitions */ -#define DIM0 600 /* Default dataset sizes. 
*/ -#define DIM1 1200 /* Values are from a monitor pixel sizes */ -#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ -#define COL_FACTOR 16 /* Nominal column factor for dataset size */ -#define RANK 2 -#define DATASETNAME1 "Data1" -#define DATASETNAME2 "Data2" -#define DATASETNAME3 "Data3" -#define DATASETNAME4 "Data4" -#define DATASETNAME5 "Data5" -#define DATASETNAME6 "Data6" -#define DATASETNAME7 "Data7" -#define DATASETNAME8 "Data8" -#define DATASETNAME9 "Data9" +#define DIM0 600 /* Default dataset sizes. */ +#define DIM1 1200 /* Values are from a monitor pixel sizes */ +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ +#define RANK 2 +#define DATASETNAME1 "Data1" +#define DATASETNAME2 "Data2" +#define DATASETNAME3 "Data3" +#define DATASETNAME4 "Data4" +#define DATASETNAME5 "Data5" +#define DATASETNAME6 "Data6" +#define DATASETNAME7 "Data7" +#define DATASETNAME8 "Data8" +#define DATASETNAME9 "Data9" /* point selection order */ #define IN_ORDER 1 @@ -179,14 +179,14 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, /* Definitions of the selection mode for the no_collective_cause_tests function. */ #define TEST_COLLECTIVE 0x001 -#define TEST_SET_INDEPENDENT 0x002 +#define TEST_SET_INDEPENDENT 0x002 #define TEST_DATATYPE_CONVERSION 0x004 #define TEST_DATA_TRANSFORMS 0x008 #define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010 #define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020 #define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040 #define TEST_FILTERS 0x080 -/* TEST_FILTERS will take place of this after supporting mpio + filter for +/* TEST_FILTERS will take place of this after supporting mpio + filter for * H5Dcreate and H5Dwrite */ #define TEST_FILTERS_READ 0x100 @@ -209,8 +209,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, /* type definitions */ typedef struct H5Ptest_param_t /* holds extra test parameters */ { - char *name; - int count; + char *name; + int count; } H5Ptest_param_t; /* Dataset data type. Int's can be easily octo dumped. */ @@ -218,19 +218,19 @@ typedef int DATATYPE; /* Shape Same Tests Definitions */ typedef enum { - IND_CONTIG, /* Independent IO on contigous datasets */ - COL_CONTIG, /* Collective IO on contigous datasets */ - IND_CHUNKED, /* Independent IO on chunked datasets */ - COL_CHUNKED /* Collective IO on chunked datasets */ + IND_CONTIG, /* Independent IO on contigous datasets */ + COL_CONTIG, /* Collective IO on contigous datasets */ + IND_CHUNKED, /* Independent IO on chunked datasets */ + COL_CHUNKED /* Collective IO on chunked datasets */ } ShapeSameTestMethods; /* Shared global variables */ -extern int dim0, dim1; /*Dataset dimensions */ -extern int chunkdim0, chunkdim1; /*Chunk dimensions */ -extern int nerrors; /*errors count */ -extern H5E_auto2_t old_func; /* previous error handler */ -extern void *old_client_data; /*previous error handler arg.*/ -extern int facc_type; /*Test file access type */ +extern int dim0, dim1; /*Dataset dimensions */ +extern int chunkdim0, chunkdim1; /*Chunk dimensions */ +extern int nerrors; /*errors count */ +extern H5E_auto2_t old_func; /* previous error handler */ +extern void *old_client_data; /*previous error handler arg.*/ +extern int facc_type; /*Test file access type */ extern int dxfer_coll_type; /* Test program prototypes */ |
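The common thread in the hunks above is mechanical: every bare printf call in the parallel test sources is switched to the HDprintf wrapper (alongside whitespace and alignment cleanup), matching the HD-prefixed convention already used for calls such as HDmalloc and HDgethostname. As a minimal, self-contained sketch of that wrapper idea, assuming a plain pass-through macro (the real definition typically lives in H5private.h and can differ between releases), the pattern is roughly:

    /* Hypothetical stand-in for the HD-prefixed output wrapper: funnel all
     * test output through one macro so a platform or build that needs a
     * different printf can override it in a single place. */
    #include <stdio.h>

    #ifndef HDprintf
    #define HDprintf(...) printf(__VA_ARGS__)
    #endif

    int main(void)
    {
        int mpi_rank = 0;   /* placeholder; the real tests get this from MPI_Comm_rank() */
        HDprintf("Proc %d: output routed through the HDprintf wrapper\n", mpi_rank);
        return 0;
    }

Because the patch only renames the call sites and reflows whitespace, the behavior of the tests is unchanged; the gain is that standard-library usage in the test suite goes through the same overridable HD* layer as the rest of the library.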