author    | Quincey Koziol <koziol@hdfgroup.org> | 2008-09-16 15:52:51 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2008-09-16 15:52:51 (GMT)
commit    | bdd7d59902483885dd8b883f3b2393e77383e5e8 (patch)
tree      | aaf20ab132d057b95b3c016d50fc22b77719084b /testpar
parent    | 8bc0d5ed9019a681e1ea20c24264415d01c1cf2a (diff)
[svn-r15628] Description:
Remove trailing whitespace from C/C++ source files, with the following
script:
    foreach f (*.[ch] *.cpp)
        sed 's/[[:blank:]]*$//' $f > sed.out && mv sed.out $f
    end
Tested on:
Mac OS X/32 10.5.5 (amazon)
No need for h5committest, just whitespace changes...
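The script in the commit message is csh. For readers without csh, a minimal POSIX sh equivalent is sketched below; it is not part of the original commit, assumes it is run in each source directory, and keeps the same temp-file round trip rather than relying on a non-portable `sed -i`:

    # Strip trailing blanks/tabs from C and C++ sources in the current directory
    # (mirrors the original csh script; sed.out is the same scratch file name).
    for f in *.[ch] *.cpp; do
        [ -f "$f" ] || continue    # skip the literal glob when nothing matches
        sed 's/[[:blank:]]*$//' "$f" > sed.out && mv sed.out "$f"
    done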
Diffstat (limited to 'testpar')
-rw-r--r-- | testpar/t_cache.c       |  74
-rw-r--r-- | testpar/t_coll_chunk.c  | 104
-rw-r--r-- | testpar/t_dset.c        |   2
-rw-r--r-- | testpar/t_filter_read.c |  72
-rw-r--r-- | testpar/t_mdset.c       |   2
-rw-r--r-- | testpar/t_mpi.c         |   8
-rw-r--r-- | testpar/t_pflush1.c     |  22
-rw-r--r-- | testpar/t_pflush2.c     |  14
-rw-r--r-- | testpar/t_span_tree.c   |  54
-rw-r--r-- | testpar/testphdf5.c     |   6
10 files changed, 179 insertions, 179 deletions
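Because the commit message describes the change as whitespace-only, one quick sanity check (an editorial sketch, not part of the original page; it assumes a git clone containing the commit and parent hashes shown above) is a whitespace-insensitive diff, which should print nothing:

    # -w ignores all whitespace differences; empty output confirms the commit
    # changes nothing but whitespace under testpar/.
    git diff -w 8bc0d5ed9019a681e1ea20c24264415d01c1cf2a \
                bdd7d59902483885dd8b883f3b2393e77383e5e8 -- testpar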
diff --git a/testpar/t_cache.c b/testpar/t_cache.c index c5f8aae..0a7cb72 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -98,8 +98,8 @@ long local_pins = 0; * value must be positive, and may not be larger than len. * * The field exists to allow us change the sizes of entries - * in the cache without upsetting the server. This value - * is only used locally, and is never sent to the server. + * in the cache without upsetting the server. This value + * is only used locally, and is never sent to the server. * * ver: Version number of the entry. This number is initialize * to zero, and incremented each time the entry is modified. @@ -363,7 +363,7 @@ void pin_protected_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx, hbool_t global); void rename_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t old_idx, int32_t new_idx); -void resize_entry(H5C_t * cache_ptr, H5F_t * file_ptr, +void resize_entry(H5C_t * cache_ptr, H5F_t * file_ptr, int32_t idx, size_t new_size); hbool_t setup_cache_for_test(hid_t * fid_ptr, H5F_t ** file_ptr_ptr, H5C_t ** cache_ptr_ptr); @@ -2092,8 +2092,8 @@ load_datum(H5F_t UNUSED *f, * JRM -- 7/11/06 * Modified function to return the local_len field instead * of the len field. These two fields usually contain the - * same value, but if the size of an entry is changed, we - * store the altered size in local_len without changing + * same value, but if the size of an entry is changed, we + * store the altered size in local_len without changing * len. Note that local_len must be positive, and may * not exceed len. * @@ -2179,7 +2179,7 @@ expunge_entry(H5C_t * cache_ptr, if ( nerrors == 0 ) { - result = H5AC_expunge_entry(file_ptr, -1, &(types[0]), + result = H5AC_expunge_entry(file_ptr, -1, &(types[0]), entry_ptr->header.addr); if ( result < 0 ) { @@ -2683,7 +2683,7 @@ lock_and_unlock_random_entry(H5C_t * cache_ptr, * Modifications: * * JRM -- 7/11/06 - * Modified asserts to handle the new local_len field in + * Modified asserts to handle the new local_len field in * datum. * *****************************************************************************/ @@ -2989,7 +2989,7 @@ pin_protected_entry(H5C_t * cache_ptr, nerrors++; if ( verbose ) { - HDfprintf(stdout, + HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", world_mpi_rank, fcn_name); } @@ -3004,7 +3004,7 @@ pin_protected_entry(H5C_t * cache_ptr, } else { entry_ptr->local_pinned = TRUE; - + local_pins++; } @@ -3034,7 +3034,7 @@ pin_protected_entry(H5C_t * cache_ptr, * * Modifications: * - * 7/11/06 -- JRM + * 7/11/06 -- JRM * Added support for the phony_len field in datum. * *****************************************************************************/ @@ -3168,14 +3168,14 @@ resize_entry(H5C_t * cache_ptr, HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); HDassert( !(entry_ptr->header.is_protected) ); HDassert( !(entry_ptr->locked) ); - HDassert( ( entry_ptr->global_pinned ) && + HDassert( ( entry_ptr->global_pinned ) && ( ! 
entry_ptr->local_pinned ) ); HDassert( ( entry_ptr->header.size == entry_ptr->len ) || ( entry_ptr->header.size == entry_ptr->local_len ) ); HDassert( new_size > 0 ); HDassert( new_size <= entry_ptr->len ); - result = H5AC_resize_pinned_entry(file_ptr, (void *)entry_ptr, + result = H5AC_resize_pinned_entry(file_ptr, (void *)entry_ptr, new_size); if ( result < 0 ) { @@ -3738,7 +3738,7 @@ unpin_entry(H5C_t * cache_ptr, if ( dirty ) { - mark_pinned_entry_dirty(cache_ptr, file_ptr, idx, FALSE, + mark_pinned_entry_dirty(cache_ptr, file_ptr, idx, FALSE, (size_t)0); } @@ -4811,7 +4811,7 @@ smoke_check_4(void) /* Insert some entries pinned, and then unpin them * immediately. We have tested pinned entries elsewhere, - * so it should be sufficient to verify that the + * so it should be sufficient to verify that the * entries are in fact pinned (which unpin_entry() should do). */ insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG); @@ -5033,7 +5033,7 @@ smoke_check_4(void) * Modifications: * * JRM -- 7/12/06 - * Added test code for H5AC_expunge_entry() and + * Added test code for H5AC_expunge_entry() and * H5AC_resize_pinned_entry(). * *****************************************************************************/ @@ -5235,7 +5235,7 @@ smoke_check_5(void) * H5AC level, so all calls have to go through H5AC. Thus it * is more convenient to test trace file capabilities in the * parallel cache test which works at the H5AC level, instead - * of in the serial test code which does everything at the + * of in the serial test code which does everything at the * H5C level. * * The function must test trace file output in the following @@ -5265,9 +5265,9 @@ smoke_check_5(void) * Modifications: * * JRM -- 7/11/06 - * Updated fro H5AC_expunge_entry() and + * Updated fro H5AC_expunge_entry() and * H5AC_resize_pinned_entry(). 
- * + * *****************************************************************************/ hbool_t @@ -5363,12 +5363,12 @@ trace_file_check(void) config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) + if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED ) { nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", + HDfprintf(stdout, + "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, fcn_name); } else { @@ -5376,11 +5376,11 @@ trace_file_check(void) config.open_trace_file = TRUE; strcpy(config.trace_file_name, "t_cache_trace.txt"); - if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) + if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED ) { nerrors++; - HDfprintf(stdout, + HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, fcn_name); } @@ -5417,7 +5417,7 @@ trace_file_check(void) if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { nerrors++; if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, fcn_name); } } @@ -5426,12 +5426,12 @@ trace_file_check(void) config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) + if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED ) { nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", + HDfprintf(stdout, + "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, fcn_name); } else { @@ -5440,11 +5440,11 @@ trace_file_check(void) config.close_trace_file = TRUE; config.trace_file_name[0] = '\0'; - if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) + if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED ) { nerrors++; - HDfprintf(stdout, + HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, fcn_name); } @@ -5463,7 +5463,7 @@ trace_file_check(void) } } - /* verify that all instance of datum are back where the started + /* verify that all instance of datum are back where the started * and are clean. 
*/ @@ -5491,22 +5491,22 @@ trace_file_check(void) nerrors++; if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, fcn_name); } } } if ( nerrors == 0 ) { - - sprintf(trace_file_name, "t_cache_trace.txt.%d", + + sprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank); if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) { nerrors++; if ( verbose ) { - HDfprintf(stdout, "%d:%s: HDfopen failed.\n", + HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, fcn_name); } } @@ -5542,7 +5542,7 @@ trace_file_check(void) nerrors++; if ( verbose ) { - HDfprintf(stdout, + HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, fcn_name, i); HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", @@ -5783,10 +5783,10 @@ main(int argc, char **argv) #endif #if 1 smoke_check_5(); -#endif +#endif #if 1 trace_file_check(); -#endif +#endif finish: /* make sure all processes are finished before final report, cleanup diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index 8da2e12..e9f8c11 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -36,7 +36,7 @@ static void coll_chunktest(const char* filename,int chunk_factor,int select_fact /*------------------------------------------------------------------------- * Function: coll_chunk1 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with a single chunk * * Return: Success: 0 @@ -53,7 +53,7 @@ static void coll_chunktest(const char* filename,int chunk_factor,int select_fact /* ------------------------------------------------------------------------ * Descriptions for the selection: One big singluar selection inside one chunk - * Two dimensions, + * Two dimensions, * * dim1 = SPACE_DIM1(5760)*mpi_size * dim2 = SPACE_DIM2(3) @@ -83,7 +83,7 @@ coll_chunk1(void) /*------------------------------------------------------------------------- * Function: coll_chunk2 * - * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT + * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT selection with a single chunk * * Return: Success: 0 @@ -100,7 +100,7 @@ coll_chunk1(void) /* ------------------------------------------------------------------------ * Descriptions for the selection: many disjoint selections inside one chunk - * Two dimensions, + * Two dimensions, * * dim1 = SPACE_DIM1*mpi_size(5760) * dim2 = SPACE_DIM2(3) @@ -112,7 +112,7 @@ coll_chunk1(void) * count1 = SPACE_DIM2/stride(3/3 = 1) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ void @@ -130,7 +130,7 @@ coll_chunk2(void) /*------------------------------------------------------------------------- * Function: coll_chunk3 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -159,7 +159,7 @@ coll_chunk2(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -181,7 +181,7 @@ coll_chunk3(void) /*------------------------------------------------------------------------- * Function: coll_chunk4 * - * Purpose: Wrapper to test the collective chunk IO for 
regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -210,7 +210,7 @@ coll_chunk3(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -231,7 +231,7 @@ coll_chunk4(void) /*------------------------------------------------------------------------- * Function: coll_chunk4 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -260,7 +260,7 @@ coll_chunk4(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -281,7 +281,7 @@ coll_chunk5(void) /*------------------------------------------------------------------------- * Function: coll_chunk6 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -310,7 +310,7 @@ coll_chunk5(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -331,7 +331,7 @@ coll_chunk6(void) /*------------------------------------------------------------------------- * Function: coll_chunk7 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -360,7 +360,7 @@ coll_chunk6(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -381,7 +381,7 @@ coll_chunk7(void) /*------------------------------------------------------------------------- * Function: coll_chunk8 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -410,7 +410,7 @@ coll_chunk7(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -431,7 +431,7 @@ coll_chunk8(void) /*------------------------------------------------------------------------- * Function: coll_chunk9 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -460,7 +460,7 @@ coll_chunk8(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 * start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -481,7 +481,7 @@ coll_chunk9(void) /*------------------------------------------------------------------------- * Function: coll_chunk10 * - * Purpose: Wrapper to test the collective chunk IO for regular JOINT + * Purpose: Wrapper to test the collective chunk IO for regular JOINT selection with at least number of 2*mpi_size chunks * * Return: Success: 0 @@ -510,7 +510,7 @@ coll_chunk9(void) * count1 = SPACE_DIM2(3) * start0 = mpi_rank*SPACE_DIM1 
* start1 = 0 - * + * * ------------------------------------------------------------------------ */ @@ -535,7 +535,7 @@ coll_chunk10(void) * Purpose: The real testing routine for regular selection of collective chunking storage testing both write and read, - If anything fails, it may be read or write. There is no + If anything fails, it may be read or write. There is no separation test between read and write. * * Return: Success: 0 @@ -614,7 +614,7 @@ coll_chunktest(const char* filename, chunk_dims[0] = dims[0]/chunk_factor; /* to decrease the testing time, maintain bigger chunk size */ - + (chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2); status = H5Pset_chunk(crp_plist, 2, chunk_dims); VRFY((status >= 0),"chunk creation property list succeeded"); @@ -676,8 +676,8 @@ coll_chunktest(const char* filename, break; default: ; - } - + } + #ifdef H5_HAVE_INSTRUMENTED_LIBRARY if(facc_type == FACC_MPIO) { switch(api_option) { @@ -695,14 +695,14 @@ coll_chunktest(const char* filename, break; case API_MULTI_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; + prop_value = H5D_XFER_COLL_CHUNK_DEF; status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); VRFY((status >= 0),"testing property list inserted succeeded"); break; case API_LINK_TRUE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; + prop_value = H5D_XFER_COLL_CHUNK_DEF; status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); VRFY((status >= 0),"testing property list inserted succeeded"); @@ -715,21 +715,21 @@ coll_chunktest(const char* filename, break; case API_LINK_FALSE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; + prop_value = H5D_XFER_COLL_CHUNK_DEF; status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); VRFY((status >= 0),"testing property list inserted succeeded"); break; case API_MULTI_COLL: - prop_value = H5D_XFER_COLL_CHUNK_DEF; + prop_value = H5D_XFER_COLL_CHUNK_DEF; status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); VRFY((status >= 0),"testing property list inserted succeeded"); break; case API_MULTI_IND: - prop_value = H5D_XFER_COLL_CHUNK_DEF; + prop_value = H5D_XFER_COLL_CHUNK_DEF; status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); VRFY((status >= 0),"testing property list inserted succeeded"); @@ -737,10 +737,10 @@ coll_chunktest(const char* filename, default: ; - } + } } #endif - + /* write data collectively */ status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace, xfer_plist, data_array1); @@ -789,7 +789,7 @@ coll_chunktest(const char* filename, break; default: ; - } + } } #endif @@ -807,7 +807,7 @@ coll_chunktest(const char* filename, if (data_array1) HDfree(data_array1); - + /* Use collective read to verify the correctness of collective write. 
*/ /* allocate memory for data buffer */ @@ -884,12 +884,12 @@ coll_chunktest(const char* filename, /* Set up the selection */ static void -ccslab_set(int mpi_rank, - int mpi_size, - hsize_t start[], +ccslab_set(int mpi_rank, + int mpi_size, + hsize_t start[], hsize_t count[], - hsize_t stride[], - hsize_t block[], + hsize_t stride[], + hsize_t block[], int mode) { @@ -938,7 +938,7 @@ ccslab_set(int mpi_rank, case BYROW_SELECTUNBALANCE: /* The first one-third of the number of processes only select top half of the domain, The rest will select the bottom - half of the domain. */ + half of the domain. */ block[0] = 1; count[0] = 2; @@ -948,9 +948,9 @@ ccslab_set(int mpi_rank, start[1] = 0; stride[1] = 1; if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank; - else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3); + else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3); break; - + case BYROW_SELECTINCHUNK: /* Each process will only select one chunk */ @@ -992,10 +992,10 @@ ccslab_set(int mpi_rank, * Assume dimension rank is 2. */ static void -ccdataset_fill(hsize_t start[], +ccdataset_fill(hsize_t start[], hsize_t stride[], - hsize_t count[], - hsize_t block[], + hsize_t count[], + hsize_t block[], DATATYPE * dataset) { DATATYPE *dataptr = dataset; @@ -1027,8 +1027,8 @@ ccdataset_fill(hsize_t start[], * Print the first block of the content of the dataset. */ static void -ccdataset_print(hsize_t start[], - hsize_t block[], +ccdataset_print(hsize_t start[], + hsize_t block[], DATATYPE * dataset) { @@ -1058,11 +1058,11 @@ ccdataset_print(hsize_t start[], * Print the content of the dataset. */ static int -ccdataset_vrfy(hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - DATATYPE *dataset, +ccdataset_vrfy(hsize_t start[], + hsize_t count[], + hsize_t stride[], + hsize_t block[], + DATATYPE *dataset, DATATYPE *original) { hsize_t i, j,k1,k2; diff --git a/testpar/t_dset.c b/testpar/t_dset.c index ce4742b..21e25b6 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -827,7 +827,7 @@ dataset_writeAll(void) VRFY((ret>= 0),"set independent IO collectively succeeded"); } - + /* write data collectively */ MESG("writeAll with scalar dataspace"); diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index e2d39c3..aa642b4 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -15,7 +15,7 @@ /* * This verifies the correctness of parallel reading of a dataset that has been - * written serially using filters. + * written serially using filters. * * Created by: Christian Chilan * Date: 2007/05/15 @@ -31,11 +31,11 @@ static int mpi_size, mpi_rank; /* Chunk sizes */ -#define CHUNK_DIM1 7 +#define CHUNK_DIM1 7 #define CHUNK_DIM2 27 /* Sizes of the vertical hyperslabs. Total dataset size is - {HS_DIM1, HS_DIM2 * mpi_size } */ + {HS_DIM1, HS_DIM2 * mpi_size } */ #define HS_DIM1 200 #define HS_DIM2 100 @@ -44,16 +44,16 @@ static int mpi_size, mpi_rank; * Function: filter_read_internal * * Purpose: Tests parallel reading of a 2D dataset written serially using - * filters. During the parallel reading phase, the dataset is - * divided evenly among the processors in vertical hyperslabs. + * filters. During the parallel reading phase, the dataset is + * divided evenly among the processors in vertical hyperslabs. 
* - * Programmer: Christian Chilan - * Tuesday, May 15, 2007 + * Programmer: Christian Chilan + * Tuesday, May 15, 2007 * *------------------------------------------------------------------------- */ -static void -filter_read_internal(const char *filename, hid_t dcpl, +static void +filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) { hid_t file, dataset; /* HDF5 IDs */ @@ -65,14 +65,14 @@ filter_read_internal(const char *filename, hid_t dcpl, size_t i, j; /* Local index variables */ char name[32] = "dataset"; herr_t hrc; /* Error status */ - int *points = NULL; /* Writing buffer for entire dataset */ + int *points = NULL; /* Writing buffer for entire dataset */ int *check = NULL; /* Reading buffer for selected hyperslab */ /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - /* set sizes for dataset and hyperslabs */ + /* set sizes for dataset and hyperslabs */ hs_size[0] = size[0] = HS_DIM1; hs_size[1] = HS_DIM2; @@ -80,7 +80,7 @@ filter_read_internal(const char *filename, hid_t dcpl, hs_offset[0] = 0; hs_offset[1] = hs_size[1] * mpi_rank; - + /* Create the data space */ sid = H5Screate_simple(2, size, NULL); VRFY(sid>=0, "H5Screate_simple"); @@ -89,7 +89,7 @@ filter_read_internal(const char *filename, hid_t dcpl, points = (int *)HDmalloc(size[0] * size[1] * sizeof(int)); VRFY(points!=NULL, "HDmalloc"); - check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int)); + check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int)); VRFY(check!=NULL, "HDmalloc"); /* Initialize writing buffer with random data */ @@ -124,7 +124,7 @@ filter_read_internal(const char *filename, hid_t dcpl, MPI_Barrier(MPI_COMM_WORLD); - /* Parallel read phase */ + /* Parallel read phase */ /* Set up MPIO file access property lists */ access_plist = H5Pcreate(H5P_FILE_ACCESS); VRFY((access_plist >= 0), "H5Pcreate"); @@ -144,7 +144,7 @@ filter_read_internal(const char *filename, hid_t dcpl, memspace = H5Screate_simple(2, hs_size, NULL); VRFY(memspace>=0, "H5Screate_simple"); - + hrc = H5Dread (dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check); VRFY(hrc>=0, "H5Dread"); @@ -179,13 +179,13 @@ filter_read_internal(const char *filename, hid_t dcpl, hrc = H5Sclose (memspace); VRFY(hrc>=0, "H5Sclose"); - + hrc = H5Pclose (access_plist); VRFY(hrc>=0, "H5Pclose"); - + hrc = H5Fclose (file); VRFY(hrc>=0, "H5Fclose"); - + free(points); free(check); @@ -194,10 +194,10 @@ filter_read_internal(const char *filename, hid_t dcpl, /*------------------------------------------------------------------------- - * Function: test_filter_read + * Function: test_filter_read * * Purpose: Tests parallel reading of datasets written serially using - * several (combinations of) filters. + * several (combinations of) filters. * * Programmer: Christian Chilan * Tuesday, May 15, 2007 @@ -247,16 +247,16 @@ test_filter_read(void) *---------------------------------------------------------- */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0,"H5Pcreate"); + VRFY(dc>=0,"H5Pcreate"); hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0,"H5Pset_chunk"); + VRFY(hrc>=0,"H5Pset_chunk"); filter_read_internal(filename,dc,&null_size); /* Clean up objects used for this test */ hrc = H5Pclose (dc); - VRFY(hrc>=0,"H5Pclose"); + VRFY(hrc>=0,"H5Pclose"); /*---------------------------------------------------------- * STEP 1: Test Fletcher32 Checksum by itself. 
@@ -265,13 +265,13 @@ test_filter_read(void) #ifdef H5_HAVE_FILTER_FLETCHER32 dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0,"H5Pset_filter"); - + VRFY(dc>=0,"H5Pset_filter"); + hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0,"H5Pset_filter"); - + VRFY(hrc>=0,"H5Pset_filter"); + hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL); - VRFY(hrc>=0,"H5Pset_filter"); + VRFY(hrc>=0,"H5Pset_filter"); filter_read_internal(filename,dc,&fletcher32_size); VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect."); @@ -296,7 +296,7 @@ test_filter_read(void) hrc = H5Pset_deflate (dc, 6); VRFY(hrc>=0, "H5Pset_deflate"); - + filter_read_internal(filename,dc,&deflate_size); /* Clean up objects used for this test */ @@ -344,7 +344,7 @@ test_filter_read(void) filter_read_internal(filename,dc,&shuffle_size); VRFY(shuffle_size==null_size,"Shuffled size not the same as uncompressed size."); - + /* Clean up objects used for this test */ hrc = H5Pclose (dc); VRFY(hrc>=0, "H5Pclose"); @@ -362,13 +362,13 @@ test_filter_read(void) hrc = H5Pset_chunk (dc, 2, chunk_size); VRFY(hrc>=0, "H5Pset_chunk"); - + hrc = H5Pset_fletcher32 (dc); VRFY(hrc>=0, "H5Pset_fletcher32"); - + hrc = H5Pset_shuffle (dc); VRFY(hrc>=0, "H5Pset_shuffle"); - + hrc = H5Pset_deflate (dc, 6); VRFY(hrc>=0, "H5Pset_deflate"); @@ -381,7 +381,7 @@ test_filter_read(void) /* Testing shuffle+deflate+checksum filters (checksum last) */ dc = H5Pcreate(H5P_DATASET_CREATE); VRFY(dc>=0, "H5Pcreate"); - + hrc = H5Pset_chunk (dc, 2, chunk_size); VRFY(hrc>=0, "H5Pset_chunk"); @@ -408,7 +408,7 @@ test_filter_read(void) */ #if defined H5_HAVE_FILTER_SZIP && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32 - /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */ + /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */ dc = H5Pcreate(H5P_DATASET_CREATE); VRFY(dc>=0, "H5Pcreate"); @@ -433,7 +433,7 @@ test_filter_read(void) hrc = H5Pclose (dc); VRFY(hrc>=0, "H5Pclose"); - /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ + /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ /* Make sure encoding is enabled */ if(h5_szip_can_encode() == 1) { dc = H5Pcreate(H5P_DATASET_CREATE); diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c index 2559d3e..da48378 100644 --- a/testpar/t_mdset.c +++ b/testpar/t_mdset.c @@ -368,7 +368,7 @@ void null_dataset(void) VRFY((ret>= 0),"set independent IO collectively succeeded"); } - + dataset = H5Dopen2(iof, dname, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2 succeeded"); diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index f68c92b..14ba6c6 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -379,10 +379,10 @@ test_mpio_gb_file(char *filename) mrc = MPI_Barrier(MPI_COMM_WORLD); VRFY((mrc==MPI_SUCCESS), "Sync before leaving test"); - /* - * Check if MPI_File_get_size works correctly. Some systems (only SGI Altix + /* + * Check if MPI_File_get_size works correctly. Some systems (only SGI Altix * Propack 4 so far) return wrong file size. It can be avoided by reconfiguring - * with "--disable-mpi-size". + * with "--disable-mpi-size". */ #ifdef H5_HAVE_MPI_GET_SIZE printf("Test if MPI_File_get_size works correctly with %s\n", filename); @@ -396,7 +396,7 @@ test_mpio_gb_file(char *filename) mrc=stat(filename, &stat_buf); VRFY((mrc==0), ""); - + /* Hopefully this casting is safe */ if(size != (MPI_Offset)(stat_buf.st_size)) { printf("Warning: MPI_File_get_size doesn't return correct file size. 
To avoid using it in the library, reconfigure and rebuild the library with --disable-mpi-size.\n"); diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c index 8734bfb..29d5c58 100644 --- a/testpar/t_pflush1.c +++ b/testpar/t_pflush1.c @@ -51,12 +51,12 @@ static double the_data[100][100]; *------------------------------------------------------------------------- */ hid_t create_file(char* name, hid_t fapl) -{ +{ hid_t file, dcpl, space, dset, groups, grp, plist; hsize_t ds_size[2] = {100, 100}; hsize_t ch_size[2] = {5, 5}; hsize_t i, j; - + if((file=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) goto error; @@ -70,8 +70,8 @@ hid_t create_file(char* name, hid_t fapl) plist = H5Pcreate(H5P_DATASET_XFER); H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); - - + + /* Write some data */ for(i = 0; i < ds_size[0]; i++) { /* @@ -113,7 +113,7 @@ error: * Modifications: * Leon Arber * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called. - * + * * *------------------------------------------------------------------------- */ @@ -131,15 +131,15 @@ main(int argc, char* argv[]) MPI_Init(&argc, &argv); MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); + MPI_Comm_rank(comm, &mpi_rank); fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, comm, info); - if(mpi_rank == 0) + if(mpi_rank == 0) TESTING("H5Fflush (part1)"); envval = HDgetenv("HDF5_DRIVER"); - if(envval == NULL) + if(envval == NULL) envval = "nomatch"; if(HDstrcmp(envval, "split")) { /* Create the file */ @@ -153,7 +153,7 @@ main(int argc, char* argv[]) file2 = create_file(name, fapl); - if(mpi_rank == 0) + if(mpi_rank == 0) PASSED(); fflush(stdout); fflush(stderr); @@ -163,7 +163,7 @@ main(int argc, char* argv[]) SKIPPED(); puts(" Test not compatible with current Virtual File Driver"); } - + /* * Some systems like Linux with mpich, if you just _exit without MPI_Finalize * called, it would terminate but left the launching process waiting forever. @@ -174,7 +174,7 @@ main(int argc, char* argv[]) * Note that MPIO VFD returns the address of the file-handle in the VFD struct * because MPI_File_close wants to modify the file-handle variable. */ - + /* close file1 */ if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0){ printf("H5Fget_vfd_handle for file1 failed\n"); diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c index 4ca6a96..82c112f 100644 --- a/testpar/t_pflush2.c +++ b/testpar/t_pflush2.c @@ -144,7 +144,7 @@ main(int argc, char* argv[]) MPI_Init(&argc, &argv); MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); + MPI_Comm_rank(comm, &mpi_rank); fapl1 = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl1, comm, info); @@ -152,10 +152,10 @@ main(int argc, char* argv[]) fapl2 = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl2, comm, info); - + if(mpi_rank == 0) TESTING("H5Fflush (part2 with flush)"); - + /* Don't run this test using the core or split file drivers */ envval = HDgetenv("HDF5_DRIVER"); if (envval == NULL) @@ -168,14 +168,14 @@ main(int argc, char* argv[]) H5_FAILED() goto error; } - else if(mpi_rank == 0) + else if(mpi_rank == 0) { PASSED() } - + /* Check the case where the file was not flushed. 
This should give an error * so we turn off the error stack temporarily */ - if(mpi_rank == 0) + if(mpi_rank == 0) TESTING("H5Fflush (part2 without flush)"); H5Eget_auto2(H5E_DEFAULT,&func,NULL); H5Eset_auto2(H5E_DEFAULT, NULL, NULL); @@ -183,7 +183,7 @@ main(int argc, char* argv[]) h5_fixname(FILENAME[1], fapl2, name, sizeof name); if(check_file(name, fapl2)) { - if(mpi_rank == 0) + if(mpi_rank == 0) { PASSED() } diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c index 71d526b..800ad41 100644 --- a/testpar/t_span_tree.c +++ b/testpar/t_span_tree.c @@ -46,7 +46,7 @@ static void coll_read_test(int chunk_factor); /*------------------------------------------------------------------------- * Function: coll_irregular_cont_write * - * Purpose: Wrapper to test the collectively irregular hyperslab write in + * Purpose: Wrapper to test the collectively irregular hyperslab write in contiguous storage * * Return: Success: 0 @@ -73,7 +73,7 @@ coll_irregular_cont_write(void) /*------------------------------------------------------------------------- * Function: coll_irregular_cont_read * - * Purpose: Wrapper to test the collectively irregular hyperslab read in + * Purpose: Wrapper to test the collectively irregular hyperslab read in contiguous storage * * Return: Success: 0 @@ -99,7 +99,7 @@ coll_irregular_cont_read(void) /*------------------------------------------------------------------------- * Function: coll_irregular_simple_chunk_write * - * Purpose: Wrapper to test the collectively irregular hyperslab write in + * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk storage(1 chunk) * * Return: Success: 0 @@ -207,7 +207,7 @@ coll_irregular_complex_chunk_read(void) * Purpose: To test the collectively irregular hyperslab write in chunk storage * Input: number of chunks on each dimension - if number is equal to 0, contiguous storage + if number is equal to 0, contiguous storage * Return: Success: 0 * * Failure: -1 @@ -283,7 +283,7 @@ void coll_write_test(int chunk_factor) mdim[1] = MSPACE_DIM2*mpi_size; fsdim[0] = FSPACE_DIM1; fsdim[1] = FSPACE_DIM2*mpi_size; - + vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size); matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size); matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size); @@ -342,7 +342,7 @@ void coll_write_test(int chunk_factor) /* The First selection for FILE * * block (3,2) - * stride(4,3) + * stride(4,3) * count (1,768/mpi_size) * start (0,1+768*3*mpi_rank/mpi_size) * @@ -361,10 +361,10 @@ void coll_write_test(int chunk_factor) VRFY((ret >= 0),"hyperslab selection succeeded"); /* The Second selection for FILE - * + * * block (3,768) * stride (1,1) - * count (1,1) + * count (1,1) * start (4,768*mpi_rank/mpi_size) * */ @@ -460,11 +460,11 @@ void coll_write_test(int chunk_factor) * Open the file. 
*/ - /*** - - For testing collective hyperslab selection write + /*** + + For testing collective hyperslab selection write In this test, we are using independent read to check - the correctedness of collective write compared with + the correctedness of collective write compared with independent write, In order to throughly test this feature, we choose @@ -502,7 +502,7 @@ void coll_write_test(int chunk_factor) /* The First selection for FILE to read * * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,768/mpi_size) * start (1,2+768*mpi_rank/mpi_size) * @@ -528,7 +528,7 @@ void coll_write_test(int chunk_factor) /* The Second selection for FILE to read * * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,1536/mpi_size) * start (2,4+1536*mpi_rank/mpi_size) * @@ -566,7 +566,7 @@ void coll_write_test(int chunk_factor) * Only the starting point is different. * The first selection * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,768/mpi_size) * start (0,768*mpi_rank/mpi_size) * @@ -591,7 +591,7 @@ void coll_write_test(int chunk_factor) * Only the starting point is different. * The second selection * block (1,1) - * stride(1,1) + * stride(1,1) * count (3,1536/mpi_size) * start (1,2+1536*mpi_rank/mpi_size) * @@ -622,7 +622,7 @@ void coll_write_test(int chunk_factor) H5P_DEFAULT, matrix_out); VRFY((ret >= 0),"H5D independent read succeed"); - + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1); VRFY((ret >= 0),"H5D independent read succeed"); @@ -633,7 +633,7 @@ void coll_write_test(int chunk_factor) if(matrix_out[i]!=matrix_out1[i]) ret = -1; if(ret < 0) break; } - + VRFY((ret >= 0),"H5D irregular collective write succeed"); /* @@ -676,7 +676,7 @@ void coll_write_test(int chunk_factor) * Purpose: To test the collectively irregular hyperslab read in chunk storage * Input: number of chunks on each dimension - if number is equal to 0, contiguous storage + if number is equal to 0, contiguous storage * Return: Success: 0 * * Failure: -1 @@ -685,8 +685,8 @@ void coll_write_test(int chunk_factor) * Dec 2nd, 2004 * * Modifications: Oct 18th, 2005 - * Note: This test must be used with the correpsonding - coll_write_test. + * Note: This test must be used with the correpsonding + coll_write_test. *------------------------------------------------------------------------- */ void coll_read_test(int chunk_factor) @@ -706,7 +706,7 @@ void coll_read_test(int chunk_factor) dataset on the disk */ #endif - hsize_t mdim[2]; + hsize_t mdim[2]; hsize_t start[2]; /* Start of hyperslab */ hsize_t stride[2]; /* Stride of hyperslab */ hsize_t count[2]; /* Block count */ @@ -739,7 +739,7 @@ void coll_read_test(int chunk_factor) /* Initialize the buffer */ - + mdim[0] = MSPACE_DIM1; mdim[1] = MSPACE_DIM2*mpi_size; matrix_out =(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); @@ -772,7 +772,7 @@ void coll_read_test(int chunk_factor) /* The First selection for FILE to read * * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,768/mpi_size) * start (1,2+768*mpi_rank/mpi_size) * @@ -792,7 +792,7 @@ void coll_read_test(int chunk_factor) /* The Second selection for FILE to read * * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,1536/mpi_size) * start (2,4+1536*mpi_rank/mpi_size) * @@ -821,7 +821,7 @@ void coll_read_test(int chunk_factor) * Only the starting point is different. 
* The first selection * block (1,1) - * stride(1.1) + * stride(1.1) * count (3,768/mpi_size) * start (0,768*mpi_rank/mpi_size) * @@ -844,7 +844,7 @@ void coll_read_test(int chunk_factor) * Only the starting point is different. * The second selection * block (1,1) - * stride(1,1) + * stride(1,1) * count (3,1536/mpi_size) * start (1,2+1536*mpi_rank/mpi_size) * diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 2760a8b..114ef49 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -399,7 +399,7 @@ int main(int argc, char **argv) "collective group and dataset write", &collngroups_params); AddTest("ingrpr", independent_group_read, NULL, "independent group and dataset read", &collngroups_params); - AddTest("bigdset", big_dataset, NULL, + AddTest("bigdset", big_dataset, NULL, "big dataset test", PARATESTFILE); AddTest("fill", dataset_fillvalue, NULL, "dataset fill value", PARATESTFILE); @@ -436,8 +436,8 @@ int main(int argc, char **argv) AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10", coll_chunk10,NULL, "multiple chunk collective IO transferring to independent IO",PARATESTFILE); - - + + /* irregular collective IO tests*/ AddTest("ccontw", |